| repo_name (string) | path (string) | copies (string) | size (string) | content (string) | license (string) |
|---|---|---|---|---|---|
adeepv/android-kernel-zte-v9a | sound/isa/es18xx.c | 3974 | 70964 | /*
* Driver for generic ESS AudioDrive ES18xx soundcards
* Copyright (c) by Christian Fischbach <fishbach@pool.informatik.rwth-aachen.de>
* Copyright (c) by Abramo Bagnara <abramo@alsa-project.org>
*
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
*/
/* GENERAL NOTES:
*
* BUGS:
* - There are pops (we can't delay in trigger function, cause midlevel
* often need to trigger down and then up very quickly).
* Any ideas?
* - Support for 16 bit DMA seems to be broken. I've no hardware to tune it.
*/
/*
* ES1868 NOTES:
* - The chip has one half duplex pcm (with very limited full duplex support).
*
* - Duplex stereophonic sound is impossible.
* - Record and playback must share the same frequency rate.
*
* - The driver use dma2 for playback and dma1 for capture.
*/
/*
* ES1869 NOTES:
*
* - there are a first full duplex pcm and a second playback only pcm
* (incompatible with first pcm capture)
*
* - there is support for the capture volume and ESS Spatializer 3D effect.
*
* - contrarily to some pages in DS_1869.PDF the rates can be set
* independently.
*
* - Zoom Video is implemented by sharing the FM DAC, thus the user can
* have either FM playback or Video playback but not both simultaneously.
* The Video Playback Switch mixer control toggles this choice.
*
* BUGS:
*
* - There is a major trouble I noted:
*
* using both channel for playback stereo 16 bit samples at 44100 Hz
* the second pcm (Audio1) DMA slows down irregularly and sound is garbled.
*
* The same happens using Audio1 for captureing.
*
* The Windows driver does not suffer of this (although it use Audio1
* only for captureing). I'm unable to discover why.
*
*/
/*
* ES1879 NOTES:
* - When Zoom Video is enabled (reg 0x71 bit 6 toggled on) the PCM playback
* seems to be effected (speaker_test plays a lower frequency). Can't find
* anything in the datasheet to account for this, so a Video Playback Switch
* control has been included to allow ZV to be enabled only when necessary.
* Then again on at least one test system the 0x71 bit 6 enable bit is not
* needed for ZV, so maybe the datasheet is entirely wrong here.
*/
#include <linux/init.h>
#include <linux/err.h>
#include <linux/isa.h>
#include <linux/pnp.h>
#include <linux/isapnp.h>
#include <linux/moduleparam.h>
#include <linux/delay.h>
#include <asm/io.h>
#include <asm/dma.h>
#include <sound/core.h>
#include <sound/control.h>
#include <sound/pcm.h>
#include <sound/pcm_params.h>
#include <sound/mpu401.h>
#include <sound/opl3.h>
#define SNDRV_LEGACY_FIND_FREE_IRQ
#define SNDRV_LEGACY_FIND_FREE_DMA
#include <sound/initval.h>
#define PFX "es18xx: "
/* Per-card state for one ESS ES18xx chip. */
struct snd_es18xx {
	unsigned long port;		/* port of ESS chip */
	unsigned long ctrl_port;	/* Control port of ESS chip */
	struct resource *res_port;	/* reserved I/O regions */
	struct resource *res_mpu_port;
	struct resource *res_ctrl_port;
	int irq;		/* IRQ number of ESS chip */
	int dma1;		/* DMA1 */
	int dma2;		/* DMA2 */
	unsigned short version;	/* version of ESS chip */
	int caps;		/* Chip capabilities (ES18XX_* flag bits) */
	unsigned short audio2_vol;	/* volume level of audio2 */
	unsigned short active;	/* active channel mask (DAC1/ADC1/DAC2 bits) */
	unsigned int dma1_shift;	/* bytes -> frames shift for Audio1 streams */
	unsigned int dma2_shift;	/* bytes -> frames shift for Audio2 streams */
	struct snd_pcm *pcm;
	struct snd_pcm_substream *playback_a_substream;	/* Audio2 playback (DAC2) */
	struct snd_pcm_substream *capture_a_substream;	/* Audio1 capture (ADC1) */
	struct snd_pcm_substream *playback_b_substream;	/* Audio1 playback (DAC1) */
	struct snd_rawmidi *rmidi;
	/* cached kcontrols so the IRQ handler can send value notifications */
	struct snd_kcontrol *hw_volume;
	struct snd_kcontrol *hw_switch;
	struct snd_kcontrol *master_volume;
	struct snd_kcontrol *master_switch;
	spinlock_t reg_lock;	/* serializes controller register access */
	spinlock_t mixer_lock;	/* serializes mixer register access */
#ifdef CONFIG_PM
	unsigned char pm_reg;	/* saved power-management register contents */
#endif
#ifdef CONFIG_PNP
	struct pnp_dev *dev;
	struct pnp_dev *devc;
#endif
};
#define AUDIO1_IRQ 0x01
#define AUDIO2_IRQ 0x02
#define HWV_IRQ 0x04
#define MPU_IRQ 0x08
#define ES18XX_PCM2 0x0001 /* Has two useable PCM */
#define ES18XX_SPATIALIZER 0x0002 /* Has 3D Spatializer */
#define ES18XX_RECMIX 0x0004 /* Has record mixer */
#define ES18XX_DUPLEX_MONO 0x0008 /* Has mono duplex only */
#define ES18XX_DUPLEX_SAME 0x0010 /* Playback and record must share the same rate */
#define ES18XX_NEW_RATE 0x0020 /* More precise rate setting */
#define ES18XX_AUXB 0x0040 /* AuxB mixer control */
#define ES18XX_HWV 0x0080 /* Has separate hardware volume mixer controls*/
#define ES18XX_MONO 0x0100 /* Mono_in mixer control */
#define ES18XX_I2S 0x0200 /* I2S mixer control */
#define ES18XX_MUTEREC 0x0400 /* Record source can be muted */
#define ES18XX_CONTROL 0x0800 /* Has control ports */
/* Power Management */
#define ES18XX_PM 0x07
#define ES18XX_PM_GPO0 0x01
#define ES18XX_PM_GPO1 0x02
#define ES18XX_PM_PDR 0x04
#define ES18XX_PM_ANA 0x08
#define ES18XX_PM_FM 0x020
#define ES18XX_PM_SUS 0x080
/* Lowlevel */
#define DAC1 0x01
#define ADC1 0x02
#define DAC2 0x04
#define MILLISECOND 10000
/*
 * Send one command/data byte to the DSP write port.
 * Busy-waits until the write-buffer-full bit (0x80) clears.
 * Returns 0 on success, -EINVAL on timeout.
 */
static int snd_es18xx_dsp_command(struct snd_es18xx *chip, unsigned char val)
{
	int timeout;

	for (timeout = MILLISECOND; timeout > 0; timeout--) {
		if (!(inb(chip->port + 0x0C) & 0x80)) {
			outb(val, chip->port + 0x0C);
			return 0;
		}
	}
	snd_printk(KERN_ERR "dsp_command: timeout (0x%x)\n", val);
	return -EINVAL;
}
/*
 * Read one byte from the DSP.  Busy-waits for the data-available bit
 * (0x40), then reads the data port.  Returns the byte, or -ENODEV on
 * timeout.
 */
static int snd_es18xx_dsp_get_byte(struct snd_es18xx *chip)
{
	int timeout;

	for (timeout = MILLISECOND / 10; timeout > 0; timeout--) {
		if (inb(chip->port + 0x0C) & 0x40)
			return inb(chip->port + 0x0A);
	}
	snd_printk(KERN_ERR "dsp_get_byte failed: 0x%lx = 0x%x!!!\n",
		   chip->port + 0x0A, inb(chip->port + 0x0A));
	return -ENODEV;
}
#undef REG_DEBUG
/*
 * Write a controller register: the chip takes a <register, value> command
 * pair, which must not be interleaved with other register traffic, hence
 * the reg_lock.  Returns 0 or a negative error from the command path.
 */
static int snd_es18xx_write(struct snd_es18xx *chip,
			    unsigned char reg, unsigned char data)
{
	unsigned long flags;
	int err;

	spin_lock_irqsave(&chip->reg_lock, flags);
	err = snd_es18xx_dsp_command(chip, reg);
	if (err >= 0)
		err = snd_es18xx_dsp_command(chip, data);
	spin_unlock_irqrestore(&chip->reg_lock, flags);
#ifdef REG_DEBUG
	snd_printk(KERN_DEBUG "Reg %02x set to %02x\n", reg, data);
#endif
	return err;
}
/*
 * Read a controller register.  0xC0 is the "read register" command; it
 * is followed by the register index, after which the chip returns the
 * contents.  Returns the byte read or a negative error.
 */
static int snd_es18xx_read(struct snd_es18xx *chip, unsigned char reg)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&chip->reg_lock, flags);
	ret = snd_es18xx_dsp_command(chip, 0xC0);
	if (ret >= 0) {
		ret = snd_es18xx_dsp_command(chip, reg);
		if (ret >= 0) {
			ret = snd_es18xx_dsp_get_byte(chip);
#ifdef REG_DEBUG
			snd_printk(KERN_DEBUG "Reg %02x now is %02x (%d)\n",
				   reg, ret, ret);
#endif
		}
	}
	spin_unlock_irqrestore(&chip->reg_lock, flags);
	return ret;
}
/*
 * Read-modify-write a controller register under reg_lock; the hardware
 * is only written when the masked bits actually change.
 * Returns the old (masked) value, or a negative error.
 */
static int snd_es18xx_bits(struct snd_es18xx *chip, unsigned char reg,
			   unsigned char mask, unsigned char val)
{
	int ret;
	unsigned char old, new, oval;
	unsigned long flags;
	spin_lock_irqsave(&chip->reg_lock, flags);
	/* 0xC0 = "read register" command, followed by the register index */
	ret = snd_es18xx_dsp_command(chip, 0xC0);
	if (ret < 0)
		goto end;
	ret = snd_es18xx_dsp_command(chip, reg);
	if (ret < 0)
		goto end;
	ret = snd_es18xx_dsp_get_byte(chip);
	if (ret < 0) {
		goto end;
	}
	old = ret;
	oval = old & mask;
	if (val != oval) {
		/* write back: register index, then the merged value */
		ret = snd_es18xx_dsp_command(chip, reg);
		if (ret < 0)
			goto end;
		new = (old & ~mask) | (val & mask);
		ret = snd_es18xx_dsp_command(chip, new);
		if (ret < 0)
			goto end;
#ifdef REG_DEBUG
		snd_printk(KERN_DEBUG "Reg %02x was %02x, set to %02x (%d)\n",
			   reg, old, new, ret);
#endif
	}
	ret = oval;
end:
	spin_unlock_irqrestore(&chip->reg_lock, flags);
	return ret;
}
/* Write a mixer register: index port is base+4, data port is base+5. */
static inline void snd_es18xx_mixer_write(struct snd_es18xx *chip,
					  unsigned char reg, unsigned char data)
{
	unsigned long flags;
	spin_lock_irqsave(&chip->mixer_lock, flags);
	outb(reg, chip->port + 0x04);
	outb(data, chip->port + 0x05);
	spin_unlock_irqrestore(&chip->mixer_lock, flags);
#ifdef REG_DEBUG
	snd_printk(KERN_DEBUG "Mixer reg %02x set to %02x\n", reg, data);
#endif
}
/* Read a mixer register: select the index at base+4, read data at base+5. */
static inline int snd_es18xx_mixer_read(struct snd_es18xx *chip, unsigned char reg)
{
	unsigned long flags;
	int data;
	spin_lock_irqsave(&chip->mixer_lock, flags);
	outb(reg, chip->port + 0x04);
	data = inb(chip->port + 0x05);
	spin_unlock_irqrestore(&chip->mixer_lock, flags);
#ifdef REG_DEBUG
	snd_printk(KERN_DEBUG "Mixer reg %02x now is %02x\n", reg, data);
#endif
	return data;
}
/*
 * Read-modify-write a mixer register under mixer_lock; only touches the
 * hardware when the masked value actually changes.
 * Returns the old (masked) value.
 */
static inline int snd_es18xx_mixer_bits(struct snd_es18xx *chip, unsigned char reg,
					unsigned char mask, unsigned char val)
{
	unsigned char old, new, oval;
	unsigned long flags;
	spin_lock_irqsave(&chip->mixer_lock, flags);
	outb(reg, chip->port + 0x04);	/* select mixer register */
	old = inb(chip->port + 0x05);	/* current contents */
	oval = old & mask;
	if (val != oval) {
		/* merge the new bits into the untouched remainder */
		new = (old & ~mask) | (val & mask);
		outb(new, chip->port + 0x05);
#ifdef REG_DEBUG
		snd_printk(KERN_DEBUG "Mixer reg %02x was %02x, set to %02x\n",
			   reg, old, new);
#endif
	}
	spin_unlock_irqrestore(&chip->mixer_lock, flags);
	return oval;
}
/*
 * Probe whether the masked bits of a mixer register are writable: flip
 * them, read back, and report whether the new value stuck.
 * NOTE: the register is deliberately left with the flipped bits.
 */
static inline int snd_es18xx_mixer_writable(struct snd_es18xx *chip, unsigned char reg,
					    unsigned char mask)
{
	int old, expected, new;
	unsigned long flags;
	spin_lock_irqsave(&chip->mixer_lock, flags);
	outb(reg, chip->port + 0x04);
	old = inb(chip->port + 0x05);
	expected = old ^ mask;
	outb(expected, chip->port + 0x05);
	new = inb(chip->port + 0x05);
	spin_unlock_irqrestore(&chip->mixer_lock, flags);
#ifdef REG_DEBUG
	snd_printk(KERN_DEBUG "Mixer reg %02x was %02x, set to %02x, now is %02x\n",
		   reg, old, expected, new);
#endif
	return expected == new;
}
/*
 * Hardware reset: pulse the reset bit at base+6 and wait for the chip to
 * signal data-available, then expect the 0xAA "ready" byte on the read
 * data port.  Returns 0 on success, -1 if the chip did not come back.
 */
static int __devinit snd_es18xx_reset(struct snd_es18xx *chip)
{
	int i;
	outb(0x03, chip->port + 0x06);
	inb(chip->port + 0x06);
	outb(0x00, chip->port + 0x06);
	/* busy-wait for the data-available bit before reading the ready byte */
	for(i = 0; i < MILLISECOND && !(inb(chip->port + 0x0E) & 0x80); i++);
	if (inb(chip->port + 0x0A) != 0xAA)
		return -1;
	return 0;
}
/* Reset only the FIFO (reset bit 1 at base+6), leaving DSP state intact. */
static int snd_es18xx_reset_fifo(struct snd_es18xx *chip)
{
	outb(0x02, chip->port + 0x06);
	inb(chip->port + 0x06);
	outb(0x00, chip->port + 0x06);
	return 0;
}
/*
 * Sample-rate clock tables.  A rate is num/den; snd_es18xx_rate_set()
 * encodes den as 128-den or 256-den depending on which master clock the
 * rate was derived from.  "new" clocks apply to ES18XX_NEW_RATE chips,
 * "old" clocks to the rest.
 */
static struct snd_ratnum new_clocks[2] = {
	{
		.num = 793800,
		.den_min = 1,
		.den_max = 128,
		.den_step = 1,
	},
	{
		.num = 768000,
		.den_min = 1,
		.den_max = 128,
		.den_step = 1,
	}
};

static struct snd_pcm_hw_constraint_ratnums new_hw_constraints_clocks = {
	.nrats = 2,
	.rats = new_clocks,
};

static struct snd_ratnum old_clocks[2] = {
	{
		.num = 795444,
		.den_min = 1,
		.den_max = 128,
		.den_step = 1,
	},
	{
		.num = 397722,
		.den_min = 1,
		.den_max = 128,
		.den_step = 1,
	}
};

static struct snd_pcm_hw_constraint_ratnums old_hw_constraints_clocks  = {
	.nrats = 2,
	.rats = old_clocks,
};
/*
 * Program the sample-rate divider and the low-pass filter divider for
 * the given engine (DAC2 = Audio2, otherwise Audio1).  The rate divider
 * encoding (128-den vs 256-den) depends on which master clock matched
 * and differs between old and ES18XX_NEW_RATE chips.
 */
static void snd_es18xx_rate_set(struct snd_es18xx *chip,
				struct snd_pcm_substream *substream,
				int mode)
{
	unsigned int bits, div0;
	struct snd_pcm_runtime *runtime = substream->runtime;
	if (chip->caps & ES18XX_NEW_RATE) {
		if (runtime->rate_num == new_clocks[0].num)
			bits = 128 - runtime->rate_den;
		else
			bits = 256 - runtime->rate_den;
	} else {
		if (runtime->rate_num == old_clocks[0].num)
			bits = 256 - runtime->rate_den;
		else
			bits = 128 - runtime->rate_den;
	}

	/* set filter register: divider derived from a 7.16 MHz reference,
	 * stored as 256 - divider */
	div0 = 256 - 7160000*20/(8*82*runtime->rate);

	if ((chip->caps & ES18XX_PCM2) && mode == DAC2) {
		/* Audio2 uses mixer registers 0x70/0x72 */
		snd_es18xx_mixer_write(chip, 0x70, bits);
		/*
		 * Comment from kernel oss driver:
		 * FKS: fascinating: 0x72 doesn't seem to work.
		 */
		snd_es18xx_write(chip, 0xA2, div0);
		snd_es18xx_mixer_write(chip, 0x72, div0);
	} else {
		/* Audio1 uses controller registers 0xA1/0xA2 */
		snd_es18xx_write(chip, 0xA1, bits);
		snd_es18xx_write(chip, 0xA2, div0);
	}
}
/*
 * hw_params for playback: compute the byte->frame shift for the pointer
 * callbacks, reject stereo on mono-duplex chips while capture is open,
 * and allocate the DMA buffer.
 */
static int snd_es18xx_playback_hw_params(struct snd_pcm_substream *substream,
					 struct snd_pcm_hw_params *hw_params)
{
	struct snd_es18xx *chip = snd_pcm_substream_chip(substream);
	int err;
	int shift = 0;

	/* +1 for stereo, +1 for 16-bit samples */
	if (params_channels(hw_params) == 2)
		shift++;
	if (snd_pcm_format_width(params_format(hw_params)) == 16)
		shift++;

	if (substream->number == 0 && (chip->caps & ES18XX_PCM2)) {
		/* Audio2 (DAC2) stream */
		if ((chip->caps & ES18XX_DUPLEX_MONO) &&
		    chip->capture_a_substream &&
		    params_channels(hw_params) != 1) {
			_snd_pcm_hw_param_setempty(hw_params, SNDRV_PCM_HW_PARAM_CHANNELS);
			return -EBUSY;
		}
		chip->dma2_shift = shift;
	} else {
		chip->dma1_shift = shift;
	}

	err = snd_pcm_lib_malloc_pages(substream, params_buffer_bytes(hw_params));
	return err < 0 ? err : 0;
}
/* hw_free callback shared by playback and capture: release the DMA buffer. */
static int snd_es18xx_pcm_hw_free(struct snd_pcm_substream *substream)
{
	return snd_pcm_lib_free_pages(substream);
}
/* Prepare the Audio2 (DAC2) playback engine: rate, period counter,
 * sample format and the ISA DMA channel. */
static int snd_es18xx_playback1_prepare(struct snd_es18xx *chip,
					struct snd_pcm_substream *substream)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	unsigned int size = snd_pcm_lib_buffer_bytes(substream);
	unsigned int count = snd_pcm_lib_period_bytes(substream);

	snd_es18xx_rate_set(chip, substream, DAC2);

	/* Transfer Count Reload: the counter counts up from this value,
	 * so the chip interrupts once per period */
	count = 0x10000 - count;
	snd_es18xx_mixer_write(chip, 0x74, count & 0xff);
	snd_es18xx_mixer_write(chip, 0x76, count >> 8);

	/* Set format: bit1 = stereo, bit0 = 16-bit, bit2 = signed samples */
	snd_es18xx_mixer_bits(chip, 0x7A, 0x07,
			      ((runtime->channels == 1) ? 0x00 : 0x02) |
			      (snd_pcm_format_width(runtime->format) == 16 ? 0x01 : 0x00) |
			      (snd_pcm_format_unsigned(runtime->format) ? 0x00 : 0x04));

	/* Set DMA controller */
	snd_dma_program(chip->dma2, runtime->dma_addr, size, DMA_MODE_WRITE | DMA_AUTOINIT);

	return 0;
}
/*
 * Trigger for the Audio2 (DAC2) playback engine.
 * START/RESUME enables the Audio2 DMA (mixer reg 0x78); STOP/SUSPEND
 * disables it.  The AVOID_POPS blocks ramp the Audio2 volume (or the
 * PCM output on single-PCM chips) around the DMA transition.
 *
 * Fix: the pop-avoidance delays used udelay(100000)/udelay(25000);
 * udelay() must not be used for delays in the millisecond range (it can
 * overflow on some platforms) - use mdelay() instead.
 */
static int snd_es18xx_playback1_trigger(struct snd_es18xx *chip,
					struct snd_pcm_substream *substream,
					int cmd)
{
	switch (cmd) {
	case SNDRV_PCM_TRIGGER_START:
	case SNDRV_PCM_TRIGGER_RESUME:
		if (chip->active & DAC2)
			return 0;
		chip->active |= DAC2;
		/* Start DMA; high DMA channels need a different mode value */
		if (chip->dma2 >= 4)
			snd_es18xx_mixer_write(chip, 0x78, 0xb3);
		else
			snd_es18xx_mixer_write(chip, 0x78, 0x93);
#ifdef AVOID_POPS
		/* Avoid pops */
		mdelay(100);
		if (chip->caps & ES18XX_PCM2)
			/* Restore Audio 2 volume */
			snd_es18xx_mixer_write(chip, 0x7C, chip->audio2_vol);
		else
			/* Enable PCM output */
			snd_es18xx_dsp_command(chip, 0xD1);
#endif
		break;
	case SNDRV_PCM_TRIGGER_STOP:
	case SNDRV_PCM_TRIGGER_SUSPEND:
		if (!(chip->active & DAC2))
			return 0;
		chip->active &= ~DAC2;
		/* Stop DMA */
		snd_es18xx_mixer_write(chip, 0x78, 0x00);
#ifdef AVOID_POPS
		mdelay(25);
		if (chip->caps & ES18XX_PCM2)
			/* Set Audio 2 volume to 0 */
			snd_es18xx_mixer_write(chip, 0x7C, 0);
		else
			/* Disable PCM output */
			snd_es18xx_dsp_command(chip, 0xD3);
#endif
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
/*
 * hw_params for capture: refuse stereo on mono-duplex chips while the
 * duplex playback stream is open, compute the byte->frame shift, and
 * allocate the DMA buffer.
 */
static int snd_es18xx_capture_hw_params(struct snd_pcm_substream *substream,
					struct snd_pcm_hw_params *hw_params)
{
	struct snd_es18xx *chip = snd_pcm_substream_chip(substream);
	int err;
	int shift = 0;

	if ((chip->caps & ES18XX_DUPLEX_MONO) &&
	    chip->playback_a_substream &&
	    params_channels(hw_params) != 1) {
		_snd_pcm_hw_param_setempty(hw_params, SNDRV_PCM_HW_PARAM_CHANNELS);
		return -EBUSY;
	}

	/* +1 for stereo, +1 for 16-bit samples */
	if (params_channels(hw_params) == 2)
		shift++;
	if (snd_pcm_format_width(params_format(hw_params)) == 16)
		shift++;
	chip->dma1_shift = shift;

	err = snd_pcm_lib_malloc_pages(substream, params_buffer_bytes(hw_params));
	return err < 0 ? err : 0;
}
/*
 * Prepare the Audio1 capture engine: FIFO reset, channel mode, rate,
 * period counter, sample format and the ISA DMA channel.
 *
 * Fix: the AVOID_POPS delay used udelay(100000); udelay() must not be
 * used for millisecond-range delays (it can overflow on some platforms)
 * - use mdelay() instead.
 */
static int snd_es18xx_capture_prepare(struct snd_pcm_substream *substream)
{
	struct snd_es18xx *chip = snd_pcm_substream_chip(substream);
	struct snd_pcm_runtime *runtime = substream->runtime;
	unsigned int size = snd_pcm_lib_buffer_bytes(substream);
	unsigned int count = snd_pcm_lib_period_bytes(substream);

	snd_es18xx_reset_fifo(chip);

	/* Set stereo/mono */
	snd_es18xx_bits(chip, 0xA8, 0x03, runtime->channels == 1 ? 0x02 : 0x01);

	snd_es18xx_rate_set(chip, substream, ADC1);

	/* Transfer Count Reload: counter counts up to overflow, giving
	 * one interrupt per period */
	count = 0x10000 - count;
	snd_es18xx_write(chip, 0xA4, count & 0xff);
	snd_es18xx_write(chip, 0xA5, count >> 8);

#ifdef AVOID_POPS
	mdelay(100);
#endif

	/* Set format */
	snd_es18xx_write(chip, 0xB7,
			 snd_pcm_format_unsigned(runtime->format) ? 0x51 : 0x71);
	snd_es18xx_write(chip, 0xB7, 0x90 |
			 ((runtime->channels == 1) ? 0x40 : 0x08) |
			 (snd_pcm_format_width(runtime->format) == 16 ? 0x04 : 0x00) |
			 (snd_pcm_format_unsigned(runtime->format) ? 0x00 : 0x20));

	/* Set DMA controller */
	snd_dma_program(chip->dma1, runtime->dma_addr, size, DMA_MODE_READ | DMA_AUTOINIT);

	return 0;
}
/*
 * Trigger for Audio1 capture: register 0xB8 controls the Audio1 DMA,
 * 0x0f starts capture, 0x00 stops it.
 */
static int snd_es18xx_capture_trigger(struct snd_pcm_substream *substream,
				      int cmd)
{
	struct snd_es18xx *chip = snd_pcm_substream_chip(substream);
	unsigned char dma_ctl;

	switch (cmd) {
	case SNDRV_PCM_TRIGGER_START:
	case SNDRV_PCM_TRIGGER_RESUME:
		if (chip->active & ADC1)
			return 0;
		chip->active |= ADC1;
		dma_ctl = 0x0f;		/* Start DMA */
		break;
	case SNDRV_PCM_TRIGGER_STOP:
	case SNDRV_PCM_TRIGGER_SUSPEND:
		if (!(chip->active & ADC1))
			return 0;
		chip->active &= ~ADC1;
		dma_ctl = 0x00;		/* Stop DMA */
		break;
	default:
		return -EINVAL;
	}
	snd_es18xx_write(chip, 0xB8, dma_ctl);
	return 0;
}
/* Prepare the Audio1 (DAC1) playback engine: FIFO reset, channel mode,
 * rate, period counter, sample format and the ISA DMA channel. */
static int snd_es18xx_playback2_prepare(struct snd_es18xx *chip,
					struct snd_pcm_substream *substream)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	unsigned int size = snd_pcm_lib_buffer_bytes(substream);
	unsigned int count = snd_pcm_lib_period_bytes(substream);

	snd_es18xx_reset_fifo(chip);

	/* Set stereo/mono */
	snd_es18xx_bits(chip, 0xA8, 0x03, runtime->channels == 1 ? 0x02 : 0x01);

	snd_es18xx_rate_set(chip, substream, DAC1);

	/* Transfer Count Reload: counter counts up to overflow, giving
	 * one interrupt per period */
	count = 0x10000 - count;
	snd_es18xx_write(chip, 0xA4, count & 0xff);
	snd_es18xx_write(chip, 0xA5, count >> 8);

	/* Set format */
	snd_es18xx_write(chip, 0xB6,
			 snd_pcm_format_unsigned(runtime->format) ? 0x80 : 0x00);
	snd_es18xx_write(chip, 0xB7,
			 snd_pcm_format_unsigned(runtime->format) ? 0x51 : 0x71);
	snd_es18xx_write(chip, 0xB7, 0x90 |
			 (runtime->channels == 1 ? 0x40 : 0x08) |
			 (snd_pcm_format_width(runtime->format) == 16 ? 0x04 : 0x00) |
			 (snd_pcm_format_unsigned(runtime->format) ? 0x00 : 0x20));

	/* Set DMA controller */
	snd_dma_program(chip->dma1, runtime->dma_addr, size, DMA_MODE_WRITE | DMA_AUTOINIT);

	return 0;
}
/*
 * Trigger for the Audio1 (DAC1) playback engine: 0xB8 = 0x05 starts the
 * playback DMA, 0x00 stops it.  The AVOID_POPS blocks gate the PCM
 * output (0xD1 enable / 0xD3 disable) around the DMA transition.
 *
 * Fix: the pop-avoidance delays used udelay(100000)/udelay(25000);
 * udelay() must not be used for millisecond-range delays (it can
 * overflow on some platforms) - use mdelay() instead.
 */
static int snd_es18xx_playback2_trigger(struct snd_es18xx *chip,
					struct snd_pcm_substream *substream,
					int cmd)
{
	switch (cmd) {
	case SNDRV_PCM_TRIGGER_START:
	case SNDRV_PCM_TRIGGER_RESUME:
		if (chip->active & DAC1)
			return 0;
		chip->active |= DAC1;
		/* Start DMA */
		snd_es18xx_write(chip, 0xB8, 0x05);
#ifdef AVOID_POPS
		/* Avoid pops */
		mdelay(100);
		/* Enable Audio 1 */
		snd_es18xx_dsp_command(chip, 0xD1);
#endif
		break;
	case SNDRV_PCM_TRIGGER_STOP:
	case SNDRV_PCM_TRIGGER_SUSPEND:
		if (!(chip->active & DAC1))
			return 0;
		chip->active &= ~DAC1;
		/* Stop DMA */
		snd_es18xx_write(chip, 0xB8, 0x00);
#ifdef AVOID_POPS
		/* Avoid pops */
		mdelay(25);
		/* Disable Audio 1 */
		snd_es18xx_dsp_command(chip, 0xD3);
#endif
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
/* Dispatch prepare to the right engine: the first substream on a
 * dual-PCM chip uses Audio2, everything else uses Audio1. */
static int snd_es18xx_playback_prepare(struct snd_pcm_substream *substream)
{
	struct snd_es18xx *chip = snd_pcm_substream_chip(substream);
	int use_audio2 = substream->number == 0 && (chip->caps & ES18XX_PCM2);

	return use_audio2 ? snd_es18xx_playback1_prepare(chip, substream)
			  : snd_es18xx_playback2_prepare(chip, substream);
}
/* Dispatch trigger to the right engine (see snd_es18xx_playback_prepare). */
static int snd_es18xx_playback_trigger(struct snd_pcm_substream *substream,
				       int cmd)
{
	struct snd_es18xx *chip = snd_pcm_substream_chip(substream);
	int use_audio2 = substream->number == 0 && (chip->caps & ES18XX_PCM2);

	return use_audio2 ? snd_es18xx_playback1_trigger(chip, substream, cmd)
			  : snd_es18xx_playback2_trigger(chip, substream, cmd);
}
/*
 * Interrupt handler.  Reads the pending-interrupt mask (control port +6
 * on ES18XX_CONTROL chips, otherwise mixer reg 0x7f >> 4), then services
 * Audio1/Audio2 period interrupts, MPU401 MIDI and hardware volume.
 */
static irqreturn_t snd_es18xx_interrupt(int irq, void *dev_id)
{
	struct snd_card *card = dev_id;
	struct snd_es18xx *chip = card->private_data;
	unsigned char status;

	if (chip->caps & ES18XX_CONTROL) {
		/* Read Interrupt status */
		status = inb(chip->ctrl_port + 6);
	} else {
		/* Read Interrupt status */
		status = snd_es18xx_mixer_read(chip, 0x7f) >> 4;
	}
#if 0
	else {
		status = 0;
		if (inb(chip->port + 0x0C) & 0x01)
			status |= AUDIO1_IRQ;
		if (snd_es18xx_mixer_read(chip, 0x7A) & 0x80)
			status |= AUDIO2_IRQ;
		if ((chip->caps & ES18XX_HWV) &&
		    snd_es18xx_mixer_read(chip, 0x64) & 0x10)
			status |= HWV_IRQ;
	}
#endif

	/* Audio 1 & Audio 2 */
	if (status & AUDIO2_IRQ) {
		if (chip->active & DAC2)
			snd_pcm_period_elapsed(chip->playback_a_substream);
		/* ack interrupt: clear bit 7 of mixer reg 0x7A */
		snd_es18xx_mixer_bits(chip, 0x7A, 0x80, 0x00);
	}
	if (status & AUDIO1_IRQ) {
		/* ok.. capture is active */
		if (chip->active & ADC1)
			snd_pcm_period_elapsed(chip->capture_a_substream);
		/* ok.. playback2 is active */
		else if (chip->active & DAC1)
			snd_pcm_period_elapsed(chip->playback_b_substream);
		/* ack interrupt by reading the Audio1 status port */
		inb(chip->port + 0x0E);
	}

	/* MPU */
	if ((status & MPU_IRQ) && chip->rmidi)
		snd_mpu401_uart_interrupt(irq, chip->rmidi->private_data);

	/* Hardware volume */
	if (status & HWV_IRQ) {
		int split = 0;

		if (chip->caps & ES18XX_HWV) {
			/* reg 0x64 bit 7: presumably "volume split from
			 * master" mode - TODO confirm against datasheet */
			split = snd_es18xx_mixer_read(chip, 0x64) & 0x80;
			snd_ctl_notify(card, SNDRV_CTL_EVENT_MASK_VALUE,
				       &chip->hw_switch->id);
			snd_ctl_notify(card, SNDRV_CTL_EVENT_MASK_VALUE,
				       &chip->hw_volume->id);
		}
		if (!split) {
			snd_ctl_notify(card, SNDRV_CTL_EVENT_MASK_VALUE,
				       &chip->master_switch->id);
			snd_ctl_notify(card, SNDRV_CTL_EVENT_MASK_VALUE,
				       &chip->master_volume->id);
		}
		/* ack interrupt */
		snd_es18xx_mixer_write(chip, 0x66, 0x00);
	}
	return IRQ_HANDLED;
}
/*
 * Report the current playback position in frames.  The first substream
 * on a dual-PCM chip runs on Audio2/dma2, everything else on Audio1/dma1;
 * returns 0 while the corresponding engine is stopped.
 */
static snd_pcm_uframes_t snd_es18xx_playback_pointer(struct snd_pcm_substream *substream)
{
	struct snd_es18xx *chip = snd_pcm_substream_chip(substream);
	unsigned int size = snd_pcm_lib_buffer_bytes(substream);
	int dma, shift, active_mask;
	int pos;

	if (substream->number == 0 && (chip->caps & ES18XX_PCM2)) {
		active_mask = DAC2;
		dma = chip->dma2;
		shift = chip->dma2_shift;
	} else {
		active_mask = DAC1;
		dma = chip->dma1;
		shift = chip->dma1_shift;
	}

	if (!(chip->active & active_mask))
		return 0;
	pos = snd_dma_pointer(dma, size);
	return pos >> shift;	/* bytes -> frames */
}
/* Report the current capture position in frames (0 while stopped). */
static snd_pcm_uframes_t snd_es18xx_capture_pointer(struct snd_pcm_substream *substream)
{
	struct snd_es18xx *chip = snd_pcm_substream_chip(substream);
	unsigned int bytes = snd_pcm_lib_buffer_bytes(substream);

	if (!(chip->active & ADC1))
		return 0;
	/* convert the DMA byte position to frames */
	return snd_dma_pointer(chip->dma1, bytes) >> chip->dma1_shift;
}
/* Hardware description reported to ALSA for both playback substreams. */
static struct snd_pcm_hardware snd_es18xx_playback =
{
	.info =			(SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_INTERLEAVED |
				 SNDRV_PCM_INFO_RESUME |
				 SNDRV_PCM_INFO_MMAP_VALID),
	.formats =		(SNDRV_PCM_FMTBIT_U8 | SNDRV_PCM_FMTBIT_S8 |
				 SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_U16_LE),
	.rates =		SNDRV_PCM_RATE_CONTINUOUS | SNDRV_PCM_RATE_8000_48000,
	.rate_min =		4000,
	.rate_max =		48000,
	.channels_min =		1,
	.channels_max =		2,
	.buffer_bytes_max =	65536,
	.period_bytes_min =	64,
	.period_bytes_max =	65536,
	.periods_min =		1,
	.periods_max =		1024,
	.fifo_size =		0,
};
/* Hardware description reported to ALSA for the capture substream. */
static struct snd_pcm_hardware snd_es18xx_capture =
{
	.info =			(SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_INTERLEAVED |
				 SNDRV_PCM_INFO_RESUME |
				 SNDRV_PCM_INFO_MMAP_VALID),
	.formats =		(SNDRV_PCM_FMTBIT_U8 | SNDRV_PCM_FMTBIT_S8 |
				 SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_U16_LE),
	.rates =		SNDRV_PCM_RATE_CONTINUOUS | SNDRV_PCM_RATE_8000_48000,
	.rate_min =		4000,
	.rate_max =		48000,
	.channels_min =		1,
	.channels_max =		2,
	.buffer_bytes_max =	65536,
	.period_bytes_min =	64,
	.period_bytes_max =	65536,
	.periods_min =		1,
	.periods_max =		1024,
	.fifo_size =		0,
};
/*
 * Open a playback substream.  The first substream on a dual-PCM chip
 * uses Audio2; on mono-duplex chips it cannot open while a stereo
 * capture is active.  The second substream shares Audio1 with capture
 * and is therefore exclusive with it.
 */
static int snd_es18xx_playback_open(struct snd_pcm_substream *substream)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	struct snd_es18xx *chip = snd_pcm_substream_chip(substream);

	if (substream->number == 0 && (chip->caps & ES18XX_PCM2)) {
		if ((chip->caps & ES18XX_DUPLEX_MONO) &&
		    chip->capture_a_substream &&
		    chip->capture_a_substream->runtime->channels != 1)
			return -EAGAIN;
		chip->playback_a_substream = substream;
	} else if (substream->number <= 1) {
		if (chip->capture_a_substream)
			return -EAGAIN;
		chip->playback_b_substream = substream;
	} else {
		snd_BUG();
		return -EINVAL;
	}

	substream->runtime->hw = snd_es18xx_playback;
	snd_pcm_hw_constraint_ratnums(runtime, 0, SNDRV_PCM_HW_PARAM_RATE,
				      (chip->caps & ES18XX_NEW_RATE) ?
				      &new_hw_constraints_clocks :
				      &old_hw_constraints_clocks);
	return 0;
}
/*
 * Open the capture substream.  Capture shares the Audio1 engine with
 * the second playback stream (mutually exclusive); on mono-duplex chips
 * it also cannot open while Audio2 is playing stereo.
 */
static int snd_es18xx_capture_open(struct snd_pcm_substream *substream)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	struct snd_es18xx *chip = snd_pcm_substream_chip(substream);

	if (chip->playback_b_substream)
		return -EAGAIN;
	if ((chip->caps & ES18XX_DUPLEX_MONO) &&
	    chip->playback_a_substream &&
	    chip->playback_a_substream->runtime->channels != 1)
		return -EAGAIN;

	chip->capture_a_substream = substream;
	substream->runtime->hw = snd_es18xx_capture;
	snd_pcm_hw_constraint_ratnums(runtime, 0, SNDRV_PCM_HW_PARAM_RATE,
				      (chip->caps & ES18XX_NEW_RATE) ?
				      &new_hw_constraints_clocks :
				      &old_hw_constraints_clocks);
	return 0;
}
/* Close a playback substream: clear the bookkeeping pointer for
 * whichever engine it was using and release the DMA buffer. */
static int snd_es18xx_playback_close(struct snd_pcm_substream *substream)
{
	struct snd_es18xx *chip = snd_pcm_substream_chip(substream);
	int was_audio2 = substream->number == 0 && (chip->caps & ES18XX_PCM2);

	if (was_audio2)
		chip->playback_a_substream = NULL;
	else
		chip->playback_b_substream = NULL;

	snd_pcm_lib_free_pages(substream);
	return 0;
}
/* Close the capture substream and release its DMA buffer. */
static int snd_es18xx_capture_close(struct snd_pcm_substream *substream)
{
	struct snd_es18xx *chip = snd_pcm_substream_chip(substream);

	chip->capture_a_substream = NULL;
	snd_pcm_lib_free_pages(substream);
	return 0;
}
/*
* MIXER part
*/
/* Record source mux routines:
* Depending on the chipset this mux switches between 4, 5, or 8 possible inputs.
* bit table for the 4/5 source mux:
* reg 1C:
* b2 b1 b0 muxSource
* x 0 x microphone
* 0 1 x CD
* 1 1 0 line
* 1 1 1 mixer
* if it's "mixer" and it's a 5 source mux chipset then reg 7A bit 3 determines
* either the play mixer or the capture mixer.
*
* "map4Source" translates from source number to reg bit pattern
* "invMap4Source" translates from reg bit pattern to source number
*/
/*
 * Info callback for the record source mux.  The number of selectable
 * sources depends on the chip revision: 4 (1868/1878), 5 (1887/1888)
 * or 8 (1869/1879).
 *
 * Fix: the string tables are read-only data - declare them
 * "static const char * const" so both the strings and the pointer
 * array land in rodata (standard kernel/checkpatch idiom).
 */
static int snd_es18xx_info_mux(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo)
{
	static const char * const texts5Source[5] = {
		"Mic", "CD", "Line", "Master", "Mix"
	};
	static const char * const texts8Source[8] = {
		"Mic", "Mic Master", "CD", "AOUT",
		"Mic1", "Mix", "Line", "Master"
	};
	struct snd_es18xx *chip = snd_kcontrol_chip(kcontrol);

	uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
	uinfo->count = 1;
	switch (chip->version) {
	case 0x1868:
	case 0x1878:
		uinfo->value.enumerated.items = 4;
		if (uinfo->value.enumerated.item > 3)
			uinfo->value.enumerated.item = 3;
		strcpy(uinfo->value.enumerated.name,
		       texts5Source[uinfo->value.enumerated.item]);
		break;
	case 0x1887:
	case 0x1888:
		uinfo->value.enumerated.items = 5;
		if (uinfo->value.enumerated.item > 4)
			uinfo->value.enumerated.item = 4;
		strcpy(uinfo->value.enumerated.name, texts5Source[uinfo->value.enumerated.item]);
		break;
	case 0x1869: /* DS somewhat contradictory for 1869: could be be 5 or 8 */
	case 0x1879:
		uinfo->value.enumerated.items = 8;
		if (uinfo->value.enumerated.item > 7)
			uinfo->value.enumerated.item = 7;
		strcpy(uinfo->value.enumerated.name, texts8Source[uinfo->value.enumerated.item]);
		break;
	default:
		return -EINVAL;
	}
	return 0;
}
/* Get callback for the record source mux: translate the reg-1C bit
 * pattern back to a source index. */
static int snd_es18xx_get_mux(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol)
{
	static unsigned char invMap4Source[8] = {0, 0, 1, 1, 0, 0, 2, 3};
	struct snd_es18xx *chip = snd_kcontrol_chip(kcontrol);
	int source = snd_es18xx_mixer_read(chip, 0x1c) & 0x07;

	/* 8-source chips (1869/1879) report the raw bit pattern directly */
	if (chip->version != 0x1869 && chip->version != 0x1879) {
		source = invMap4Source[source];
		/* on 5-source chips "mixer" splits into play/capture mixer
		 * depending on reg 7A bit 3 */
		if (source == 3 &&
		    (chip->version == 0x1887 || chip->version == 0x1888) &&
		    (snd_es18xx_mixer_read(chip, 0x7a) & 0x08))
			source = 4;
	}
	ucontrol->value.enumerated.item[0] = source;
	return 0;
}
/*
 * Put callback for the record source mux.  Translates the selected
 * source index into the reg-1C bit pattern (and, on 5-source chips,
 * the reg-7A bit-3 "capture mixer" flag).  Returns nonzero when any
 * register changed.
 */
static int snd_es18xx_put_mux(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol)
{
	static unsigned char map4Source[4] = {0, 2, 6, 7};
	struct snd_es18xx *chip = snd_kcontrol_chip(kcontrol);
	unsigned char val = ucontrol->value.enumerated.item[0];
	unsigned char retVal = 0;
	switch (chip->version) {
	/* 5 source chips */
	case 0x1887:
	case 0x1888:
		if (val > 4)
			return -EINVAL;
		if (val == 4) {
			/* source 4 = capture mixer: source 3 plus 7A bit 3 */
			retVal = snd_es18xx_mixer_bits(chip, 0x7a, 0x08, 0x08) != 0x08;
			val = 3;
		} else
			retVal = snd_es18xx_mixer_bits(chip, 0x7a, 0x08, 0x00) != 0x00;
		/* fall through: 5-source chips share the 4-source bit mapping */
	/* 4 source chips */
	case 0x1868:
	case 0x1878:
		if (val > 3)
			return -EINVAL;
		val = map4Source[val];
		break;
	/* 8 source chips */
	case 0x1869:
	case 0x1879:
		if (val > 7)
			return -EINVAL;
		break;
	default:
		return -EINVAL;
	}
	/* mixer_bits() returns the old masked bits, != val means a change */
	return (snd_es18xx_mixer_bits(chip, 0x1c, 0x07, val) != val) || retVal;
}
#define snd_es18xx_info_spatializer_enable snd_ctl_boolean_mono_info
/* Report whether the Spatializer is enabled (bit 3 of mixer reg 0x50). */
static int snd_es18xx_get_spatializer_enable(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol)
{
	struct snd_es18xx *chip = snd_kcontrol_chip(kcontrol);
	int enabled = (snd_es18xx_mixer_read(chip, 0x50) & 0x08) ? 1 : 0;

	ucontrol->value.integer.value[0] = enabled;
	return 0;
}
/*
 * Enable/disable the ESS Spatializer.  On a change the 0x04 bit is first
 * written low and then the full value, i.e. the bit is pulsed - the
 * write order matters here.
 */
static int snd_es18xx_put_spatializer_enable(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol)
{
	struct snd_es18xx *chip = snd_kcontrol_chip(kcontrol);
	unsigned char oval, nval;
	int change;
	nval = ucontrol->value.integer.value[0] ? 0x0c : 0x04;
	oval = snd_es18xx_mixer_read(chip, 0x50) & 0x0c;
	change = nval != oval;
	if (change) {
		/* pulse bit 2 low, then write the final value */
		snd_es18xx_mixer_write(chip, 0x50, nval & ~0x04);
		snd_es18xx_mixer_write(chip, 0x50, nval);
	}
	return change;
}
/* Hardware volume control info: two channels with a 6-bit range (0-63). */
static int snd_es18xx_info_hw_volume(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo)
{
	uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
	uinfo->count = 2;
	uinfo->value.integer.min = 0;
	uinfo->value.integer.max = 63;
	return 0;
}
/* Read the hardware volume counters: low 6 bits of regs 0x61 (L) / 0x63 (R). */
static int snd_es18xx_get_hw_volume(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol)
{
	struct snd_es18xx *chip = snd_kcontrol_chip(kcontrol);
	long *out = ucontrol->value.integer.value;

	out[0] = snd_es18xx_mixer_read(chip, 0x61) & 0x3f;
	out[1] = snd_es18xx_mixer_read(chip, 0x63) & 0x3f;
	return 0;
}
#define snd_es18xx_info_hw_switch snd_ctl_boolean_stereo_info
/* Hardware volume switch: bit 6 of regs 0x61/0x63 is the mute bit, so
 * the control reports "on" when the bit is clear. */
static int snd_es18xx_get_hw_switch(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol)
{
	struct snd_es18xx *chip = snd_kcontrol_chip(kcontrol);
	long *out = ucontrol->value.integer.value;

	out[0] = (snd_es18xx_mixer_read(chip, 0x61) & 0x40) ? 0 : 1;
	out[1] = (snd_es18xx_mixer_read(chip, 0x63) & 0x40) ? 0 : 1;
	return 0;
}
/* Free callback for the hardware-volume kcontrols: drop every cached
 * control pointer so the IRQ handler stops sending notifications. */
static void snd_es18xx_hwv_free(struct snd_kcontrol *kcontrol)
{
	struct snd_es18xx *chip = snd_kcontrol_chip(kcontrol);

	chip->master_volume = NULL;
	chip->master_switch = NULL;
	chip->hw_volume = NULL;
	chip->hw_switch = NULL;
}
/* Read-modify-write helper that routes to the right register space:
 * indices below 0xa0 are mixer registers, the rest are controller ones. */
static int snd_es18xx_reg_bits(struct snd_es18xx *chip, unsigned char reg,
			       unsigned char mask, unsigned char val)
{
	return reg < 0xa0 ? snd_es18xx_mixer_bits(chip, reg, mask, val)
			  : snd_es18xx_bits(chip, reg, mask, val);
}
/* Read helper that routes to the right register space (see reg_bits). */
static int snd_es18xx_reg_read(struct snd_es18xx *chip, unsigned char reg)
{
	return reg < 0xa0 ? snd_es18xx_mixer_read(chip, reg)
			  : snd_es18xx_read(chip, reg);
}
#define ES18XX_SINGLE(xname, xindex, reg, shift, mask, invert) \
{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = xname, .index = xindex, \
.info = snd_es18xx_info_single, \
.get = snd_es18xx_get_single, .put = snd_es18xx_put_single, \
.private_value = reg | (shift << 8) | (mask << 16) | (invert << 24) }
/* Info callback for single controls: a mask of 1 means an on/off
 * switch, anything larger an integer range [0, mask]. */
static int snd_es18xx_info_single(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo)
{
	int mask = (kcontrol->private_value >> 16) & 0xff;

	if (mask == 1)
		uinfo->type = SNDRV_CTL_ELEM_TYPE_BOOLEAN;
	else
		uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
	uinfo->count = 1;
	uinfo->value.integer.min = 0;
	uinfo->value.integer.max = mask;
	return 0;
}
/* Get callback for single controls; private_value packs
 * reg | shift<<8 | mask<<16 | invert<<24. */
static int snd_es18xx_get_single(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol)
{
	struct snd_es18xx *chip = snd_kcontrol_chip(kcontrol);
	unsigned long priv = kcontrol->private_value;
	int reg = priv & 0xff;
	int shift = (priv >> 8) & 0xff;
	int mask = (priv >> 16) & 0xff;
	int invert = (priv >> 24) & 0xff;
	int val = (snd_es18xx_reg_read(chip, reg) >> shift) & mask;

	/* inverted controls report mask - raw value */
	ucontrol->value.integer.value[0] = invert ? mask - val : val;
	return 0;
}
/* Put callback for single controls (same private_value packing as get).
 * Returns nonzero when the register value actually changed. */
static int snd_es18xx_put_single(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol)
{
	struct snd_es18xx *chip = snd_kcontrol_chip(kcontrol);
	unsigned long priv = kcontrol->private_value;
	int reg = priv & 0xff;
	int shift = (priv >> 8) & 0xff;
	int mask = (priv >> 16) & 0xff;
	int invert = (priv >> 24) & 0xff;
	unsigned char val = ucontrol->value.integer.value[0] & mask;

	if (invert)
		val = mask - val;
	/* position value and mask at the register bit offset */
	val <<= shift;
	mask <<= shift;
	/* reg_bits() returns the old masked bits; != val means a change */
	return snd_es18xx_reg_bits(chip, reg, mask, val) != val;
}
/*
 * Template for a stereo (two-value) mixer control.
 * private_value layout: bits 0-7 left register, bits 8-15 right register,
 * bits 16-18 left shift, bits 19-21 right shift, bit 22 invert flag,
 * bits 24-31 value mask.
 */
#define ES18XX_DOUBLE(xname, xindex, left_reg, right_reg, shift_left, shift_right, mask, invert) \
{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = xname, .index = xindex, \
  .info = snd_es18xx_info_double, \
  .get = snd_es18xx_get_double, .put = snd_es18xx_put_double, \
  .private_value = left_reg | (right_reg << 8) | (shift_left << 16) | (shift_right << 19) | (mask << 24) | (invert << 22) }
/* Describe a stereo control: boolean when the mask is 1, integer otherwise. */
static int snd_es18xx_info_double(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo)
{
	int vmask = (kcontrol->private_value >> 24) & 0xff;

	uinfo->count = 2;
	uinfo->value.integer.min = 0;
	uinfo->value.integer.max = vmask;
	uinfo->type = (vmask == 1) ? SNDRV_CTL_ELEM_TYPE_BOOLEAN
				   : SNDRV_CTL_ELEM_TYPE_INTEGER;
	return 0;
}
/*
 * Read back both channels of a stereo control.  When the two channels
 * share one register it is read only once.
 */
static int snd_es18xx_get_double(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol)
{
	struct snd_es18xx *chip = snd_kcontrol_chip(kcontrol);
	unsigned long priv = kcontrol->private_value;
	int left_reg = priv & 0xff;
	int right_reg = (priv >> 8) & 0xff;
	int shift_left = (priv >> 16) & 0x07;
	int shift_right = (priv >> 19) & 0x07;
	int vmask = (priv >> 24) & 0xff;
	int invert = (priv >> 22) & 1;
	unsigned char lraw, rraw;
	long lval, rval;

	lraw = snd_es18xx_reg_read(chip, left_reg);
	rraw = (left_reg == right_reg) ? lraw
				       : snd_es18xx_reg_read(chip, right_reg);
	lval = (lraw >> shift_left) & vmask;
	rval = (rraw >> shift_right) & vmask;
	if (invert) {
		lval = vmask - lval;
		rval = vmask - rval;
	}
	ucontrol->value.integer.value[0] = lval;
	ucontrol->value.integer.value[1] = rval;
	return 0;
}
/*
 * Write both channels of a stereo control.  Channels in distinct
 * registers are updated separately; channels sharing one register are
 * merged into a single read-modify-write so neither field clobbers the
 * other.  Returns 1 when any register content changed, 0 otherwise.
 */
static int snd_es18xx_put_double(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol)
{
	struct snd_es18xx *chip = snd_kcontrol_chip(kcontrol);
	int left_reg = kcontrol->private_value & 0xff;
	int right_reg = (kcontrol->private_value >> 8) & 0xff;
	int shift_left = (kcontrol->private_value >> 16) & 0x07;
	int shift_right = (kcontrol->private_value >> 19) & 0x07;
	int mask = (kcontrol->private_value >> 24) & 0xff;
	int invert = (kcontrol->private_value >> 22) & 1;
	int change;
	unsigned char val1, val2, mask1, mask2;

	val1 = ucontrol->value.integer.value[0] & mask;
	val2 = ucontrol->value.integer.value[1] & mask;
	if (invert) {
		val1 = mask - val1;
		val2 = mask - val2;
	}
	val1 <<= shift_left;
	val2 <<= shift_right;
	mask1 = mask << shift_left;
	mask2 = mask << shift_right;
	if (left_reg != right_reg) {
		change = 0;
		if (snd_es18xx_reg_bits(chip, left_reg, mask1, val1) != val1)
			change = 1;
		if (snd_es18xx_reg_bits(chip, right_reg, mask2, val2) != val2)
			change = 1;
	} else {
		change = (snd_es18xx_reg_bits(chip, left_reg, mask1 | mask2,
					      val1 | val2) != (val1 | val2));
	}
	return change;
}
/* Mixer controls
 * These arrays contain setup data for mixer controls.
 *
 * The controls that are universal to all chipsets are fully initialized
 * here.
 */
/* Controls present on every supported ES18xx chip. */
static struct snd_kcontrol_new snd_es18xx_base_controls[] = {
ES18XX_DOUBLE("Master Playback Volume", 0, 0x60, 0x62, 0, 0, 63, 0),
ES18XX_DOUBLE("Master Playback Switch", 0, 0x60, 0x62, 6, 6, 1, 1),
ES18XX_DOUBLE("Line Playback Volume", 0, 0x3e, 0x3e, 4, 0, 15, 0),
ES18XX_DOUBLE("CD Playback Volume", 0, 0x38, 0x38, 4, 0, 15, 0),
ES18XX_DOUBLE("FM Playback Volume", 0, 0x36, 0x36, 4, 0, 15, 0),
ES18XX_DOUBLE("Mic Playback Volume", 0, 0x1a, 0x1a, 4, 0, 15, 0),
ES18XX_DOUBLE("Aux Playback Volume", 0, 0x3a, 0x3a, 4, 0, 15, 0),
ES18XX_SINGLE("Record Monitor", 0, 0xa8, 3, 1, 0),
ES18XX_DOUBLE("Capture Volume", 0, 0xb4, 0xb4, 4, 0, 15, 0),
{
	/* capture source selector uses dedicated mux callbacks */
	.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
	.name = "Capture Source",
	.info = snd_es18xx_info_mux,
	.get = snd_es18xx_get_mux,
	.put = snd_es18xx_put_mux,
}
};
/* Per-source capture volumes, only for chips with the ES18XX_RECMIX cap. */
static struct snd_kcontrol_new snd_es18xx_recmix_controls[] = {
ES18XX_DOUBLE("PCM Capture Volume", 0, 0x69, 0x69, 4, 0, 15, 0),
ES18XX_DOUBLE("Mic Capture Volume", 0, 0x68, 0x68, 4, 0, 15, 0),
ES18XX_DOUBLE("Line Capture Volume", 0, 0x6e, 0x6e, 4, 0, 15, 0),
ES18XX_DOUBLE("FM Capture Volume", 0, 0x6b, 0x6b, 4, 0, 15, 0),
ES18XX_DOUBLE("CD Capture Volume", 0, 0x6a, 0x6a, 4, 0, 15, 0),
ES18XX_DOUBLE("Aux Capture Volume", 0, 0x6c, 0x6c, 4, 0, 15, 0)
};
/*
 * The chipset specific mixer controls
 */
/* PC speaker volume (added for every chip except the ES1868). */
static struct snd_kcontrol_new snd_es18xx_opt_speaker =
	ES18XX_SINGLE("Beep Playback Volume", 0, 0x3c, 0, 7, 0);

/* Extra controls for the ES1869 (capture switch, video, mono paths). */
static struct snd_kcontrol_new snd_es18xx_opt_1869[] = {
ES18XX_SINGLE("Capture Switch", 0, 0x1c, 4, 1, 1),
ES18XX_SINGLE("Video Playback Switch", 0, 0x7f, 0, 1, 0),
ES18XX_DOUBLE("Mono Playback Volume", 0, 0x6d, 0x6d, 4, 0, 15, 0),
ES18XX_DOUBLE("Mono Capture Volume", 0, 0x6f, 0x6f, 4, 0, 15, 0)
};

/* Extra control for the ES1878. */
static struct snd_kcontrol_new snd_es18xx_opt_1878 =
	ES18XX_DOUBLE("Video Playback Volume", 0, 0x68, 0x68, 4, 0, 15, 0);

/* Extra controls for the ES1879. */
static struct snd_kcontrol_new snd_es18xx_opt_1879[] = {
ES18XX_SINGLE("Video Playback Switch", 0, 0x71, 6, 1, 0),
ES18XX_DOUBLE("Video Playback Volume", 0, 0x6d, 0x6d, 4, 0, 15, 0),
ES18XX_DOUBLE("Video Capture Volume", 0, 0x6f, 0x6f, 4, 0, 15, 0)
};
/* PCM volume for single-DAC chips (no ES18XX_PCM2 cap). */
static struct snd_kcontrol_new snd_es18xx_pcm1_controls[] = {
ES18XX_DOUBLE("PCM Playback Volume", 0, 0x14, 0x14, 4, 0, 15, 0),
};

/* PCM volumes for dual-DAC chips (ES18XX_PCM2 cap): Audio2 and Audio1. */
static struct snd_kcontrol_new snd_es18xx_pcm2_controls[] = {
ES18XX_DOUBLE("PCM Playback Volume", 0, 0x7c, 0x7c, 4, 0, 15, 0),
ES18XX_DOUBLE("PCM Playback Volume", 1, 0x14, 0x14, 4, 0, 15, 0)
};
/* ESS Spatializer 3D effect controls (ES18XX_SPATIALIZER cap). */
static struct snd_kcontrol_new snd_es18xx_spatializer_controls[] = {
ES18XX_SINGLE("3D Control - Level", 0, 0x52, 0, 63, 0),
{
	.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
	.name = "3D Control - Switch",
	.info = snd_es18xx_info_spatializer_enable,
	.get = snd_es18xx_get_spatializer_enable,
	.put = snd_es18xx_put_spatializer_enable,
}
};

/* Mic preamp boost; register/bit differ per chip family. */
static struct snd_kcontrol_new snd_es18xx_micpre1_control =
	ES18XX_SINGLE("Mic Boost (+26dB)", 0, 0xa9, 2, 1, 0);

static struct snd_kcontrol_new snd_es18xx_micpre2_control =
	ES18XX_SINGLE("Mic Boost (+26dB)", 0, 0x7d, 3, 1, 0);
/* Read-only views of the hardware volume knob (ES18XX_HWV cap). */
static struct snd_kcontrol_new snd_es18xx_hw_volume_controls[] = {
{
	.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
	.name = "Hardware Master Playback Volume",
	.access = SNDRV_CTL_ELEM_ACCESS_READ,
	.info = snd_es18xx_info_hw_volume,
	.get = snd_es18xx_get_hw_volume,
},
{
	.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
	.name = "Hardware Master Playback Switch",
	.access = SNDRV_CTL_ELEM_ACCESS_READ,
	.info = snd_es18xx_info_hw_switch,
	.get = snd_es18xx_get_hw_switch,
},
ES18XX_SINGLE("Hardware Master Volume Split", 0, 0x64, 7, 1, 0),
};
/*
 * Read one byte from the chip's control/configuration space:
 * the register index goes to ctrl_port, the data comes from ctrl_port + 1.
 */
static int __devinit snd_es18xx_config_read(struct snd_es18xx *chip, unsigned char reg)
{
	outb(reg, chip->ctrl_port);
	return inb(chip->ctrl_port + 1);
}
/*
 * Write one byte into the chip's control/configuration space
 * (index at ctrl_port, data at ctrl_port + 1).
 * No spinlocks needed: only called from otherwise protected init code.
 */
static void __devinit snd_es18xx_config_write(struct snd_es18xx *chip,
					      unsigned char reg, unsigned char data)
{
	outb(reg, chip->ctrl_port);
	outb(data, chip->ctrl_port + 1);
#ifdef REG_DEBUG
	snd_printk(KERN_DEBUG "Config reg %02x set to %02x\n", reg, data);
#endif
}
/*
 * Bring the chip into its operational state: extended mode, DMA demand
 * mode, IRQ/DMA routing, per-chip quirks, and input muting.
 *
 * Chips with the ES18XX_CONTROL cap are programmed through the separate
 * control/configuration port; older chips encode IRQ/DMA selections in
 * mixer and controller register bit fields instead.
 *
 * Returns 0 on success, -ENODEV for an IRQ/DMA setting the non-CONTROL
 * path cannot encode.
 */
static int __devinit snd_es18xx_initialize(struct snd_es18xx *chip,
					   unsigned long mpu_port,
					   unsigned long fm_port)
{
	int mask = 0;

        /* enable extended mode */
        snd_es18xx_dsp_command(chip, 0xC6);
	/* Reset mixer registers */
	snd_es18xx_mixer_write(chip, 0x00, 0x00);

        /* Audio 1 DMA demand mode (4 bytes/request) */
	snd_es18xx_write(chip, 0xB9, 2);
	if (chip->caps & ES18XX_CONTROL) {
		/* Hardware volume IRQ */
		snd_es18xx_config_write(chip, 0x27, chip->irq);
		if (fm_port > 0 && fm_port != SNDRV_AUTO_PORT) {
			/* FM I/O */
			snd_es18xx_config_write(chip, 0x62, fm_port >> 8);
			snd_es18xx_config_write(chip, 0x63, fm_port & 0xff);
		}
		if (mpu_port > 0 && mpu_port != SNDRV_AUTO_PORT) {
			/* MPU-401 I/O */
			snd_es18xx_config_write(chip, 0x64, mpu_port >> 8);
			snd_es18xx_config_write(chip, 0x65, mpu_port & 0xff);
			/* MPU-401 IRQ */
			snd_es18xx_config_write(chip, 0x28, chip->irq);
		}
		/* Audio1 IRQ */
		snd_es18xx_config_write(chip, 0x70, chip->irq);
		/* Audio2 IRQ */
		snd_es18xx_config_write(chip, 0x72, chip->irq);
		/* Audio1 DMA */
		snd_es18xx_config_write(chip, 0x74, chip->dma1);
		/* Audio2 DMA */
		snd_es18xx_config_write(chip, 0x75, chip->dma2);

		/* Enable Audio 1 IRQ */
		snd_es18xx_write(chip, 0xB1, 0x50);
		/* Enable Audio 2 IRQ */
		snd_es18xx_mixer_write(chip, 0x7A, 0x40);
		/* Enable Audio 1 DMA */
		snd_es18xx_write(chip, 0xB2, 0x50);
		/* Enable MPU and hardware volume interrupt */
		snd_es18xx_mixer_write(chip, 0x64, 0x42);
		/* Enable ESS wavetable input */
		snd_es18xx_mixer_bits(chip, 0x48, 0x10, 0x10);
	}
	else {
		/* no control port: IRQ/DMA numbers must be mapped onto the
		   chip's fixed 2-bit selector encodings */
		int irqmask, dma1mask, dma2mask;
		switch (chip->irq) {
		case 2:
		case 9:
			irqmask = 0;
			break;
		case 5:
			irqmask = 1;
			break;
		case 7:
			irqmask = 2;
			break;
		case 10:
			irqmask = 3;
			break;
		default:
			snd_printk(KERN_ERR "invalid irq %d\n", chip->irq);
			return -ENODEV;
		}
		switch (chip->dma1) {
		case 0:
			dma1mask = 1;
			break;
		case 1:
			dma1mask = 2;
			break;
		case 3:
			dma1mask = 3;
			break;
		default:
			snd_printk(KERN_ERR "invalid dma1 %d\n", chip->dma1);
			return -ENODEV;
		}
		switch (chip->dma2) {
		case 0:
			dma2mask = 0;
			break;
		case 1:
			dma2mask = 1;
			break;
		case 3:
			dma2mask = 2;
			break;
		case 5:
			dma2mask = 3;
			break;
		default:
			snd_printk(KERN_ERR "invalid dma2 %d\n", chip->dma2);
			return -ENODEV;
		}

		/* Enable and set Audio 1 IRQ */
		snd_es18xx_write(chip, 0xB1, 0x50 | (irqmask << 2));
		/* Enable and set Audio 1 DMA */
		snd_es18xx_write(chip, 0xB2, 0x50 | (dma1mask << 2));
		/* Set Audio 2 DMA */
		snd_es18xx_mixer_bits(chip, 0x7d, 0x07, 0x04 | dma2mask);
		/* Enable Audio 2 IRQ and DMA
		   Set capture mixer input */
		snd_es18xx_mixer_write(chip, 0x7A, 0x68);
		/* Enable and set hardware volume interrupt */
		snd_es18xx_mixer_write(chip, 0x64, 0x06);
		if (mpu_port > 0 && mpu_port != SNDRV_AUTO_PORT) {
			/* MPU401 share irq with audio
			   Joystick enabled
			   FM enabled */
			snd_es18xx_mixer_write(chip, 0x40,
					       0x43 | (mpu_port & 0xf0) >> 1);
		}
		snd_es18xx_mixer_write(chip, 0x7f, ((irqmask + 1) << 1) | 0x01);
	}
	if (chip->caps & ES18XX_NEW_RATE) {
		/* Change behaviour of register A1
		   4x oversampling
		   2nd channel DAC asynchronous */
		snd_es18xx_mixer_write(chip, 0x71, 0x32);
	}
	if (!(chip->caps & ES18XX_PCM2)) {
		/* Enable DMA FIFO */
		snd_es18xx_write(chip, 0xB7, 0x80);
	}
	if (chip->caps & ES18XX_SPATIALIZER) {
		/* Set spatializer parameters to recommended values */
		snd_es18xx_mixer_write(chip, 0x54, 0x8f);
		snd_es18xx_mixer_write(chip, 0x56, 0x95);
		snd_es18xx_mixer_write(chip, 0x58, 0x94);
		snd_es18xx_mixer_write(chip, 0x5a, 0x80);
	}
	/* Flip the "enable I2S" bits for those chipsets that need it */
	switch (chip->version) {
	case 0x1879:
		//Leaving I2S enabled on the 1879 screws up the PCM playback (rate effected somehow)
		//so a Switch control has been added to toggle this 0x71 bit on/off:
		//snd_es18xx_mixer_bits(chip, 0x71, 0x40, 0x40);
		/* Note: we fall through on purpose here. */
	case 0x1878:
		snd_es18xx_config_write(chip, 0x29, snd_es18xx_config_read(chip, 0x29) | 0x40);
		break;
	}
	/* Mute input source */
	if (chip->caps & ES18XX_MUTEREC)
		mask = 0x10;
	if (chip->caps & ES18XX_RECMIX)
		snd_es18xx_mixer_write(chip, 0x1c, 0x05 | mask);
	else {
		snd_es18xx_mixer_write(chip, 0x1c, 0x00 | mask);
		snd_es18xx_write(chip, 0xb4, 0x00);
	}
#ifndef AVOID_POPS
	/* Enable PCM output */
	snd_es18xx_dsp_command(chip, 0xD1);
#endif

	return 0;
}
/*
 * Identify the ESS chip model and store it in chip->version.
 *
 * After a DSP reset, DSP command 0xe7 returns a two-byte ID:
 *  - hi == 0x48 -> ES488;
 *  - hi == 0x68, low nibble of lo < 8 -> ES688;
 *  - otherwise write 0x40 to port+0x04 and read port+0x05 twice:
 *    newer parts return two different bytes forming the version number
 *    and then supply a separate control-port address, which is claimed
 *    here (chip->res_ctrl_port);
 *  - old parts that echo the same byte twice are distinguished by which
 *    mixer bits are writable (ES1887/1888/1788/1688).
 *
 * Returns 0 on success, -ENODEV when no supported chip responds, or
 * -EBUSY when the control-port region cannot be reserved.
 */
static int __devinit snd_es18xx_identify(struct snd_es18xx *chip)
{
	int hi, lo;

	/* reset */
	if (snd_es18xx_reset(chip) < 0) {
		snd_printk(KERN_ERR "reset at 0x%lx failed!!!\n", chip->port);
		return -ENODEV;
	}

	snd_es18xx_dsp_command(chip, 0xe7);
	hi = snd_es18xx_dsp_get_byte(chip);
	if (hi < 0)
		return hi;
	lo = snd_es18xx_dsp_get_byte(chip);
	if ((lo & 0xf0) != 0x80)
		return -ENODEV;
	if (hi == 0x48) {
		chip->version = 0x488;
		return 0;
	}
	if (hi != 0x68)
		return -ENODEV;
	if ((lo & 0x0f) < 8) {
		chip->version = 0x688;
		return 0;
	}

	outb(0x40, chip->port + 0x04);
	udelay(10);
	hi = inb(chip->port + 0x05);
	udelay(10);
	lo = inb(chip->port + 0x05);
	if (hi != lo) {
		chip->version = hi << 8 | lo;
		chip->ctrl_port = inb(chip->port + 0x05) << 8;
		udelay(10);
		chip->ctrl_port += inb(chip->port + 0x05);

		chip->res_ctrl_port = request_region(chip->ctrl_port, 8,
						     "ES18xx - CTRL");
		if (chip->res_ctrl_port == NULL) {
			/* message fixed: was "unable go grab" */
			snd_printk(KERN_ERR PFX "unable to grab port 0x%lx\n", chip->ctrl_port);
			return -EBUSY;
		}

		return 0;
	}

	/* If has Hardware volume */
	if (snd_es18xx_mixer_writable(chip, 0x64, 0x04)) {
		/* If has Audio2 */
		if (snd_es18xx_mixer_writable(chip, 0x70, 0x7f)) {
			/* If has volume count */
			if (snd_es18xx_mixer_writable(chip, 0x64, 0x20))
				chip->version = 0x1887;
			else
				chip->version = 0x1888;
		} else
			chip->version = 0x1788;
	}
	else
		chip->version = 0x1688;
	return 0;
}
/*
 * Identify the chip, derive its capability flags from the version
 * number, and run the hardware initialization.
 *
 * When playback and capture would share one DMA channel, the
 * second-PCM and full-duplex capabilities are stripped.
 *
 * Returns 0 on success or a negative error code from identification
 * or initialization.
 */
static int __devinit snd_es18xx_probe(struct snd_es18xx *chip,
					unsigned long mpu_port,
					unsigned long fm_port)
{
	if (snd_es18xx_identify(chip) < 0) {
		snd_printk(KERN_ERR PFX "[0x%lx] ESS chip not found\n", chip->port);
                return -ENODEV;
	}

	/* capability flags per chip generation */
	switch (chip->version) {
	case 0x1868:
		chip->caps = ES18XX_DUPLEX_MONO | ES18XX_DUPLEX_SAME | ES18XX_CONTROL;
		break;
	case 0x1869:
		chip->caps = ES18XX_PCM2 | ES18XX_SPATIALIZER | ES18XX_RECMIX | ES18XX_NEW_RATE | ES18XX_AUXB | ES18XX_MONO | ES18XX_MUTEREC | ES18XX_CONTROL | ES18XX_HWV;
		break;
	case 0x1878:
		chip->caps = ES18XX_DUPLEX_MONO | ES18XX_DUPLEX_SAME | ES18XX_I2S | ES18XX_CONTROL;
		break;
	case 0x1879:
		chip->caps = ES18XX_PCM2 | ES18XX_SPATIALIZER | ES18XX_RECMIX | ES18XX_NEW_RATE | ES18XX_AUXB | ES18XX_I2S | ES18XX_CONTROL | ES18XX_HWV;
		break;
	case 0x1887:
	case 0x1888:
		chip->caps = ES18XX_PCM2 | ES18XX_RECMIX | ES18XX_AUXB | ES18XX_DUPLEX_SAME;
		break;
	default:
		snd_printk(KERN_ERR "[0x%lx] unsupported chip ES%x\n",
			   chip->port, chip->version);
		return -ENODEV;
        }

        snd_printd("[0x%lx] ESS%x chip found\n", chip->port, chip->version);

	if (chip->dma1 == chip->dma2)
		chip->caps &= ~(ES18XX_PCM2 | ES18XX_DUPLEX_SAME);

        return snd_es18xx_initialize(chip, mpu_port, fm_port);
}
/* PCM playback callback table. */
static struct snd_pcm_ops snd_es18xx_playback_ops = {
	.open =		snd_es18xx_playback_open,
	.close =	snd_es18xx_playback_close,
	.ioctl =	snd_pcm_lib_ioctl,
	.hw_params =	snd_es18xx_playback_hw_params,
	.hw_free =	snd_es18xx_pcm_hw_free,
	.prepare =	snd_es18xx_playback_prepare,
	.trigger =	snd_es18xx_playback_trigger,
	.pointer =	snd_es18xx_playback_pointer,
};
/* PCM capture callback table. */
static struct snd_pcm_ops snd_es18xx_capture_ops = {
	.open =		snd_es18xx_capture_open,
	.close =	snd_es18xx_capture_close,
	.ioctl =	snd_pcm_lib_ioctl,
	.hw_params =	snd_es18xx_capture_hw_params,
	.hw_free =	snd_es18xx_pcm_hw_free,
	.prepare =	snd_es18xx_capture_prepare,
	.trigger =	snd_es18xx_capture_trigger,
	.pointer =	snd_es18xx_capture_pointer,
};
/*
 * Create the PCM device: two playback substreams when the chip has a
 * second DAC (ES18XX_PCM2), one otherwise, plus one capture substream.
 * Also sets duplex info flags from the caps and preallocates ISA DMA
 * buffers (128 KB max when a 16-bit DMA channel >3 is used).
 *
 * On success stores the PCM in chip->pcm and optionally in *rpcm;
 * returns 0 or a negative error from snd_pcm_new().
 */
static int __devinit snd_es18xx_pcm(struct snd_card *card, int device,
				    struct snd_pcm **rpcm)
{
	struct snd_es18xx *chip = card->private_data;
        struct snd_pcm *pcm;
	char str[16];
	int err;

	if (rpcm)
		*rpcm = NULL;
	sprintf(str, "ES%x", chip->version);
	if (chip->caps & ES18XX_PCM2)
		err = snd_pcm_new(card, str, device, 2, 1, &pcm);
	else
		err = snd_pcm_new(card, str, device, 1, 1, &pcm);
        if (err < 0)
                return err;

	snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK, &snd_es18xx_playback_ops);
	snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE, &snd_es18xx_capture_ops);

	/* global setup */
        pcm->private_data = chip;
        pcm->info_flags = 0;
	if (chip->caps & ES18XX_DUPLEX_SAME)
		pcm->info_flags |= SNDRV_PCM_INFO_JOINT_DUPLEX;
	if (! (chip->caps & ES18XX_PCM2))
		pcm->info_flags |= SNDRV_PCM_INFO_HALF_DUPLEX;
	sprintf(pcm->name, "ESS AudioDrive ES%x", chip->version);
        chip->pcm = pcm;

	snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV,
					      snd_dma_isa_data(),
					      64*1024,
					      chip->dma1 > 3 || chip->dma2 > 3 ? 128*1024 : 64*1024);

        if (rpcm)
        	*rpcm = pcm;
	return 0;
}
/* Power Management support functions */
#ifdef CONFIG_PM
/*
 * Suspend: stop the PCM streams and power the chip down via the
 * ES18XX_PM register.  The saved value (with FM and SUS bits set) is
 * written, then rewritten with SUS cleared; chip->pm_reg keeps the
 * last written value for resume.
 * NOTE(review): the double write appears to latch the suspend state —
 * confirm against the ES18xx PM register documentation.
 */
static int snd_es18xx_suspend(struct snd_card *card, pm_message_t state)
{
	struct snd_es18xx *chip = card->private_data;

	snd_power_change_state(card, SNDRV_CTL_POWER_D3hot);

	snd_pcm_suspend_all(chip->pcm);

	/* power down */
	chip->pm_reg = (unsigned char)snd_es18xx_read(chip, ES18XX_PM);
	chip->pm_reg |= (ES18XX_PM_FM | ES18XX_PM_SUS);
	snd_es18xx_write(chip, ES18XX_PM, chip->pm_reg);
	snd_es18xx_write(chip, ES18XX_PM, chip->pm_reg ^= ES18XX_PM_SUS);

	return 0;
}
/*
 * Resume: restore the PM register saved at suspend (with the FM power
 * bit toggled back) and report D0 power state to the control layer.
 */
static int snd_es18xx_resume(struct snd_card *card)
{
	struct snd_es18xx *chip = card->private_data;

	/* restore PM register, we won't wake till (not 0x07) i/o activity though */
	snd_es18xx_write(chip, ES18XX_PM, chip->pm_reg ^= ES18XX_PM_FM);

	snd_power_change_state(card, SNDRV_CTL_POWER_D0);
	return 0;
}
#endif /* CONFIG_PM */
/*
 * Release every resource acquired by snd_es18xx_new_device(): I/O
 * regions, IRQ, and the DMA channels.  Safe to call on a partially
 * initialized chip: irq/dma fields are -1 until actually requested,
 * and dma2 is freed only when it differs from dma1.
 */
static int snd_es18xx_free(struct snd_card *card)
{
	struct snd_es18xx *chip = card->private_data;

	release_and_free_resource(chip->res_port);
	release_and_free_resource(chip->res_ctrl_port);
	release_and_free_resource(chip->res_mpu_port);
	if (chip->irq >= 0)
		free_irq(chip->irq, (void *) card);
	if (chip->dma1 >= 0) {
		disable_dma(chip->dma1);
		free_dma(chip->dma1);
	}
	if (chip->dma2 >= 0 && chip->dma1 != chip->dma2) {
		disable_dma(chip->dma2);
		free_dma(chip->dma2);
	}
	return 0;
}
/* snd_device destructor hook: forwards to snd_es18xx_free() for the card. */
static int snd_es18xx_dev_free(struct snd_device *device)
{
	return snd_es18xx_free(device->card);
}
/*
 * Acquire all hardware resources for one card (I/O region, IRQ, DMA
 * channels), probe the chip, and register it as a lowlevel snd_device.
 *
 * On any failure the already-acquired resources are released through
 * snd_es18xx_free(); irq/dma fields start at -1 so the cleanup knows
 * what was actually requested.  When dma2 == dma1 only one DMA channel
 * is requested.
 *
 * Returns 0 on success or a negative error code.
 * (Error messages fixed: "grap" -> "grab".)
 */
static int __devinit snd_es18xx_new_device(struct snd_card *card,
					   unsigned long port,
					   unsigned long mpu_port,
					   unsigned long fm_port,
					   int irq, int dma1, int dma2)
{
	struct snd_es18xx *chip = card->private_data;
	static struct snd_device_ops ops = {
		.dev_free =	snd_es18xx_dev_free,
        };
	int err;

	spin_lock_init(&chip->reg_lock);
 	spin_lock_init(&chip->mixer_lock);
        chip->port = port;
        chip->irq = -1;
        chip->dma1 = -1;
        chip->dma2 = -1;
        chip->audio2_vol = 0x00;
	chip->active = 0;

	chip->res_port = request_region(port, 16, "ES18xx");
	if (chip->res_port == NULL) {
		snd_es18xx_free(card);
		snd_printk(KERN_ERR PFX "unable to grab ports 0x%lx-0x%lx\n", port, port + 16 - 1);
		return -EBUSY;
	}

	if (request_irq(irq, snd_es18xx_interrupt, IRQF_DISABLED, "ES18xx",
			(void *) card)) {
		snd_es18xx_free(card);
		snd_printk(KERN_ERR PFX "unable to grab IRQ %d\n", irq);
		return -EBUSY;
	}
	chip->irq = irq;

	if (request_dma(dma1, "ES18xx DMA 1")) {
		snd_es18xx_free(card);
		snd_printk(KERN_ERR PFX "unable to grab DMA1 %d\n", dma1);
		return -EBUSY;
	}
	chip->dma1 = dma1;

	if (dma2 != dma1 && request_dma(dma2, "ES18xx DMA 2")) {
		snd_es18xx_free(card);
		snd_printk(KERN_ERR PFX "unable to grab DMA2 %d\n", dma2);
		return -EBUSY;
	}
	chip->dma2 = dma2;

	if (snd_es18xx_probe(chip, mpu_port, fm_port) < 0) {
		snd_es18xx_free(card);
		return -ENODEV;
	}
	err = snd_device_new(card, SNDRV_DEV_LOWLEVEL, chip, &ops);
	if (err < 0) {
		snd_es18xx_free(card);
		return err;
	}
        return 0;
}
/*
 * Build the mixer: base controls for every chip, then capability- and
 * version-specific controls (PCM volumes, record mix, mic boost,
 * spatializer, hardware volume, per-chip extras).
 *
 * For ES18XX_HWV chips, pointers to the master volume/switch and the
 * hardware volume/switch kcontrols are cached in the chip struct (they
 * are updated from the hardware-volume interrupt path) and get the
 * snd_es18xx_hwv_free destructor so the cached pointers are cleared
 * when the controls go away.
 *
 * Returns 0 on success or the first snd_ctl_add() error.
 */
static int __devinit snd_es18xx_mixer(struct snd_card *card)
{
	struct snd_es18xx *chip = card->private_data;
	int err;
	unsigned int idx;

	strcpy(card->mixername, chip->pcm->name);

	for (idx = 0; idx < ARRAY_SIZE(snd_es18xx_base_controls); idx++) {
		struct snd_kcontrol *kctl;
		kctl = snd_ctl_new1(&snd_es18xx_base_controls[idx], chip);
		if (chip->caps & ES18XX_HWV) {
			/* cache master volume/switch for the HW volume IRQ */
			switch (idx) {
			case 0:
				chip->master_volume = kctl;
				kctl->private_free = snd_es18xx_hwv_free;
				break;
			case 1:
				chip->master_switch = kctl;
				kctl->private_free = snd_es18xx_hwv_free;
				break;
			}
		}
		if ((err = snd_ctl_add(card, kctl)) < 0)
			return err;
	}
	if (chip->caps & ES18XX_PCM2) {
		for (idx = 0; idx < ARRAY_SIZE(snd_es18xx_pcm2_controls); idx++) {
			if ((err = snd_ctl_add(card, snd_ctl_new1(&snd_es18xx_pcm2_controls[idx], chip))) < 0)
				return err;
		}
	} else {
		for (idx = 0; idx < ARRAY_SIZE(snd_es18xx_pcm1_controls); idx++) {
			if ((err = snd_ctl_add(card, snd_ctl_new1(&snd_es18xx_pcm1_controls[idx], chip))) < 0)
				return err;
		}
	}

	if (chip->caps & ES18XX_RECMIX) {
		for (idx = 0; idx < ARRAY_SIZE(snd_es18xx_recmix_controls); idx++) {
			if ((err = snd_ctl_add(card, snd_ctl_new1(&snd_es18xx_recmix_controls[idx], chip))) < 0)
				return err;
		}
	}
	/* mic preamp register differs: 0x7d on ES1869/1879, 0xa9 elsewhere */
	switch (chip->version) {
	default:
		if ((err = snd_ctl_add(card, snd_ctl_new1(&snd_es18xx_micpre1_control, chip))) < 0)
			return err;
		break;
	case 0x1869:
	case 0x1879:
		if ((err = snd_ctl_add(card, snd_ctl_new1(&snd_es18xx_micpre2_control, chip))) < 0)
			return err;
		break;
	}
	if (chip->caps & ES18XX_SPATIALIZER) {
		for (idx = 0; idx < ARRAY_SIZE(snd_es18xx_spatializer_controls); idx++) {
			if ((err = snd_ctl_add(card, snd_ctl_new1(&snd_es18xx_spatializer_controls[idx], chip))) < 0)
				return err;
		}
	}
	if (chip->caps & ES18XX_HWV) {
		for (idx = 0; idx < ARRAY_SIZE(snd_es18xx_hw_volume_controls); idx++) {
			struct snd_kcontrol *kctl;
			kctl = snd_ctl_new1(&snd_es18xx_hw_volume_controls[idx], chip);
			if (idx == 0)
				chip->hw_volume = kctl;
			else
				chip->hw_switch = kctl;
			kctl->private_free = snd_es18xx_hwv_free;
			if ((err = snd_ctl_add(card, kctl)) < 0)
				return err;
			
		}
	}

	/* finish initializing other chipset specific controls
	 */
	if (chip->version != 0x1868) {
		err = snd_ctl_add(card, snd_ctl_new1(&snd_es18xx_opt_speaker,
						     chip));
		if (err < 0)
			return err;
	}
	if (chip->version == 0x1869) {
		for (idx = 0; idx < ARRAY_SIZE(snd_es18xx_opt_1869); idx++) {
			err = snd_ctl_add(card,
					  snd_ctl_new1(&snd_es18xx_opt_1869[idx],
						       chip));
			if (err < 0)
				return err;
		}
	} else if (chip->version == 0x1878) {
		err = snd_ctl_add(card, snd_ctl_new1(&snd_es18xx_opt_1878,
						     chip));
		if (err < 0)
			return err;
	} else if (chip->version == 0x1879) {
		for (idx = 0; idx < ARRAY_SIZE(snd_es18xx_opt_1879); idx++) {
			err = snd_ctl_add(card,
					  snd_ctl_new1(&snd_es18xx_opt_1879[idx],
						       chip));
			if (err < 0)
				return err;
		}
	}
	return 0;
}
/* Card level */

/* Module metadata and the list of device names this driver supports. */
MODULE_AUTHOR("Christian Fischbach <fishbach@pool.informatik.rwth-aachen.de>, Abramo Bagnara <abramo@alsa-project.org>");
MODULE_DESCRIPTION("ESS ES18xx AudioDrive");
MODULE_LICENSE("GPL");
MODULE_SUPPORTED_DEVICE("{{ESS,ES1868 PnP AudioDrive},"
		"{ESS,ES1869 PnP AudioDrive},"
		"{ESS,ES1878 PnP AudioDrive},"
		"{ESS,ES1879 PnP AudioDrive},"
		"{ESS,ES1887 PnP AudioDrive},"
		"{ESS,ES1888 PnP AudioDrive},"
		"{ESS,ES1887 AudioDrive},"
		"{ESS,ES1888 AudioDrive}}");
/* Module parameters, one slot per possible card (SNDRV_CARDS). */
static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX;	/* Index 0-MAX */
static char *id[SNDRV_CARDS] = SNDRV_DEFAULT_STR;	/* ID for this card */
static int enable[SNDRV_CARDS] = SNDRV_DEFAULT_ENABLE_ISAPNP; /* Enable this card */
#ifdef CONFIG_PNP
static int isapnp[SNDRV_CARDS] = SNDRV_DEFAULT_ENABLE_ISAPNP;
#endif
static long port[SNDRV_CARDS] = SNDRV_DEFAULT_PORT;	/* 0x220,0x240,0x260,0x280 */
#ifndef CONFIG_PNP
/* without PnP the MPU port defaults to disabled (-1) */
static long mpu_port[SNDRV_CARDS] = {[0 ... (SNDRV_CARDS - 1)] = -1};
#else
static long mpu_port[SNDRV_CARDS] = SNDRV_DEFAULT_PORT;
#endif
static long fm_port[SNDRV_CARDS] = SNDRV_DEFAULT_PORT;
static int irq[SNDRV_CARDS] = SNDRV_DEFAULT_IRQ;	/* 5,7,9,10 */
static int dma1[SNDRV_CARDS] = SNDRV_DEFAULT_DMA;	/* 0,1,3 */
static int dma2[SNDRV_CARDS] = SNDRV_DEFAULT_DMA;	/* 0,1,3 */

module_param_array(index, int, NULL, 0444);
MODULE_PARM_DESC(index, "Index value for ES18xx soundcard.");
module_param_array(id, charp, NULL, 0444);
MODULE_PARM_DESC(id, "ID string for ES18xx soundcard.");
module_param_array(enable, bool, NULL, 0444);
MODULE_PARM_DESC(enable, "Enable ES18xx soundcard.");
#ifdef CONFIG_PNP
module_param_array(isapnp, bool, NULL, 0444);
MODULE_PARM_DESC(isapnp, "PnP detection for specified soundcard.");
#endif
module_param_array(port, long, NULL, 0444);
MODULE_PARM_DESC(port, "Port # for ES18xx driver.");
module_param_array(mpu_port, long, NULL, 0444);
MODULE_PARM_DESC(mpu_port, "MPU-401 port # for ES18xx driver.");
module_param_array(fm_port, long, NULL, 0444);
MODULE_PARM_DESC(fm_port, "FM port # for ES18xx driver.");
module_param_array(irq, int, NULL, 0444);
MODULE_PARM_DESC(irq, "IRQ # for ES18xx driver.");
module_param_array(dma1, int, NULL, 0444);
MODULE_PARM_DESC(dma1, "DMA 1 # for ES18xx driver.");
module_param_array(dma2, int, NULL, 0444);
MODULE_PARM_DESC(dma2, "DMA 2 # for ES18xx driver.");
#ifdef CONFIG_PNP
/* Flags recording which of the three driver interfaces registered OK. */
static int isa_registered;
static int pnp_registered;
static int pnpc_registered;

/* PnP BIOS IDs handled by the single-device (non-card) PnP interface. */
static struct pnp_device_id snd_audiodrive_pnpbiosids[] = {
	{ .id = "ESS1869" },
	{ .id = "ESS1879" },
	{ .id = "" }		/* end */
};

MODULE_DEVICE_TABLE(pnp, snd_audiodrive_pnpbiosids);
/* PnP main device initialization */
/*
 * Activate the PnP device and copy its resources (ports, DMAs, IRQ)
 * into the per-card module-parameter slots used by the common probe.
 * For ISAPnP devices the vendor-defined card-level registers are also
 * patched so the hardware-volume, MPU and second IRQs match pnp_irq(0).
 * Returns 0 on success, -EBUSY when activation fails.
 */
static int __devinit snd_audiodrive_pnp_init_main(int dev, struct pnp_dev *pdev)
{
	if (pnp_activate_dev(pdev) < 0) {
		snd_printk(KERN_ERR PFX "PnP configure failure (out of resources?)\n");
		return -EBUSY;
	}
	/* ok. hack using Vendor-Defined Card-Level registers */
	/* skip csn and logdev initialization - already done in isapnp_configure */
	if (pnp_device_is_isapnp(pdev)) {
		isapnp_cfg_begin(isapnp_card_number(pdev), isapnp_csn_number(pdev));
		isapnp_write_byte(0x27, pnp_irq(pdev, 0));	/* Hardware Volume IRQ Number */
		if (mpu_port[dev] != SNDRV_AUTO_PORT)
			isapnp_write_byte(0x28, pnp_irq(pdev, 0)); /* MPU-401 IRQ Number */
		isapnp_write_byte(0x72, pnp_irq(pdev, 0));	/* second IRQ */
		isapnp_cfg_end();
	}
	port[dev] = pnp_port_start(pdev, 0);
	fm_port[dev] = pnp_port_start(pdev, 1);
	mpu_port[dev] = pnp_port_start(pdev, 2);
	dma1[dev] = pnp_dma(pdev, 0);
	dma2[dev] = pnp_dma(pdev, 1);
	irq[dev] = pnp_irq(pdev, 0);
	snd_printdd("PnP ES18xx: port=0x%lx, fm port=0x%lx, mpu port=0x%lx\n", port[dev], fm_port[dev], mpu_port[dev]);
	snd_printdd("PnP ES18xx: dma1=%i, dma2=%i, irq=%i\n", dma1[dev], dma2[dev], irq[dev]);
	return 0;
}
/*
 * PnP-BIOS attach helper: remember the pnp_dev on the chip and run the
 * common main-device initialization.
 */
static int __devinit snd_audiodrive_pnp(int dev, struct snd_es18xx *chip,
					struct pnp_dev *pdev)
{
	chip->dev = pdev;
	if (snd_audiodrive_pnp_init_main(dev, chip->dev) < 0)
		return -EBUSY;
	return 0;
}
/*
 * ISAPnP card IDs: each entry pairs the audio logical device (devs[0])
 * with the control logical device (devs[1]).
 */
static struct pnp_card_device_id snd_audiodrive_pnpids[] = {
	/* ESS 1868 (integrated on Compaq dual P-Pro motherboard and Genius 18PnP 3D) */
	{ .id = "ESS1868", .devs = { { "ESS1868" }, { "ESS0000" } } },
	/* ESS 1868 (integrated on Maxisound Cards) */
	{ .id = "ESS1868", .devs = { { "ESS8601" }, { "ESS8600" } } },
	/* ESS 1868 (integrated on Maxisound Cards) */
	{ .id = "ESS1868", .devs = { { "ESS8611" }, { "ESS8610" } } },
	/* ESS ES1869 Plug and Play AudioDrive */
	{ .id = "ESS0003", .devs = { { "ESS1869" }, { "ESS0006" } } },
	/* ESS 1869 */
	{ .id = "ESS1869", .devs = { { "ESS1869" }, { "ESS0006" } } },
	/* ESS 1878 */
	{ .id = "ESS1878", .devs = { { "ESS1878" }, { "ESS0004" } } },
	/* ESS 1879 */
	{ .id = "ESS1879", .devs = { { "ESS1879" }, { "ESS0009" } } },
	/* --- */
	{ .id = "" } /* end */
};

MODULE_DEVICE_TABLE(pnp_card, snd_audiodrive_pnpids);
/*
 * ISAPnP card attach helper: claim the audio (devs[0]) and control
 * (devs[1]) logical devices, activate the control device, then run the
 * common main-device initialization on the audio device.
 * Returns 0, -EBUSY when a logical device is missing or init fails,
 * or -EAGAIN when the control device cannot be activated.
 */
static int __devinit snd_audiodrive_pnpc(int dev, struct snd_es18xx *chip,
					struct pnp_card_link *card,
					const struct pnp_card_device_id *id)
{
	chip->dev = pnp_request_card_device(card, id->devs[0].id, NULL);
	if (chip->dev == NULL)
		return -EBUSY;

	chip->devc = pnp_request_card_device(card, id->devs[1].id, NULL);
	if (chip->devc == NULL)
		return -EBUSY;

	/* Control port initialization */
	if (pnp_activate_dev(chip->devc) < 0) {
		snd_printk(KERN_ERR PFX "PnP control configure failure (out of resources?)\n");
		return -EAGAIN;
	}
	snd_printdd("pnp: port=0x%llx\n",
			(unsigned long long)pnp_port_start(chip->devc, 0));
	if (snd_audiodrive_pnp_init_main(dev, chip->dev) < 0)
		return -EBUSY;

	return 0;
}
#endif /* CONFIG_PNP */
/* True when card slot 'dev' was configured for ISAPnP detection;
 * always false in non-PnP builds. */
#ifdef CONFIG_PNP
#define is_isapnp_selected(dev)		isapnp[dev]
#else
#define is_isapnp_selected(dev)		0
#endif
/*
 * Allocate a snd_card for slot 'dev' with room for a snd_es18xx in
 * card->private_data.
 */
static int snd_es18xx_card_new(int dev, struct snd_card **cardp)
{
	return snd_card_create(index[dev], id[dev], THIS_MODULE,
			       sizeof(struct snd_es18xx), cardp);
}
/*
 * Common probe for ISA and PnP paths: create the chip device, fill in
 * the card name strings, then build PCM, mixer, optional OPL3 (FM) and
 * optional MPU-401 components and register the card.
 * Returns 0 on success or the first component error; a missing OPL3 is
 * only warned about, not fatal.
 */
static int __devinit snd_audiodrive_probe(struct snd_card *card, int dev)
{
	struct snd_es18xx *chip = card->private_data;
	struct snd_opl3 *opl3;
	int err;

	err = snd_es18xx_new_device(card,
				    port[dev], mpu_port[dev], fm_port[dev],
				    irq[dev], dma1[dev], dma2[dev]);
	if (err < 0)
		return err;

	sprintf(card->driver, "ES%x", chip->version);
	
	sprintf(card->shortname, "ESS AudioDrive ES%x", chip->version);
	if (dma1[dev] != dma2[dev])
		sprintf(card->longname, "%s at 0x%lx, irq %d, dma1 %d, dma2 %d",
			card->shortname,
			chip->port,
			irq[dev], dma1[dev], dma2[dev]);
	else
		sprintf(card->longname, "%s at 0x%lx, irq %d, dma %d",
			card->shortname,
			chip->port,
			irq[dev], dma1[dev]);

	err = snd_es18xx_pcm(card, 0, NULL);
	if (err < 0)
		return err;

	err = snd_es18xx_mixer(card);
	if (err < 0)
		return err;

	if (fm_port[dev] > 0 && fm_port[dev] != SNDRV_AUTO_PORT) {
		if (snd_opl3_create(card, fm_port[dev], fm_port[dev] + 2,
				    OPL3_HW_OPL3, 0, &opl3) < 0) {
			snd_printk(KERN_WARNING PFX
				   "opl3 not detected at 0x%lx\n",
				   fm_port[dev]);
		} else {
			err = snd_opl3_hwdep_new(opl3, 0, 1, NULL);
			if (err < 0)
				return err;
		}
	}

	if (mpu_port[dev] > 0 && mpu_port[dev] != SNDRV_AUTO_PORT) {
		err = snd_mpu401_uart_new(card, 0, MPU401_HW_ES18XX,
					  mpu_port[dev], 0,
					  irq[dev], 0, &chip->rmidi);
		if (err < 0)
			return err;
	}

	return snd_card_register(card);
}
/* Accept slot 'dev' for legacy ISA probing only when it is enabled and
 * not reserved for ISAPnP detection. */
static int __devinit snd_es18xx_isa_match(struct device *pdev, unsigned int dev)
{
	if (!enable[dev])
		return 0;
	return !is_isapnp_selected(dev);
}
/*
 * Try one ISA probe at the currently selected port[dev]: create the
 * card, run the common probe, and stash the card in the device's
 * drvdata.  Frees the card on any failure.
 */
static int __devinit snd_es18xx_isa_probe1(int dev, struct device *devptr)
{
	struct snd_card *card;
	int err;

	err = snd_es18xx_card_new(dev, &card);
	if (err < 0)
		return err;
	snd_card_set_dev(card, devptr);
	if ((err = snd_audiodrive_probe(card, dev)) < 0) {
		snd_card_free(card);
		return err;
	}
	dev_set_drvdata(devptr, card);
	return 0;
}
/*
 * Legacy ISA probe entry: resolve SNDRV_AUTO_* IRQ/DMA requests from
 * the free-resource lists, then probe either the user-specified port or
 * each of the well-known ports (0x220/0x240/0x260/0x280) in turn until
 * one succeeds.  Returns the last probe error when all ports fail.
 */
static int __devinit snd_es18xx_isa_probe(struct device *pdev, unsigned int dev)
{
	int err;
	static int possible_irqs[] = {5, 9, 10, 7, 11, 12, -1};
	static int possible_dmas[] = {1, 0, 3, 5, -1};

	if (irq[dev] == SNDRV_AUTO_IRQ) {
		if ((irq[dev] = snd_legacy_find_free_irq(possible_irqs)) < 0) {
			snd_printk(KERN_ERR PFX "unable to find a free IRQ\n");
			return -EBUSY;
		}
	}
	if (dma1[dev] == SNDRV_AUTO_DMA) {
		if ((dma1[dev] = snd_legacy_find_free_dma(possible_dmas)) < 0) {
			snd_printk(KERN_ERR PFX "unable to find a free DMA1\n");
			return -EBUSY;
		}
	}
	if (dma2[dev] == SNDRV_AUTO_DMA) {
		if ((dma2[dev] = snd_legacy_find_free_dma(possible_dmas)) < 0) {
			snd_printk(KERN_ERR PFX "unable to find a free DMA2\n");
			return -EBUSY;
		}
	}

	if (port[dev] != SNDRV_AUTO_PORT) {
		return snd_es18xx_isa_probe1(dev, pdev);
	} else {
		static unsigned long possible_ports[] = {0x220, 0x240, 0x260, 0x280};
		int i;
		for (i = 0; i < ARRAY_SIZE(possible_ports); i++) {
			port[dev] = possible_ports[i];
			err = snd_es18xx_isa_probe1(dev, pdev);
			if (! err)
				return 0;
		}
		return err;
	}
}
/* ISA detach: free the card stored in drvdata and clear the pointer. */
static int __devexit snd_es18xx_isa_remove(struct device *devptr,
					   unsigned int dev)
{
	snd_card_free(dev_get_drvdata(devptr));
	dev_set_drvdata(devptr, NULL);
	return 0;
}
#ifdef CONFIG_PM
/* ISA PM wrappers: forward to the common card suspend/resume. */
static int snd_es18xx_isa_suspend(struct device *dev, unsigned int n,
				  pm_message_t state)
{
	return snd_es18xx_suspend(dev_get_drvdata(dev), state);
}

static int snd_es18xx_isa_resume(struct device *dev, unsigned int n)
{
	return snd_es18xx_resume(dev_get_drvdata(dev));
}
#endif
#define DEV_NAME "es18xx"

/* Legacy (non-PnP) ISA driver interface. */
static struct isa_driver snd_es18xx_isa_driver = {
	.match		= snd_es18xx_isa_match,
	.probe		= snd_es18xx_isa_probe,
	.remove		= __devexit_p(snd_es18xx_isa_remove),
#ifdef CONFIG_PM
	.suspend	= snd_es18xx_isa_suspend,
	.resume		= snd_es18xx_isa_resume,
#endif
	.driver		= {
		.name	= DEV_NAME
	},
};
#ifdef CONFIG_PNP
/*
 * PnP-BIOS probe: ISAPnP devices are rejected here (they go through the
 * card driver instead).  The static 'dev' cursor walks the module
 * parameter slots so each detected device claims the next enabled,
 * isapnp-selected slot.  Frees the card on any failure.
 */
static int __devinit snd_audiodrive_pnp_detect(struct pnp_dev *pdev,
					    const struct pnp_device_id *id)
{
	static int dev;
	int err;
	struct snd_card *card;

	if (pnp_device_is_isapnp(pdev))
		return -ENOENT;	/* we have another procedure - card */
	for (; dev < SNDRV_CARDS; dev++) {
		if (enable[dev] && isapnp[dev])
			break;
	}
	if (dev >= SNDRV_CARDS)
		return -ENODEV;

	err = snd_es18xx_card_new(dev, &card);
	if (err < 0)
		return err;
	if ((err = snd_audiodrive_pnp(dev, card->private_data, pdev)) < 0) {
		snd_card_free(card);
		return err;
	}
	snd_card_set_dev(card, &pdev->dev);
	if ((err = snd_audiodrive_probe(card, dev)) < 0) {
		snd_card_free(card);
		return err;
	}
	pnp_set_drvdata(pdev, card);
	dev++;
	return 0;
}
/* PnP-BIOS detach: free the card stored in drvdata and clear it. */
static void __devexit snd_audiodrive_pnp_remove(struct pnp_dev * pdev)
{
	snd_card_free(pnp_get_drvdata(pdev));
	pnp_set_drvdata(pdev, NULL);
}
#ifdef CONFIG_PM
/* PnP-BIOS PM wrappers: forward to the common card suspend/resume. */
static int snd_audiodrive_pnp_suspend(struct pnp_dev *pdev, pm_message_t state)
{
	return snd_es18xx_suspend(pnp_get_drvdata(pdev), state);
}

static int snd_audiodrive_pnp_resume(struct pnp_dev *pdev)
{
	return snd_es18xx_resume(pnp_get_drvdata(pdev));
}
#endif
/* PnP-BIOS (single logical device) driver interface. */
static struct pnp_driver es18xx_pnp_driver = {
	.name = "es18xx-pnpbios",
	.id_table = snd_audiodrive_pnpbiosids,
	.probe = snd_audiodrive_pnp_detect,
	.remove = __devexit_p(snd_audiodrive_pnp_remove),
#ifdef CONFIG_PM
	.suspend = snd_audiodrive_pnp_suspend,
	.resume = snd_audiodrive_pnp_resume,
#endif
};
/*
 * ISAPnP card probe: like the PnP-BIOS probe, but the audio and control
 * logical devices are claimed through snd_audiodrive_pnpc().  The
 * static 'dev' cursor claims the next enabled, isapnp-selected slot.
 */
static int __devinit snd_audiodrive_pnpc_detect(struct pnp_card_link *pcard,
					       const struct pnp_card_device_id *pid)
{
	static int dev;
	struct snd_card *card;
	int res;

	for ( ; dev < SNDRV_CARDS; dev++) {
		if (enable[dev] && isapnp[dev])
			break;
	}
	if (dev >= SNDRV_CARDS)
		return -ENODEV;

	res = snd_es18xx_card_new(dev, &card);
	if (res < 0)
		return res;

	if ((res = snd_audiodrive_pnpc(dev, card->private_data, pcard, pid)) < 0) {
		snd_card_free(card);
		return res;
	}
	snd_card_set_dev(card, &pcard->card->dev);
	if ((res = snd_audiodrive_probe(card, dev)) < 0) {
		snd_card_free(card);
		return res;
	}

	pnp_set_card_drvdata(pcard, card);
	dev++;
	return 0;
}
/* ISAPnP card remove: free the card and clear the stale drvdata pointer. */
static void __devexit snd_audiodrive_pnpc_remove(struct pnp_card_link *pcard)
{
	struct snd_card *card = pnp_get_card_drvdata(pcard);

	snd_card_free(card);
	pnp_set_card_drvdata(pcard, NULL);
}
#ifdef CONFIG_PM
/* ISAPnP card PM suspend: forward to the chip-level suspend routine. */
static int snd_audiodrive_pnpc_suspend(struct pnp_card_link *pcard, pm_message_t state)
{
	struct snd_card *card = pnp_get_card_drvdata(pcard);

	return snd_es18xx_suspend(card, state);
}
/* ISAPnP card PM resume: forward to the chip-level resume routine. */
static int snd_audiodrive_pnpc_resume(struct pnp_card_link *pcard)
{
	struct snd_card *card = pnp_get_card_drvdata(pcard);

	return snd_es18xx_resume(card);
}
#endif
/* Driver registration for ISAPnP cards; RES_DISABLE lets the driver
 * reconfigure resources itself before activation. */
static struct pnp_card_driver es18xx_pnpc_driver = {
	.flags = PNP_DRIVER_RES_DISABLE,
	.name = "es18xx",
	.id_table = snd_audiodrive_pnpids,
	.probe = snd_audiodrive_pnpc_detect,
	.remove = __devexit_p(snd_audiodrive_pnpc_remove),
#ifdef CONFIG_PM
	.suspend	= snd_audiodrive_pnpc_suspend,
	.resume		= snd_audiodrive_pnpc_resume,
#endif
};
#endif /* CONFIG_PNP */
/*
 * Module init: try to register the ISA, PnP BIOS and ISAPnP drivers.
 * With CONFIG_PNP, a failure of an individual registration is tolerated
 * as long as at least one bus driver registered: err holds the result
 * of the last registration attempt and is forced to 0 when the ISA or
 * PnP BIOS driver succeeded (a successful card-driver registration
 * already leaves err == 0).
 */
static int __init alsa_card_es18xx_init(void)
{
	int err;

	err = isa_register_driver(&snd_es18xx_isa_driver, SNDRV_CARDS);
#ifdef CONFIG_PNP
	if (!err)
		isa_registered = 1;

	err = pnp_register_driver(&es18xx_pnp_driver);
	if (!err)
		pnp_registered = 1;

	err = pnp_register_card_driver(&es18xx_pnpc_driver);
	if (!err)
		pnpc_registered = 1;

	if (isa_registered || pnp_registered)
		err = 0;
#endif
	return err;
}
/*
 * Module exit: unregister whichever drivers were registered at init.
 * NOTE: the final isa_unregister_driver() call is the body of the
 * #ifdef'd "if (isa_registered)" just above it; without CONFIG_PNP it
 * runs unconditionally (registration was unconditional in that case).
 */
static void __exit alsa_card_es18xx_exit(void)
{
#ifdef CONFIG_PNP
	if (pnpc_registered)
		pnp_unregister_card_driver(&es18xx_pnpc_driver);
	if (pnp_registered)
		pnp_unregister_driver(&es18xx_pnp_driver);
	if (isa_registered)
#endif
		isa_unregister_driver(&snd_es18xx_isa_driver);
}

module_init(alsa_card_es18xx_init)
module_exit(alsa_card_es18xx_exit)
| gpl-2.0 |
MassStash/android_kernel_htc_msm8974 | arch/c6x/kernel/traps.c | 4742 | 14961 | /*
* Port on Texas Instruments TMS320C6x architecture
*
* Copyright (C) 2004, 2006, 2009, 2010, 2011 Texas Instruments Incorporated
* Author: Aurelien Jacquiot (aurelien.jacquiot@jaluna.com)
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/module.h>
#include <linux/ptrace.h>
#include <linux/kallsyms.h>
#include <linux/bug.h>
#include <asm/soc.h>
#include <asm/special_insns.h>
#include <asm/traps.h>
/* Optional NMI hook; platform code may install its own handler here. */
int (*c6x_nmi_handler)(struct pt_regs *regs);

/* Acknowledge any latched exception flags left over from boot, then
 * enable exception processing. */
void __init trap_init(void)
{
	ack_exception(EXCEPT_TYPE_NXF);
	ack_exception(EXCEPT_TYPE_EXC);
	ack_exception(EXCEPT_TYPE_IXF);
	ack_exception(EXCEPT_TYPE_SXF);
	enable_exception();
}
/* Print the complete C6x register file (A0-A31, B0-B31) plus PC/SP/CSR
 * from the saved pt_regs.  B14 is the data-page pointer (dp) and B15 the
 * stack pointer (sp) in this ABI, hence the aliased fields. */
void show_regs(struct pt_regs *regs)
{
	pr_err("\n");
	pr_err("PC: %08lx SP: %08lx\n", regs->pc, regs->sp);
	pr_err("Status: %08lx ORIG_A4: %08lx\n", regs->csr, regs->orig_a4);
	pr_err("A0: %08lx B0: %08lx\n", regs->a0, regs->b0);
	pr_err("A1: %08lx B1: %08lx\n", regs->a1, regs->b1);
	pr_err("A2: %08lx B2: %08lx\n", regs->a2, regs->b2);
	pr_err("A3: %08lx B3: %08lx\n", regs->a3, regs->b3);
	pr_err("A4: %08lx B4: %08lx\n", regs->a4, regs->b4);
	pr_err("A5: %08lx B5: %08lx\n", regs->a5, regs->b5);
	pr_err("A6: %08lx B6: %08lx\n", regs->a6, regs->b6);
	pr_err("A7: %08lx B7: %08lx\n", regs->a7, regs->b7);
	pr_err("A8: %08lx B8: %08lx\n", regs->a8, regs->b8);
	pr_err("A9: %08lx B9: %08lx\n", regs->a9, regs->b9);
	pr_err("A10: %08lx B10: %08lx\n", regs->a10, regs->b10);
	pr_err("A11: %08lx B11: %08lx\n", regs->a11, regs->b11);
	pr_err("A12: %08lx B12: %08lx\n", regs->a12, regs->b12);
	pr_err("A13: %08lx B13: %08lx\n", regs->a13, regs->b13);
	pr_err("A14: %08lx B14: %08lx\n", regs->a14, regs->dp);
	pr_err("A15: %08lx B15: %08lx\n", regs->a15, regs->sp);
	pr_err("A16: %08lx B16: %08lx\n", regs->a16, regs->b16);
	pr_err("A17: %08lx B17: %08lx\n", regs->a17, regs->b17);
	pr_err("A18: %08lx B18: %08lx\n", regs->a18, regs->b18);
	pr_err("A19: %08lx B19: %08lx\n", regs->a19, regs->b19);
	pr_err("A20: %08lx B20: %08lx\n", regs->a20, regs->b20);
	pr_err("A21: %08lx B21: %08lx\n", regs->a21, regs->b21);
	pr_err("A22: %08lx B22: %08lx\n", regs->a22, regs->b22);
	pr_err("A23: %08lx B23: %08lx\n", regs->a23, regs->b23);
	pr_err("A24: %08lx B24: %08lx\n", regs->a24, regs->b24);
	pr_err("A25: %08lx B25: %08lx\n", regs->a25, regs->b25);
	pr_err("A26: %08lx B26: %08lx\n", regs->a26, regs->b26);
	pr_err("A27: %08lx B27: %08lx\n", regs->a27, regs->b27);
	pr_err("A28: %08lx B28: %08lx\n", regs->a28, regs->b28);
	pr_err("A29: %08lx B29: %08lx\n", regs->a29, regs->b29);
	pr_err("A30: %08lx B30: %08lx\n", regs->a30, regs->b30);
	pr_err("A31: %08lx B31: %08lx\n", regs->a31, regs->b31);
}
/* Dump the current task's kernel stack.  The address of the local
 * variable serves as the current stack pointer to start from. */
void dump_stack(void)
{
	unsigned long stack;

	show_stack(current, &stack);
}
EXPORT_SYMBOL(dump_stack);
/* Fatal error report: print the cause, register state and stack, then
 * spin forever — this function never returns. */
void die(char *str, struct pt_regs *fp, int nr)
{
	console_verbose();
	pr_err("%s: %08x\n", str, nr);
	show_regs(fp);

	pr_err("Process %s (pid: %d, stackpage=%08lx)\n",
	       current->comm, current->pid, (PAGE_SIZE +
					     (unsigned long) current));

	dump_stack();
	while (1)
		;
}
/* Die only when the trapping context was kernel mode; user-mode faults
 * are handled by signal delivery in the caller. */
static void die_if_kernel(char *str, struct pt_regs *fp, int nr)
{
	if (!user_mode(fp))
		die(str, fp, nr);
}
/* Internal exceptions: one entry per IERR bit (0-7), plus index 8 for
 * software exceptions and index 9 for unknown causes. */
static struct exception_info iexcept_table[10] = {
	{ "Oops - instruction fetch", SIGBUS, BUS_ADRERR },
	{ "Oops - fetch packet", SIGBUS, BUS_ADRERR },
	{ "Oops - execute packet", SIGILL, ILL_ILLOPC },
	{ "Oops - undefined instruction", SIGILL, ILL_ILLOPC },
	{ "Oops - resource conflict", SIGILL, ILL_ILLOPC },
	{ "Oops - resource access", SIGILL, ILL_PRVREG },
	{ "Oops - privilege", SIGILL, ILL_PRVOPC },
	{ "Oops - loops buffer", SIGILL, ILL_ILLOPC },
	{ "Oops - software exception", SIGILL, ILL_ILLTRP },
	{ "Oops - unknown exception", SIGILL, ILL_ILLOPC }
};
/* External exceptions: indexed by the external event number reported by
 * the SoC.  Events 0-118 are all generic bus errors; collapse the 119
 * identical entries with a GCC range designated initializer (a standard
 * kernel idiom) instead of repeating the literal 119 times.  Events
 * 119-127 are specific memory-protection and EMC faults. */
static struct exception_info eexcept_table[128] = {
	[0 ... 118] = { "Oops - external exception", SIGBUS, BUS_ADRERR },
	[119] = { "Oops - CPU memory protection fault", SIGSEGV, SEGV_ACCERR },
	[120] = { "Oops - CPU memory protection fault in L1P", SIGSEGV, SEGV_ACCERR },
	[121] = { "Oops - DMA memory protection fault in L1P", SIGSEGV, SEGV_ACCERR },
	[122] = { "Oops - CPU memory protection fault in L1D", SIGSEGV, SEGV_ACCERR },
	[123] = { "Oops - DMA memory protection fault in L1D", SIGSEGV, SEGV_ACCERR },
	[124] = { "Oops - CPU memory protection fault in L2", SIGSEGV, SEGV_ACCERR },
	[125] = { "Oops - DMA memory protection fault in L2", SIGSEGV, SEGV_ACCERR },
	[126] = { "Oops - EMC CPU memory protection fault", SIGSEGV, SEGV_ACCERR },
	[127] = { "Oops - EMC bus error", SIGBUS, BUS_ADRERR }
};
/* Report an exception and deliver the corresponding signal to the
 * current task; kernel-mode faults die() first.  Breakpoint traps
 * (TRAP_BRKPT) are silent since they are expected by debuggers. */
static void do_trap(struct exception_info *except_info, struct pt_regs *regs)
{
	unsigned long addr = instruction_pointer(regs);
	siginfo_t info;

	if (except_info->code != TRAP_BRKPT)
		pr_err("TRAP: %s PC[0x%lx] signo[%d] code[%d]\n",
		       except_info->kernel_str, regs->pc,
		       except_info->signo, except_info->code);

	die_if_kernel(except_info->kernel_str, regs, addr);

	info.si_signo = except_info->signo;
	info.si_errno = 0;
	info.si_code  = except_info->code;
	info.si_addr  = (void __user *)addr;

	force_sig_info(except_info->signo, &info, current);
}
/*
 * Process an internal exception (non maskable)
 *
 * Walks the IEXCEPT report register bit by bit, clearing each handled
 * cause via set_iexcept() before dispatching, so a nested exception
 * cannot re-report an already-consumed bit.  A BKPT opcode at the
 * faulting PC is rerouted as a SIGTRAP breakpoint instead of SIGILL.
 */
static int process_iexcept(struct pt_regs *regs)
{
	unsigned int iexcept_report = get_iexcept();
	unsigned int iexcept_num;

	ack_exception(EXCEPT_TYPE_IXF);

	pr_err("IEXCEPT: PC[0x%lx]\n", regs->pc);

	while (iexcept_report) {
		iexcept_num = __ffs(iexcept_report);
		iexcept_report &= ~(1 << iexcept_num);
		set_iexcept(iexcept_report);
		if (*(unsigned int *)regs->pc == BKPT_OPCODE) {
			/* This is a breakpoint */
			struct exception_info bkpt_exception = {
				"Oops - undefined instruction",
				SIGTRAP, TRAP_BRKPT
			};
			do_trap(&bkpt_exception, regs);
			/* drop all remaining instruction-exception bits */
			iexcept_report &= ~(0xFF);
			set_iexcept(iexcept_report);
			continue;
		}
		do_trap(&iexcept_table[iexcept_num], regs);
	}
	return 0;
}
/*
 * Process an external exception (maskable)
 *
 * Drains every pending external event from the SoC exception controller
 * and dispatches each through the lookup table, then acknowledges the
 * EXC condition.
 */
static void process_eexcept(struct pt_regs *regs)
{
	int evt;

	pr_err("EEXCEPT: PC[0x%lx]\n", regs->pc);

	while ((evt = soc_get_exception()) >= 0)
		do_trap(&eexcept_table[evt], regs);

	ack_exception(EXCEPT_TYPE_EXC);
}
/*
 * Main exception processing
 *
 * Loops while the exception-type register reports pending conditions,
 * handling the highest-priority type each pass.  Returns non-zero only
 * when an internal exception asks the caller to abort normal return.
 */
asmlinkage int process_exception(struct pt_regs *regs)
{
	unsigned int type;
	unsigned int type_num;
	unsigned int ie_num = 9; /* default is unknown exception */

	while ((type = get_except_type()) != 0) {
		type_num = fls(type) - 1;

		switch (type_num) {
		case EXCEPT_TYPE_NXF:
			ack_exception(EXCEPT_TYPE_NXF);
			if (c6x_nmi_handler)
				(c6x_nmi_handler)(regs);
			else
				pr_alert("NMI interrupt!\n");
			break;

		case EXCEPT_TYPE_IXF:
			if (process_iexcept(regs))
				return 1;
			break;

		case EXCEPT_TYPE_EXC:
			process_eexcept(regs);
			break;

		case EXCEPT_TYPE_SXF:
			/* software exception: select table entry 8 and
			 * fall through to the common ack/trap path */
			ie_num = 8;
			/* fallthrough */
		default:
			ack_exception(type_num);
			do_trap(&iexcept_table[ie_num], regs);
			break;
		}
	}
	return 0;
}
/* Maximum number of stack words show_stack() will dump. */
static int kstack_depth_to_print = 48;

/* Scan the given stack range for values that look like kernel return
 * addresses and print them as a (heuristic) call trace. */
static void show_trace(unsigned long *stack, unsigned long *endstack)
{
	unsigned long addr;
	int i;

	pr_debug("Call trace:");
	i = 0;
	while (stack + 1 <= endstack) {
		addr = *stack++;
		/*
		 * If the address is either in the text segment of the
		 * kernel, or in the region which contains vmalloc'ed
		 * memory, it *may* be the address of a calling
		 * routine; if so, print it so that someone tracing
		 * down the cause of the crash will be able to figure
		 * out the call path that was taken.
		 */
		if (__kernel_text_address(addr)) {
#ifndef CONFIG_KALLSYMS
			if (i % 5 == 0)
				pr_debug("\n	    ");
#endif
			pr_debug(" [<%08lx>]", addr);
			print_symbol(" %s\n", addr);
			i++;
		}
	}
	pr_debug("\n");
}
/* Hex-dump up to kstack_depth_to_print words of a task's kernel stack
 * and follow with a call trace.  With a NULL stack argument, uses the
 * saved KSP of a sleeping task or the caller's own stack for current. */
void show_stack(struct task_struct *task, unsigned long *stack)
{
	unsigned long *p, *endstack;
	int i;

	if (!stack) {
		if (task && task != current)
			/* We know this is a kernel stack,
			   so this is the start/end */
			stack = (unsigned long *)thread_saved_ksp(task);
		else
			stack = (unsigned long *)&stack;
	}
	/* round up to the top of the THREAD_SIZE-aligned stack area */
	endstack = (unsigned long *)(((unsigned long)stack + THREAD_SIZE - 1)
				     & -THREAD_SIZE);

	pr_debug("Stack from %08lx:", (unsigned long)stack);
	for (i = 0, p = stack; i < kstack_depth_to_print; i++) {
		if (p + 1 > endstack)
			break;
		if (i % 8 == 0)
			pr_cont("\n	    ");
		pr_cont(" %08lx", *p++);
	}
	pr_cont("\n");
	show_trace(stack, endstack);
}
/* BUG() support: a trap address is only trusted when it lies inside
 * kernel text. */
int is_valid_bugaddr(unsigned long addr)
{
	if (__kernel_text_address(addr))
		return 1;
	return 0;
}
| gpl-2.0 |
noobnl/msm-jf-kernel | drivers/gpu/drm/nouveau/nouveau_mem.c | 5254 | 30880 | /*
* Copyright (C) The Weather Channel, Inc. 2002. All Rights Reserved.
* Copyright 2005 Stephane Marchesin
*
* The Weather Channel (TM) funded Tungsten Graphics to develop the
* initial release of the Radeon 8500 driver under the XFree86 license.
* This notice must be preserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
* Authors:
* Ben Skeggs <bskeggs@redhat.com>
* Roy Spliet <r.spliet@student.tudelft.nl>
*/
#include "drmP.h"
#include "drm.h"
#include "drm_sarea.h"
#include "nouveau_drv.h"
#include "nouveau_pm.h"
#include "nouveau_mm.h"
#include "nouveau_vm.h"
/*
 * NV10-NV40 tiling helpers
 */

/* Reprogram one hardware tile region.  The GPU must be quiesced while
 * the registers change: FIFO reassign/cache-pull are disabled (under
 * the context-switch lock with IRQs off) before the PFB and per-engine
 * tile registers are rewritten, then re-enabled. */
static void
nv10_mem_update_tile_region(struct drm_device *dev,
			    struct nouveau_tile_reg *tile, uint32_t addr,
			    uint32_t size, uint32_t pitch, uint32_t flags)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
	struct nouveau_fb_engine *pfb = &dev_priv->engine.fb;
	int i = tile - dev_priv->tile.reg, j;	/* region index from pointer */
	unsigned long save;

	nouveau_fence_unref(&tile->fence);

	if (tile->pitch)
		pfb->free_tile_region(dev, i);

	if (pitch)
		pfb->init_tile_region(dev, i, addr, size, pitch, flags);

	spin_lock_irqsave(&dev_priv->context_switch_lock, save);
	pfifo->reassign(dev, false);
	pfifo->cache_pull(dev, false);

	nouveau_wait_for_idle(dev);

	pfb->set_tile_region(dev, i);
	for (j = 0; j < NVOBJ_ENGINE_NR; j++) {
		if (dev_priv->eng[j] && dev_priv->eng[j]->set_tile_region)
			dev_priv->eng[j]->set_tile_region(dev, i);
	}

	pfifo->cache_pull(dev, true);
	pfifo->reassign(dev, true);
	spin_unlock_irqrestore(&dev_priv->context_switch_lock, save);
}
static struct nouveau_tile_reg *
nv10_mem_get_tile_region(struct drm_device *dev, int i)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_tile_reg *tile = &dev_priv->tile.reg[i];
spin_lock(&dev_priv->tile.lock);
if (!tile->used &&
(!tile->fence || nouveau_fence_signalled(tile->fence)))
tile->used = true;
else
tile = NULL;
spin_unlock(&dev_priv->tile.lock);
return tile;
}
/* Release a tile region.  If a fence is supplied the region stays
 * pending until the fence signals; a NULL tile is a no-op. */
void
nv10_mem_put_tile_region(struct drm_device *dev, struct nouveau_tile_reg *tile,
			 struct nouveau_fence *fence)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;

	if (!tile)
		return;

	spin_lock(&dev_priv->tile.lock);
	if (fence) {
		/* Mark it as pending. */
		tile->fence = fence;
		nouveau_fence_ref(fence);
	}
	tile->used = false;
	spin_unlock(&dev_priv->tile.lock);
}
/* Allocate and program a tile region covering [addr, addr+size) with
 * the given pitch/flags.  Scans every region: the first free one is
 * kept as the candidate, other free-but-programmed regions get their
 * stale configuration cleared, and everything else is released again.
 * Returns the programmed region, or NULL when none was free. */
struct nouveau_tile_reg *
nv10_mem_set_tiling(struct drm_device *dev, uint32_t addr, uint32_t size,
		    uint32_t pitch, uint32_t flags)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_fb_engine *pfb = &dev_priv->engine.fb;
	struct nouveau_tile_reg *tile, *found = NULL;
	int i;

	for (i = 0; i < pfb->num_tiles; i++) {
		tile = nv10_mem_get_tile_region(dev, i);

		if (pitch && !found) {
			found = tile;
			continue;

		} else if (tile && tile->pitch) {
			/* Kill an unused tile region. */
			nv10_mem_update_tile_region(dev, tile, 0, 0, 0, 0);
		}

		nv10_mem_put_tile_region(dev, tile, NULL);
	}

	if (found)
		nv10_mem_update_tile_region(dev, found, addr, size,
					    pitch, flags);
	return found;
}
/*
 * Cleanup everything
 */

/* Tear down VRAM management: release the TTM device/globals and remove
 * the write-combining MTRR over BAR1 if one was added at init. */
void
nouveau_mem_vram_fini(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;

	ttm_bo_device_release(&dev_priv->ttm.bdev);

	nouveau_ttm_global_release(dev_priv);

	if (dev_priv->fb_mtrr >= 0) {
		drm_mtrr_del(dev_priv->fb_mtrr,
			     pci_resource_start(dev->pdev, 1),
			     pci_resource_len(dev->pdev, 1), DRM_MTRR_WC);
		dev_priv->fb_mtrr = -1;
	}
}
/* Tear down GART: free the SGDMA backing and, on AGP systems, unbind
 * and free all AGP memory entries before releasing the bridge. */
void
nouveau_mem_gart_fini(struct drm_device *dev)
{
	nouveau_sgdma_takedown(dev);

	if (drm_core_has_AGP(dev) && dev->agp) {
		struct drm_agp_mem *entry, *tempe;

		/* Remove AGP resources, but leave dev->agp
		   intact until drv_cleanup is called. */
		list_for_each_entry_safe(entry, tempe, &dev->agp->memory, head) {
			if (entry->bound)
				drm_unbind_agp(entry->memory);
			drm_free_agp(entry->memory, entry->pages);
			kfree(entry);
		}
		INIT_LIST_HEAD(&dev->agp->memory);

		if (dev->agp->acquired)
			drm_agp_release(dev);

		dev->agp->acquired = 0;
		dev->agp->enabled = 0;
	}
}
/* A tile_flags value is valid here only when no layout bits are set. */
bool
nouveau_mem_flags_valid(struct drm_device *dev, u32 tile_flags)
{
	return (tile_flags & NOUVEAU_GEM_TILE_LAYOUT_MASK) == 0;
}
#if __OS_HAS_AGP
/* Filter/override the AGP mode word: mask fast writes on nv18 (known
 * to lock the card) and apply a user-requested rate from the
 * nouveau_agpmode module option. */
static unsigned long
get_agp_mode(struct drm_device *dev, unsigned long mode)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;

	/*
	 * FW seems to be broken on nv18, it makes the card lock up
	 * randomly.
	 */
	if (dev_priv->chipset == 0x18)
		mode &= ~PCI_AGP_COMMAND_FW;

	/*
	 * AGP mode set in the command line.
	 */
	if (nouveau_agpmode > 0) {
		bool agpv3 = mode & 0x8;
		/* AGP 3.0 encodes the rate in multiples of 4x */
		int rate = agpv3 ? nouveau_agpmode / 4 : nouveau_agpmode;

		mode = (mode & ~0x7) | (rate & 0x7);
	}

	return mode;
}
#endif
/* Reset the card's AGP controller: disable fast writes first (so we
 * cannot lock ourselves out of the bridge), clear bus mastering and
 * the AGP enable register, power-cycle PGRAPH if it was on, then
 * restore the PCI config — the net effect is an AGP reset.  Compiles
 * to a no-op without AGP support. */
int
nouveau_mem_reset_agp(struct drm_device *dev)
{
#if __OS_HAS_AGP
	uint32_t saved_pci_nv_1, pmc_enable;
	int ret;

	/* First of all, disable fast writes, otherwise if it's
	 * already enabled in the AGP bridge and we disable the card's
	 * AGP controller we might be locking ourselves out of it. */
	if ((nv_rd32(dev, NV04_PBUS_PCI_NV_19) |
	     dev->agp->mode) & PCI_AGP_COMMAND_FW) {
		struct drm_agp_info info;
		struct drm_agp_mode mode;

		ret = drm_agp_info(dev, &info);
		if (ret)
			return ret;

		mode.mode = get_agp_mode(dev, info.mode) & ~PCI_AGP_COMMAND_FW;
		ret = drm_agp_enable(dev, mode);
		if (ret)
			return ret;
	}

	saved_pci_nv_1 = nv_rd32(dev, NV04_PBUS_PCI_NV_1);

	/* clear busmaster bit */
	nv_wr32(dev, NV04_PBUS_PCI_NV_1, saved_pci_nv_1 & ~0x4);
	/* disable AGP */
	nv_wr32(dev, NV04_PBUS_PCI_NV_19, 0);

	/* power cycle pgraph, if enabled */
	pmc_enable = nv_rd32(dev, NV03_PMC_ENABLE);
	if (pmc_enable & NV_PMC_ENABLE_PGRAPH) {
		nv_wr32(dev, NV03_PMC_ENABLE,
			pmc_enable & ~NV_PMC_ENABLE_PGRAPH);
		nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) |
			NV_PMC_ENABLE_PGRAPH);
	}

	/* and restore (gives effect of resetting AGP) */
	nv_wr32(dev, NV04_PBUS_PCI_NV_1, saved_pci_nv_1);
#endif
	return 0;
}
/* Bring up the AGP aperture: acquire the bridge if needed, reset the
 * card's AGP block, enable the (filtered) mode reported by the bridge
 * and record the aperture base/size in gart_info.  No-op without AGP
 * support. */
int
nouveau_mem_init_agp(struct drm_device *dev)
{
#if __OS_HAS_AGP
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct drm_agp_info info;
	struct drm_agp_mode mode;
	int ret;

	if (!dev->agp->acquired) {
		ret = drm_agp_acquire(dev);
		if (ret) {
			NV_ERROR(dev, "Unable to acquire AGP: %d\n", ret);
			return ret;
		}
	}

	nouveau_mem_reset_agp(dev);

	ret = drm_agp_info(dev, &info);
	if (ret) {
		NV_ERROR(dev, "Unable to get AGP info: %d\n", ret);
		return ret;
	}

	/* see agp.h for the AGPSTAT_* modes available */
	mode.mode = get_agp_mode(dev, info.mode);

	ret = drm_agp_enable(dev, mode);
	if (ret) {
		NV_ERROR(dev, "Unable to enable AGP: %d\n", ret);
		return ret;
	}

	dev_priv->gart_info.type	= NOUVEAU_GART_AGP;
	dev_priv->gart_info.aper_base	= info.aperture_base;
	dev_priv->gart_info.aper_size	= info.aperture_size;
#endif
	return 0;
}
/* Human-readable names for the NV_MEM_TYPE_* detection results; the
 * UNKNOWN entry doubles as the table terminator. */
static const struct vram_types {
	int value;
	const char *name;
} vram_type_map[] = {
	{ NV_MEM_TYPE_STOLEN , "stolen system memory" },
	{ NV_MEM_TYPE_SGRAM  , "SGRAM" },
	{ NV_MEM_TYPE_SDRAM  , "SDRAM" },
	{ NV_MEM_TYPE_DDR1   , "DDR1" },
	{ NV_MEM_TYPE_DDR2   , "DDR2" },
	{ NV_MEM_TYPE_DDR3   , "DDR3" },
	{ NV_MEM_TYPE_GDDR2  , "GDDR2" },
	{ NV_MEM_TYPE_GDDR3  , "GDDR3" },
	{ NV_MEM_TYPE_GDDR4  , "GDDR4" },
	{ NV_MEM_TYPE_GDDR5  , "GDDR5" },
	{ NV_MEM_TYPE_UNKNOWN, "unknown type" }
};
/* Initialise VRAM management: set the DMA mask, bring up TTM, resolve
 * the VRAM type name, set up the mappable/available sizes, create the
 * VRAM memory manager, reserve VGA memory on pre-NV50, and add a WC
 * MTRR over BAR1. */
int
nouveau_mem_vram_init(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct ttm_bo_device *bdev = &dev_priv->ttm.bdev;
	const struct vram_types *vram_type;
	int ret, dma_bits;

	/* 40-bit DMA on NV50+; the 39-bit PCIe path below is disabled
	 * by the literal "0 &&". */
	dma_bits = 32;
	if (dev_priv->card_type >= NV_50) {
		if (pci_dma_supported(dev->pdev, DMA_BIT_MASK(40)))
			dma_bits = 40;
	} else
	if (0 && pci_is_pcie(dev->pdev) &&
	    dev_priv->chipset  > 0x40 &&
	    dev_priv->chipset != 0x45) {
		if (pci_dma_supported(dev->pdev, DMA_BIT_MASK(39)))
			dma_bits = 39;
	}

	ret = pci_set_dma_mask(dev->pdev, DMA_BIT_MASK(dma_bits));
	if (ret)
		return ret;
	ret = pci_set_consistent_dma_mask(dev->pdev, DMA_BIT_MASK(dma_bits));
	if (ret) {
		/* Reset to default value. */
		pci_set_consistent_dma_mask(dev->pdev, DMA_BIT_MASK(32));
	}


	ret = nouveau_ttm_global_init(dev_priv);
	if (ret)
		return ret;

	ret = ttm_bo_device_init(&dev_priv->ttm.bdev,
				 dev_priv->ttm.bo_global_ref.ref.object,
				 &nouveau_bo_driver, DRM_FILE_PAGE_OFFSET,
				 dma_bits <= 32 ? true : false);
	if (ret) {
		NV_ERROR(dev, "Error initialising bo driver: %d\n", ret);
		return ret;
	}

	/* Resolve the printable name for the detected (or overridden)
	 * VRAM type.  NOTE(review): with a nouveau_vram_type override,
	 * dev_priv->vram_type is rewritten with each NON-matching entry
	 * before the loop breaks on the match — confirm this ordering is
	 * intended and not an off-by-one in the override handling. */
	vram_type = vram_type_map;
	while (vram_type->value != NV_MEM_TYPE_UNKNOWN) {
		if (nouveau_vram_type) {
			if (!strcasecmp(nouveau_vram_type, vram_type->name))
				break;
			dev_priv->vram_type = vram_type->value;
		} else {
			if (vram_type->value == dev_priv->vram_type)
				break;
		}
		vram_type++;
	}

	NV_INFO(dev, "Detected %dMiB VRAM (%s)\n",
		(int)(dev_priv->vram_size >> 20), vram_type->name);
	if (dev_priv->vram_sys_base) {
		NV_INFO(dev, "Stolen system memory at: 0x%010llx\n",
			dev_priv->vram_sys_base);
	}

	dev_priv->fb_available_size = dev_priv->vram_size;
	dev_priv->fb_mappable_pages = dev_priv->fb_available_size;
	if (dev_priv->fb_mappable_pages > pci_resource_len(dev->pdev, 1))
		dev_priv->fb_mappable_pages = pci_resource_len(dev->pdev, 1);
	dev_priv->fb_mappable_pages >>= PAGE_SHIFT;

	dev_priv->fb_available_size -= dev_priv->ramin_rsvd_vram;
	dev_priv->fb_aper_free = dev_priv->fb_available_size;

	/* mappable vram */
	ret = ttm_bo_init_mm(bdev, TTM_PL_VRAM,
			     dev_priv->fb_available_size >> PAGE_SHIFT);
	if (ret) {
		NV_ERROR(dev, "Failed VRAM mm init: %d\n", ret);
		return ret;
	}

	if (dev_priv->card_type < NV_50) {
		/* reserve a buffer covering legacy VGA memory; failure is
		 * non-fatal and only warned about */
		ret = nouveau_bo_new(dev, 256*1024, 0, TTM_PL_FLAG_VRAM,
				     0, 0, &dev_priv->vga_ram);
		if (ret == 0)
			ret = nouveau_bo_pin(dev_priv->vga_ram,
					     TTM_PL_FLAG_VRAM);

		if (ret) {
			NV_WARN(dev, "failed to reserve VGA memory\n");
			nouveau_bo_ref(NULL, &dev_priv->vga_ram);
		}
	}

	dev_priv->fb_mtrr = drm_mtrr_add(pci_resource_start(dev->pdev, 1),
					 pci_resource_len(dev->pdev, 1),
					 DRM_MTRR_WC);
	return 0;
}
/* Initialise the GART aperture: prefer AGP when the device, bridge and
 * module option allow it (never on powerpc/ia64), otherwise fall back
 * to SGDMA, then hand the aperture to the TTM_PL_TT memory manager. */
int
nouveau_mem_gart_init(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct ttm_bo_device *bdev = &dev_priv->ttm.bdev;
	int ret;

	dev_priv->gart_info.type = NOUVEAU_GART_NONE;

#if !defined(__powerpc__) && !defined(__ia64__)
	if (drm_pci_device_is_agp(dev) && dev->agp && nouveau_agpmode) {
		ret = nouveau_mem_init_agp(dev);
		if (ret)
			NV_ERROR(dev, "Error initialising AGP: %d\n", ret);
	}
#endif

	if (dev_priv->gart_info.type == NOUVEAU_GART_NONE) {
		ret = nouveau_sgdma_init(dev);
		if (ret) {
			NV_ERROR(dev, "Error initialising PCI(E): %d\n", ret);
			return ret;
		}
	}

	NV_INFO(dev, "%d MiB GART (aperture)\n",
		(int)(dev_priv->gart_info.aper_size >> 20));
	dev_priv->gart_info.aper_free = dev_priv->gart_info.aper_size;

	ret = ttm_bo_init_mm(bdev, TTM_PL_TT,
			     dev_priv->gart_info.aper_size >> PAGE_SHIFT);
	if (ret) {
		NV_ERROR(dev, "Failed TT mm init: %d\n", ret);
		return ret;
	}

	return 0;
}
/* Pack NV40-family memory timing registers (0x220 group) from a BIOS
 * timing-table entry; always succeeds. */
static int
nv40_mem_timing_calc(struct drm_device *dev, u32 freq,
		     struct nouveau_pm_tbl_entry *e, u8 len,
		     struct nouveau_pm_memtiming *boot,
		     struct nouveau_pm_memtiming *t)
{
	t->reg[0] = (e->tRP << 24 | e->tRAS << 16 | e->tRFC << 8 | e->tRC);

	/* XXX: I don't trust the -1's and +1's... they must come
	 *	from somewhere! */
	t->reg[1] = (e->tWR + 2 + (t->tCWL - 1)) << 24 |
		    1 << 16 |
		    (e->tWTR + 2 + (t->tCWL - 1)) << 8 |
		    (e->tCL + 2 - (t->tCWL - 1));

	t->reg[2] = 0x20200000 |
		    ((t->tCWL - 1) << 24 |
		     e->tRRD << 16 |
		     e->tRCDWR << 8 |
		     e->tRCDRD);

	NV_DEBUG(dev, "Entry %d: 220: %08x %08x %08x\n", t->id,
		 t->reg[0], t->reg[1], t->reg[2]);
	return 0;
}
/* Pack NV50-family memory timing registers (0x220-0x240) from a BIOS
 * timing-table entry.  The switch reads optional trailing fields by
 * entry length and falls through intentionally so every shorter field
 * is also picked up.  The P-table version selects between two register
 * layouts.  Returns -EINVAL when the BIOS 'P' table is missing. */
static int
nv50_mem_timing_calc(struct drm_device *dev, u32 freq,
		     struct nouveau_pm_tbl_entry *e, u8 len,
		     struct nouveau_pm_memtiming *boot,
		     struct nouveau_pm_memtiming *t)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct bit_entry P;
	uint8_t unk18 = 1, unk20 = 0, unk21 = 0, tmp7_3;

	if (bit_table(dev, 'P', &P))
		return -EINVAL;

	switch (min(len, (u8) 22)) {
	case 22:
		unk21 = e->tUNK_21;
		/* fallthrough */
	case 21:
		unk20 = e->tUNK_20;
		/* fallthrough */
	case 20:
		if (e->tCWL > 0)
			t->tCWL = e->tCWL;
		/* fallthrough */
	case 19:
		unk18 = e->tUNK_18;
		break;
	}

	t->reg[0] = (e->tRP << 24 | e->tRAS << 16 | e->tRFC << 8 | e->tRC);

	t->reg[1] = (e->tWR + 2 + (t->tCWL - 1)) << 24 |
		    max(unk18, (u8) 1) << 16 |
		    (e->tWTR + 2 + (t->tCWL - 1)) << 8;

	t->reg[2] = ((t->tCWL - 1) << 24 |
		     e->tRRD << 16 |
		     e->tRCDWR << 8 |
		     e->tRCDRD);

	t->reg[4] = e->tUNK_13 << 8 | e->tUNK_13;

	t->reg[5] = (e->tRFC << 24 | max(e->tRCDRD, e->tRCDWR) << 16 | e->tRP);

	t->reg[8] = boot->reg[8] & 0xffffff00;

	if (P.version == 1) {
		t->reg[1] |= (e->tCL + 2 - (t->tCWL - 1));

		t->reg[3] = (0x14 + e->tCL) << 24 |
			    0x16 << 16 |
			    (e->tCL - 1) << 8 |
			    (e->tCL - 1);

		t->reg[4] |= boot->reg[4] & 0xffff0000;

		t->reg[6] = (0x33 - t->tCWL) << 16 |
			    t->tCWL << 8 |
			    (0x2e + e->tCL - t->tCWL);

		t->reg[7] = 0x4000202 | (e->tCL - 1) << 16;

		/* XXX: P.version == 1 only has DDR2 and GDDR3? */
		if (dev_priv->vram_type == NV_MEM_TYPE_DDR2) {
			t->reg[5] |= (e->tCL + 3) << 8;
			t->reg[6] |= (t->tCWL - 2) << 8;
			t->reg[8] |= (e->tCL - 4);
		} else {
			t->reg[5] |= (e->tCL + 2) << 8;
			t->reg[6] |= t->tCWL << 8;
			t->reg[8] |= (e->tCL - 2);
		}
	} else {
		t->reg[1] |= (5 + e->tCL - (t->tCWL));

		/* XXX: 0xb? 0x30? */
		t->reg[3] = (0x30 + e->tCL) << 24 |
			    (boot->reg[3] & 0x00ff0000)|
			    (0xb + e->tCL) << 8 |
			    (e->tCL - 1);

		t->reg[4] |= (unk20 << 24 | unk21 << 16);

		/* XXX: +6? */
		t->reg[5] |= (t->tCWL + 6) << 8;

		t->reg[6] = (0x5a + e->tCL) << 16 |
			    (6 - e->tCL + t->tCWL) << 8 |
			    (0x50 + e->tCL - t->tCWL);

		tmp7_3 = (boot->reg[7] & 0xff000000) >> 24;
		t->reg[7] = (tmp7_3 << 24) |
			    ((tmp7_3 - 6 + e->tCL) << 16) |
			    0x202;
	}

	NV_DEBUG(dev, "Entry %d: 220: %08x %08x %08x %08x\n", t->id,
		 t->reg[0], t->reg[1], t->reg[2], t->reg[3]);
	NV_DEBUG(dev, "         230: %08x %08x %08x %08x\n",
		 t->reg[4], t->reg[5], t->reg[6], t->reg[7]);
	NV_DEBUG(dev, "         240: %08x\n", t->reg[8]);
	return 0;
}
/* Pack NVC0 (Fermi) memory timing registers (0x290-0x2a0) from a BIOS
 * timing-table entry, preserving the boot values of the fields this
 * table does not control; always succeeds. */
static int
nvc0_mem_timing_calc(struct drm_device *dev, u32 freq,
		     struct nouveau_pm_tbl_entry *e, u8 len,
		     struct nouveau_pm_memtiming *boot,
		     struct nouveau_pm_memtiming *t)
{
	if (e->tCWL > 0)
		t->tCWL = e->tCWL;

	t->reg[0] = (e->tRP << 24 | (e->tRAS & 0x7f) << 17 |
		     e->tRFC << 8 | e->tRC);

	t->reg[1] = (boot->reg[1] & 0xff000000) |
		    (e->tRCDWR & 0x0f) << 20 |
		    (e->tRCDRD & 0x0f) << 14 |
		    (t->tCWL << 7) |
		    (e->tCL & 0x0f);

	t->reg[2] = (boot->reg[2] & 0xff0000ff) |
		    e->tWR << 16 | e->tWTR << 8;

	t->reg[3] = (e->tUNK_20 & 0x1f) << 9 |
		    (e->tUNK_21 & 0xf) << 5 |
		    (e->tUNK_13 & 0x1f);

	t->reg[4] = (boot->reg[4] & 0xfff00fff) |
		    (e->tRRD&0x1f) << 15;

	NV_DEBUG(dev, "Entry %d: 290: %08x %08x %08x %08x\n", t->id,
		 t->reg[0], t->reg[1], t->reg[2], t->reg[3]);
	NV_DEBUG(dev, "         2a0: %08x\n", t->reg[4]);
	return 0;
}
/**
 * MR generation methods
 */

/* Build the DDR2 mode registers MR0/MR1 from a timing entry, keeping
 * the boot values of bits this code does not control.  ODT comes from
 * the entry when it is long enough (>= 15 bytes), else from boot.
 * Returns -ERANGE when tCL or tWR exceed the DDR2 field limits. */
static int
nouveau_mem_ddr2_mr(struct drm_device *dev, u32 freq,
		    struct nouveau_pm_tbl_entry *e, u8 len,
		    struct nouveau_pm_memtiming *boot,
		    struct nouveau_pm_memtiming *t)
{
	t->drive_strength = 0;
	if (len < 15) {
		t->odt = boot->odt;
	} else {
		t->odt = e->RAM_FT1 & 0x07;
	}

	if (e->tCL >= NV_MEM_CL_DDR2_MAX) {
		NV_WARN(dev, "(%u) Invalid tCL: %u", t->id, e->tCL);
		return -ERANGE;
	}

	if (e->tWR >= NV_MEM_WR_DDR2_MAX) {
		NV_WARN(dev, "(%u) Invalid tWR: %u", t->id, e->tWR);
		return -ERANGE;
	}

	if (t->odt > 3) {
		NV_WARN(dev, "(%u) Invalid odt value, assuming disabled: %x",
			t->id, t->odt);
		t->odt = 0;
	}

	t->mr[0] = (boot->mr[0] & 0x100f) |
		   (e->tCL) << 4 |
		   (e->tWR - 1) << 9;
	/* ODT bits are split across MR1 bit positions 2 and 6 */
	t->mr[1] = (boot->mr[1] & 0x101fbb) |
		   (t->odt & 0x1) << 2 |
		   (t->odt & 0x2) << 5;

	NV_DEBUG(dev, "(%u) MR: %08x", t->id, t->mr[0]);
	return 0;
}
/* Maps a DDR3 tWR value (in clocks) to the MR0 write-recovery code
 * that is shifted into bits 11:9 below. */
uint8_t nv_mem_wr_lut_ddr3[NV_MEM_WR_DDR3_MAX] = {
	0, 0, 0, 0, 0, 1, 2, 3, 4, 5, 5, 6, 6, 7, 7, 0, 0};

/*
 * nouveau_mem_ddr3_mr - derive DDR3 mode-register values for a timing entry
 * @dev: DRM device
 * @freq: target memory frequency (unused here)
 * @e: parsed VBIOS timing table entry
 * @len: length of the timing table entry
 * @boot: mode registers read from the hardware at boot
 * @t: output timing set (mr[0..2] filled in)
 *
 * Returns 0 on success, -ERANGE if tCL, tWR or tCWL is out of range.
 */
static int
nouveau_mem_ddr3_mr(struct drm_device *dev, u32 freq,
		    struct nouveau_pm_tbl_entry *e, u8 len,
		    struct nouveau_pm_memtiming *boot,
		    struct nouveau_pm_memtiming *t)
{
	/* MR0 encodes CAS latency relative to 4; validated below */
	u8 cl = e->tCL - 4;

	t->drive_strength = 0;
	/* short table entries carry no RAM feature flags */
	if (len < 15) {
		t->odt = boot->odt;
	} else {
		t->odt = e->RAM_FT1 & 0x07;
	}

	if (e->tCL >= NV_MEM_CL_DDR3_MAX || e->tCL < 4) {
		NV_WARN(dev, "(%u) Invalid tCL: %u", t->id, e->tCL);
		return -ERANGE;
	}

	if (e->tWR >= NV_MEM_WR_DDR3_MAX || e->tWR < 4) {
		NV_WARN(dev, "(%u) Invalid tWR: %u", t->id, e->tWR);
		return -ERANGE;
	}

	if (e->tCWL < 5) {
		NV_WARN(dev, "(%u) Invalid tCWL: %u", t->id, e->tCWL);
		return -ERANGE;
	}

	t->mr[0] = (boot->mr[0] & 0x180b) |
		   /* CAS */
		   (cl & 0x7) << 4 |
		   (cl & 0x8) >> 1 |
		   (nv_mem_wr_lut_ddr3[e->tWR]) << 9;
	/* MR1: ODT bits scattered across A2/A6/A9 */
	t->mr[1] = (boot->mr[1] & 0x101dbb) |
		   (t->odt & 0x1) << 2 |
		   (t->odt & 0x2) << 5 |
		   (t->odt & 0x4) << 7;
	/* MR2: CAS write latency, encoded relative to 5 */
	t->mr[2] = (boot->mr[2] & 0x20ffb7) | (e->tCWL - 5) << 3;

	NV_DEBUG(dev, "(%u) MR: %08x %08x", t->id, t->mr[0], t->mr[2]);
	return 0;
}
/* Maps a GDDR3 tCL value to the 4-bit MR0 CAS-latency code (split
 * across bits 6:4 and 2 below). */
uint8_t nv_mem_cl_lut_gddr3[NV_MEM_CL_GDDR3_MAX] = {
	0, 0, 0, 0, 4, 5, 6, 7, 0, 1, 2, 3, 8, 9, 10, 11};
/* Maps a GDDR3 tWR value to the MR1 write-recovery code. */
uint8_t nv_mem_wr_lut_gddr3[NV_MEM_WR_GDDR3_MAX] = {
	0, 0, 0, 0, 0, 2, 3, 8, 9, 10, 11, 0, 0, 1, 1, 0, 3};

/*
 * nouveau_mem_gddr3_mr - derive GDDR3 mode-register values for a timing entry
 * @dev: DRM device
 * @freq: target memory frequency (unused here)
 * @e: parsed VBIOS timing table entry
 * @len: length of the timing table entry
 * @boot: mode registers read from the hardware at boot
 * @t: output timing set (mr[0..2] filled in)
 *
 * Returns 0 on success, -ERANGE if tCL or tWR is out of range.
 */
static int
nouveau_mem_gddr3_mr(struct drm_device *dev, u32 freq,
		     struct nouveau_pm_tbl_entry *e, u8 len,
		     struct nouveau_pm_memtiming *boot,
		     struct nouveau_pm_memtiming *t)
{
	/* short table entries carry no RAM feature flags */
	if (len < 15) {
		t->drive_strength = boot->drive_strength;
		t->odt = boot->odt;
	} else {
		t->drive_strength = (e->RAM_FT1 & 0x30) >> 4;
		t->odt = e->RAM_FT1 & 0x07;
	}

	if (e->tCL >= NV_MEM_CL_GDDR3_MAX) {
		NV_WARN(dev, "(%u) Invalid tCL: %u", t->id, e->tCL);
		return -ERANGE;
	}

	if (e->tWR >= NV_MEM_WR_GDDR3_MAX) {
		NV_WARN(dev, "(%u) Invalid tWR: %u", t->id, e->tWR);
		return -ERANGE;
	}

	if (t->odt > 3) {
		NV_WARN(dev, "(%u) Invalid odt value, assuming autocal: %x",
			t->id, t->odt);
		t->odt = 0;
	}

	t->mr[0] = (boot->mr[0] & 0xe0b) |
		   /* CAS */
		   ((nv_mem_cl_lut_gddr3[e->tCL] & 0x7) << 4) |
		   ((nv_mem_cl_lut_gddr3[e->tCL] & 0x8) >> 2);
	t->mr[1] = (boot->mr[1] & 0x100f40) | t->drive_strength |
		   (t->odt << 2) |
		   (nv_mem_wr_lut_gddr3[e->tWR] & 0xf) << 4;
	/* MR2 is left exactly as the card booted */
	t->mr[2] = boot->mr[2];

	NV_DEBUG(dev, "(%u) MR: %08x %08x %08x", t->id,
		 t->mr[0], t->mr[1], t->mr[2]);
	return 0;
}
/*
 * nouveau_mem_gddr5_mr - derive GDDR5 mode-register values for a timing entry
 * @dev: DRM device
 * @freq: target memory frequency (unused here)
 * @e: parsed VBIOS timing table entry
 * @len: length of the timing table entry
 * @boot: mode registers read from the hardware at boot
 * @t: output timing set (mr[0]/mr[1] filled in)
 *
 * Returns 0 on success, -ERANGE if tCL or tWR is out of range.
 */
static int
nouveau_mem_gddr5_mr(struct drm_device *dev, u32 freq,
		     struct nouveau_pm_tbl_entry *e, u8 len,
		     struct nouveau_pm_memtiming *boot,
		     struct nouveau_pm_memtiming *t)
{
	int have_ft = (len >= 15);

	/* RAM feature flags only exist in the long table format */
	t->drive_strength = have_ft ? (e->RAM_FT1 & 0x30) >> 4
				    : boot->drive_strength;
	t->odt = have_ft ? (e->RAM_FT1 & 0x03) : boot->odt;

	if (e->tCL >= NV_MEM_CL_GDDR5_MAX) {
		NV_WARN(dev, "(%u) Invalid tCL: %u", t->id, e->tCL);
		return -ERANGE;
	}

	if (e->tWR >= NV_MEM_WR_GDDR5_MAX) {
		NV_WARN(dev, "(%u) Invalid tWR: %u", t->id, e->tWR);
		return -ERANGE;
	}

	if (t->odt > 3) {
		NV_WARN(dev, "(%u) Invalid odt value, assuming autocal: %x",
			t->id, t->odt);
		t->odt = 0;
	}

	/* MR0 encodes CAS relative to 5 and write recovery relative to 4 */
	t->mr[0] = (boot->mr[0] & 0x007) |
		   ((e->tCL - 5) << 3) |
		   ((e->tWR - 4) << 8);
	t->mr[1] = (boot->mr[1] & 0x1007f0) |
		   t->drive_strength |
		   (t->odt << 2);

	NV_DEBUG(dev, "(%u) MR: %08x %08x", t->id, t->mr[0], t->mr[1]);
	return 0;
}
/*
 * nouveau_mem_timing_calc - build the full timing set for a target frequency
 * @dev: DRM device
 * @freq: target memory frequency
 * @t: output timing set
 *
 * Looks up the VBIOS timing entry for @freq, computes the per-generation
 * PFB timing registers and per-memory-type mode registers, and applies the
 * ramcfg table's DLL-disable flag to MR1.  Falls back to the boot timings
 * when no VBIOS entry exists.
 *
 * Returns 0 on success or a negative error code.  Unlike the previous
 * version (which folded every timing-calc failure into -EINVAL via a
 * "vram_type * !ret" switch and then patched an uncomputed t->mr[1]),
 * the real error code is returned and MR generation is skipped on failure.
 */
int
nouveau_mem_timing_calc(struct drm_device *dev, u32 freq,
			struct nouveau_pm_memtiming *t)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
	struct nouveau_pm_memtiming *boot = &pm->boot.timing;
	struct nouveau_pm_tbl_entry *e;
	u8 ver, len, *ptr, *ramcfg;
	int ret;

	/* no usable VBIOS timing entry: reuse the boot timings */
	ptr = nouveau_perf_timing(dev, freq, &ver, &len);
	if (!ptr || ptr[0] == 0x00) {
		*t = *boot;
		return 0;
	}
	e = (struct nouveau_pm_tbl_entry *)ptr;

	t->tCWL = boot->tCWL;

	/* per-generation PFB timing register calculation */
	switch (dev_priv->card_type) {
	case NV_40:
		ret = nv40_mem_timing_calc(dev, freq, e, len, boot, t);
		break;
	case NV_50:
		ret = nv50_mem_timing_calc(dev, freq, e, len, boot, t);
		break;
	case NV_C0:
		ret = nvc0_mem_timing_calc(dev, freq, e, len, boot, t);
		break;
	default:
		ret = -ENODEV;
		break;
	}

	/* propagate the real error; don't derive MR values from a timing
	 * set we failed to build */
	if (ret)
		return ret;

	/* per-memory-type mode register generation */
	switch (dev_priv->vram_type) {
	case NV_MEM_TYPE_GDDR3:
		ret = nouveau_mem_gddr3_mr(dev, freq, e, len, boot, t);
		break;
	case NV_MEM_TYPE_GDDR5:
		ret = nouveau_mem_gddr5_mr(dev, freq, e, len, boot, t);
		break;
	case NV_MEM_TYPE_DDR2:
		ret = nouveau_mem_ddr2_mr(dev, freq, e, len, boot, t);
		break;
	case NV_MEM_TYPE_DDR3:
		ret = nouveau_mem_ddr3_mr(dev, freq, e, len, boot, t);
		break;
	default:
		ret = -EINVAL;
		break;
	}

	/* honour the ramcfg table's DLL-disable flag in MR1 */
	ramcfg = nouveau_perf_ramcfg(dev, freq, &ver, &len);
	if (ramcfg) {
		int dll_off;

		if (ver == 0x00)
			dll_off = !!(ramcfg[3] & 0x04);
		else
			dll_off = !!(ramcfg[2] & 0x40);

		switch (dev_priv->vram_type) {
		case NV_MEM_TYPE_GDDR3:
			t->mr[1] &= ~0x00000040;
			t->mr[1] |=  0x00000040 * dll_off;
			break;
		default:
			t->mr[1] &= ~0x00000001;
			t->mr[1] |=  0x00000001 * dll_off;
			break;
		}
	}

	return ret;
}
/*
 * nouveau_mem_timing_read - snapshot the current memory timings/MRs
 * @dev: DRM device
 * @t: output timing set, filled from live hardware registers
 *
 * Reads the PFB timing registers and mode registers the card is running
 * with, and decodes ODT/drive-strength back out of MR1.  Returns early
 * (leaving reg[] untouched) on unrecognized card types.
 */
void
nouveau_mem_timing_read(struct drm_device *dev, struct nouveau_pm_memtiming *t)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	u32 timing_base, timing_regs, mr_base;
	int i;

	/* the PFB timing/MR register blocks moved on NVC0+ */
	if (dev_priv->card_type >= 0xC0) {
		timing_base = 0x10f290;
		mr_base = 0x10f300;
	} else {
		timing_base = 0x100220;
		mr_base = 0x1002c0;
	}

	/* not backed by any VBIOS timing table entry */
	t->id = -1;

	/* number of timing registers differs per generation */
	switch (dev_priv->card_type) {
	case NV_50:
		timing_regs = 9;
		break;
	case NV_C0:
	case NV_D0:
		timing_regs = 5;
		break;
	case NV_30:
	case NV_40:
		timing_regs = 3;
		break;
	default:
		timing_regs = 0;
		return;
	}
	for(i = 0; i < timing_regs; i++)
		t->reg[i] = nv_rd32(dev, timing_base + (0x04 * i));

	/* CAS write latency lives in different registers pre/post NVC0 */
	t->tCWL = 0;
	if (dev_priv->card_type < NV_C0) {
		t->tCWL = ((nv_rd32(dev, 0x100228) & 0x0f000000) >> 24) + 1;
	} else if (dev_priv->card_type <= NV_D0) {
		t->tCWL = ((nv_rd32(dev, 0x10f294) & 0x00000f80) >> 7);
	}

	t->mr[0] = nv_rd32(dev, mr_base);
	t->mr[1] = nv_rd32(dev, mr_base + 0x04);
	t->mr[2] = nv_rd32(dev, mr_base + 0x20);
	t->mr[3] = nv_rd32(dev, mr_base + 0x24);

	t->odt = 0;
	t->drive_strength = 0;

	/* decode ODT/drive strength back out of MR1 */
	switch (dev_priv->vram_type) {
	case NV_MEM_TYPE_DDR3:
		t->odt |= (t->mr[1] & 0x200) >> 7;
		/* fall through - DDR3 also uses the DDR2 ODT bits below */
	case NV_MEM_TYPE_DDR2:
		t->odt |= (t->mr[1] & 0x04) >> 2 |
			  (t->mr[1] & 0x40) >> 5;
		break;
	case NV_MEM_TYPE_GDDR3:
	case NV_MEM_TYPE_GDDR5:
		t->drive_strength = t->mr[1] & 0x03;
		t->odt = (t->mr[1] & 0x0c) >> 2;
		break;
	default:
		break;
	}
}
/*
 * nouveau_mem_exec - execute a memory reclock sequence
 * @exec: callbacks abstracting the card-specific register pokes
 * @perflvl: performance level carrying the target timings/MRs
 *
 * Drives the DRAM through the standard reclock sequence: optionally turn
 * the DLL off, enter self-refresh, change the clock, exit self-refresh,
 * reprogram mode registers and PFB timings, then (re)enable and reset the
 * DLL.  Returns 0 on success, -ENODEV for unsupported memory types.
 */
int
nouveau_mem_exec(struct nouveau_mem_exec_func *exec,
		 struct nouveau_pm_level *perflvl)
{
	struct drm_nouveau_private *dev_priv = exec->dev->dev_private;
	struct nouveau_pm_memtiming *info = &perflvl->timing;
	u32 tMRD = 1000, tCKSRE = 0, tCKSRX = 0, tXS = 0, tDLLK = 0;
	u32 mr[3] = { info->mr[0], info->mr[1], info->mr[2] };
	u32 mr1_dlloff;

	/* pick the DLL lock delay and MR1 DLL-disable bit per memory type */
	switch (dev_priv->vram_type) {
	case NV_MEM_TYPE_DDR2:
		tDLLK = 2000;
		mr1_dlloff = 0x00000001;
		break;
	case NV_MEM_TYPE_DDR3:
		tDLLK = 12000;
		mr1_dlloff = 0x00000001;
		break;
	case NV_MEM_TYPE_GDDR3:
		tDLLK = 40000;
		mr1_dlloff = 0x00000040;
		break;
	default:
		NV_ERROR(exec->dev, "cannot reclock unsupported memtype\n");
		return -ENODEV;
	}

	/* fetch current MRs */
	switch (dev_priv->vram_type) {
	case NV_MEM_TYPE_GDDR3:
	case NV_MEM_TYPE_DDR3:
		mr[2] = exec->mrg(exec, 2);
		/* fall through - MR0/MR1 are fetched for all types */
	default:
		mr[1] = exec->mrg(exec, 1);
		mr[0] = exec->mrg(exec, 0);
		break;
	}

	/* DLL 'on' -> DLL 'off' mode, disable before entering self-refresh */
	if (!(mr[1] & mr1_dlloff) && (info->mr[1] & mr1_dlloff)) {
		exec->precharge(exec);
		exec->mrs (exec, 1, mr[1] | mr1_dlloff);
		exec->wait(exec, tMRD);
	}

	/* enter self-refresh mode */
	exec->precharge(exec);
	exec->refresh(exec);
	exec->refresh(exec);
	exec->refresh_auto(exec, false);
	exec->refresh_self(exec, true);
	exec->wait(exec, tCKSRE);

	/* modify input clock frequency */
	exec->clock_set(exec);

	/* exit self-refresh mode */
	exec->wait(exec, tCKSRX);
	exec->precharge(exec);
	exec->refresh_self(exec, false);
	exec->refresh_auto(exec, true);
	exec->wait(exec, tXS);

	/* update MRs */
	if (mr[2] != info->mr[2]) {
		exec->mrs (exec, 2, info->mr[2]);
		exec->wait(exec, tMRD);
	}

	if (mr[1] != info->mr[1]) {
		/* need to keep DLL off until later, at least on GDDR3 */
		exec->mrs (exec, 1, info->mr[1] | (mr[1] & mr1_dlloff));
		exec->wait(exec, tMRD);
	}

	if (mr[0] != info->mr[0]) {
		exec->mrs (exec, 0, info->mr[0]);
		exec->wait(exec, tMRD);
	}

	/* update PFB timing registers */
	exec->timing_set(exec);

	/* DLL (enable + ) reset */
	if (!(info->mr[1] & mr1_dlloff)) {
		/* re-enable the DLL if we turned it off above */
		if (mr[1] & mr1_dlloff) {
			exec->mrs (exec, 1, info->mr[1]);
			exec->wait(exec, tMRD);
		}
		/* pulse the MR0 DLL-reset bit, then wait for lock */
		exec->mrs (exec, 0, info->mr[0] | 0x00000100);
		exec->wait(exec, tMRD);
		exec->mrs (exec, 0, info->mr[0] | 0x00000000);
		exec->wait(exec, tMRD);
		exec->wait(exec, tDLLK);
		if (dev_priv->vram_type == NV_MEM_TYPE_GDDR3)
			exec->precharge(exec);
	}

	return 0;
}
/*
 * nouveau_mem_vbios_type - determine the VRAM type from the VBIOS
 * @dev: DRM device
 *
 * Uses the strap-selected ramcfg index to look up the memory type in the
 * BIT 'M'emory table.  Returns one of the NV_MEM_TYPE_* constants, or
 * NV_MEM_TYPE_UNKNOWN when the table is absent or unrecognized.
 */
int
nouveau_mem_vbios_type(struct drm_device *dev)
{
	struct bit_entry M;
	u8 ramcfg = (nv_rd32(dev, 0x101000) & 0x0000003c) >> 2;
	/* Only parse the table when the lookup succeeded AND it has the
	 * expected version/size.  The previous "||"-based test had the
	 * logic inverted and dereferenced an uninitialized bit_entry
	 * whenever the 'M' table was missing. */
	if (!bit_table(dev, 'M', &M) && M.version == 2 && M.length >= 5) {
		u8 *table = ROMPTR(dev, M.data[3]);
		if (table && table[0] == 0x10 && ramcfg < table[3]) {
			u8 *entry = table + table[1] + (ramcfg * table[2]);
			switch (entry[0] & 0x0f) {
			case 0: return NV_MEM_TYPE_DDR2;
			case 1: return NV_MEM_TYPE_DDR3;
			case 2: return NV_MEM_TYPE_GDDR3;
			case 3: return NV_MEM_TYPE_GDDR5;
			default:
				break;
			}
		}
	}
	return NV_MEM_TYPE_UNKNOWN;
}
/* TTM manager init callback for the VRAM domain; all allocator state is
 * owned by the nouveau VRAM engine, so there is nothing to set up here. */
static int
nouveau_vram_manager_init(struct ttm_mem_type_manager *man, unsigned long psize)
{
	/* nothing to do */
	return 0;
}
/* TTM manager teardown callback for the VRAM domain; nothing was set up
 * in init, so nothing needs releasing. */
static int
nouveau_vram_manager_fini(struct ttm_mem_type_manager *man)
{
	/* nothing to do */
	return 0;
}
/* Tear down both potential GPU VM mappings (vma[0] and vma[1]) held by a
 * memory node before the node itself is released. */
static inline void
nouveau_mem_node_cleanup(struct nouveau_mem *node)
{
	int i;

	for (i = 0; i < 2; i++) {
		if (node->vma[i].node) {
			nouveau_vm_unmap(&node->vma[i]);
			nouveau_vm_put(&node->vma[i]);
		}
	}
}
/* TTM put_node callback for the VRAM domain: unmap any VM mappings, then
 * return the allocation to the nouveau VRAM engine. */
static void
nouveau_vram_manager_del(struct ttm_mem_type_manager *man,
			 struct ttm_mem_reg *mem)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(man->bdev);
	struct nouveau_vram_engine *vram = &dev_priv->engine.vram;
	struct drm_device *dev = dev_priv->dev;

	nouveau_mem_node_cleanup(mem->mm_node);
	/* vram->put() also clears mem->mm_node via the double pointer */
	vram->put(dev, (struct nouveau_mem **)&mem->mm_node);
}
/* TTM get_node callback for the VRAM domain: carve an allocation for @bo
 * out of the nouveau VRAM engine. */
static int
nouveau_vram_manager_new(struct ttm_mem_type_manager *man,
			 struct ttm_buffer_object *bo,
			 struct ttm_placement *placement,
			 struct ttm_mem_reg *mem)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(man->bdev);
	struct nouveau_vram_engine *vram = &dev_priv->engine.vram;
	struct drm_device *dev = dev_priv->dev;
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	struct nouveau_mem *node;
	u32 size_nc = 0;
	int ret;

	/* non-contiguous buffers only need page-sized contiguous chunks */
	if (nvbo->tile_flags & NOUVEAU_GEM_TILE_NONCONTIG)
		size_nc = 1 << nvbo->page_shift;

	ret = vram->get(dev, mem->num_pages << PAGE_SHIFT,
			mem->page_alignment << PAGE_SHIFT, size_nc,
			(nvbo->tile_flags >> 8) & 0x3ff, &node);
	if (ret) {
		mem->mm_node = NULL;
		/* -ENOSPC is reported as success with mm_node == NULL */
		return (ret == -ENOSPC) ? 0 : ret;
	}

	node->page_shift = nvbo->page_shift;

	mem->mm_node = node;
	mem->start   = node->offset >> PAGE_SHIFT;
	return 0;
}
/* TTM debug callback: dump every node in the VRAM allocator's list along
 * with total/free accounting (offsets/lengths are in 4KiB units, hence
 * the << 12 when printing). */
void
nouveau_vram_manager_debug(struct ttm_mem_type_manager *man, const char *prefix)
{
	struct nouveau_mm *mm = man->priv;
	struct nouveau_mm_node *r;
	u32 total = 0, free = 0;

	mutex_lock(&mm->mutex);
	list_for_each_entry(r, &mm->nodes, nl_entry) {
		printk(KERN_DEBUG "%s %d: 0x%010llx 0x%010llx\n",
		       prefix, r->type, ((u64)r->offset << 12),
		       (((u64)r->offset + r->length) << 12));

		total += r->length;
		/* type 0 nodes are unallocated */
		if (!r->type)
			free += r->length;
	}
	mutex_unlock(&mm->mutex);

	printk(KERN_DEBUG "%s total: 0x%010llx free: 0x%010llx\n",
	       prefix, (u64)total << 12, (u64)free << 12);
	printk(KERN_DEBUG "%s block: 0x%08x\n",
	       prefix, mm->block_size << 12);
}
/* TTM memory-type manager ops for the VRAM domain.  NOTE: positional
 * initializers - the order must match struct ttm_mem_type_manager_func. */
const struct ttm_mem_type_manager_func nouveau_vram_manager = {
	nouveau_vram_manager_init,
	nouveau_vram_manager_fini,
	nouveau_vram_manager_new,
	nouveau_vram_manager_del,
	nouveau_vram_manager_debug
};
/* TTM manager init callback for the GART domain; no state to set up. */
static int
nouveau_gart_manager_init(struct ttm_mem_type_manager *man, unsigned long psize)
{
	return 0;
}
/* TTM manager teardown callback for the GART domain; nothing to release. */
static int
nouveau_gart_manager_fini(struct ttm_mem_type_manager *man)
{
	return 0;
}
/* TTM put_node callback for the GART domain: unmap any VM mappings and
 * free the bookkeeping node allocated in nouveau_gart_manager_new(). */
static void
nouveau_gart_manager_del(struct ttm_mem_type_manager *man,
			 struct ttm_mem_reg *mem)
{
	nouveau_mem_node_cleanup(mem->mm_node);
	kfree(mem->mm_node);
	mem->mm_node = NULL;
}
/* TTM get_node callback for the GART domain: only allocates a bookkeeping
 * node (start stays 0); rejects buffers larger than the aperture. */
static int
nouveau_gart_manager_new(struct ttm_mem_type_manager *man,
			 struct ttm_buffer_object *bo,
			 struct ttm_placement *placement,
			 struct ttm_mem_reg *mem)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
	struct nouveau_mem *node;

	if (unlikely((mem->num_pages << PAGE_SHIFT) >=
		     dev_priv->gart_info.aper_size))
		return -ENOMEM;

	node = kzalloc(sizeof(*node), GFP_KERNEL);
	if (!node)
		return -ENOMEM;
	/* GART mappings always use 4KiB pages */
	node->page_shift = 12;

	mem->mm_node = node;
	mem->start = 0;
	return 0;
}
/* TTM debug callback for the GART domain; intentionally empty - there is
 * no allocator state worth dumping. */
void
nouveau_gart_manager_debug(struct ttm_mem_type_manager *man, const char *prefix)
{
}
/* TTM memory-type manager ops for the GART domain.  NOTE: positional
 * initializers - the order must match struct ttm_mem_type_manager_func. */
const struct ttm_mem_type_manager_func nouveau_gart_manager = {
	nouveau_gart_manager_init,
	nouveau_gart_manager_fini,
	nouveau_gart_manager_new,
	nouveau_gart_manager_del,
	nouveau_gart_manager_debug
};
| gpl-2.0 |
cybermx/linux-2.6-imx | arch/sparc/prom/init_64.c | 7558 | 1409 | /*
* init.c: Initialize internal variables used by the PROM
* library functions.
*
* Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
* Copyright (C) 1996,1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
*/
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/ctype.h>
#include <asm/openprom.h>
#include <asm/oplib.h>
/* OBP version string. */
char prom_version[80];

/* Value of the "stdout" property of the /chosen node (set in prom_init). */
int prom_stdout;

/* Handle of the /chosen node in the prom device tree. */
phandle prom_chosen_node;
/* You must call prom_init() before you attempt to use any of the
 * routines in the prom library.  It gets passed the pointer to the
 * PROM vector and the CIF stack.  On failure it calls prom_halt()
 * and does not return.
 */
extern void prom_cif_init(void *, void *);

void __init prom_init(void *cif_handler, void *cif_stack)
{
	phandle node;

	/* hook up the OBP client interface before any other prom_* call */
	prom_cif_init(cif_handler, cif_stack);

	prom_chosen_node = prom_finddevice(prom_chosen_path);
	if (!prom_chosen_node || (s32)prom_chosen_node == -1)
		prom_halt();

	prom_stdout = prom_getint(prom_chosen_node, "stdout");

	node = prom_finddevice("/openprom");
	if (!node || (s32)node == -1)
		prom_halt();

	prom_getstring(node, "version", prom_version, sizeof(prom_version));

	prom_printf("\n");
}
/* Print the PROM identification gathered by prom_init() to the kernel
 * log (prom_root_compatible is filled in elsewhere). */
void __init prom_init_report(void)
{
	printk("PROMLIB: Sun IEEE Boot Prom '%s'\n", prom_version);
	printk("PROMLIB: Root node compatible: %s\n", prom_root_compatible);
}
| gpl-2.0 |
ciminaghi/linux-3.3.8 | net/netlabel/netlabel_addrlist.c | 7558 | 10564 | /*
* NetLabel Network Address Lists
*
* This file contains network address list functions used to manage ordered
* lists of network addresses for use by the NetLabel subsystem. The NetLabel
* system manages static and dynamic label mappings for network protocols such
* as CIPSO and RIPSO.
*
* Author: Paul Moore <paul@paul-moore.com>
*
*/
/*
* (c) Copyright Hewlett-Packard Development Company, L.P., 2008
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
* the GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
*/
#include <linux/types.h>
#include <linux/rcupdate.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <linux/audit.h>
#include "netlabel_addrlist.h"
/*
* Address List Functions
*/
/**
 * netlbl_af4list_search - Search for a matching IPv4 address entry
 * @addr: IPv4 address
 * @head: the list head
 *
 * Description:
 * Walk the IPv4 address list @head and return the first valid entry whose
 * network (addr & mask) covers @addr, or NULL when nothing matches.  The
 * caller is responsible for calling the rcu_read_[un]lock() functions.
 *
 */
struct netlbl_af4list *netlbl_af4list_search(__be32 addr,
					     struct list_head *head)
{
	struct netlbl_af4list *entry;

	list_for_each_entry_rcu(entry, head, list) {
		if (!entry->valid)
			continue;
		if ((addr & entry->mask) == entry->addr)
			return entry;
	}

	return NULL;
}
/**
 * netlbl_af4list_search_exact - Search for an exact IPv4 address entry
 * @addr: IPv4 address
 * @mask: IPv4 address mask
 * @head: the list head
 *
 * Description:
 * Walk the IPv4 address list @head and return the valid entry whose
 * address and mask both match exactly, or NULL if none does.  The caller
 * is responsible for calling the rcu_read_[un]lock() functions.
 *
 */
struct netlbl_af4list *netlbl_af4list_search_exact(__be32 addr,
						   __be32 mask,
						   struct list_head *head)
{
	struct netlbl_af4list *entry;

	list_for_each_entry_rcu(entry, head, list) {
		if (!entry->valid)
			continue;
		if (entry->addr == addr && entry->mask == mask)
			return entry;
	}

	return NULL;
}
#if IS_ENABLED(CONFIG_IPV6)
/**
* netlbl_af6list_search - Search for a matching IPv6 address entry
* @addr: IPv6 address
* @head: the list head
*
* Description:
* Searches the IPv6 address list given by @head. If a matching address entry
* is found it is returned, otherwise NULL is returned. The caller is
* responsible for calling the rcu_read_[un]lock() functions.
*
*/
struct netlbl_af6list *netlbl_af6list_search(const struct in6_addr *addr,
struct list_head *head)
{
struct netlbl_af6list *iter;
list_for_each_entry_rcu(iter, head, list)
if (iter->valid &&
ipv6_masked_addr_cmp(&iter->addr, &iter->mask, addr) == 0)
return iter;
return NULL;
}
/**
* netlbl_af6list_search_exact - Search for an exact IPv6 address entry
* @addr: IPv6 address
* @mask: IPv6 address mask
* @head: the list head
*
* Description:
* Searches the IPv6 address list given by @head. If an exact match if found
* it is returned, otherwise NULL is returned. The caller is responsible for
* calling the rcu_read_[un]lock() functions.
*
*/
struct netlbl_af6list *netlbl_af6list_search_exact(const struct in6_addr *addr,
const struct in6_addr *mask,
struct list_head *head)
{
struct netlbl_af6list *iter;
list_for_each_entry_rcu(iter, head, list)
if (iter->valid &&
ipv6_addr_equal(&iter->addr, addr) &&
ipv6_addr_equal(&iter->mask, mask))
return iter;
return NULL;
}
#endif /* IPv6 */
/**
* netlbl_af4list_add - Add a new IPv4 address entry to a list
* @entry: address entry
* @head: the list head
*
* Description:
* Add a new address entry to the list pointed to by @head. On success zero is
* returned, otherwise a negative value is returned. The caller is responsible
* for calling the necessary locking functions.
*
*/
int netlbl_af4list_add(struct netlbl_af4list *entry, struct list_head *head)
{
struct netlbl_af4list *iter;
iter = netlbl_af4list_search(entry->addr, head);
if (iter != NULL &&
iter->addr == entry->addr && iter->mask == entry->mask)
return -EEXIST;
/* in order to speed up address searches through the list (the common
* case) we need to keep the list in order based on the size of the
* address mask such that the entry with the widest mask (smallest
* numerical value) appears first in the list */
list_for_each_entry_rcu(iter, head, list)
if (iter->valid &&
ntohl(entry->mask) > ntohl(iter->mask)) {
__list_add_rcu(&entry->list,
iter->list.prev,
&iter->list);
return 0;
}
list_add_tail_rcu(&entry->list, head);
return 0;
}
#if IS_ENABLED(CONFIG_IPV6)
/**
* netlbl_af6list_add - Add a new IPv6 address entry to a list
* @entry: address entry
* @head: the list head
*
* Description:
* Add a new address entry to the list pointed to by @head. On success zero is
* returned, otherwise a negative value is returned. The caller is responsible
* for calling the necessary locking functions.
*
*/
int netlbl_af6list_add(struct netlbl_af6list *entry, struct list_head *head)
{
struct netlbl_af6list *iter;
iter = netlbl_af6list_search(&entry->addr, head);
if (iter != NULL &&
ipv6_addr_equal(&iter->addr, &entry->addr) &&
ipv6_addr_equal(&iter->mask, &entry->mask))
return -EEXIST;
/* in order to speed up address searches through the list (the common
* case) we need to keep the list in order based on the size of the
* address mask such that the entry with the widest mask (smallest
* numerical value) appears first in the list */
list_for_each_entry_rcu(iter, head, list)
if (iter->valid &&
ipv6_addr_cmp(&entry->mask, &iter->mask) > 0) {
__list_add_rcu(&entry->list,
iter->list.prev,
&iter->list);
return 0;
}
list_add_tail_rcu(&entry->list, head);
return 0;
}
#endif /* IPv6 */
/**
 * netlbl_af4list_remove_entry - Remove an IPv4 address entry
 * @entry: address entry
 *
 * Description:
 * Remove the specified IP address entry.  The caller is responsible for
 * calling the necessary locking functions.
 *
 */
void netlbl_af4list_remove_entry(struct netlbl_af4list *entry)
{
	/* clear 'valid' first so concurrent RCU readers skip this entry */
	entry->valid = 0;
	list_del_rcu(&entry->list);
}
/**
 * netlbl_af4list_remove - Remove an IPv4 address entry
 * @addr: IP address
 * @mask: IP address mask
 * @head: the list head
 *
 * Description:
 * Find the exact @addr/@mask entry in @head and unlink it.  Returns the
 * removed entry, or NULL when no exact match exists.  The caller is
 * responsible for calling the necessary locking functions.
 *
 */
struct netlbl_af4list *netlbl_af4list_remove(__be32 addr, __be32 mask,
					     struct list_head *head)
{
	struct netlbl_af4list *match;

	match = netlbl_af4list_search_exact(addr, mask, head);
	if (match != NULL)
		netlbl_af4list_remove_entry(match);
	return match;
}
#if IS_ENABLED(CONFIG_IPV6)
/**
 * netlbl_af6list_remove_entry - Remove an IPv6 address entry
 * @entry: address entry
 *
 * Description:
 * Remove the specified IP address entry.  The caller is responsible for
 * calling the necessary locking functions.
 *
 */
void netlbl_af6list_remove_entry(struct netlbl_af6list *entry)
{
	/* clear 'valid' first so concurrent RCU readers skip this entry */
	entry->valid = 0;
	list_del_rcu(&entry->list);
}
/**
 * netlbl_af6list_remove - Remove an IPv6 address entry
 * @addr: IP address
 * @mask: IP address mask
 * @head: the list head
 *
 * Description:
 * Find the exact @addr/@mask entry in @head and unlink it.  Returns the
 * removed entry, or NULL when no exact match exists.  The caller is
 * responsible for calling the necessary locking functions.
 *
 */
struct netlbl_af6list *netlbl_af6list_remove(const struct in6_addr *addr,
					     const struct in6_addr *mask,
					     struct list_head *head)
{
	struct netlbl_af6list *match;

	match = netlbl_af6list_search_exact(addr, mask, head);
	if (match != NULL)
		netlbl_af6list_remove_entry(match);
	return match;
}
#endif /* IPv6 */
/*
* Audit Helper Functions
*/
#ifdef CONFIG_AUDIT
/**
 * netlbl_af4list_audit_addr - Audit an IPv4 address
 * @audit_buf: audit buffer
 * @src: true if source address, false if destination
 * @dev: network interface
 * @addr: IP address
 * @mask: IP address mask
 *
 * Description:
 * Write the IPv4 address, and its prefix length when the mask is not a
 * full /32, to @audit_buf.
 *
 */
void netlbl_af4list_audit_addr(struct audit_buffer *audit_buf,
			       int src, const char *dev,
			       __be32 addr, __be32 mask)
{
	char *dir = (src ? "src" : "dst");
	u32 m = ntohl(mask);

	if (dev != NULL)
		audit_log_format(audit_buf, " netif=%s", dev);
	audit_log_format(audit_buf, " %s=%pI4", dir, &addr);

	if (m != 0xffffffff) {
		u32 prefix_len;

		/* count the leading one bits of the mask */
		for (prefix_len = 0; m > 0; m <<= 1)
			prefix_len++;
		audit_log_format(audit_buf, " %s_prefixlen=%d",
				 dir, prefix_len);
	}
}
#if IS_ENABLED(CONFIG_IPV6)
/**
 * netlbl_af6list_audit_addr - Audit an IPv6 address
 * @audit_buf: audit buffer
 * @src: true if source address, false if destination
 * @dev: network interface
 * @addr: IP address
 * @mask: IP address mask
 *
 * Description:
 * Write the IPv6 address, and its prefix length when the mask is not a
 * full /128, to @audit_buf.
 *
 */
void netlbl_af6list_audit_addr(struct audit_buffer *audit_buf,
				 int src,
				 const char *dev,
				 const struct in6_addr *addr,
				 const struct in6_addr *mask)
{
	char *dir = (src ? "src" : "dst");

	if (dev != NULL)
		audit_log_format(audit_buf, " netif=%s", dev);
	audit_log_format(audit_buf, " %s=%pI6", dir, addr);
	/* a /128 mask has an all-ones final word; anything else needs a
	 * prefix length logged */
	if (ntohl(mask->s6_addr32[3]) != 0xffffffff) {
		u32 mask_len = 0;
		u32 mask_val;
		int iter = -1;
		/* count fully-set 32-bit words; terminates because the
		 * final word was checked above and is not all ones */
		while (ntohl(mask->s6_addr32[++iter]) == 0xffffffff)
			mask_len += 32;
		/* then count the leading one bits of the partial word */
		mask_val = ntohl(mask->s6_addr32[iter]);
		while (mask_val > 0) {
			mask_val <<= 1;
			mask_len++;
		}
		audit_log_format(audit_buf, " %s_prefixlen=%d", dir, mask_len);
	}
}
#endif /* IPv6 */
#endif /* CONFIG_AUDIT */
| gpl-2.0 |
PsychoGame/android_kernel_samsung_royss_old | net/ipv4/tcp_westwood.c | 8326 | 8222 | /*
* TCP Westwood+: end-to-end bandwidth estimation for TCP
*
* Angelo Dell'Aera: author of the first version of TCP Westwood+ in Linux 2.4
*
* Support at http://c3lab.poliba.it/index.php/Westwood
* Main references in literature:
*
* - Mascolo S, Casetti, M. Gerla et al.
* "TCP Westwood: bandwidth estimation for TCP" Proc. ACM Mobicom 2001
*
* - A. Grieco, s. Mascolo
* "Performance evaluation of New Reno, Vegas, Westwood+ TCP" ACM Computer
* Comm. Review, 2004
*
* - A. Dell'Aera, L. Grieco, S. Mascolo.
* "Linux 2.4 Implementation of Westwood+ TCP with Rate-Halving :
* A Performance Evaluation Over the Internet" (ICC 2004), Paris, June 2004
*
* Westwood+ employs end-to-end bandwidth measurement to set cwnd and
* ssthresh after packet loss. The probing phase is as the original Reno.
*/
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/inet_diag.h>
#include <net/tcp.h>
/* TCP Westwood structure */
struct westwood {
	u32    bw_ns_est;        /* first bandwidth estimation..not too smoothed 8) */
	u32    bw_est;           /* bandwidth estimate */
	u32    rtt_win_sx;       /* here starts a new evaluation... */
	u32    bk;               /* bytes acked within the current RTT window */
	u32    snd_una;          /* used for evaluating the number of acked bytes */
	u32    cumul_ack;        /* bytes acked by the last ACK, after correction */
	u32    accounted;        /* bytes already credited to dupacks */
	u32    rtt;              /* last RTT sample, in jiffies */
	u32    rtt_min;          /* minimum observed RTT */
	u8     first_ack;        /* flag which infers that this is the first ack */
	u8     reset_rtt_min;    /* Reset RTT min to next RTT sample */
};
/* TCP Westwood functions and constants */
#define TCP_WESTWOOD_RTT_MIN (HZ/20) /* 50ms */
#define TCP_WESTWOOD_INIT_RTT (20*HZ) /* maybe too conservative?! */
/*
 * @tcp_westwood_create
 * This function initializes fields used in TCP Westwood+,
 * it is called after the initial SYN, so the sequence numbers
 * are correct but new passive connections we have no
 * information about RTTmin at this time so we simply set it to
 * TCP_WESTWOOD_INIT_RTT. This value was chosen to be too conservative
 * since in this way we're sure it will be updated in a consistent
 * way as soon as possible. It will reasonably happen within the first
 * RTT period of the connection lifetime.
 */
static void tcp_westwood_init(struct sock *sk)
{
	struct westwood *w = inet_csk_ca(sk);

	/* bandwidth filter starts empty; seeded by the first sample */
	w->bk = 0;
	w->bw_ns_est = 0;
	w->bw_est = 0;
	w->accounted = 0;
	w->cumul_ack = 0;
	w->reset_rtt_min = 1;
	w->rtt_min = w->rtt = TCP_WESTWOOD_INIT_RTT;
	/* open the first RTT measurement window now */
	w->rtt_win_sx = tcp_time_stamp;
	w->snd_una = tcp_sk(sk)->snd_una;
	w->first_ack = 1;
}
/*
 * @westwood_do_filter
 * Low-pass filter. Implemented using constant coefficients:
 * new = (7 * old + sample) / 8.
 */
static inline u32 westwood_do_filter(u32 a, u32 b)
{
	return (a * 7 + b) >> 3;
}
/* Feed one RTT window's bandwidth sample (bytes acked / window length)
 * into the two-stage low-pass filter. */
static void westwood_filter(struct westwood *w, u32 delta)
{
	u32 sample = w->bk / delta;

	if (w->bw_ns_est == 0 && w->bw_est == 0) {
		/* filter is empty: seed both stages with the first sample */
		w->bw_ns_est = sample;
		w->bw_est = sample;
	} else {
		w->bw_ns_est = westwood_do_filter(w->bw_ns_est, sample);
		w->bw_est = westwood_do_filter(w->bw_est, w->bw_ns_est);
	}
}
/*
 * @westwood_pkts_acked
 * Called after processing group of packets.
 * but all westwood needs is the last sample of srtt.
 */
static void tcp_westwood_pkts_acked(struct sock *sk, u32 cnt, s32 rtt)
{
	struct westwood *w = inet_csk_ca(sk);

	/* rtt is in microseconds; non-positive means no valid sample */
	if (rtt > 0)
		w->rtt = usecs_to_jiffies(rtt);
}
/*
 * @westwood_update_window
 * It updates RTT evaluation window if it is the right moment to do
 * it. If so it calls filter for evaluating bandwidth.
 */
static void westwood_update_window(struct sock *sk)
{
	struct westwood *w = inet_csk_ca(sk);
	s32 delta = tcp_time_stamp - w->rtt_win_sx;

	/* Initialize w->snd_una with the first acked sequence number in order
	 * to fix mismatch between tp->snd_una and w->snd_una for the first
	 * bandwidth sample
	 */
	if (w->first_ack) {
		w->snd_una = tcp_sk(sk)->snd_una;
		w->first_ack = 0;
	}

	/*
	 * See if a RTT-window has passed.
	 * Be careful since if RTT is less than
	 * 50ms we don't filter but we continue 'building the sample'.
	 * This minimum limit was chosen since an estimation on small
	 * time intervals is better to avoid...
	 * Obviously on a LAN we reasonably will always have
	 * right_bound = left_bound + WESTWOOD_RTT_MIN
	 */
	if (w->rtt && delta > max_t(u32, w->rtt, TCP_WESTWOOD_RTT_MIN)) {
		westwood_filter(w, delta);

		/* window complete: reset the byte counter and open the
		 * next measurement window */
		w->bk = 0;
		w->rtt_win_sx = tcp_time_stamp;
	}
}
/* Track the minimum observed RTT; when reset_rtt_min is set, restart the
 * minimum from the most recent sample instead of keeping the old floor. */
static inline void update_rtt_min(struct westwood *w)
{
	if (w->reset_rtt_min) {
		w->reset_rtt_min = 0;
		w->rtt_min = w->rtt;
		return;
	}

	w->rtt_min = min(w->rtt, w->rtt_min);
}
/*
 * @westwood_fast_bw
 * It is called when we are in fast path. In particular it is called when
 * header prediction is successful. In such case in fact update is
 * straight forward and doesn't need any particular care.
 */
static inline void westwood_fast_bw(struct sock *sk)
{
	const struct tcp_sock *tp = tcp_sk(sk);
	struct westwood *w = inet_csk_ca(sk);

	westwood_update_window(sk);

	/* credit exactly the newly acked bytes to this window's count */
	w->bk += tp->snd_una - w->snd_una;
	w->snd_una = tp->snd_una;
	update_rtt_min(w);
}
/*
 * @westwood_acked_count
 * This function evaluates cumul_ack for evaluating bk in case of
 * delayed or partial acks.
 */
static inline u32 westwood_acked_count(struct sock *sk)
{
	const struct tcp_sock *tp = tcp_sk(sk);
	struct westwood *w = inet_csk_ca(sk);

	w->cumul_ack = tp->snd_una - w->snd_una;

	/* If cumul_ack is 0 this is a dupack since it's not moving
	 * tp->snd_una.
	 */
	if (!w->cumul_ack) {
		/* count one MSS for the dupack now, and remember we did so
		 * via 'accounted' to avoid double counting later */
		w->accounted += tp->mss_cache;
		w->cumul_ack = tp->mss_cache;
	}

	if (w->cumul_ack > tp->mss_cache) {
		/* Partial or delayed ack */
		if (w->accounted >= w->cumul_ack) {
			/* everything was already credited to dupacks */
			w->accounted -= w->cumul_ack;
			w->cumul_ack = tp->mss_cache;
		} else {
			/* subtract the portion already credited */
			w->cumul_ack -= w->accounted;
			w->accounted = 0;
		}
	}

	w->snd_una = tp->snd_una;

	return w->cumul_ack;
}
/*
 * TCP Westwood
 * Here limit is evaluated as Bw estimation*RTTmin (for obtaining it
 * in packets we use mss_cache). Rttmin is guaranteed to be >= 2
 * so avoids ever returning 0.
 */
static u32 tcp_westwood_bw_rttmin(const struct sock *sk)
{
	const struct tcp_sock *tp = tcp_sk(sk);
	const struct westwood *w = inet_csk_ca(sk);

	/* bandwidth-delay product in packets, floored at 2 */
	return max_t(u32, (w->bw_est * w->rtt_min) / tp->mss_cache, 2);
}
/*
 * Congestion event hook: feed ACK information into the bandwidth
 * estimator and set ssthresh/cwnd from the bandwidth-delay product
 * after loss-recovery events.
 */
static void tcp_westwood_event(struct sock *sk, enum tcp_ca_event event)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct westwood *w = inet_csk_ca(sk);

	switch (event) {
	case CA_EVENT_FAST_ACK:
		/* in-order ACK on the fast path: cheap update */
		westwood_fast_bw(sk);
		break;

	case CA_EVENT_COMPLETE_CWR:
		tp->snd_cwnd = tp->snd_ssthresh = tcp_westwood_bw_rttmin(sk);
		break;

	case CA_EVENT_FRTO:
		tp->snd_ssthresh = tcp_westwood_bw_rttmin(sk);
		/* Update RTT_min when next ack arrives */
		w->reset_rtt_min = 1;
		break;

	case CA_EVENT_SLOW_ACK:
		/* dupack/partial/delayed ACK: needs the corrected count */
		westwood_update_window(sk);
		w->bk += westwood_acked_count(sk);
		update_rtt_min(w);
		break;

	default:
		/* don't care */
		break;
	}
}
/* Extract info for Tcp socket info provided via netlink. */
static void tcp_westwood_info(struct sock *sk, u32 ext,
			      struct sk_buff *skb)
{
	const struct westwood *ca = inet_csk_ca(sk);

	/* reuse the Vegas info layout to export RTT/RTTmin (in usecs) */
	if (ext & (1 << (INET_DIAG_VEGASINFO - 1))) {
		struct tcpvegas_info info = {
			.tcpv_enabled = 1,
			.tcpv_rtt = jiffies_to_usecs(ca->rtt),
			.tcpv_minrtt = jiffies_to_usecs(ca->rtt_min),
		};

		/* NOTE(review): nla_put() failure is silently ignored here;
		 * diag output is best-effort */
		nla_put(skb, INET_DIAG_VEGASINFO, sizeof(info), &info);
	}
}
/*
 * Westwood+ congestion-control operations.  Slow start and congestion
 * avoidance are plain Reno; only the post-loss window setting
 * (min_cwnd) and the event/ack hooks are Westwood-specific.
 */
static struct tcp_congestion_ops tcp_westwood __read_mostly = {
	.init		= tcp_westwood_init,
	.ssthresh	= tcp_reno_ssthresh,
	.cong_avoid	= tcp_reno_cong_avoid,
	.min_cwnd	= tcp_westwood_bw_rttmin,
	.cwnd_event	= tcp_westwood_event,
	.get_info	= tcp_westwood_info,
	.pkts_acked	= tcp_westwood_pkts_acked,
	.owner		= THIS_MODULE,
	.name		= "westwood"
};
/* Module entry point: register Westwood+ with the TCP stack.  The
 * private state must fit in the per-socket area the ICSK reserves for
 * congestion-control modules, which BUILD_BUG_ON checks at compile time. */
static int __init tcp_westwood_register(void)
{
	BUILD_BUG_ON(sizeof(struct westwood) > ICSK_CA_PRIV_SIZE);
	return tcp_register_congestion_control(&tcp_westwood);
}
/* Module exit point: remove Westwood+ from the congestion-control list. */
static void __exit tcp_westwood_unregister(void)
{
	tcp_unregister_congestion_control(&tcp_westwood);
}
module_init(tcp_westwood_register);
module_exit(tcp_westwood_unregister);
MODULE_AUTHOR("Stephen Hemminger, Angelo Dell'Aera");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("TCP Westwood+");
| gpl-2.0 |
jamison904/N920T | arch/blackfin/kernel/cplb-mpu/cplbmgr.c | 9094 | 9515 | /*
* Blackfin CPLB exception handling for when MPU in on
*
* Copyright 2008-2009 Analog Devices Inc.
*
* Licensed under the GPL-2 or later.
*/
#include <linux/module.h>
#include <linux/mm.h>
#include <asm/blackfin.h>
#include <asm/cacheflush.h>
#include <asm/cplb.h>
#include <asm/cplbinit.h>
#include <asm/mmu_context.h>
/*
* WARNING
*
* This file is compiled with certain -ffixed-reg options. We have to
* make sure not to call any functions here that could clobber these
* registers.
*/
int page_mask_nelts;
int page_mask_order;
unsigned long *current_rwx_mask[NR_CPUS];
int nr_dcplb_miss[NR_CPUS], nr_icplb_miss[NR_CPUS];
int nr_icplb_supv_miss[NR_CPUS], nr_dcplb_prot[NR_CPUS];
int nr_cplb_flush[NR_CPUS];
#ifdef CONFIG_EXCPT_IRQ_SYSC_L1
#define MGR_ATTR __attribute__((l1_text))
#else
#define MGR_ATTR
#endif
/*
* Given the contents of the status register, return the index of the
* CPLB that caused the fault.
*/
static inline int faulting_cplb_index(int status)
{
	/* The low 16 status bits have a single bit set identifying the
	 * faulting entry; the Blackfin norm builtin counts sign bits,
	 * turning that bit position into an index. */
	int signbits = __builtin_bfin_norm_fr1x32(status & 0xFFFF);
	return 30 - signbits;
}
/*
* Given the contents of the status register and the DCPLB_DATA contents,
* return true if a write access should be permitted.
*/
static inline int write_permitted(int status, unsigned long data)
{
	unsigned long wr_bit;

	/* Supervisor faults check the supervisor write-enable bit of the
	 * CPLB descriptor, user faults the user write-enable bit.
	 */
	wr_bit = (status & FAULT_USERSUPV) ? CPLB_SUPV_WR : CPLB_USER_WR;

	return (data & wr_bit) != 0;
}
/* Counters to implement round-robin replacement. */
static int icplb_rr_index[NR_CPUS], dcplb_rr_index[NR_CPUS];
/*
* Find an ICPLB entry to be evicted and return its index.
*/
MGR_ATTR static int evict_one_icplb(unsigned int cpu)
{
	int i;

	/* Prefer an invalid (free) entry among the switched CPLBs. */
	for (i = first_switched_icplb; i < MAX_CPLBS; i++)
		if ((icplb_tbl[cpu][i].data & CPLB_VALID) == 0)
			return i;

	/* Otherwise evict round-robin, wrapping the counter back into
	 * the switched range when it runs past the end. */
	i = first_switched_icplb + icplb_rr_index[cpu];
	if (i >= MAX_CPLBS) {
		i -= MAX_CPLBS - first_switched_icplb;
		icplb_rr_index[cpu] -= MAX_CPLBS - first_switched_icplb;
	}
	icplb_rr_index[cpu]++;
	return i;
}
/* Data-side twin of evict_one_icplb(): pick a DCPLB entry to replace. */
MGR_ATTR static int evict_one_dcplb(unsigned int cpu)
{
	int i;

	/* Prefer an invalid (free) entry among the switched CPLBs. */
	for (i = first_switched_dcplb; i < MAX_CPLBS; i++)
		if ((dcplb_tbl[cpu][i].data & CPLB_VALID) == 0)
			return i;

	/* Otherwise evict round-robin, wrapping the counter back into
	 * the switched range when it runs past the end. */
	i = first_switched_dcplb + dcplb_rr_index[cpu];
	if (i >= MAX_CPLBS) {
		i -= MAX_CPLBS - first_switched_dcplb;
		dcplb_rr_index[cpu] -= MAX_CPLBS - first_switched_dcplb;
	}
	dcplb_rr_index[cpu]++;
	return i;
}
/*
 * Handle a DCPLB miss: derive the page attributes for the faulting
 * address, pick a victim entry and install the new mapping.
 * Returns 0 on success or CPLB_PROT_VIOL for an illegal access.
 */
MGR_ATTR static noinline int dcplb_miss(unsigned int cpu)
{
	unsigned long addr = bfin_read_DCPLB_FAULT_ADDR();
	int status = bfin_read_DCPLB_STATUS();
	unsigned long *mask;
	int idx;
	unsigned long d_data;

	nr_dcplb_miss[cpu]++;

	/* Start from a 4KB supervisor-writable, dirty page. */
	d_data = CPLB_SUPV_WR | CPLB_VALID | CPLB_DIRTY | PAGE_SIZE_4KB;
#ifdef CONFIG_BFIN_EXTMEM_DCACHEABLE
	if (bfin_addr_dcacheable(addr)) {
		d_data |= CPLB_L1_CHBL | ANOMALY_05000158_WORKAROUND;
# ifdef CONFIG_BFIN_EXTMEM_WRITETHROUGH
		d_data |= CPLB_L1_AOW | CPLB_WT;
# endif
	}
#endif
	if (L2_LENGTH && addr >= L2_START && addr < L2_START + L2_LENGTH) {
		/* A single large entry covers all of L2 SRAM. */
		addr = L2_START;
		d_data = L2_DMEMORY;
	} else if (addr >= physical_mem_end) {
		if (addr >= ASYNC_BANK0_BASE && addr < ASYNC_BANK3_BASE + ASYNC_BANK3_SIZE) {
#if defined(CONFIG_ROMFS_ON_MTD) && defined(CONFIG_MTD_ROM)
			/* Async banks may hold a ROMFS image; grant user
			 * read access according to the rwx bitmap. */
			mask = current_rwx_mask[cpu];
			if (mask) {
				int page = (addr - (ASYNC_BANK0_BASE - _ramend)) >> PAGE_SHIFT;
				int idx = page >> 5;
				int bit = 1 << (page & 31);

				if (mask[idx] & bit)
					d_data |= CPLB_USER_RD;
			}
#endif
		} else if (addr >= BOOT_ROM_START && addr < BOOT_ROM_START + BOOT_ROM_LENGTH
		    && (status & (FAULT_RW | FAULT_USERSUPV)) == FAULT_USERSUPV) {
			/* Supervisor read of boot ROM: map one 1MB page. */
			addr &= ~(1 * 1024 * 1024 - 1);
			d_data &= ~PAGE_SIZE_4KB;
			d_data |= PAGE_SIZE_1MB;
		} else
			return CPLB_PROT_VIOL;
	} else if (addr >= _ramend) {
		/* Reserved memory between _ramend and physical_mem_end. */
		d_data |= CPLB_USER_RD | CPLB_USER_WR;
		if (reserved_mem_dcache_on)
			d_data |= CPLB_L1_CHBL;
	} else {
		/* Normal RAM: consult the per-process rwx bitmaps. */
		mask = current_rwx_mask[cpu];
		if (mask) {
			int page = addr >> PAGE_SHIFT;
			int idx = page >> 5;
			int bit = 1 << (page & 31);

			if (mask[idx] & bit)
				d_data |= CPLB_USER_RD;

			/* The write-permission bitmap follows the read one. */
			mask += page_mask_nelts;
			if (mask[idx] & bit)
				d_data |= CPLB_USER_WR;
		}
	}
	idx = evict_one_dcplb(cpu);

	addr &= PAGE_MASK;
	dcplb_tbl[cpu][idx].addr = addr;
	dcplb_tbl[cpu][idx].data = d_data;

	/* CPLBs must be disabled while their MMRs are rewritten. */
	_disable_dcplb();
	bfin_write32(DCPLB_DATA0 + idx * 4, d_data);
	bfin_write32(DCPLB_ADDR0 + idx * 4, addr);
	_enable_dcplb();

	return 0;
}
/*
 * Handle an ICPLB miss: derive the attributes for the faulting
 * instruction page, evict an entry and install the new mapping.
 * Returns 0 on success or CPLB_PROT_VIOL for an illegal access.
 */
MGR_ATTR static noinline int icplb_miss(unsigned int cpu)
{
	unsigned long addr = bfin_read_ICPLB_FAULT_ADDR();
	int status = bfin_read_ICPLB_STATUS();
	int idx;
	unsigned long i_data;

	nr_icplb_miss[cpu]++;

	/* If inside the uncached DMA region, fault. */
	if (addr >= _ramend - DMA_UNCACHED_REGION && addr < _ramend)
		return CPLB_PROT_VIOL;

	if (status & FAULT_USERSUPV)
		nr_icplb_supv_miss[cpu]++;

	/*
	 * First, try to find a CPLB that matches this address. If we
	 * find one, then the fact that we're in the miss handler means
	 * that the instruction crosses a page boundary.
	 */
	for (idx = first_switched_icplb; idx < MAX_CPLBS; idx++) {
		if (icplb_tbl[cpu][idx].data & CPLB_VALID) {
			unsigned long this_addr = icplb_tbl[cpu][idx].addr;
			if (this_addr <= addr && this_addr + PAGE_SIZE > addr) {
				/* Map the following page instead. */
				addr += PAGE_SIZE;
				break;
			}
		}
	}

	i_data = CPLB_VALID | CPLB_PORTPRIO | PAGE_SIZE_4KB;

#ifdef CONFIG_BFIN_EXTMEM_ICACHEABLE
	/*
	 * Normal RAM, and possibly the reserved memory area, are
	 * cacheable.
	 */
	if (addr < _ramend ||
	    (addr < physical_mem_end && reserved_mem_icache_on))
		i_data |= CPLB_L1_CHBL | ANOMALY_05000158_WORKAROUND;
#endif

	if (L2_LENGTH && addr >= L2_START && addr < L2_START + L2_LENGTH) {
		/* A single large entry covers all of L2 SRAM. */
		addr = L2_START;
		i_data = L2_IMEMORY;
	} else if (addr >= physical_mem_end) {
		if (addr >= ASYNC_BANK0_BASE && addr < ASYNC_BANK3_BASE + ASYNC_BANK3_SIZE) {
			if (!(status & FAULT_USERSUPV)) {
				unsigned long *mask = current_rwx_mask[cpu];

				if (mask) {
					int page = (addr - (ASYNC_BANK0_BASE - _ramend)) >> PAGE_SHIFT;
					int idx = page >> 5;
					int bit = 1 << (page & 31);

					/* Skip to the execute bitmap (third
					 * of the r/w/x triple). */
					mask += 2 * page_mask_nelts;

					if (mask[idx] & bit)
						i_data |= CPLB_USER_RD;
				}
			}
		} else if (addr >= BOOT_ROM_START && addr < BOOT_ROM_START + BOOT_ROM_LENGTH
		    && (status & FAULT_USERSUPV)) {
			/* Supervisor fetch from boot ROM: map one 1MB page. */
			addr &= ~(1 * 1024 * 1024 - 1);
			i_data &= ~PAGE_SIZE_4KB;
			i_data |= PAGE_SIZE_1MB;
		} else
			return CPLB_PROT_VIOL;
	} else if (addr >= _ramend) {
		i_data |= CPLB_USER_RD;
		if (reserved_mem_icache_on)
			i_data |= CPLB_L1_CHBL;
	} else {
		/*
		 * Two cases to distinguish - a supervisor access must
		 * necessarily be for a module page; we grant it
		 * unconditionally (could do better here in the future).
		 * Otherwise, check the x bitmap of the current process.
		 */
		if (!(status & FAULT_USERSUPV)) {
			unsigned long *mask = current_rwx_mask[cpu];

			if (mask) {
				int page = addr >> PAGE_SHIFT;
				int idx = page >> 5;
				int bit = 1 << (page & 31);

				/* Skip to the execute bitmap. */
				mask += 2 * page_mask_nelts;

				if (mask[idx] & bit)
					i_data |= CPLB_USER_RD;
			}
		}
	}
	idx = evict_one_icplb(cpu);
	addr &= PAGE_MASK;
	icplb_tbl[cpu][idx].addr = addr;
	icplb_tbl[cpu][idx].data = i_data;

	/* CPLBs must be disabled while their MMRs are rewritten. */
	_disable_icplb();
	bfin_write32(ICPLB_DATA0 + idx * 4, i_data);
	bfin_write32(ICPLB_ADDR0 + idx * 4, addr);
	_enable_icplb();

	return 0;
}
/*
 * Handle a DCPLB protection fault.  A write to a clean, write-permitted
 * page just needs its DIRTY bit set; everything else is a genuine
 * protection violation.
 */
MGR_ATTR static noinline int dcplb_protection_fault(unsigned int cpu)
{
	int status = bfin_read_DCPLB_STATUS();

	nr_dcplb_prot[cpu]++;

	if (status & FAULT_RW) {
		int idx = faulting_cplb_index(status);
		unsigned long data = dcplb_tbl[cpu][idx].data;

		/* Write-through pages never need the dirty-bit dance. */
		if (!(data & CPLB_WT) && !(data & CPLB_DIRTY) &&
		    write_permitted(status, data)) {
			data |= CPLB_DIRTY;
			dcplb_tbl[cpu][idx].data = data;
			bfin_write32(DCPLB_DATA0 + idx * 4, data);
			return 0;
		}
	}
	return CPLB_PROT_VIOL;
}
/*
 * Top-level CPLB exception dispatcher.  Decode the cause field of
 * SEQSTAT and hand off to the matching handler; a non-zero return
 * tells the caller the fault could not be resolved here.
 */
MGR_ATTR int cplb_hdr(int seqstat, struct pt_regs *regs)
{
	unsigned int cpu = raw_smp_processor_id();
	int cause = seqstat & 0x3f;

	if (cause == 0x23)
		return dcplb_protection_fault(cpu);
	if (cause == 0x2C)
		return icplb_miss(cpu);
	if (cause == 0x26)
		return dcplb_miss(cpu);

	return 1;
}
/*
 * Invalidate every switched (per-process) I- and D-CPLB entry for @cpu.
 * Runs with interrupts hard-disabled because the CPLBs are briefly
 * turned off while their MMRs are cleared.
 */
void flush_switched_cplbs(unsigned int cpu)
{
	int i;
	unsigned long flags;

	nr_cplb_flush[cpu]++;

	flags = hard_local_irq_save();
	_disable_icplb();
	for (i = first_switched_icplb; i < MAX_CPLBS; i++) {
		icplb_tbl[cpu][i].data = 0;
		bfin_write32(ICPLB_DATA0 + i * 4, 0);
	}
	_enable_icplb();

	_disable_dcplb();
	for (i = first_switched_dcplb; i < MAX_CPLBS; i++) {
		dcplb_tbl[cpu][i].data = 0;
		bfin_write32(DCPLB_DATA0 + i * 4, 0);
	}
	_enable_dcplb();
	hard_local_irq_restore(flags);
}
/*
 * Point the dedicated "mask" DCPLB entries at the rwx permission
 * bitmaps of the incoming process so the miss handlers can read them.
 * A NULL @masks simply clears the current pointer.
 */
void set_mask_dcplbs(unsigned long *masks, unsigned int cpu)
{
	int i;
	unsigned long addr = (unsigned long)masks;
	unsigned long d_data;
	unsigned long flags;

	if (!masks) {
		current_rwx_mask[cpu] = masks;
		return;
	}

	flags = hard_local_irq_save();
	current_rwx_mask[cpu] = masks;

	/* Pick attributes depending on where the bitmaps live. */
	if (L2_LENGTH && addr >= L2_START && addr < L2_START + L2_LENGTH) {
		addr = L2_START;
		d_data = L2_DMEMORY;
	} else {
		d_data = CPLB_SUPV_WR | CPLB_VALID | CPLB_DIRTY | PAGE_SIZE_4KB;
#ifdef CONFIG_BFIN_EXTMEM_DCACHEABLE
		d_data |= CPLB_L1_CHBL;
# ifdef CONFIG_BFIN_EXTMEM_WRITETHROUGH
		d_data |= CPLB_L1_AOW | CPLB_WT;
# endif
#endif
	}

	/* Install one page-sized entry per mask slot. */
	_disable_dcplb();
	for (i = first_mask_dcplb; i < first_switched_dcplb; i++) {
		dcplb_tbl[cpu][i].addr = addr;
		dcplb_tbl[cpu][i].data = d_data;
		bfin_write32(DCPLB_DATA0 + i * 4, d_data);
		bfin_write32(DCPLB_ADDR0 + i * 4, addr);
		addr += PAGE_SIZE;
	}
	_enable_dcplb();
	hard_local_irq_restore(flags);
}
| gpl-2.0 |
shazzl/sa77 | arch/tile/lib/memset_64.c | 9606 | 3507 | /*
* Copyright 2011 Tilera Corporation. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation, version 2.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
* NON INFRINGEMENT. See the GNU General Public License for
* more details.
*/
#include <arch/chip.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/module.h>
#undef memset
/*
 * TILE-Gx optimized memset: a byte loop for small sizes, then aligned
 * 64-bit stores, using the wh64 (whole-line write hint) instruction to
 * avoid read-allocating cache lines that are about to be overwritten.
 */
void *memset(void *s, int c, size_t n)
{
	uint64_t *out64;
	int n64, to_align64;
	uint64_t v64;
	uint8_t *out8 = s;

	/* Experimentation shows that a trivial tight loop is a win up until
	 * around a size of 20, where writing a word at a time starts to win.
	 */
#define BYTE_CUTOFF 20

#if BYTE_CUTOFF < 7
	/* This must be at least at least this big, or some code later
	 * on doesn't work.
	 */
#error "BYTE_CUTOFF is too small"
#endif

	if (n < BYTE_CUTOFF) {
		/* Strangely, this turns out to be the tightest way to
		 * write this loop.
		 */
		if (n != 0) {
			do {
				/* Strangely, combining these into one line
				 * performs worse.
				 */
				*out8 = c;
				out8++;
			} while (--n != 0);
		}

		return s;
	}

	/* Align 'out8'. We know n >= 7 so this won't write past the end. */
	while (((uintptr_t) out8 & 7) != 0) {
		*out8++ = c;
		--n;
	}

	/* Align 'n'. */
	while (n & 7)
		out8[--n] = c;

	out64 = (uint64_t *) out8;
	n64 = n >> 3;

	/* Tile input byte out to 64 bits. */
	/* KLUDGE: multiply replicates the byte into every lane. */
	v64 = 0x0101010101010101ULL * (uint8_t)c;

	/* This must be at least 8 or the following loop doesn't work. */
#define CACHE_LINE_SIZE_IN_DOUBLEWORDS (CHIP_L2_LINE_SIZE() / 8)

	/* Determine how many words we need to emit before the 'out32'
	 * pointer becomes aligned modulo the cache line size.
	 */
	to_align64 = (-((uintptr_t)out64 >> 3)) &
		(CACHE_LINE_SIZE_IN_DOUBLEWORDS - 1);

	/* Only bother aligning and using wh64 if there is at least
	 * one full cache line to process. This check also prevents
	 * overrunning the end of the buffer with alignment words.
	 */
	if (to_align64 <= n64 - CACHE_LINE_SIZE_IN_DOUBLEWORDS) {
		int lines_left;

		/* Align out64 mod the cache line size so we can use wh64. */
		n64 -= to_align64;
		for (; to_align64 != 0; to_align64--) {
			*out64 = v64;
			out64++;
		}

		/* Use unsigned divide to turn this into a right shift. */
		lines_left = (unsigned)n64 / CACHE_LINE_SIZE_IN_DOUBLEWORDS;

		do {
			/* Only wh64 a few lines at a time, so we don't
			 * exceed the maximum number of victim lines.
			 */
			int x = ((lines_left < CHIP_MAX_OUTSTANDING_VICTIMS())
				  ? lines_left
				  : CHIP_MAX_OUTSTANDING_VICTIMS());
			uint64_t *wh = out64;
			int i = x;
			int j;

			lines_left -= x;

			/* Hint all the lines of this batch first... */
			do {
				__insn_wh64(wh);
				wh += CACHE_LINE_SIZE_IN_DOUBLEWORDS;
			} while (--i);

			/* ...then fill them, four stores per iteration. */
			for (j = x * (CACHE_LINE_SIZE_IN_DOUBLEWORDS / 4);
			     j != 0; j--) {
				*out64++ = v64;
				*out64++ = v64;
				*out64++ = v64;
				*out64++ = v64;
			}
		} while (lines_left != 0);

		/* We processed all full lines above, so only this many
		 * words remain to be processed.
		 */
		n64 &= CACHE_LINE_SIZE_IN_DOUBLEWORDS - 1;
	}

	/* Now handle any leftover values. */
	if (n64 != 0) {
		do {
			*out64 = v64;
			out64++;
		} while (--n64 != 0);
	}

	return s;
}
EXPORT_SYMBOL(memset);
| gpl-2.0 |
razaina/android-kernel-samsung-smdk4412 | arch/powerpc/platforms/pseries/hvcserver.c | 9606 | 7926 | /*
* hvcserver.c
* Copyright (C) 2004 Ryan S Arnold, IBM Corporation
*
* PPC64 virtual I/O console server support.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <asm/hvcall.h>
#include <asm/hvcserver.h>
#include <asm/io.h>
#define HVCS_ARCH_VERSION "1.0.0"
MODULE_AUTHOR("Ryan S. Arnold <rsa@us.ibm.com>");
MODULE_DESCRIPTION("IBM hvcs ppc64 API");
MODULE_LICENSE("GPL");
MODULE_VERSION(HVCS_ARCH_VERSION);
/*
* Convert arch specific return codes into relevant errnos. The hvcs
* functions aren't performance sensitive, so this conversion isn't an
* issue.
*/
/*
 * Map a hypervisor call return code onto a Linux errno.  These paths
 * are not performance sensitive, so the conversion cost is irrelevant.
 */
static int hvcs_convert(long to_convert)
{
	if (to_convert == H_SUCCESS)
		return 0;
	if (to_convert == H_PARAMETER)
		return -EINVAL;
	if (to_convert == H_HARDWARE)
		return -EIO;
	if (to_convert == H_BUSY ||
	    to_convert == H_LONG_BUSY_ORDER_1_MSEC ||
	    to_convert == H_LONG_BUSY_ORDER_10_MSEC ||
	    to_convert == H_LONG_BUSY_ORDER_100_MSEC ||
	    to_convert == H_LONG_BUSY_ORDER_1_SEC ||
	    to_convert == H_LONG_BUSY_ORDER_10_SEC ||
	    to_convert == H_LONG_BUSY_ORDER_100_SEC)
		return -EBUSY;

	/* H_FUNCTION and anything unrecognized */
	return -EPERM;
}
/**
* hvcs_free_partner_info - free pi allocated by hvcs_get_partner_info
* @head: list_head pointer for an allocated list of partner info structs to
* free.
*
* This function is used to free the partner info list that was returned by
* calling hvcs_get_partner_info().
*/
int hvcs_free_partner_info(struct list_head *head)
{
struct hvcs_partner_info *pi;
struct list_head *element;
if (!head)
return -EINVAL;
while (!list_empty(head)) {
element = head->next;
pi = list_entry(element, struct hvcs_partner_info, node);
list_del(element);
kfree(pi);
}
return 0;
}
EXPORT_SYMBOL(hvcs_free_partner_info);
/* Helper function for hvcs_get_partner_info */
/* Helper for hvcs_get_partner_info: fetch the next partner record from
 * firmware into pi_buff and translate the hcall status to an errno. */
static int hvcs_next_partner(uint32_t unit_address,
		unsigned long last_p_partition_ID,
		unsigned long last_p_unit_address, unsigned long *pi_buff)
{
	long hrc = plpar_hcall_norets(H_VTERM_PARTNER_INFO, unit_address,
			last_p_partition_ID, last_p_unit_address,
			virt_to_phys(pi_buff));

	return hvcs_convert(hrc);
}
/**
* hvcs_get_partner_info - Get all of the partner info for a vty-server adapter
* @unit_address: The unit_address of the vty-server adapter for which this
* function is fetching partner info.
* @head: An initialized list_head pointer to an empty list to use to return the
* list of partner info fetched from the hypervisor to the caller.
* @pi_buff: A page sized buffer pre-allocated prior to calling this function
* that is to be used to be used by firmware as an iterator to keep track
* of the partner info retrieval.
*
* This function returns non-zero on success, or if there is no partner info.
*
* The pi_buff is pre-allocated prior to calling this function because this
* function may be called with a spin_lock held and kmalloc of a page is not
* recommended as GFP_ATOMIC.
*
* The first long of this buffer is used to store a partner unit address. The
* second long is used to store a partner partition ID and starting at
* pi_buff[2] is the 79 character Converged Location Code (diff size than the
* unsigned longs, hence the casting mumbo jumbo you see later).
*
* Invocation of this function should always be followed by an invocation of
* hvcs_free_partner_info() using a pointer to the SAME list head instance
* that was passed as a parameter to this function.
*/
int hvcs_get_partner_info(uint32_t unit_address, struct list_head *head,
	unsigned long *pi_buff)
{
	/*
	 * Dealt with as longs because of the hcall interface even though the
	 * values are uint32_t.
	 */
	unsigned long last_p_partition_ID;
	unsigned long last_p_unit_address;
	struct hvcs_partner_info *next_partner_info = NULL;
	int more = 1;
	int retval;

	/* Validate the parameters BEFORE touching pi_buff: the buffer
	 * must not be memset() through a possibly-NULL pointer. */
	if (!head || !pi_buff)
		return -EINVAL;

	memset(pi_buff, 0x00, PAGE_SIZE);
	last_p_partition_ID = last_p_unit_address = ~0UL;
	INIT_LIST_HEAD(head);

	do {
		retval = hvcs_next_partner(unit_address, last_p_partition_ID,
				last_p_unit_address, pi_buff);
		if (retval) {
			/*
			 * Don't indicate that we've failed if we have
			 * any list elements.
			 */
			if (!list_empty(head))
				return 0;
			return retval;
		}

		last_p_partition_ID = pi_buff[0];
		last_p_unit_address = pi_buff[1];

		/* An all-ones pair indicates there are no further partners */
		if (last_p_partition_ID == ~0UL
				&& last_p_unit_address == ~0UL)
			break;

		/* This is a very small struct and will be freed soon in
		 * hvcs_free_partner_info(). */
		next_partner_info = kmalloc(sizeof(struct hvcs_partner_info),
				GFP_ATOMIC);
		if (!next_partner_info) {
			printk(KERN_WARNING "HVCONSOLE: kmalloc() failed to"
				" allocate partner info struct.\n");
			hvcs_free_partner_info(head);
			return -ENOMEM;
		}

		next_partner_info->unit_address
			= (unsigned int)last_p_unit_address;
		next_partner_info->partition_ID
			= (unsigned int)last_p_partition_ID;

		/* Bounded copy that always NUL-terminates, instead of the
		 * strncpy(..., strlen(src) + 1) it replaces, which would
		 * overrun location_code on an overlong firmware string. */
		strlcpy(&next_partner_info->location_code[0],
				(char *)&pi_buff[2],
				sizeof(next_partner_info->location_code));

		list_add_tail(&(next_partner_info->node), head);
		next_partner_info = NULL;
	} while (more);

	return 0;
}
EXPORT_SYMBOL(hvcs_get_partner_info);
/**
* hvcs_register_connection - establish a connection between this vty-server and
* a vty.
* @unit_address: The unit address of the vty-server adapter that is to be
* establish a connection.
* @p_partition_ID: The partition ID of the vty adapter that is to be connected.
* @p_unit_address: The unit address of the vty adapter to which the vty-server
* is to be connected.
*
* If this function is called once and -EINVAL is returned it may
* indicate that the partner info needs to be refreshed for the
* target unit address at which point the caller must invoke
* hvcs_get_partner_info() and then call this function again. If,
* for a second time, -EINVAL is returned then it indicates that
* there is probably already a partner connection registered to a
* different vty-server adapter. It is also possible that a second
* -EINVAL may indicate that one of the parms is not valid, for
* instance if the link was removed between the vty-server adapter
* and the vty adapter that you are trying to open. Don't shoot the
* messenger. Firmware implemented it this way.
*/
int hvcs_register_connection( uint32_t unit_address,
		uint32_t p_partition_ID, uint32_t p_unit_address)
{
	/* Ask firmware to bind the vty-server to the partner vty. */
	long hrc = plpar_hcall_norets(H_REGISTER_VTERM, unit_address,
			p_partition_ID, p_unit_address);

	return hvcs_convert(hrc);
}
EXPORT_SYMBOL(hvcs_register_connection);
/**
* hvcs_free_connection - free the connection between a vty-server and vty
* @unit_address: The unit address of the vty-server that is to have its
* connection severed.
*
* This function is used to free the partner connection between a vty-server
* adapter and a vty adapter.
*
* If -EBUSY is returned continue to call this function until 0 is returned.
*/
int hvcs_free_connection(uint32_t unit_address)
{
	/* Ask firmware to sever the vty-server/vty partner connection. */
	long hrc = plpar_hcall_norets(H_FREE_VTERM, unit_address);

	return hvcs_convert(hrc);
}
EXPORT_SYMBOL(hvcs_free_connection);
| gpl-2.0 |
piasek1906/Piasek.4.3.JWR | lib/crc-t10dif.c | 12422 | 2965 | /*
* T10 Data Integrity Field CRC16 calculation
*
* Copyright (c) 2007 Oracle Corporation. All rights reserved.
* Written by Martin K. Petersen <martin.petersen@oracle.com>
*
* This source code is licensed under the GNU General Public License,
* Version 2. See the file COPYING for more details.
*/
#include <linux/types.h>
#include <linux/module.h>
#include <linux/crc-t10dif.h>
/* Table generated using the following polynomium:
* x^16 + x^15 + x^11 + x^9 + x^8 + x^7 + x^5 + x^4 + x^2 + x + 1
* gt: 0x8bb7
*/
static const __u16 t10_dif_crc_table[256] = {
	0x0000, 0x8BB7, 0x9CD9, 0x176E, 0xB205, 0x39B2, 0x2EDC, 0xA56B,
	0xEFBD, 0x640A, 0x7364, 0xF8D3, 0x5DB8, 0xD60F, 0xC161, 0x4AD6,
	0x54CD, 0xDF7A, 0xC814, 0x43A3, 0xE6C8, 0x6D7F, 0x7A11, 0xF1A6,
	0xBB70, 0x30C7, 0x27A9, 0xAC1E, 0x0975, 0x82C2, 0x95AC, 0x1E1B,
	0xA99A, 0x222D, 0x3543, 0xBEF4, 0x1B9F, 0x9028, 0x8746, 0x0CF1,
	0x4627, 0xCD90, 0xDAFE, 0x5149, 0xF422, 0x7F95, 0x68FB, 0xE34C,
	0xFD57, 0x76E0, 0x618E, 0xEA39, 0x4F52, 0xC4E5, 0xD38B, 0x583C,
	0x12EA, 0x995D, 0x8E33, 0x0584, 0xA0EF, 0x2B58, 0x3C36, 0xB781,
	0xD883, 0x5334, 0x445A, 0xCFED, 0x6A86, 0xE131, 0xF65F, 0x7DE8,
	0x373E, 0xBC89, 0xABE7, 0x2050, 0x853B, 0x0E8C, 0x19E2, 0x9255,
	0x8C4E, 0x07F9, 0x1097, 0x9B20, 0x3E4B, 0xB5FC, 0xA292, 0x2925,
	0x63F3, 0xE844, 0xFF2A, 0x749D, 0xD1F6, 0x5A41, 0x4D2F, 0xC698,
	0x7119, 0xFAAE, 0xEDC0, 0x6677, 0xC31C, 0x48AB, 0x5FC5, 0xD472,
	0x9EA4, 0x1513, 0x027D, 0x89CA, 0x2CA1, 0xA716, 0xB078, 0x3BCF,
	0x25D4, 0xAE63, 0xB90D, 0x32BA, 0x97D1, 0x1C66, 0x0B08, 0x80BF,
	0xCA69, 0x41DE, 0x56B0, 0xDD07, 0x786C, 0xF3DB, 0xE4B5, 0x6F02,
	0x3AB1, 0xB106, 0xA668, 0x2DDF, 0x88B4, 0x0303, 0x146D, 0x9FDA,
	0xD50C, 0x5EBB, 0x49D5, 0xC262, 0x6709, 0xECBE, 0xFBD0, 0x7067,
	0x6E7C, 0xE5CB, 0xF2A5, 0x7912, 0xDC79, 0x57CE, 0x40A0, 0xCB17,
	0x81C1, 0x0A76, 0x1D18, 0x96AF, 0x33C4, 0xB873, 0xAF1D, 0x24AA,
	0x932B, 0x189C, 0x0FF2, 0x8445, 0x212E, 0xAA99, 0xBDF7, 0x3640,
	0x7C96, 0xF721, 0xE04F, 0x6BF8, 0xCE93, 0x4524, 0x524A, 0xD9FD,
	0xC7E6, 0x4C51, 0x5B3F, 0xD088, 0x75E3, 0xFE54, 0xE93A, 0x628D,
	0x285B, 0xA3EC, 0xB482, 0x3F35, 0x9A5E, 0x11E9, 0x0687, 0x8D30,
	0xE232, 0x6985, 0x7EEB, 0xF55C, 0x5037, 0xDB80, 0xCCEE, 0x4759,
	0x0D8F, 0x8638, 0x9156, 0x1AE1, 0xBF8A, 0x343D, 0x2353, 0xA8E4,
	0xB6FF, 0x3D48, 0x2A26, 0xA191, 0x04FA, 0x8F4D, 0x9823, 0x1394,
	0x5942, 0xD2F5, 0xC59B, 0x4E2C, 0xEB47, 0x60F0, 0x779E, 0xFC29,
	0x4BA8, 0xC01F, 0xD771, 0x5CC6, 0xF9AD, 0x721A, 0x6574, 0xEEC3,
	0xA415, 0x2FA2, 0x38CC, 0xB37B, 0x1610, 0x9DA7, 0x8AC9, 0x017E,
	0x1F65, 0x94D2, 0x83BC, 0x080B, 0xAD60, 0x26D7, 0x31B9, 0xBA0E,
	0xF0D8, 0x7B6F, 0x6C01, 0xE7B6, 0x42DD, 0xC96A, 0xDE04, 0x55B3
};

/*
 * Compute the T10-DIF CRC16 of @buffer (@len bytes), one byte per
 * table lookup, starting from a zero seed.
 */
__u16 crc_t10dif(const unsigned char *buffer, size_t len)
{
	const unsigned char *p = buffer;
	const unsigned char *end = buffer + len;
	__u16 crc = 0;

	while (p < end) {
		crc = (crc << 8) ^
			t10_dif_crc_table[((crc >> 8) ^ *p++) & 0xff];
	}

	return crc;
}
EXPORT_SYMBOL(crc_t10dif);
MODULE_DESCRIPTION("T10 DIF CRC calculation");
MODULE_LICENSE("GPL");
| gpl-2.0 |
fabiocannizzo/linux | sound/soc/codecs/wm8978.c | 135 | 31793 | // SPDX-License-Identifier: GPL-2.0-only
/*
* wm8978.c -- WM8978 ALSA SoC Audio Codec driver
*
* Copyright (C) 2009-2010 Guennadi Liakhovetski <g.liakhovetski@gmx.de>
* Copyright (C) 2007 Carlos Munoz <carlos@kenati.com>
* Copyright 2006-2009 Wolfson Microelectronics PLC.
* Based on wm8974 and wm8990 by Liam Girdwood <lrg@slimlogic.co.uk>
*/
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/pm.h>
#include <linux/i2c.h>
#include <linux/regmap.h>
#include <linux/slab.h>
#include <sound/core.h>
#include <sound/pcm.h>
#include <sound/pcm_params.h>
#include <sound/soc.h>
#include <sound/initval.h>
#include <sound/tlv.h>
#include <asm/div64.h>
#include "wm8978.h"
static const struct reg_default wm8978_reg_defaults[] = {
{ 1, 0x0000 },
{ 2, 0x0000 },
{ 3, 0x0000 },
{ 4, 0x0050 },
{ 5, 0x0000 },
{ 6, 0x0140 },
{ 7, 0x0000 },
{ 8, 0x0000 },
{ 9, 0x0000 },
{ 10, 0x0000 },
{ 11, 0x00ff },
{ 12, 0x00ff },
{ 13, 0x0000 },
{ 14, 0x0100 },
{ 15, 0x00ff },
{ 16, 0x00ff },
{ 17, 0x0000 },
{ 18, 0x012c },
{ 19, 0x002c },
{ 20, 0x002c },
{ 21, 0x002c },
{ 22, 0x002c },
{ 23, 0x0000 },
{ 24, 0x0032 },
{ 25, 0x0000 },
{ 26, 0x0000 },
{ 27, 0x0000 },
{ 28, 0x0000 },
{ 29, 0x0000 },
{ 30, 0x0000 },
{ 31, 0x0000 },
{ 32, 0x0038 },
{ 33, 0x000b },
{ 34, 0x0032 },
{ 35, 0x0000 },
{ 36, 0x0008 },
{ 37, 0x000c },
{ 38, 0x0093 },
{ 39, 0x00e9 },
{ 40, 0x0000 },
{ 41, 0x0000 },
{ 42, 0x0000 },
{ 43, 0x0000 },
{ 44, 0x0033 },
{ 45, 0x0010 },
{ 46, 0x0010 },
{ 47, 0x0100 },
{ 48, 0x0100 },
{ 49, 0x0002 },
{ 50, 0x0001 },
{ 51, 0x0001 },
{ 52, 0x0039 },
{ 53, 0x0039 },
{ 54, 0x0039 },
{ 55, 0x0039 },
{ 56, 0x0001 },
{ 57, 0x0001 },
};
static bool wm8978_volatile(struct device *dev, unsigned int reg)
{
return reg == WM8978_RESET;
}
/* codec private data */
struct wm8978_priv {
struct regmap *regmap;
unsigned int f_pllout;
unsigned int f_mclk;
unsigned int f_256fs;
unsigned int f_opclk;
int mclk_idx;
enum wm8978_sysclk_src sysclk;
};
static const char *wm8978_companding[] = {"Off", "NC", "u-law", "A-law"};
static const char *wm8978_eqmode[] = {"Capture", "Playback"};
static const char *wm8978_bw[] = {"Narrow", "Wide"};
static const char *wm8978_eq1[] = {"80Hz", "105Hz", "135Hz", "175Hz"};
static const char *wm8978_eq2[] = {"230Hz", "300Hz", "385Hz", "500Hz"};
static const char *wm8978_eq3[] = {"650Hz", "850Hz", "1.1kHz", "1.4kHz"};
static const char *wm8978_eq4[] = {"1.8kHz", "2.4kHz", "3.2kHz", "4.1kHz"};
static const char *wm8978_eq5[] = {"5.3kHz", "6.9kHz", "9kHz", "11.7kHz"};
static const char *wm8978_alc3[] = {"ALC", "Limiter"};
static const char *wm8978_alc1[] = {"Off", "Right", "Left", "Both"};
static SOC_ENUM_SINGLE_DECL(adc_compand, WM8978_COMPANDING_CONTROL, 1,
wm8978_companding);
static SOC_ENUM_SINGLE_DECL(dac_compand, WM8978_COMPANDING_CONTROL, 3,
wm8978_companding);
static SOC_ENUM_SINGLE_DECL(eqmode, WM8978_EQ1, 8, wm8978_eqmode);
static SOC_ENUM_SINGLE_DECL(eq1, WM8978_EQ1, 5, wm8978_eq1);
static SOC_ENUM_SINGLE_DECL(eq2bw, WM8978_EQ2, 8, wm8978_bw);
static SOC_ENUM_SINGLE_DECL(eq2, WM8978_EQ2, 5, wm8978_eq2);
static SOC_ENUM_SINGLE_DECL(eq3bw, WM8978_EQ3, 8, wm8978_bw);
static SOC_ENUM_SINGLE_DECL(eq3, WM8978_EQ3, 5, wm8978_eq3);
static SOC_ENUM_SINGLE_DECL(eq4bw, WM8978_EQ4, 8, wm8978_bw);
static SOC_ENUM_SINGLE_DECL(eq4, WM8978_EQ4, 5, wm8978_eq4);
static SOC_ENUM_SINGLE_DECL(eq5, WM8978_EQ5, 5, wm8978_eq5);
static SOC_ENUM_SINGLE_DECL(alc3, WM8978_ALC_CONTROL_3, 8, wm8978_alc3);
static SOC_ENUM_SINGLE_DECL(alc1, WM8978_ALC_CONTROL_1, 7, wm8978_alc1);
static const DECLARE_TLV_DB_SCALE(digital_tlv, -12750, 50, 1);
static const DECLARE_TLV_DB_SCALE(eq_tlv, -1200, 100, 0);
static const DECLARE_TLV_DB_SCALE(inpga_tlv, -1200, 75, 0);
static const DECLARE_TLV_DB_SCALE(spk_tlv, -5700, 100, 0);
static const DECLARE_TLV_DB_SCALE(boost_tlv, -1500, 300, 1);
static const DECLARE_TLV_DB_SCALE(limiter_tlv, 0, 100, 0);
static const struct snd_kcontrol_new wm8978_snd_controls[] = {
SOC_SINGLE("Digital Loopback Switch",
WM8978_COMPANDING_CONTROL, 0, 1, 0),
SOC_ENUM("ADC Companding", adc_compand),
SOC_ENUM("DAC Companding", dac_compand),
SOC_DOUBLE("DAC Inversion Switch", WM8978_DAC_CONTROL, 0, 1, 1, 0),
SOC_DOUBLE_R_TLV("PCM Volume",
WM8978_LEFT_DAC_DIGITAL_VOLUME, WM8978_RIGHT_DAC_DIGITAL_VOLUME,
0, 255, 0, digital_tlv),
SOC_SINGLE("High Pass Filter Switch", WM8978_ADC_CONTROL, 8, 1, 0),
SOC_SINGLE("High Pass Cut Off", WM8978_ADC_CONTROL, 4, 7, 0),
SOC_DOUBLE("ADC Inversion Switch", WM8978_ADC_CONTROL, 0, 1, 1, 0),
SOC_DOUBLE_R_TLV("ADC Volume",
WM8978_LEFT_ADC_DIGITAL_VOLUME, WM8978_RIGHT_ADC_DIGITAL_VOLUME,
0, 255, 0, digital_tlv),
SOC_ENUM("Equaliser Function", eqmode),
SOC_ENUM("EQ1 Cut Off", eq1),
SOC_SINGLE_TLV("EQ1 Volume", WM8978_EQ1, 0, 24, 1, eq_tlv),
SOC_ENUM("Equaliser EQ2 Bandwidth", eq2bw),
SOC_ENUM("EQ2 Cut Off", eq2),
SOC_SINGLE_TLV("EQ2 Volume", WM8978_EQ2, 0, 24, 1, eq_tlv),
SOC_ENUM("Equaliser EQ3 Bandwidth", eq3bw),
SOC_ENUM("EQ3 Cut Off", eq3),
SOC_SINGLE_TLV("EQ3 Volume", WM8978_EQ3, 0, 24, 1, eq_tlv),
SOC_ENUM("Equaliser EQ4 Bandwidth", eq4bw),
SOC_ENUM("EQ4 Cut Off", eq4),
SOC_SINGLE_TLV("EQ4 Volume", WM8978_EQ4, 0, 24, 1, eq_tlv),
SOC_ENUM("EQ5 Cut Off", eq5),
SOC_SINGLE_TLV("EQ5 Volume", WM8978_EQ5, 0, 24, 1, eq_tlv),
SOC_SINGLE("DAC Playback Limiter Switch",
WM8978_DAC_LIMITER_1, 8, 1, 0),
SOC_SINGLE("DAC Playback Limiter Decay",
WM8978_DAC_LIMITER_1, 4, 15, 0),
SOC_SINGLE("DAC Playback Limiter Attack",
WM8978_DAC_LIMITER_1, 0, 15, 0),
SOC_SINGLE("DAC Playback Limiter Threshold",
WM8978_DAC_LIMITER_2, 4, 7, 0),
SOC_SINGLE_TLV("DAC Playback Limiter Volume",
WM8978_DAC_LIMITER_2, 0, 12, 0, limiter_tlv),
SOC_ENUM("ALC Enable Switch", alc1),
SOC_SINGLE("ALC Capture Min Gain", WM8978_ALC_CONTROL_1, 0, 7, 0),
SOC_SINGLE("ALC Capture Max Gain", WM8978_ALC_CONTROL_1, 3, 7, 0),
SOC_SINGLE("ALC Capture Hold", WM8978_ALC_CONTROL_2, 4, 10, 0),
SOC_SINGLE("ALC Capture Target", WM8978_ALC_CONTROL_2, 0, 15, 0),
SOC_ENUM("ALC Capture Mode", alc3),
SOC_SINGLE("ALC Capture Decay", WM8978_ALC_CONTROL_3, 4, 10, 0),
SOC_SINGLE("ALC Capture Attack", WM8978_ALC_CONTROL_3, 0, 10, 0),
SOC_SINGLE("ALC Capture Noise Gate Switch", WM8978_NOISE_GATE, 3, 1, 0),
SOC_SINGLE("ALC Capture Noise Gate Threshold",
WM8978_NOISE_GATE, 0, 7, 0),
SOC_DOUBLE_R("Capture PGA ZC Switch",
WM8978_LEFT_INP_PGA_CONTROL, WM8978_RIGHT_INP_PGA_CONTROL,
7, 1, 0),
/* OUT1 - Headphones */
SOC_DOUBLE_R("Headphone Playback ZC Switch",
WM8978_LOUT1_HP_CONTROL, WM8978_ROUT1_HP_CONTROL, 7, 1, 0),
SOC_DOUBLE_R_TLV("Headphone Playback Volume",
WM8978_LOUT1_HP_CONTROL, WM8978_ROUT1_HP_CONTROL,
0, 63, 0, spk_tlv),
/* OUT2 - Speakers */
SOC_DOUBLE_R("Speaker Playback ZC Switch",
WM8978_LOUT2_SPK_CONTROL, WM8978_ROUT2_SPK_CONTROL, 7, 1, 0),
SOC_DOUBLE_R_TLV("Speaker Playback Volume",
WM8978_LOUT2_SPK_CONTROL, WM8978_ROUT2_SPK_CONTROL,
0, 63, 0, spk_tlv),
/* OUT3/4 - Line Output */
SOC_DOUBLE_R("Line Playback Switch",
WM8978_OUT3_MIXER_CONTROL, WM8978_OUT4_MIXER_CONTROL, 6, 1, 1),
/* Mixer #3: Boost (Input) mixer */
SOC_DOUBLE_R("PGA Boost (+20dB)",
WM8978_LEFT_ADC_BOOST_CONTROL, WM8978_RIGHT_ADC_BOOST_CONTROL,
8, 1, 0),
SOC_DOUBLE_R_TLV("L2/R2 Boost Volume",
WM8978_LEFT_ADC_BOOST_CONTROL, WM8978_RIGHT_ADC_BOOST_CONTROL,
4, 7, 0, boost_tlv),
SOC_DOUBLE_R_TLV("Aux Boost Volume",
WM8978_LEFT_ADC_BOOST_CONTROL, WM8978_RIGHT_ADC_BOOST_CONTROL,
0, 7, 0, boost_tlv),
/* Input PGA volume */
SOC_DOUBLE_R_TLV("Input PGA Volume",
WM8978_LEFT_INP_PGA_CONTROL, WM8978_RIGHT_INP_PGA_CONTROL,
0, 63, 0, inpga_tlv),
/* Headphone */
SOC_DOUBLE_R("Headphone Switch",
WM8978_LOUT1_HP_CONTROL, WM8978_ROUT1_HP_CONTROL, 6, 1, 1),
/* Speaker */
SOC_DOUBLE_R("Speaker Switch",
WM8978_LOUT2_SPK_CONTROL, WM8978_ROUT2_SPK_CONTROL, 6, 1, 1),
/* DAC / ADC oversampling */
SOC_SINGLE("DAC 128x Oversampling Switch", WM8978_DAC_CONTROL,
5, 1, 0),
SOC_SINGLE("ADC 128x Oversampling Switch", WM8978_ADC_CONTROL,
5, 1, 0),
};
/* Mixer #1: Output (OUT1, OUT2) Mixer: mix AUX, Input mixer output and DAC */
/* Single-bit enable switches for each source feeding the left output mixer */
static const struct snd_kcontrol_new wm8978_left_out_mixer[] = {
SOC_DAPM_SINGLE("Line Bypass Switch", WM8978_LEFT_MIXER_CONTROL, 1, 1, 0),
SOC_DAPM_SINGLE("Aux Playback Switch", WM8978_LEFT_MIXER_CONTROL, 5, 1, 0),
SOC_DAPM_SINGLE("PCM Playback Switch", WM8978_LEFT_MIXER_CONTROL, 0, 1, 0),
};
/* Right-channel counterpart: same switches, right mixer control register */
static const struct snd_kcontrol_new wm8978_right_out_mixer[] = {
SOC_DAPM_SINGLE("Line Bypass Switch", WM8978_RIGHT_MIXER_CONTROL, 1, 1, 0),
SOC_DAPM_SINGLE("Aux Playback Switch", WM8978_RIGHT_MIXER_CONTROL, 5, 1, 0),
SOC_DAPM_SINGLE("PCM Playback Switch", WM8978_RIGHT_MIXER_CONTROL, 0, 1, 0),
};
/* OUT3/OUT4 Mixer not implemented */
/* Mixer #2: Input PGA Mute */
/* Left input sources: both mixers share the single INPUT_CONTROL register,
 * left channel in the low nibble, right channel in the high nibble. */
static const struct snd_kcontrol_new wm8978_left_input_mixer[] = {
SOC_DAPM_SINGLE("L2 Switch", WM8978_INPUT_CONTROL, 2, 1, 0),
SOC_DAPM_SINGLE("MicN Switch", WM8978_INPUT_CONTROL, 1, 1, 0),
SOC_DAPM_SINGLE("MicP Switch", WM8978_INPUT_CONTROL, 0, 1, 0),
};
static const struct snd_kcontrol_new wm8978_right_input_mixer[] = {
SOC_DAPM_SINGLE("R2 Switch", WM8978_INPUT_CONTROL, 6, 1, 0),
SOC_DAPM_SINGLE("MicN Switch", WM8978_INPUT_CONTROL, 5, 1, 0),
SOC_DAPM_SINGLE("MicP Switch", WM8978_INPUT_CONTROL, 4, 1, 0),
};
/*
 * DAPM widget graph: DAC/ADC converters, the four mixers defined above,
 * PGA stages for capture and the headphone/speaker drivers, plus the
 * external pins (inputs LMICN..R2, outputs LHP..RSPK).  Power bits live
 * in the three POWER_MANAGEMENT registers.
 */
static const struct snd_soc_dapm_widget wm8978_dapm_widgets[] = {
SND_SOC_DAPM_DAC("Left DAC", "Left HiFi Playback",
WM8978_POWER_MANAGEMENT_3, 0, 0),
SND_SOC_DAPM_DAC("Right DAC", "Right HiFi Playback",
WM8978_POWER_MANAGEMENT_3, 1, 0),
SND_SOC_DAPM_ADC("Left ADC", "Left HiFi Capture",
WM8978_POWER_MANAGEMENT_2, 0, 0),
SND_SOC_DAPM_ADC("Right ADC", "Right HiFi Capture",
WM8978_POWER_MANAGEMENT_2, 1, 0),
/* Mixer #1: OUT1,2 */
SOC_MIXER_ARRAY("Left Output Mixer", WM8978_POWER_MANAGEMENT_3,
2, 0, wm8978_left_out_mixer),
SOC_MIXER_ARRAY("Right Output Mixer", WM8978_POWER_MANAGEMENT_3,
3, 0, wm8978_right_out_mixer),
SOC_MIXER_ARRAY("Left Input Mixer", WM8978_POWER_MANAGEMENT_2,
2, 0, wm8978_left_input_mixer),
SOC_MIXER_ARRAY("Right Input Mixer", WM8978_POWER_MANAGEMENT_2,
3, 0, wm8978_right_input_mixer),
SND_SOC_DAPM_PGA("Left Boost Mixer", WM8978_POWER_MANAGEMENT_2,
4, 0, NULL, 0),
SND_SOC_DAPM_PGA("Right Boost Mixer", WM8978_POWER_MANAGEMENT_2,
5, 0, NULL, 0),
/* Capture PGAs: note the power bit is inverted (invert = 1) */
SND_SOC_DAPM_PGA("Left Capture PGA", WM8978_LEFT_INP_PGA_CONTROL,
6, 1, NULL, 0),
SND_SOC_DAPM_PGA("Right Capture PGA", WM8978_RIGHT_INP_PGA_CONTROL,
6, 1, NULL, 0),
SND_SOC_DAPM_PGA("Left Headphone Out", WM8978_POWER_MANAGEMENT_2,
7, 0, NULL, 0),
SND_SOC_DAPM_PGA("Right Headphone Out", WM8978_POWER_MANAGEMENT_2,
8, 0, NULL, 0),
SND_SOC_DAPM_PGA("Left Speaker Out", WM8978_POWER_MANAGEMENT_3,
6, 0, NULL, 0),
SND_SOC_DAPM_PGA("Right Speaker Out", WM8978_POWER_MANAGEMENT_3,
5, 0, NULL, 0),
SND_SOC_DAPM_MIXER("OUT4 VMID", WM8978_POWER_MANAGEMENT_3,
8, 0, NULL, 0),
SND_SOC_DAPM_MICBIAS("Mic Bias", WM8978_POWER_MANAGEMENT_1, 4, 0),
SND_SOC_DAPM_INPUT("LMICN"),
SND_SOC_DAPM_INPUT("LMICP"),
SND_SOC_DAPM_INPUT("RMICN"),
SND_SOC_DAPM_INPUT("RMICP"),
SND_SOC_DAPM_INPUT("LAUX"),
SND_SOC_DAPM_INPUT("RAUX"),
SND_SOC_DAPM_INPUT("L2"),
SND_SOC_DAPM_INPUT("R2"),
SND_SOC_DAPM_OUTPUT("LHP"),
SND_SOC_DAPM_OUTPUT("RHP"),
SND_SOC_DAPM_OUTPUT("LSPK"),
SND_SOC_DAPM_OUTPUT("RSPK"),
};
/*
 * Audio routing between the widgets above.  Entries are
 * {sink, control (or NULL for a hard-wired path), source}.
 */
static const struct snd_soc_dapm_route wm8978_dapm_routes[] = {
/* Output mixer */
{"Right Output Mixer", "PCM Playback Switch", "Right DAC"},
{"Right Output Mixer", "Aux Playback Switch", "RAUX"},
{"Right Output Mixer", "Line Bypass Switch", "Right Boost Mixer"},
{"Left Output Mixer", "PCM Playback Switch", "Left DAC"},
{"Left Output Mixer", "Aux Playback Switch", "LAUX"},
{"Left Output Mixer", "Line Bypass Switch", "Left Boost Mixer"},
/* Outputs */
{"Right Headphone Out", NULL, "Right Output Mixer"},
{"RHP", NULL, "Right Headphone Out"},
{"Left Headphone Out", NULL, "Left Output Mixer"},
{"LHP", NULL, "Left Headphone Out"},
{"Right Speaker Out", NULL, "Right Output Mixer"},
{"RSPK", NULL, "Right Speaker Out"},
{"Left Speaker Out", NULL, "Left Output Mixer"},
{"LSPK", NULL, "Left Speaker Out"},
/* Boost Mixer */
{"Right ADC", NULL, "Right Boost Mixer"},
{"Right Boost Mixer", NULL, "RAUX"},
{"Right Boost Mixer", NULL, "Right Capture PGA"},
{"Right Boost Mixer", NULL, "R2"},
{"Left ADC", NULL, "Left Boost Mixer"},
{"Left Boost Mixer", NULL, "LAUX"},
{"Left Boost Mixer", NULL, "Left Capture PGA"},
{"Left Boost Mixer", NULL, "L2"},
/* Input PGA */
{"Right Capture PGA", NULL, "Right Input Mixer"},
{"Left Capture PGA", NULL, "Left Input Mixer"},
{"Right Input Mixer", "R2 Switch", "R2"},
{"Right Input Mixer", "MicN Switch", "RMICN"},
{"Right Input Mixer", "MicP Switch", "RMICP"},
{"Left Input Mixer", "L2 Switch", "L2"},
{"Left Input Mixer", "MicN Switch", "LMICN"},
{"Left Input Mixer", "MicP Switch", "LMICP"},
};
/* PLL divisors */
struct wm8978_pll_div {
u32 k;		/* 24-bit fractional multiplier */
u8 n;		/* integer multiplier */
u8 div2;	/* 1 = pre-divide the PLL input clock by two */
};
/* Scale factor for the fractional part: K is a 2^24 fixed-point value */
#define FIXED_PLL_SIZE (1 << 24)
/*
 * Split the ratio target / source into the PLL coefficients N (integer)
 * and K (24-bit fractional, rounded to nearest).  If the raw integer part
 * would fall below the recommended minimum of 6, the input prescaler is
 * engaged (div2 = 1) and N is recomputed against the halved source clock.
 * A warning is logged when N still ends up outside 6..12.
 */
static void pll_factors(struct snd_soc_component *component,
struct wm8978_pll_div *pll_div, unsigned int target, unsigned int source)
{
	unsigned int n = target / source;
	unsigned int rem;
	u64 frac;

	pll_div->div2 = 0;
	if (n < 6) {
		/* Prescale the input by two and try again */
		pll_div->div2 = 1;
		source /= 2;
		n = target / source;
	}

	if (n < 6 || n > 12)
		dev_warn(component->dev,
			 "WM8978 N value exceeds recommended range! N = %u\n",
			 n);

	pll_div->n = n;

	/* K = remainder * 2^24 / source, rounded to the nearest integer */
	rem = target - source * n;
	frac = FIXED_PLL_SIZE * (long long)rem + source / 2;
	do_div(frac, source);
	pll_div->k = frac & 0xFFFFFFFF;
}
/* MCLK dividers */
/* Divider ratio i is mclk_numerator[i] / mclk_denominator[i]; the two
 * arrays are indexed together, e.g. index 1 means a divisor of 3/2. */
static const int mclk_numerator[] = {1, 3, 2, 3, 4, 6, 8, 12};
static const int mclk_denominator[] = {1, 2, 1, 1, 1, 1, 1, 1};
/*
* find index >= idx, such that, for a given f_out,
* 3 * f_mclk / 4 <= f_PLLOUT < 13 * f_mclk / 4
* f_out can be f_256fs or f_opclk, currently only used for f_256fs. Can be
* generalised for f_opclk with suitable coefficient arrays, but currently
* the OPCLK divisor is calculated directly, not iteratively.
*/
/*
 * Find the first MCLK divider index for which the implied PLL output
 * frequency satisfies 3/4 * f_mclk <= f_PLLOUT < 13/4 * f_mclk.
 * On success *f_pllout receives that frequency and the index is returned;
 * -EINVAL if no divider fits.
 */
static int wm8978_enum_mclk(unsigned int f_out, unsigned int f_mclk,
unsigned int *f_pllout)
{
	int idx;

	for (idx = 0; idx < ARRAY_SIZE(mclk_numerator); idx++) {
		/* Work with 4 * f_PLLOUT to keep the bounds integral */
		unsigned int quad = 4 * f_out * mclk_numerator[idx] /
			mclk_denominator[idx];

		if (quad >= 3 * f_mclk && quad < 13 * f_mclk) {
			*f_pllout = quad / 4;
			return idx;
		}
	}

	return -EINVAL;
}
/*
* Calculate internal frequencies and dividers, according to Figure 40
* "PLL and Clock Select Circuit" in WM8978 datasheet Rev. 2.6
*
* Two modes:
*  - f_opclk set:  the PLL must drive OPCLK on GPIO1; pick an OPCLK
*    divisor and derive f_pllout from it (MCLK divider chosen later).
*  - f_256fs set:  the PLL drives the codec; pick the MCLK divider index
*    via wm8978_enum_mclk().
* Then program N/K into the PLL registers, cycling the PLL enable bit
* around the register writes.
*
* Returns 0 on success, -EINVAL if no valid configuration exists.
*/
static int wm8978_configure_pll(struct snd_soc_component *component)
{
struct wm8978_priv *wm8978 = snd_soc_component_get_drvdata(component);
struct wm8978_pll_div pll_div;
unsigned int f_opclk = wm8978->f_opclk, f_mclk = wm8978->f_mclk,
f_256fs = wm8978->f_256fs;
unsigned int f2;
/* Without a known MCLK frequency nothing can be calculated */
if (!f_mclk)
return -EINVAL;
if (f_opclk) {
unsigned int opclk_div;
/* Cannot set up MCLK divider now, do later */
wm8978->mclk_idx = -1;
/*
* The user needs OPCLK. Choose OPCLKDIV to put
* 6 <= R = f2 / f1 < 13, 1 <= OPCLKDIV <= 4.
* f_opclk = f_mclk * prescale * R / 4 / OPCLKDIV, where
* prescale = 1, or prescale = 2. Prescale is calculated inside
* pll_factors(). We have to select f_PLLOUT, such that
* f_mclk * 3 / 4 <= f_PLLOUT < f_mclk * 13 / 4. Must be
* f_mclk * 3 / 16 <= f_opclk < f_mclk * 13 / 4.
*/
if (16 * f_opclk < 3 * f_mclk || 4 * f_opclk >= 13 * f_mclk)
return -EINVAL;
if (4 * f_opclk < 3 * f_mclk)
/* Have to use OPCLKDIV */
opclk_div = (3 * f_mclk / 4 + f_opclk - 1) / f_opclk;
else
opclk_div = 1;
dev_dbg(component->dev, "%s: OPCLKDIV=%d\n", __func__, opclk_div);
/* OPCLKDIV field is bits 5:4 of the GPIO control register */
snd_soc_component_update_bits(component, WM8978_GPIO_CONTROL, 0x30,
(opclk_div - 1) << 4);
wm8978->f_pllout = f_opclk * opclk_div;
} else if (f_256fs) {
/*
* Not using OPCLK, but PLL is used for the codec, choose R:
* 6 <= R = f2 / f1 < 13, to put 1 <= MCLKDIV <= 12.
* f_256fs = f_mclk * prescale * R / 4 / MCLKDIV, where
* prescale = 1, or prescale = 2. Prescale is calculated inside
* pll_factors(). We have to select f_PLLOUT, such that
* f_mclk * 3 / 4 <= f_PLLOUT < f_mclk * 13 / 4. Must be
* f_mclk * 3 / 48 <= f_256fs < f_mclk * 13 / 4. This means MCLK
* must be 3.781MHz <= f_MCLK <= 32.768MHz
*/
int idx = wm8978_enum_mclk(f_256fs, f_mclk, &wm8978->f_pllout);
if (idx < 0)
return idx;
wm8978->mclk_idx = idx;
} else {
/* Neither OPCLK nor a sampling rate requested - nothing to do */
return -EINVAL;
}
/* The PLL post-divides its VCO by 4: f2 is the VCO target */
f2 = wm8978->f_pllout * 4;
dev_dbg(component->dev, "%s: f_MCLK=%uHz, f_PLLOUT=%uHz\n", __func__,
wm8978->f_mclk, wm8978->f_pllout);
pll_factors(component, &pll_div, f2, wm8978->f_mclk);
dev_dbg(component->dev, "%s: calculated PLL N=0x%x, K=0x%x, div2=%d\n",
__func__, pll_div.n, pll_div.k, pll_div.div2);
/* Turn PLL off for configuration... */
snd_soc_component_update_bits(component, WM8978_POWER_MANAGEMENT_1, 0x20, 0);
/* N and the prescale flag share one register; K spans three 9-bit ones */
snd_soc_component_write(component, WM8978_PLL_N, (pll_div.div2 << 4) | pll_div.n);
snd_soc_component_write(component, WM8978_PLL_K1, pll_div.k >> 18);
snd_soc_component_write(component, WM8978_PLL_K2, (pll_div.k >> 9) & 0x1ff);
snd_soc_component_write(component, WM8978_PLL_K3, pll_div.k & 0x1ff);
/* ...and on again */
snd_soc_component_update_bits(component, WM8978_POWER_MANAGEMENT_1, 0x20, 0x20);
if (f_opclk)
/* Output PLL (OPCLK) to GPIO1 */
snd_soc_component_update_bits(component, WM8978_GPIO_CONTROL, 7, 4);
return 0;
}
/*
 * Configure WM8978 clock dividers.
 *
 * WM8978_OPCLKRATE stores the requested OPCLK rate and, if MCLK is
 * already known, reprograms the PLL immediately.  WM8978_BCLKDIV writes
 * the BCLK divisor field (bits 4:2 of the clocking register).
 * Returns 0 on success or a negative error code.
 */
static int wm8978_set_dai_clkdiv(struct snd_soc_dai *codec_dai,
int div_id, int div)
{
	struct snd_soc_component *component = codec_dai->component;
	struct wm8978_priv *wm8978 = snd_soc_component_get_drvdata(component);
	int ret = 0;

	switch (div_id) {
	case WM8978_OPCLKRATE:
		wm8978->f_opclk = div;
		if (wm8978->f_mclk)
			/*
			 * MCLK is known and the user wants OPCLK: start the
			 * PLL (and thus OPCLK) right away, matching the
			 * requested rate as closely as possible.  When the
			 * sampling rate becomes known later the PLL is left
			 * alone so OPCLK is never interrupted; for the usual
			 * 256fs / 512fs OPCLK requests an exact MCLK divider
			 * will still be found.
			 */
			ret = wm8978_configure_pll(component);
		break;
	case WM8978_BCLKDIV:
		/* Only bits 4:2 may be set in the divisor value */
		if (div & ~0x1c)
			return -EINVAL;
		snd_soc_component_update_bits(component, WM8978_CLOCKING, 0x1c, div);
		break;
	default:
		return -EINVAL;
	}

	dev_dbg(component->dev, "%s: ID %d, value %u\n", __func__, div_id, div);

	return ret;
}
/*
* @freq: when .set_pll() us not used, freq is codec MCLK input frequency
*
* Selects the codec system clock source.  With a non-zero freq the MCLK
* rate is recorded and, if OPCLK is in use, the PLL reconfigured.  When
* switching away from the PLL (freq == 0 or clk_id == WM8978_MCLK) the
* codec is reclocked from MCLK, GPIO1 is returned to input mode and the
* PLL powered down.
*/
static int wm8978_set_dai_sysclk(struct snd_soc_dai *codec_dai, int clk_id,
unsigned int freq, int dir)
{
struct snd_soc_component *component = codec_dai->component;
struct wm8978_priv *wm8978 = snd_soc_component_get_drvdata(component);
int ret = 0;
dev_dbg(component->dev, "%s: ID %d, freq %u\n", __func__, clk_id, freq);
if (freq) {
wm8978->f_mclk = freq;
/* Even if MCLK is used for system clock, might have to drive OPCLK */
if (wm8978->f_opclk)
ret = wm8978_configure_pll(component);
/* Our sysclk is fixed to 256 * fs, will configure in .hw_params() */
if (!ret)
wm8978->sysclk = clk_id;
}
if (wm8978->sysclk == WM8978_PLL && (!freq || clk_id == WM8978_MCLK)) {
/* Clock CODEC directly from MCLK */
snd_soc_component_update_bits(component, WM8978_CLOCKING, 0x100, 0);
/* GPIO1 into default mode as input - before configuring PLL */
snd_soc_component_update_bits(component, WM8978_GPIO_CONTROL, 7, 0);
/* Turn off PLL */
snd_soc_component_update_bits(component, WM8978_POWER_MANAGEMENT_1, 0x20, 0);
wm8978->sysclk = WM8978_MCLK;
wm8978->f_pllout = 0;
wm8978->f_opclk = 0;
}
return ret;
}
/*
 * Set ADC and Voice DAC format.
 *
 * Translates the ASoC DAI format flags into the WM8978 audio interface
 * register (data format and clock polarities) and the master/slave bit
 * of the clocking register.  Returns 0, or -EINVAL for an unsupported
 * combination (in which case no register is written).
 */
static int wm8978_set_dai_fmt(struct snd_soc_dai *codec_dai, unsigned int fmt)
{
	struct snd_soc_component *component = codec_dai->component;
	/*
	 * Clear the fields that will be recomputed: BCLK polarity (0x100),
	 * LRC polarity (0x80) and data format (0x18).
	 */
	u16 iface = snd_soc_component_read(component, WM8978_AUDIO_INTERFACE) & ~0x198;
	u16 clk = snd_soc_component_read(component, WM8978_CLOCKING);
	u16 mode, inv;

	dev_dbg(component->dev, "%s\n", __func__);

	/* Master/slave: bit 0 of the clocking register */
	switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
	case SND_SOC_DAIFMT_CBM_CFM:
		clk |= 1;
		break;
	case SND_SOC_DAIFMT_CBS_CFS:
		clk &= ~1;
		break;
	default:
		return -EINVAL;
	}

	/* Data format field */
	switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
	case SND_SOC_DAIFMT_I2S:
		mode = 0x10;
		break;
	case SND_SOC_DAIFMT_RIGHT_J:
		mode = 0;
		break;
	case SND_SOC_DAIFMT_LEFT_J:
		mode = 0x8;
		break;
	case SND_SOC_DAIFMT_DSP_A:
		mode = 0x18;
		break;
	default:
		return -EINVAL;
	}

	/* Clock inversion bits */
	switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
	case SND_SOC_DAIFMT_NB_NF:
		inv = 0;
		break;
	case SND_SOC_DAIFMT_IB_IF:
		inv = 0x180;
		break;
	case SND_SOC_DAIFMT_IB_NF:
		inv = 0x100;
		break;
	case SND_SOC_DAIFMT_NB_IF:
		inv = 0x80;
		break;
	default:
		return -EINVAL;
	}

	iface |= mode | inv;

	snd_soc_component_write(component, WM8978_AUDIO_INTERFACE, iface);
	snd_soc_component_write(component, WM8978_CLOCKING, clk);

	return 0;
}
/*
* Set PCM DAI bit size and sample rate.
*
* Programs the word length and digital filter coefficient, then selects
* the MCLK divider that brings the codec clock closest to 256 * fs.  If
* the codec is clocked from the PLL and OPCLK is not in use, the PLL is
* (re)configured here, once the sampling rate is known.  Finally the
* clock source bit is flipped if the requested source differs from the
* one currently programmed.
*/
static int wm8978_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params,
struct snd_soc_dai *dai)
{
struct snd_soc_component *component = dai->component;
struct wm8978_priv *wm8978 = snd_soc_component_get_drvdata(component);
/* Word length mask = 0x60 */
u16 iface_ctl = snd_soc_component_read(component, WM8978_AUDIO_INTERFACE) & ~0x60;
/* Sampling rate mask = 0xe (for filters) */
u16 add_ctl = snd_soc_component_read(component, WM8978_ADDITIONAL_CONTROL) & ~0xe;
u16 clking = snd_soc_component_read(component, WM8978_CLOCKING);
/* Bit 8 of the clocking register reflects the active clock source */
enum wm8978_sysclk_src current_clk_id = (clking & 0x100) ?
WM8978_PLL : WM8978_MCLK;
unsigned int f_sel, diff, diff_best = INT_MAX;
int i, best = 0;
if (!wm8978->f_mclk)
return -EINVAL;
/* bit size - other widths cannot occur, constrained by WM8978_FORMATS */
switch (params_width(params)) {
case 16:
break;
case 20:
iface_ctl |= 0x20;
break;
case 24:
iface_ctl |= 0x40;
break;
case 32:
iface_ctl |= 0x60;
break;
}
/* filter coefficient - 44.1kHz and 48kHz share the 0 setting */
switch (params_rate(params)) {
case 8000:
add_ctl |= 0x5 << 1;
break;
case 11025:
add_ctl |= 0x4 << 1;
break;
case 16000:
add_ctl |= 0x3 << 1;
break;
case 22050:
add_ctl |= 0x2 << 1;
break;
case 32000:
add_ctl |= 0x1 << 1;
break;
case 44100:
case 48000:
break;
}
/* Sampling rate is known now, can configure the MCLK divider */
wm8978->f_256fs = params_rate(params) * 256;
if (wm8978->sysclk == WM8978_MCLK) {
wm8978->mclk_idx = -1;
f_sel = wm8978->f_mclk;
} else {
if (!wm8978->f_opclk) {
/* We only enter here, if OPCLK is not used */
int ret = wm8978_configure_pll(component);
if (ret < 0)
return ret;
}
f_sel = wm8978->f_pllout;
}
if (wm8978->mclk_idx < 0) {
/* Either MCLK is used directly, or OPCLK is used */
if (f_sel < wm8978->f_256fs || f_sel > 12 * wm8978->f_256fs)
return -EINVAL;
/*
* Scan all dividers for the smallest deviation from 256fs;
* an exact match (diff == 0) terminates the search early, so a
* non-zero diff after the loop means no exact divisor exists.
*/
for (i = 0; i < ARRAY_SIZE(mclk_numerator); i++) {
diff = abs(wm8978->f_256fs * 3 -
f_sel * 3 * mclk_denominator[i] / mclk_numerator[i]);
if (diff < diff_best) {
diff_best = diff;
best = i;
}
if (!diff)
break;
}
} else {
/* OPCLK not used, codec driven by PLL */
best = wm8978->mclk_idx;
diff = 0;
}
if (diff)
dev_warn(component->dev, "Imprecise sampling rate: %uHz%s\n",
f_sel * mclk_denominator[best] / mclk_numerator[best] / 256,
wm8978->sysclk == WM8978_MCLK ?
", consider using PLL" : "");
dev_dbg(component->dev, "%s: width %d, rate %u, MCLK divisor #%d\n", __func__,
params_width(params), params_rate(params), best);
/* MCLK divisor mask = 0xe0 */
snd_soc_component_update_bits(component, WM8978_CLOCKING, 0xe0, best << 5);
snd_soc_component_write(component, WM8978_AUDIO_INTERFACE, iface_ctl);
snd_soc_component_write(component, WM8978_ADDITIONAL_CONTROL, add_ctl);
/* Switch the clock source only if it actually changed */
if (wm8978->sysclk != current_clk_id) {
if (wm8978->sysclk == WM8978_PLL)
/* Run CODEC from PLL instead of MCLK */
snd_soc_component_update_bits(component, WM8978_CLOCKING,
0x100, 0x100);
else
/* Clock CODEC directly from MCLK */
snd_soc_component_update_bits(component, WM8978_CLOCKING, 0x100, 0);
}
return 0;
}
/*
 * Soft-mute the DAC output: bit 6 of the DAC control register is the
 * mute flag, set while @mute is non-zero.  Always succeeds.
 */
static int wm8978_mute(struct snd_soc_dai *dai, int mute, int direction)
{
	struct snd_soc_component *component = dai->component;

	dev_dbg(component->dev, "%s: %d\n", __func__, mute);

	snd_soc_component_update_bits(component, WM8978_DAC_CONTROL, 0x40,
				      mute ? 0x40 : 0);

	return 0;
}
/*
* Bias level management.  power1 keeps the current POWER_MANAGEMENT_1
* contents with the two VMID selection bits cleared; each level then ORs
* in its own VMID/bias configuration.  Coming out of OFF the decoupling
* caps are first charged through the low 5k VMID impedance for 100ms
* before dropping to the 500k standby impedance.
*/
static int wm8978_set_bias_level(struct snd_soc_component *component,
enum snd_soc_bias_level level)
{
u16 power1 = snd_soc_component_read(component, WM8978_POWER_MANAGEMENT_1) & ~3;
switch (level) {
case SND_SOC_BIAS_ON:
case SND_SOC_BIAS_PREPARE:
power1 |= 1; /* VMID 75k */
snd_soc_component_write(component, WM8978_POWER_MANAGEMENT_1, power1);
break;
case SND_SOC_BIAS_STANDBY:
/* bit 3: enable bias, bit 2: enable I/O tie off buffer */
power1 |= 0xc;
if (snd_soc_component_get_bias_level(component) == SND_SOC_BIAS_OFF) {
/* Initial cap charge at VMID 5k */
snd_soc_component_write(component, WM8978_POWER_MANAGEMENT_1,
power1 | 0x3);
mdelay(100);
}
power1 |= 0x2; /* VMID 500k */
snd_soc_component_write(component, WM8978_POWER_MANAGEMENT_1, power1);
break;
case SND_SOC_BIAS_OFF:
/* Preserve PLL - OPCLK may be used by someone */
/* Mask ~0x20 clears every bit except the PLL enable */
snd_soc_component_update_bits(component, WM8978_POWER_MANAGEMENT_1, ~0x20, 0);
snd_soc_component_write(component, WM8978_POWER_MANAGEMENT_2, 0);
snd_soc_component_write(component, WM8978_POWER_MANAGEMENT_3, 0);
break;
}
dev_dbg(component->dev, "%s: %d, %x\n", __func__, level, power1);
return 0;
}
/* Sample formats accepted by the DAI; see the width switch in hw_params() */
#define WM8978_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S20_3LE | \
SNDRV_PCM_FMTBIT_S24_LE | SNDRV_PCM_FMTBIT_S32_LE)
/* DAI callbacks; mute applies to playback only (no_capture_mute) */
static const struct snd_soc_dai_ops wm8978_dai_ops = {
.hw_params = wm8978_hw_params,
.mute_stream = wm8978_mute,
.set_fmt = wm8978_set_dai_fmt,
.set_clkdiv = wm8978_set_dai_clkdiv,
.set_sysclk = wm8978_set_dai_sysclk,
.no_capture_mute = 1,
};
/* Also supports 12kHz */
/* Single full-duplex DAI; capture and playback must share one rate
 * (symmetric_rate), as they use the same 256fs codec clock. */
static struct snd_soc_dai_driver wm8978_dai = {
.name = "wm8978-hifi",
.playback = {
.stream_name = "Playback",
.channels_min = 1,
.channels_max = 2,
.rates = SNDRV_PCM_RATE_8000_48000,
.formats = WM8978_FORMATS,
},
.capture = {
.stream_name = "Capture",
.channels_min = 1,
.channels_max = 2,
.rates = SNDRV_PCM_RATE_8000_48000,
.formats = WM8978_FORMATS,
},
.ops = &wm8978_dai_ops,
.symmetric_rate = 1,
};
/*
* Power the codec fully down for suspend, including the PLL, and mark
* the register cache dirty so resume re-syncs every register.
*/
static int wm8978_suspend(struct snd_soc_component *component)
{
struct wm8978_priv *wm8978 = snd_soc_component_get_drvdata(component);
snd_soc_component_force_bias_level(component, SND_SOC_BIAS_OFF);
/* Also switch PLL off */
snd_soc_component_write(component, WM8978_POWER_MANAGEMENT_1, 0);
regcache_mark_dirty(wm8978->regmap);
return 0;
}
/*
* Restore the register state after suspend, return to standby bias and
* re-enable the PLL if it was running (f_pllout non-zero) before suspend.
*/
static int wm8978_resume(struct snd_soc_component *component)
{
struct wm8978_priv *wm8978 = snd_soc_component_get_drvdata(component);
/* Sync reg_cache with the hardware */
regcache_sync(wm8978->regmap);
snd_soc_component_force_bias_level(component, SND_SOC_BIAS_STANDBY);
if (wm8978->f_pllout)
/* Switch PLL on */
snd_soc_component_update_bits(component, WM8978_POWER_MANAGEMENT_1, 0x20, 0x20);
return 0;
}
/*
* These registers contain an "update" bit - bit 8. This means, for example,
* that one can write new DAC digital volume for both channels, but only when
* the update bit is set, will also the volume be updated - simultaneously for
* both channels.
*/
static const int update_reg[] = {
WM8978_LEFT_DAC_DIGITAL_VOLUME,
WM8978_RIGHT_DAC_DIGITAL_VOLUME,
WM8978_LEFT_ADC_DIGITAL_VOLUME,
WM8978_RIGHT_ADC_DIGITAL_VOLUME,
WM8978_LEFT_INP_PGA_CONTROL,
WM8978_RIGHT_INP_PGA_CONTROL,
WM8978_LOUT1_HP_CONTROL,
WM8978_ROUT1_HP_CONTROL,
WM8978_LOUT2_SPK_CONTROL,
WM8978_ROUT2_SPK_CONTROL,
};
static int wm8978_probe(struct snd_soc_component *component)
{
struct wm8978_priv *wm8978 = snd_soc_component_get_drvdata(component);
int i;
/*
* Set default system clock to PLL, it is more precise, this is also the
* default hardware setting
*/
wm8978->sysclk = WM8978_PLL;
/*
* Set the update bit in all registers, that have one. This way all
* writes to those registers will also cause the update bit to be
* written.
*/
for (i = 0; i < ARRAY_SIZE(update_reg); i++)
snd_soc_component_update_bits(component, update_reg[i], 0x100, 0x100);
return 0;
}
/* ASoC component descriptor: ties together the controls, DAPM graph and
 * lifecycle callbacks defined above */
static const struct snd_soc_component_driver soc_component_dev_wm8978 = {
.probe = wm8978_probe,
.suspend = wm8978_suspend,
.resume = wm8978_resume,
.set_bias_level = wm8978_set_bias_level,
.controls = wm8978_snd_controls,
.num_controls = ARRAY_SIZE(wm8978_snd_controls),
.dapm_widgets = wm8978_dapm_widgets,
.num_dapm_widgets = ARRAY_SIZE(wm8978_dapm_widgets),
.dapm_routes = wm8978_dapm_routes,
.num_dapm_routes = ARRAY_SIZE(wm8978_dapm_routes),
.idle_bias_on = 1,
.use_pmdown_time = 1,
.endianness = 1,
.non_legacy_dai_naming = 1,
};
/* Register map: 7-bit register address, 9-bit values, cached in an rbtree */
static const struct regmap_config wm8978_regmap_config = {
.reg_bits = 7,
.val_bits = 9,
.max_register = WM8978_MAX_REGISTER,
.volatile_reg = wm8978_volatile,
.cache_type = REGCACHE_RBTREE,
.reg_defaults = wm8978_reg_defaults,
.num_reg_defaults = ARRAY_SIZE(wm8978_reg_defaults),
};
/*
 * I2C probe: allocate the driver state, set up regmap, reset the chip and
 * register the ASoC component.  All resources are device-managed, so no
 * explicit cleanup is needed on the error paths.
 */
static int wm8978_i2c_probe(struct i2c_client *i2c,
const struct i2c_device_id *id)
{
	struct wm8978_priv *wm8978;
	int ret;

	wm8978 = devm_kzalloc(&i2c->dev, sizeof(*wm8978), GFP_KERNEL);
	if (!wm8978)
		return -ENOMEM;

	wm8978->regmap = devm_regmap_init_i2c(i2c, &wm8978_regmap_config);
	if (IS_ERR(wm8978->regmap)) {
		ret = PTR_ERR(wm8978->regmap);
		dev_err(&i2c->dev, "Failed to allocate regmap: %d\n", ret);
		return ret;
	}

	i2c_set_clientdata(i2c, wm8978);

	/* Reset the codec to its register defaults */
	ret = regmap_write(wm8978->regmap, WM8978_RESET, 0);
	if (ret != 0) {
		dev_err(&i2c->dev, "Failed to issue reset: %d\n", ret);
		return ret;
	}

	ret = devm_snd_soc_register_component(&i2c->dev,
			&soc_component_dev_wm8978, &wm8978_dai, 1);
	if (ret != 0)
		dev_err(&i2c->dev, "Failed to register CODEC: %d\n", ret);

	return ret;
}
/* I2C device ID and devicetree match tables */
static const struct i2c_device_id wm8978_i2c_id[] = {
{ "wm8978", 0 },
{ }
};
MODULE_DEVICE_TABLE(i2c, wm8978_i2c_id);
static const struct of_device_id wm8978_of_match[] = {
{ .compatible = "wlf,wm8978", },
{ }
};
MODULE_DEVICE_TABLE(of, wm8978_of_match);
/* I2C driver registration and module boilerplate */
static struct i2c_driver wm8978_i2c_driver = {
.driver = {
.name = "wm8978",
.of_match_table = wm8978_of_match,
},
.probe = wm8978_i2c_probe,
.id_table = wm8978_i2c_id,
};
module_i2c_driver(wm8978_i2c_driver);
MODULE_DESCRIPTION("ASoC WM8978 codec driver");
MODULE_AUTHOR("Guennadi Liakhovetski <g.liakhovetski@gmx.de>");
MODULE_LICENSE("GPL");
| gpl-2.0 |
Hardslog/GrimTF300 | arch/sh/kernel/cpu/sh4a/clock-sh7780.c | 647 | 2831 | /*
* arch/sh/kernel/cpu/sh4a/clock-sh7780.c
*
* SH7780 support for the clock framework
*
* Copyright (C) 2005 Paul Mundt
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*/
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/clkdev.h>
#include <asm/clock.h>
#include <asm/freq.h>
#include <asm/io.h>
/* Divisor lookup tables, indexed by the corresponding FRQCR bit fields:
 * ifc = CPU clock, bfc = bus clock, pfc = peripheral clock, cfc = SHyway */
static int ifc_divisors[] = { 2, 4 };
static int bfc_divisors[] = { 1, 1, 1, 8, 12, 16, 24, 1 };
static int pfc_divisors[] = { 1, 24, 24, 1 };
static int cfc_divisors[] = { 1, 1, 4, 1, 6, 1, 1, 1 };
/* Derive the master clock rate by multiplying the board-supplied base
 * rate by the peripheral divisor selected in FRQCR bits 1:0 */
static void master_clk_init(struct clk *clk)
{
clk->rate *= pfc_divisors[__raw_readl(FRQCR) & 0x0003];
}
static struct clk_ops sh7780_master_clk_ops = {
.init = master_clk_init,
};
/* Module (peripheral) clock: parent rate divided per FRQCR bits 1:0 */
static unsigned long module_clk_recalc(struct clk *clk)
{
int idx = (__raw_readl(FRQCR) & 0x0003);
return clk->parent->rate / pfc_divisors[idx];
}
static struct clk_ops sh7780_module_clk_ops = {
.recalc = module_clk_recalc,
};
/* Bus clock: parent rate divided per FRQCR bits 18:16 */
static unsigned long bus_clk_recalc(struct clk *clk)
{
int idx = ((__raw_readl(FRQCR) >> 16) & 0x0007);
return clk->parent->rate / bfc_divisors[idx];
}
static struct clk_ops sh7780_bus_clk_ops = {
.recalc = bus_clk_recalc,
};
/* CPU clock: parent rate divided per the single-bit IFC field (FRQCR bit 24) */
static unsigned long cpu_clk_recalc(struct clk *clk)
{
	unsigned long frqcr = __raw_readl(FRQCR);

	return clk->parent->rate / ifc_divisors[(frqcr >> 24) & 0x0001];
}
static struct clk_ops sh7780_cpu_clk_ops = {
.recalc = cpu_clk_recalc,
};
/* Ops table indexed by the core clock framework's clock index
 * (master, module, bus, cpu - in that order) */
static struct clk_ops *sh7780_clk_ops[] = {
&sh7780_master_clk_ops,
&sh7780_module_clk_ops,
&sh7780_bus_clk_ops,
&sh7780_cpu_clk_ops,
};
/*
 * Hand the SH7780-specific clk_ops to the generic SH clock framework.
 * Out-of-range indices (including negative ones, which compare as huge
 * unsigned values against ARRAY_SIZE) leave *ops untouched.
 */
void __init arch_init_clk_ops(struct clk_ops **ops, int idx)
{
	if (idx >= ARRAY_SIZE(sh7780_clk_ops))
		return;

	*ops = sh7780_clk_ops[idx];
}
/* SHyway clock: parent rate divided per FRQCR bits 22:20 */
static unsigned long shyway_clk_recalc(struct clk *clk)
{
int idx = ((__raw_readl(FRQCR) >> 20) & 0x0007);
return clk->parent->rate / cfc_divisors[idx];
}
static struct clk_ops sh7780_shyway_clk_ops = {
.recalc = shyway_clk_recalc,
};
/* Always-on on-chip SHyway clock; parent is assigned in arch_clk_init() */
static struct clk sh7780_shyway_clk = {
.flags = CLK_ENABLE_ON_INIT,
.ops = &sh7780_shyway_clk_ops,
};
/*
* Additional SH7780-specific on-chip clocks that aren't already part of the
* clock framework
*/
static struct clk *sh7780_onchip_clocks[] = {
&sh7780_shyway_clk,
};
/* clkdev aliases so consumers can clk_get() the clocks by name */
static struct clk_lookup lookups[] = {
/* main clocks */
CLKDEV_CON_ID("shyway_clk", &sh7780_shyway_clk),
};
/*
* Register the extra on-chip clocks with the clock framework, parenting
* them to the master clock, and publish the clkdev lookup table.
* Registration errors are OR-ed together, so a non-zero return means at
* least one clock failed to register.
*/
int __init arch_clk_init(void)
{
struct clk *clk;
int i, ret = 0;
cpg_clk_init();
clk = clk_get(NULL, "master_clk");
for (i = 0; i < ARRAY_SIZE(sh7780_onchip_clocks); i++) {
struct clk *clkp = sh7780_onchip_clocks[i];
clkp->parent = clk;
ret |= clk_register(clkp);
}
clk_put(clk);
clkdev_add_table(lookups, ARRAY_SIZE(lookups));
return ret;
}
| gpl-2.0 |
DennisBold/android_kernel_htc_pyramid | arch/arm/mach-msm/acpuclock-8930aa.c | 903 | 8696 | /*
* Copyright (c) 2011-2012, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <mach/rpm-regulator.h>
#include <mach/msm_bus_board.h>
#include <mach/msm_bus.h>
#include "acpuclock.h"
#include "acpuclock-krait.h"
/* Corner type vreg VDD values */
#define LVL_NONE RPM_VREG_CORNER_NONE
#define LVL_LOW RPM_VREG_CORNER_LOW
#define LVL_NOM RPM_VREG_CORNER_NOMINAL
#define LVL_HIGH RPM_VREG_CORNER_HIGH
/* HFPLL register layout and configuration for the Krait cores; the
 * l_max fields bound which L-values each VDD corner can sustain */
static struct hfpll_data hfpll_data __initdata = {
.mode_offset = 0x00,
.l_offset = 0x08,
.m_offset = 0x0C,
.n_offset = 0x10,
.config_offset = 0x04,
.config_val = 0x7845C665,
.has_droop_ctl = true,
.droop_offset = 0x14,
.droop_val = 0x0108C000,
.low_vdd_l_max = 22,
.nom_vdd_l_max = 42,
.vdd[HFPLL_VDD_NONE] = LVL_NONE,
.vdd[HFPLL_VDD_LOW] = LVL_LOW,
.vdd[HFPLL_VDD_NOM] = LVL_NOM,
.vdd[HFPLL_VDD_HIGH] = LVL_HIGH,
};
/* Per-domain clock/regulator description: one entry per CPU core plus
 * the shared L2; voltages are the regulator maxima in microvolts */
static struct scalable scalable[] __initdata = {
[CPU0] = {
.hfpll_phys_base = 0x00903200,
.aux_clk_sel_phys = 0x02088014,
.aux_clk_sel = 3,
.sec_clk_sel = 2,
.l2cpmr_iaddr = 0x4501,
.vreg[VREG_CORE] = { "krait0", 1300000 },
.vreg[VREG_MEM] = { "krait0_mem", 1150000 },
.vreg[VREG_DIG] = { "krait0_dig", 1150000 },
.vreg[VREG_HFPLL_A] = { "krait0_hfpll", 1800000 },
},
[CPU1] = {
.hfpll_phys_base = 0x00903300,
.aux_clk_sel_phys = 0x02098014,
.aux_clk_sel = 3,
.sec_clk_sel = 2,
.l2cpmr_iaddr = 0x5501,
.vreg[VREG_CORE] = { "krait1", 1300000 },
.vreg[VREG_MEM] = { "krait1_mem", 1150000 },
.vreg[VREG_DIG] = { "krait1_dig", 1150000 },
.vreg[VREG_HFPLL_A] = { "krait1_hfpll", 1800000 },
},
[L2] = {
.hfpll_phys_base = 0x00903400,
.aux_clk_sel_phys = 0x02011028,
.aux_clk_sel = 3,
.sec_clk_sel = 2,
.l2cpmr_iaddr = 0x0500,
.vreg[VREG_HFPLL_A] = { "l2_hfpll", 1800000 },
},
};
/* Bus bandwidth levels referenced by index from the L2 frequency table */
static struct msm_bus_paths bw_level_tbl[] __initdata = {
[0] = BW_MBPS(640), /* At least 80 MHz on bus. */
[1] = BW_MBPS(1064), /* At least 133 MHz on bus. */
[2] = BW_MBPS(1600), /* At least 200 MHz on bus. */
[3] = BW_MBPS(2128), /* At least 266 MHz on bus. */
[4] = BW_MBPS(3200), /* At least 400 MHz on bus. */
[5] = BW_MBPS(3600), /* At least 450 MHz on bus. */
[6] = BW_MBPS(3936), /* At least 492 MHz on bus. */
[7] = BW_MBPS(4264), /* At least 533 MHz on bus. */
};
static struct msm_bus_scale_pdata bus_scale_data __initdata = {
.usecase = bw_level_tbl,
.num_usecases = ARRAY_SIZE(bw_level_tbl),
.active_only = 1,
.name = "acpuclk-8930aa",
};
/* L2 cache frequency table: {src clock config}, VDD-dig corner, VDD-mem
 * (uV), bandwidth level index into bw_level_tbl; zero entry terminates */
static struct l2_level l2_freq_tbl[] __initdata = {
[0] = { { 384000, PLL_8, 0, 0x00 }, LVL_LOW, 1050000, 1 },
[1] = { { 432000, HFPLL, 2, 0x20 }, LVL_NOM, 1050000, 2 },
[2] = { { 486000, HFPLL, 2, 0x24 }, LVL_NOM, 1050000, 2 },
[3] = { { 540000, HFPLL, 2, 0x28 }, LVL_NOM, 1050000, 2 },
[4] = { { 594000, HFPLL, 1, 0x16 }, LVL_NOM, 1050000, 2 },
[5] = { { 648000, HFPLL, 1, 0x18 }, LVL_NOM, 1050000, 4 },
[6] = { { 702000, HFPLL, 1, 0x1A }, LVL_NOM, 1050000, 4 },
[7] = { { 756000, HFPLL, 1, 0x1C }, LVL_HIGH, 1150000, 4 },
[8] = { { 810000, HFPLL, 1, 0x1E }, LVL_HIGH, 1150000, 4 },
[9] = { { 864000, HFPLL, 1, 0x20 }, LVL_HIGH, 1150000, 4 },
[10] = { { 918000, HFPLL, 1, 0x22 }, LVL_HIGH, 1150000, 7 },
[11] = { { 972000, HFPLL, 1, 0x24 }, LVL_HIGH, 1150000, 7 },
[12] = { { 1026000, HFPLL, 1, 0x26 }, LVL_HIGH, 1150000, 7 },
[13] = { { 1080000, HFPLL, 1, 0x28 }, LVL_HIGH, 1150000, 7 },
[14] = { { 1134000, HFPLL, 1, 0x2A }, LVL_HIGH, 1150000, 7 },
[15] = { { 1188000, HFPLL, 1, 0x2C }, LVL_HIGH, 1150000, 7 },
{ }
};
/*
 * CPU frequency tables for the three PVS (process/voltage scaling) bins.
 * Columns: use_for_scaling flag, {rate kHz, src, pri divider, L-value},
 * L2 table index, core voltage (uV).  The bins differ only in voltage:
 * slow > nominal > fast silicon.  Zero-rate entry terminates each table.
 */
static struct acpu_level acpu_freq_tbl_slow[] __initdata = {
{ 1, { 384000, PLL_8, 0, 0x00 }, L2(0), 950000 },
{ 0, { 432000, HFPLL, 2, 0x20 }, L2(5), 975000 },
{ 1, { 486000, HFPLL, 2, 0x24 }, L2(5), 975000 },
{ 0, { 540000, HFPLL, 2, 0x28 }, L2(5), 1000000 },
{ 1, { 594000, HFPLL, 1, 0x16 }, L2(5), 1000000 },
{ 0, { 648000, HFPLL, 1, 0x18 }, L2(5), 1025000 },
{ 1, { 702000, HFPLL, 1, 0x1A }, L2(5), 1025000 },
{ 0, { 756000, HFPLL, 1, 0x1C }, L2(10), 1075000 },
{ 1, { 810000, HFPLL, 1, 0x1E }, L2(10), 1075000 },
{ 0, { 864000, HFPLL, 1, 0x20 }, L2(10), 1100000 },
{ 1, { 918000, HFPLL, 1, 0x22 }, L2(10), 1100000 },
{ 0, { 972000, HFPLL, 1, 0x24 }, L2(10), 1125000 },
{ 1, { 1026000, HFPLL, 1, 0x26 }, L2(10), 1125000 },
{ 0, { 1080000, HFPLL, 1, 0x28 }, L2(15), 1175000 },
{ 1, { 1134000, HFPLL, 1, 0x2A }, L2(15), 1175000 },
{ 0, { 1188000, HFPLL, 1, 0x2C }, L2(15), 1200000 },
{ 1, { 1242000, HFPLL, 1, 0x2E }, L2(15), 1200000 },
{ 0, { 1296000, HFPLL, 1, 0x30 }, L2(15), 1225000 },
{ 1, { 1350000, HFPLL, 1, 0x32 }, L2(15), 1225000 },
{ 1, { 1404000, HFPLL, 1, 0x34 }, L2(15), 1237500 },
{ 0, { 0 } }
};
static struct acpu_level acpu_freq_tbl_nom[] __initdata = {
{ 1, { 384000, PLL_8, 0, 0x00 }, L2(0), 925000 },
{ 0, { 432000, HFPLL, 2, 0x20 }, L2(5), 950000 },
{ 1, { 486000, HFPLL, 2, 0x24 }, L2(5), 950000 },
{ 0, { 540000, HFPLL, 2, 0x28 }, L2(5), 975000 },
{ 1, { 594000, HFPLL, 1, 0x16 }, L2(5), 975000 },
{ 0, { 648000, HFPLL, 1, 0x18 }, L2(5), 1000000 },
{ 1, { 702000, HFPLL, 1, 0x1A }, L2(5), 1000000 },
{ 0, { 756000, HFPLL, 1, 0x1C }, L2(10), 1050000 },
{ 1, { 810000, HFPLL, 1, 0x1E }, L2(10), 1050000 },
{ 0, { 864000, HFPLL, 1, 0x20 }, L2(10), 1075000 },
{ 1, { 918000, HFPLL, 1, 0x22 }, L2(10), 1075000 },
{ 0, { 972000, HFPLL, 1, 0x24 }, L2(10), 1100000 },
{ 1, { 1026000, HFPLL, 1, 0x26 }, L2(10), 1100000 },
{ 0, { 1080000, HFPLL, 1, 0x28 }, L2(15), 1150000 },
{ 1, { 1134000, HFPLL, 1, 0x2A }, L2(15), 1150000 },
{ 0, { 1188000, HFPLL, 1, 0x2C }, L2(15), 1175000 },
{ 1, { 1242000, HFPLL, 1, 0x2E }, L2(15), 1175000 },
{ 0, { 1296000, HFPLL, 1, 0x30 }, L2(15), 1200000 },
{ 1, { 1350000, HFPLL, 1, 0x32 }, L2(15), 1200000 },
{ 1, { 1404000, HFPLL, 1, 0x34 }, L2(15), 1212500 },
{ 0, { 0 } }
};
static struct acpu_level acpu_freq_tbl_fast[] __initdata = {
{ 1, { 384000, PLL_8, 0, 0x00 }, L2(0), 900000 },
{ 0, { 432000, HFPLL, 2, 0x20 }, L2(5), 900000 },
{ 1, { 486000, HFPLL, 2, 0x24 }, L2(5), 900000 },
{ 0, { 540000, HFPLL, 2, 0x28 }, L2(5), 925000 },
{ 1, { 594000, HFPLL, 1, 0x16 }, L2(5), 925000 },
{ 0, { 648000, HFPLL, 1, 0x18 }, L2(5), 950000 },
{ 1, { 702000, HFPLL, 1, 0x1A }, L2(5), 950000 },
{ 0, { 756000, HFPLL, 1, 0x1C }, L2(10), 1000000 },
{ 1, { 810000, HFPLL, 1, 0x1E }, L2(10), 1000000 },
{ 0, { 864000, HFPLL, 1, 0x20 }, L2(10), 1025000 },
{ 1, { 918000, HFPLL, 1, 0x22 }, L2(10), 1025000 },
{ 0, { 972000, HFPLL, 1, 0x24 }, L2(10), 1050000 },
{ 1, { 1026000, HFPLL, 1, 0x26 }, L2(10), 1050000 },
{ 0, { 1080000, HFPLL, 1, 0x28 }, L2(15), 1100000 },
{ 1, { 1134000, HFPLL, 1, 0x2A }, L2(15), 1100000 },
{ 0, { 1188000, HFPLL, 1, 0x2C }, L2(15), 1125000 },
{ 1, { 1242000, HFPLL, 1, 0x2E }, L2(15), 1125000 },
{ 0, { 1296000, HFPLL, 1, 0x30 }, L2(15), 1150000 },
{ 1, { 1350000, HFPLL, 1, 0x32 }, L2(15), 1150000 },
{ 1, { 1404000, HFPLL, 1, 0x34 }, L2(15), 1162500 },
{ 0, { 0 } }
};
/*
 * Frequency-table selector indexed by [speed bin][PVS level].
 * Only speed bin 0 is populated here; each entry carries a pointer to
 * the acpu_level table, its size in bytes, and a per-table voltage
 * adjustment in uV (0 for slow, +25000 for nominal/fast bins).
 * NOTE(review): third-field semantics inferred — confirm against
 * struct pvs_table in acpuclk-krait.h.
 */
static struct pvs_table pvs_tables[NUM_SPEED_BINS][NUM_PVS] __initdata = {
[0][PVS_SLOW] = { acpu_freq_tbl_slow, sizeof(acpu_freq_tbl_slow), 0 },
[0][PVS_NOMINAL] = { acpu_freq_tbl_nom, sizeof(acpu_freq_tbl_nom), 25000 },
[0][PVS_FAST] = { acpu_freq_tbl_fast, sizeof(acpu_freq_tbl_fast), 25000 },
};
/*
 * Board-specific parameter bundle handed to the shared Krait ACPU-clock
 * core by acpuclk_krait_init().  Sizes are in bytes (sizeof the arrays);
 * pte_efuse_phys is the physical address of the PTE efuse used to read
 * the speed bin / PVS level, and stby_khz is the standby (idle) rate.
 */
static struct acpuclk_krait_params acpuclk_8930aa_params __initdata = {
.scalable = scalable,
.scalable_size = sizeof(scalable),
.hfpll_data = &hfpll_data,
.pvs_tables = pvs_tables,
.l2_freq_tbl = l2_freq_tbl,
.l2_freq_tbl_size = sizeof(l2_freq_tbl),
.bus_scale = &bus_scale_data,
.pte_efuse_phys = 0x007000C0,
.stby_khz = 384000,
};
/*
 * Probe callback: hand the 8930AA parameter set to the common Krait
 * ACPU clock core.  Returns whatever the core init returns.
 */
static int __init acpuclk_8930aa_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;

	return acpuclk_krait_init(dev, &acpuclk_8930aa_params);
}
/*
 * Platform driver shell.  No .probe member is set here because the
 * driver is registered via platform_driver_probe(), which takes the
 * (__init) probe callback as a separate argument.
 */
static struct platform_driver acpuclk_8930aa_driver = {
.driver = {
.name = "acpuclk-8930aa",
.owner = THIS_MODULE,
},
};
/*
 * Register the driver and bind it immediately.  platform_driver_probe()
 * is the one-shot registration variant, appropriate here because the
 * probe function is __init and must not be invoked after boot.
 */
static int __init acpuclk_8930aa_init(void)
{
return platform_driver_probe(&acpuclk_8930aa_driver,
acpuclk_8930aa_probe);
}
device_initcall(acpuclk_8930aa_init);
| gpl-2.0 |
LiquidSmooth-Devices/android_kernel_samsung_hlte | drivers/staging/vt6656/baseband.c | 903 | 70790 | /*
* Copyright (c) 1996, 2003 VIA Networking Technologies, Inc.
* All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*
*
* File: baseband.c
*
* Purpose: Implement functions to access baseband
*
* Author: Jerry Chen
*
* Date: Jun. 5, 2002
*
* Functions:
* BBuGetFrameTime - Calculate data frame transmitting time
* BBvCaculateParameter - Caculate PhyLength, PhyService and Phy Signal parameter for baseband Tx
* BBbVT3184Init - VIA VT3184 baseband chip init code
* BBvLoopbackOn - Turn on BaseBand Loopback mode
* BBvLoopbackOff - Turn off BaseBand Loopback mode
*
* Revision History:
*
*
*/
#include "tmacro.h"
#include "tether.h"
#include "mac.h"
#include "baseband.h"
#include "rf.h"
#include "srom.h"
#include "control.h"
#include "datarate.h"
#include "rndis.h"
/*--------------------- Static Definitions -------------------------*/
/* Driver log verbosity; the commented-out alternative enables debug output. */
static int msglevel =MSG_LEVEL_INFO;
//static int msglevel =MSG_LEVEL_DEBUG;
/*--------------------- Static Classes ----------------------------*/
/*--------------------- Static Variables --------------------------*/
/*--------------------- Static Functions --------------------------*/
/*--------------------- Export Variables --------------------------*/
/*--------------------- Static Definitions -------------------------*/
/*--------------------- Static Classes ----------------------------*/
/*--------------------- Static Variables --------------------------*/
/*
 * VT3184 AGC (automatic gain control) table: 64 values uploaded in one
 * control transfer (MESSAGE_REQUEST_BBAGC) by BBbVT3184Init().  The
 * values ramp from 0x00 to 0x3E, increasing by 2 every second entry;
 * the index comments give the table offset in hex.
 */
BYTE abyVT3184_AGC[] = {
0x00, //0
0x00, //1
0x02, //2
0x02, //3 //RobertYu:20060505, 0x04, //3
0x04, //4
0x04, //5 //RobertYu:20060505, 0x06, //5
0x06, //6
0x06, //7
0x08, //8
0x08, //9
0x0A, //A
0x0A, //B
0x0C, //C
0x0C, //D
0x0E, //E
0x0E, //F
0x10, //10
0x10, //11
0x12, //12
0x12, //13
0x14, //14
0x14, //15
0x16, //16
0x16, //17
0x18, //18
0x18, //19
0x1A, //1A
0x1A, //1B
0x1C, //1C
0x1C, //1D
0x1E, //1E
0x1E, //1F
0x20, //20
0x20, //21
0x22, //22
0x22, //23
0x24, //24
0x24, //25
0x26, //26
0x26, //27
0x28, //28
0x28, //29
0x2A, //2A
0x2A, //2B
0x2C, //2C
0x2C, //2D
0x2E, //2E
0x2E, //2F
0x30, //30
0x30, //31
0x32, //32
0x32, //33
0x34, //34
0x34, //35
0x36, //36
0x36, //37
0x38, //38
0x38, //39
0x3A, //3A
0x3A, //3B
0x3C, //3C
0x3C, //3D
0x3E, //3E
0x3E //3F
};
/*
 * VT3184 baseband register image (256 bytes, one per control register)
 * for the Airoha AL2230/AL2230S RF front ends; also reused for
 * AIROHA7230 after BBbVT3184Init() patches offset 0xd7 (CR215).
 * Uploaded wholesale via MESSAGE_REQUEST_BBREG.  The "RobertYu" notes
 * record historical tuning changes relative to earlier values; the
 * //NN comments mark the register offset in hex.  Offset 0x0a is also
 * cached as the device's initial byBBRxConf.
 */
BYTE abyVT3184_AL2230[] = {
0x31,//00
0x00,
0x00,
0x00,
0x00,
0x80,
0x00,
0x00,
0x70,
0x45,//tx //0x64 for FPGA
0x2A,
0x76,
0x00,
0x00,
0x80,
0x00,
0x00,//10
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x8e, //RobertYu:20060522, //0x8d,
0x0a, //RobertYu:20060515, //0x09,
0x00,
0x00,
0x00,
0x00,//20
0x00,
0x00,
0x00,
0x00,
0x4a,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x4a,
0x00,
0x0c, //RobertYu:20060522, //0x10,
0x26,//30
0x5b,
0x00,
0x00,
0x00,
0x00,
0xaa,
0xaa,
0xff,
0xff,
0x79,
0x00,
0x00,
0x0b,
0x48,
0x04,
0x00,//40
0x08,
0x00,
0x08,
0x08,
0x14,
0x05,
0x09,
0x00,
0x00,
0x00,
0x00,
0x09,
0x73,
0x00,
0xc5,
0x00,//50 //RobertYu:20060505, //0x15,//50
0x19,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0xd0, //RobertYu:20060505, //0xb0,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0xe4,//60
0x80,
0x00,
0x00,
0x00,
0x00,
0x98,
0x0a,
0x00,
0x00,
0x00,
0x00,
0x00, //0x80 for FPGA
0x03,
0x01,
0x00,
0x00,//70
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x8c,//80
0x01,
0x09,
0x00,
0x00,
0x00,
0x00,
0x00,
0x08,
0x00,
0x1f, //RobertYu:20060516, //0x0f,
0xb7,
0x88,
0x47,
0xaa,
0x00, //RobertYu:20060505, //0x02,
0x20,//90 //RobertYu:20060505, //0x22,//90
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0xeb,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x01,
0x00,//a0
0x00,
0x00,
0x00,
0x00,
0x00,
0x10,
0x00,
0x18,
0x00,
0x00,
0x00,
0x00,
0x15, //RobertYu:20060516, //0x00,
0x00,
0x18,
0x38,//b0
0x30,
0x00,
0x00,
0xff,
0x0f,
0xe4,
0xe2,
0x00,
0x00,
0x00,
0x03,
0x01,
0x00,
0x00,
0x00,
0x18,//c0
0x20,
0x07,
0x18,
0xff,
0xff, //RobertYu:20060509, //0x2c,
0x0e, //RobertYu:20060530, //0x0c,
0x0a,
0x0e,
0x00, //RobertYu:20060505, //0x01,
0x82, //RobertYu:20060516, //0x8f,
0xa7,
0x3c,
0x10,
0x30, //RobertYu:20060627, //0x0b,
0x05, //RobertYu:20060516, //0x25,
0x40,//d0
0x12,
0x00,
0x00,
0x10,
0x28,
0x80,
0x2A,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,//e0
0xf3, //RobertYu:20060516, //0xd3,
0x00,
0x00,
0x00,
0x10,
0x00,
0x12, //RobertYu:20060627, //0x10,
0x00,
0xf4,
0x00,
0xff,
0x79,
0x20,
0x30,
0x05, //RobertYu:20060516, //0x0c,
0x00,//f0
0x3e,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00
};
//{{RobertYu:20060515, new BB setting for VT3226D0
/*
 * VT3184 baseband register image (256 bytes) for the VT3226/VT3226D0
 * and VT3342A0 RF front ends (see BBbVT3184Init()).  Identical in
 * layout to abyVT3184_AL2230 but with different tuning at offsets
 * 0xc6, 0xca, 0xce, 0xe7 and 0xef; the "RobertYu" notes record the
 * superseded values.  Offset 0x0a seeds the device's byBBRxConf.
 */
BYTE abyVT3184_VT3226D0[] = {
0x31,//00
0x00,
0x00,
0x00,
0x00,
0x80,
0x00,
0x00,
0x70,
0x45,//tx //0x64 for FPGA
0x2A,
0x76,
0x00,
0x00,
0x80,
0x00,
0x00,//10
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x8e, //RobertYu:20060525, //0x8d,
0x0a, //RobertYu:20060515, //0x09,
0x00,
0x00,
0x00,
0x00,//20
0x00,
0x00,
0x00,
0x00,
0x4a,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x4a,
0x00,
0x0c, //RobertYu:20060525, //0x10,
0x26,//30
0x5b,
0x00,
0x00,
0x00,
0x00,
0xaa,
0xaa,
0xff,
0xff,
0x79,
0x00,
0x00,
0x0b,
0x48,
0x04,
0x00,//40
0x08,
0x00,
0x08,
0x08,
0x14,
0x05,
0x09,
0x00,
0x00,
0x00,
0x00,
0x09,
0x73,
0x00,
0xc5,
0x00,//50 //RobertYu:20060505, //0x15,//50
0x19,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0xd0, //RobertYu:20060505, //0xb0,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0xe4,//60
0x80,
0x00,
0x00,
0x00,
0x00,
0x98,
0x0a,
0x00,
0x00,
0x00,
0x00,
0x00, //0x80 for FPGA
0x03,
0x01,
0x00,
0x00,//70
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x8c,//80
0x01,
0x09,
0x00,
0x00,
0x00,
0x00,
0x00,
0x08,
0x00,
0x1f, //RobertYu:20060515, //0x0f,
0xb7,
0x88,
0x47,
0xaa,
0x00, //RobertYu:20060505, //0x02,
0x20,//90 //RobertYu:20060505, //0x22,//90
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0xeb,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x01,
0x00,//a0
0x00,
0x00,
0x00,
0x00,
0x00,
0x10,
0x00,
0x18,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x18,
0x38,//b0
0x30,
0x00,
0x00,
0xff,
0x0f,
0xe4,
0xe2,
0x00,
0x00,
0x00,
0x03,
0x01,
0x00,
0x00,
0x00,
0x18,//c0
0x20,
0x07,
0x18,
0xff,
0xff, //RobertYu:20060509, //0x2c,
0x10, //RobertYu:20060525, //0x0c,
0x0a,
0x0e,
0x00, //RobertYu:20060505, //0x01,
0x84, //RobertYu:20060525, //0x8f,
0xa7,
0x3c,
0x10,
0x24, //RobertYu:20060627, //0x18,
0x05, //RobertYu:20060515, //0x25,
0x40,//d0
0x12,
0x00,
0x00,
0x10,
0x28,
0x80,
0x2A,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,//e0
0xf3, //RobertYu:20060515, //0xd3,
0x00,
0x00,
0x00,
0x10,
0x00,
0x10, //RobertYu:20060627, //0x0e,
0x00,
0xf4,
0x00,
0xff,
0x79,
0x20,
0x30,
0x08, //RobertYu:20060515, //0x0c,
0x00,//f0
0x3e,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
};
/*
 * Rate factors indexed by RATE_1M..RATE_54M, used by BBuGetFrameTime().
 * The first four (CCK) entries are the data rate in 100 kbit/s units
 * (10 = 1 Mbit/s ... 110 = 11 Mbit/s); the remaining (OFDM) entries are
 * the number of data bits carried per 4 us OFDM symbol (24 = 6 Mbit/s
 * ... 216 = 54 Mbit/s), matching the two arithmetic branches in
 * BBuGetFrameTime().
 */
const WORD awcFrameTime[MAX_RATE] =
{10, 20, 55, 110, 24, 36, 48, 72, 96, 144, 192, 216};
/*--------------------- Static Functions --------------------------*/
/*
static
unsigned long
s_ulGetLowSQ3(PSDevice pDevice);
static
unsigned long
s_ulGetRatio(PSDevice pDevice);
static
void
s_vClearSQ3Value(PSDevice pDevice);
*/
/*--------------------- Export Variables --------------------------*/
/*
 * Description: Calculate the on-air transmit time of a data frame, in
 *              microseconds, including preamble/PLCP overhead.
 *
 * Parameters:
 *  In:
 *      byPreambleType  - 1 = short preamble, otherwise long (CCK only)
 *      byPktType       - PK_TYPE_11A, PK_TYPE_11B, PK_TYPE_11GB, PK_TYPE_11GA
 *      cbFrameLength   - frame length in bytes
 *      wRate           - TX rate index (RATE_1M .. RATE_54M)
 *  Out:
 *      none
 *
 * Return Value: frame time in microseconds, or 0 for an invalid rate
 *
 */
unsigned int
BBuGetFrameTime (
    BYTE byPreambleType,
    BYTE byPktType,
    unsigned int cbFrameLength,
    WORD wRate
    )
{
	unsigned int uRateIdx = (unsigned int)wRate;
	unsigned int uRateFactor;
	unsigned int uPayloadTime;
	unsigned int uRoundCheck;

	if (uRateIdx > RATE_54M) {
		ASSERT(0);
		return 0;
	}

	uRateFactor = (unsigned int)awcFrameTime[uRateIdx];

	if (uRateIdx <= 3) {
		/* CCK: table entry is the rate in 100 kbit/s units, so
		 * bytes * 8 bits / (rate/10) == bytes * 80 / factor. */
		unsigned int uPreambleTime = (byPreambleType == 1) ? 96 : 192;

		uPayloadTime = (cbFrameLength * 80) / uRateFactor;
		/* Round up when the division truncated. */
		uRoundCheck = (uPayloadTime * uRateFactor) / 80;
		if (uRoundCheck != cbFrameLength)
			uPayloadTime++;

		return uPreambleTime + uPayloadTime;
	}

	/* OFDM: table entry is data bits per 4 us symbol; 22 extra bits
	 * account for the SERVICE field and tail bits. */
	uPayloadTime = (cbFrameLength * 8 + 22) / uRateFactor;
	uRoundCheck = ((uPayloadTime * uRateFactor) - 22) / 8;
	if (uRoundCheck != cbFrameLength)
		uPayloadTime++;
	uPayloadTime *= 4;	/* symbols -> microseconds */
	if (byPktType != PK_TYPE_11A)
		uPayloadTime += 6;	/* extra time for non-11a (2.4 GHz) frames */

	return 20 + uPayloadTime;	/* 20 us OFDM preamble + header */
}
/*
 * Description: Calculate the PHY Length, Service and Signal fields for Tx.
 *
 *      The ten original OFDM cases differed only in the low rate nibble
 *      of the SIGNAL byte; they are collapsed here into a single rate
 *      lookup combined with the 11a (0x90) / 11g (0x80) base, producing
 *      byte-identical results.
 *
 * Parameters:
 *  In:
 *      pDevice         - Device Structure (preamble type and CCK flag are read)
 *      cbFrameLength   - Tx frame length in bytes
 *      wRate           - Tx rate index (RATE_1M .. RATE_54M)
 *      byPacketType    - PK_TYPE_11A / PK_TYPE_11B / PK_TYPE_11GB / PK_TYPE_11GA
 *  Out:
 *      pwPhyLen        - PHY Length field (microseconds for 11b, bytes otherwise)
 *      pbyPhySrv       - PHY Service field (bit 7 = CCK length-extension bit)
 *      pbyPhySgn       - PHY Signal field
 *
 * Return Value: none
 *
 */
void
BBvCaculateParameter (
    PSDevice pDevice,
    unsigned int cbFrameLength,
    WORD wRate,
    BYTE byPacketType,
    PWORD pwPhyLen,
    PBYTE pbyPhySrv,
    PBYTE pbyPhySgn
    )
{
	unsigned int cbBitCount = cbFrameLength * 8;
	unsigned int cbUsCount = 0;
	unsigned int cbTmp;
	BOOL bExtBit = FALSE;
	BYTE byPreambleType = pDevice->byPreambleType;
	BOOL bCCK = pDevice->bCCK;

	switch (wRate) {
	case RATE_1M :
		cbUsCount = cbBitCount;
		*pbyPhySgn = 0x00;
		break;

	case RATE_2M :
		cbUsCount = cbBitCount / 2;
		*pbyPhySgn = (byPreambleType == 1) ? 0x09 : 0x01;
		break;

	case RATE_5M :
		if (bCCK == FALSE)
			cbBitCount ++;
		cbUsCount = (cbBitCount * 10) / 55;
		cbTmp = (cbUsCount * 55) / 10;
		if (cbTmp != cbBitCount)	/* round duration up */
			cbUsCount ++;
		*pbyPhySgn = (byPreambleType == 1) ? 0x0a : 0x02;
		break;

	case RATE_11M :
		if (bCCK == FALSE)
			cbBitCount ++;
		cbUsCount = cbBitCount / 11;
		cbTmp = cbUsCount * 11;
		if (cbTmp != cbBitCount) {
			cbUsCount ++;
			/* Last symbol nearly empty: set the 11b SERVICE
			 * length-extension bit below. */
			if ((cbBitCount - cbTmp) <= 3)
				bExtBit = TRUE;
		}
		*pbyPhySgn = (byPreambleType == 1) ? 0x0b : 0x03;
		break;

	default : {
		/* OFDM rates: SIGNAL = band base (0x90 for 11a/5 GHz,
		 * 0x80 for 11g/2.4 GHz) OR'ed with the per-rate nibble.
		 * Unknown rates fall back to the 54M encoding, matching
		 * the original default case. */
		BYTE byRateBits;

		switch (wRate) {
		case RATE_6M :  byRateBits = 0x0B; break;
		case RATE_9M :  byRateBits = 0x0F; break;
		case RATE_12M : byRateBits = 0x0A; break;
		case RATE_18M : byRateBits = 0x0E; break;
		case RATE_24M : byRateBits = 0x09; break;
		case RATE_36M : byRateBits = 0x0D; break;
		case RATE_48M : byRateBits = 0x08; break;
		case RATE_54M :
		default :       byRateBits = 0x0C; break;
		}
		*pbyPhySgn = (BYTE)(((byPacketType == PK_TYPE_11A) ? 0x90 : 0x80)
				    | byRateBits);
		break;
	}
	}

	if (byPacketType == PK_TYPE_11B) {
		*pbyPhySrv = 0x00;
		if (bExtBit)
			*pbyPhySrv = *pbyPhySrv | 0x80;	/* length extension */
		*pwPhyLen = (WORD) cbUsCount;
	}
	else {
		*pbyPhySrv = 0x00;
		*pwPhyLen = (WORD)cbFrameLength;
	}
}
/*
 * Description: Set the antenna mode and push it to the device firmware.
 *
 * Parameters:
 *  In:
 *      pDevice        - Device Structure
 *      byAntennaMode  - ANT_TXA / ANT_TXB / ANT_RXA / ANT_RXB
 *  Out:
 *      none
 *
 * Return Value: none
 *
 */
void
BBvSetAntennaMode (PSDevice pDevice, BYTE byAntennaMode)
{
	/* Mirror RX antenna selection into the cached RX-configuration
	 * byte (bits 0-1); TX selection has no cached state here. */
	switch (byAntennaMode) {
	case ANT_TXA:
	case ANT_TXB:
		break;
	case ANT_RXA:
		pDevice->byBBRxConf &= 0xFC;	/* clear bits 0 and 1 */
		break;
	case ANT_RXB:
		/* clear bit 0, set bit 1 */
		pDevice->byBBRxConf = (BYTE)((pDevice->byBBRxConf & 0xFE) | 0x02);
		break;
	}

	/* Hand the mode to the firmware regardless of direction. */
	CONTROLnsRequestOut(pDevice,
			    MESSAGE_TYPE_SET_ANTMD,
			    (WORD) byAntennaMode,
			    0,
			    0,
			    NULL);
}
/*
 * Description: One-time VT3184 baseband initialization: read the EEPROM,
 *              resolve the zone type and RF type, upload the matching
 *              baseband register image and AGC table, apply per-RF
 *              register fixups, and download the RF table.
 *
 * Parameters:
 *  In:
 *      pDevice - Device Structure
 *  Out:
 *      none (device state: abyEEPROM, byZoneType, byRFType, byBBRxConf,
 *      abyBBVGA[], ldBmThreshold[] are initialized)
 *
 * Return Value: TRUE on success (or unknown RF type, which is skipped);
 *               FALSE if the EEPROM read fails
 *
 */
BOOL BBbVT3184Init(PSDevice pDevice)
{
int ntStatus;
WORD wLength;
PBYTE pbyAddr;
PBYTE pbyAgc;
WORD wLengthAgc;
BYTE abyArray[256];
u8 data;
/* Pull the full EEPROM image into the device structure. */
ntStatus = CONTROLnsRequestIn(pDevice,
MESSAGE_TYPE_READ,
0,
MESSAGE_REQUEST_EEPROM,
EEP_MAX_CONTEXT_SIZE,
pDevice->abyEEPROM);
if (ntStatus != STATUS_SUCCESS) {
return FALSE;
}
// if ((pDevice->abyEEPROM[EEP_OFS_RADIOCTL]&0x06)==0x04)
// return FALSE;
//zonetype initial
/* If a zone type was read from the config file, let it override the
 * EEPROM copy (0 = USA/11ch, 1 = Japan/13ch, 2 = Europe/13ch). */
pDevice->byOriginalZonetype = pDevice->abyEEPROM[EEP_OFS_ZONETYPE];
if(pDevice->config_file.ZoneType >= 0) { //read zonetype file ok!
if ((pDevice->config_file.ZoneType == 0)&&
(pDevice->abyEEPROM[EEP_OFS_ZONETYPE] !=0x00)){ //for USA
pDevice->abyEEPROM[EEP_OFS_ZONETYPE] = 0;
pDevice->abyEEPROM[EEP_OFS_MAXCHANNEL] = 0x0B;
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"Init Zone Type :USA\n");
}
else if((pDevice->config_file.ZoneType == 1)&&
(pDevice->abyEEPROM[EEP_OFS_ZONETYPE]!=0x01)){ //for Japan
pDevice->abyEEPROM[EEP_OFS_ZONETYPE] = 0x01;
pDevice->abyEEPROM[EEP_OFS_MAXCHANNEL] = 0x0D;
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"Init Zone Type :Japan\n");
}
else if((pDevice->config_file.ZoneType == 2)&&
(pDevice->abyEEPROM[EEP_OFS_ZONETYPE]!=0x02)){ //for Europe
pDevice->abyEEPROM[EEP_OFS_ZONETYPE] = 0x02;
pDevice->abyEEPROM[EEP_OFS_MAXCHANNEL] = 0x0D;
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"Init Zone Type :Europe\n");
}
else {
if(pDevice->config_file.ZoneType !=pDevice->abyEEPROM[EEP_OFS_ZONETYPE])
printk("zonetype in file[%02x] mismatch with in EEPROM[%02x]\n",pDevice->config_file.ZoneType,pDevice->abyEEPROM[EEP_OFS_ZONETYPE]);
else
printk("Read Zonetype file success,use default zonetype setting[%02x]\n",pDevice->config_file.ZoneType);
}
}
if ( !pDevice->bZoneRegExist ) {
pDevice->byZoneType = pDevice->abyEEPROM[EEP_OFS_ZONETYPE];
}
pDevice->byRFType = pDevice->abyEEPROM[EEP_OFS_RFTYPE];
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"Zone Type %x\n", pDevice->byZoneType);
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"RF Type %d\n", pDevice->byRFType);
/* Select the baseband register image, AGC table, VGA gain table and
 * RSSI thresholds for the detected RF front end. */
if ((pDevice->byRFType == RF_AL2230) || (pDevice->byRFType == RF_AL2230S)) {
pDevice->byBBRxConf = abyVT3184_AL2230[10];
wLength = sizeof(abyVT3184_AL2230);
pbyAddr = abyVT3184_AL2230;
pbyAgc = abyVT3184_AGC;
wLengthAgc = sizeof(abyVT3184_AGC);
pDevice->abyBBVGA[0] = 0x1C;
pDevice->abyBBVGA[1] = 0x10;
pDevice->abyBBVGA[2] = 0x0;
pDevice->abyBBVGA[3] = 0x0;
pDevice->ldBmThreshold[0] = -70;
pDevice->ldBmThreshold[1] = -48;
pDevice->ldBmThreshold[2] = 0;
pDevice->ldBmThreshold[3] = 0;
}
else if (pDevice->byRFType == RF_AIROHA7230) {
/* AIROHA7230 reuses the AL2230 image with a CR215 fixup below. */
pDevice->byBBRxConf = abyVT3184_AL2230[10];
wLength = sizeof(abyVT3184_AL2230);
pbyAddr = abyVT3184_AL2230;
pbyAgc = abyVT3184_AGC;
wLengthAgc = sizeof(abyVT3184_AGC);
// Init ANT B select,TX Config CR09 = 0x61->0x45, 0x45->0x41(VC1/VC2 define, make the ANT_A, ANT_B inverted)
//pbyAddr[0x09] = 0x41;
// Init ANT B select,RX Config CR10 = 0x28->0x2A, 0x2A->0x28(VC1/VC2 define, make the ANT_A, ANT_B inverted)
//pbyAddr[0x0a] = 0x28;
// Select VC1/VC2, CR215 = 0x02->0x06
pbyAddr[0xd7] = 0x06;
pDevice->abyBBVGA[0] = 0x1C;
pDevice->abyBBVGA[1] = 0x10;
pDevice->abyBBVGA[2] = 0x0;
pDevice->abyBBVGA[3] = 0x0;
pDevice->ldBmThreshold[0] = -70;
pDevice->ldBmThreshold[1] = -48;
pDevice->ldBmThreshold[2] = 0;
pDevice->ldBmThreshold[3] = 0;
}
else if ( (pDevice->byRFType == RF_VT3226) || (pDevice->byRFType == RF_VT3226D0) ) {
pDevice->byBBRxConf = abyVT3184_VT3226D0[10]; //RobertYu:20060515
wLength = sizeof(abyVT3184_VT3226D0); //RobertYu:20060515
pbyAddr = abyVT3184_VT3226D0; //RobertYu:20060515
pbyAgc = abyVT3184_AGC;
wLengthAgc = sizeof(abyVT3184_AGC);
pDevice->abyBBVGA[0] = 0x20; //RobertYu:20060104, reguest by Jack
pDevice->abyBBVGA[1] = 0x10;
pDevice->abyBBVGA[2] = 0x0;
pDevice->abyBBVGA[3] = 0x0;
pDevice->ldBmThreshold[0] = -70;
pDevice->ldBmThreshold[1] = -48;
pDevice->ldBmThreshold[2] = 0;
pDevice->ldBmThreshold[3] = 0;
// Fix VT3226 DFC system timing issue
MACvRegBitsOn(pDevice, MAC_REG_SOFTPWRCTL2, SOFTPWRCTL_RFLEOPT);
//}}
//{{RobertYu:20060609
} else if ( (pDevice->byRFType == RF_VT3342A0) ) {
pDevice->byBBRxConf = abyVT3184_VT3226D0[10];
wLength = sizeof(abyVT3184_VT3226D0);
pbyAddr = abyVT3184_VT3226D0;
pbyAgc = abyVT3184_AGC;
wLengthAgc = sizeof(abyVT3184_AGC);
pDevice->abyBBVGA[0] = 0x20;
pDevice->abyBBVGA[1] = 0x10;
pDevice->abyBBVGA[2] = 0x0;
pDevice->abyBBVGA[3] = 0x0;
pDevice->ldBmThreshold[0] = -70;
pDevice->ldBmThreshold[1] = -48;
pDevice->ldBmThreshold[2] = 0;
pDevice->ldBmThreshold[3] = 0;
// Fix VT3226 DFC system timing issue
MACvRegBitsOn(pDevice, MAC_REG_SOFTPWRCTL2, SOFTPWRCTL_RFLEOPT);
//}}
} else {
/* Unknown RF type: nothing to upload, report success. */
return TRUE;
}
/* Upload the full baseband register image, then the AGC table. */
memcpy(abyArray, pbyAddr, wLength);
CONTROLnsRequestOut(pDevice,
MESSAGE_TYPE_WRITE,
0,
MESSAGE_REQUEST_BBREG,
wLength,
abyArray
);
memcpy(abyArray, pbyAgc, wLengthAgc);
CONTROLnsRequestOut(pDevice,
MESSAGE_TYPE_WRITE,
0,
MESSAGE_REQUEST_BBAGC,
wLengthAgc,
abyArray
);
/* Per-RF MAC register fixups. */
if ((pDevice->byRFType == RF_VT3226) || //RobertYu:20051116, 20060111 remove VT3226D0
(pDevice->byRFType == RF_VT3342A0) //RobertYu:20060609
) {
ControlvWriteByte(pDevice,MESSAGE_REQUEST_MACREG,MAC_REG_ITRTMSET,0x23);
MACvRegBitsOn(pDevice,MAC_REG_PAPEDELAY,0x01);
}
else if (pDevice->byRFType == RF_VT3226D0)
{
ControlvWriteByte(pDevice,MESSAGE_REQUEST_MACREG,MAC_REG_ITRTMSET,0x11);
MACvRegBitsOn(pDevice,MAC_REG_PAPEDELAY,0x01);
}
ControlvWriteByte(pDevice,MESSAGE_REQUEST_BBREG,0x04,0x7F);
ControlvWriteByte(pDevice,MESSAGE_REQUEST_BBREG,0x0D,0x01);
RFbRFTableDownload(pDevice);
/* Fix for TX USB resets from vendors driver */
/* NOTE(review): if this read fails, 'data' is used uninitialized —
 * the read status is not checked; confirm against vendor driver. */
CONTROLnsRequestIn(pDevice, MESSAGE_TYPE_READ, USB_REG4,
MESSAGE_REQUEST_MEM, sizeof(data), &data);
data |= 0x2;
CONTROLnsRequestOut(pDevice, MESSAGE_TYPE_WRITE, USB_REG4,
MESSAGE_REQUEST_MEM, sizeof(data), &data);
return TRUE;//ntStatus;
}
/*
 * Description: Turn on BaseBand Loopback mode.
 *              Saves the current values of CR201, CR77, CR136 and CR09
 *              into the device structure so BBvLoopbackOff() can restore
 *              them, then enables the CCK (CR33) or OFDM (CR154) internal
 *              digital loopback path depending on the current rate.
 *
 * Parameters:
 *  In:
 *      pDevice - Device Structure
 *
 *  Out:
 *      none
 *
 * Return Value: none
 *
 */
void BBvLoopbackOn (PSDevice pDevice)
{
BYTE byData;
//CR C9 = 0x00
/* Save-and-clear registers restored later by BBvLoopbackOff(). */
ControlvReadByte (pDevice, MESSAGE_REQUEST_BBREG, 0xC9, &pDevice->byBBCRc9);//CR201
ControlvWriteByte(pDevice, MESSAGE_REQUEST_BBREG, 0xC9, 0);
ControlvReadByte (pDevice, MESSAGE_REQUEST_BBREG, 0x4D, &pDevice->byBBCR4d);//CR77
ControlvWriteByte(pDevice, MESSAGE_REQUEST_BBREG, 0x4D, 0x90);
//CR 88 = 0x02(CCK), 0x03(OFDM)
ControlvReadByte (pDevice, MESSAGE_REQUEST_BBREG, 0x88, &pDevice->byBBCR88);//CR136
if (pDevice->wCurrentRate <= RATE_11M) { //CCK
// Enable internal digital loopback: CR33 |= 0000 0001
ControlvReadByte (pDevice, MESSAGE_REQUEST_BBREG, 0x21, &byData);//CR33
ControlvWriteByte(pDevice, MESSAGE_REQUEST_BBREG, 0x21, (BYTE)(byData | 0x01));//CR33
// CR154 = 0x00
ControlvWriteByte(pDevice, MESSAGE_REQUEST_BBREG, 0x9A, 0); //CR154
ControlvWriteByte(pDevice, MESSAGE_REQUEST_BBREG, 0x88, 0x02);//CR239
}
else { //OFDM
// Enable internal digital loopback:CR154 |= 0000 0001
ControlvReadByte (pDevice, MESSAGE_REQUEST_BBREG, 0x9A, &byData);//CR154
ControlvWriteByte(pDevice, MESSAGE_REQUEST_BBREG, 0x9A, (BYTE)(byData | 0x01));//CR154
// CR33 = 0x00
ControlvWriteByte(pDevice, MESSAGE_REQUEST_BBREG, 0x21, 0); //CR33
ControlvWriteByte(pDevice, MESSAGE_REQUEST_BBREG, 0x88, 0x03);//CR239
}
//CR14 = 0x00
ControlvWriteByte(pDevice, MESSAGE_REQUEST_BBREG, 0x0E, 0);//CR14
// Disable TX_IQUN
ControlvReadByte (pDevice, MESSAGE_REQUEST_BBREG, 0x09, &pDevice->byBBCR09);
ControlvWriteByte(pDevice, MESSAGE_REQUEST_BBREG, 0x09, (BYTE)(pDevice->byBBCR09 & 0xDE));
}
/*
 * Description: Turn off BaseBand Loopback mode.
 *              Restores the register values saved by BBvLoopbackOn()
 *              (CR201, CR136, CR09, CR77), disables the CCK or OFDM
 *              internal loopback bit, and re-enables CR14 bit 7.
 *
 * Parameters:
 *  In:
 *      pDevice - Device Structure
 *
 *  Out:
 *      none
 *
 * Return Value: none
 *
 */
void BBvLoopbackOff (PSDevice pDevice)
{
BYTE byData;
/* Restore registers saved in BBvLoopbackOn(). */
ControlvWriteByte(pDevice, MESSAGE_REQUEST_BBREG, 0xC9, pDevice->byBBCRc9);//CR201
ControlvWriteByte(pDevice, MESSAGE_REQUEST_BBREG, 0x88, pDevice->byBBCR88);//CR136
ControlvWriteByte(pDevice, MESSAGE_REQUEST_BBREG, 0x09, pDevice->byBBCR09);//CR136
ControlvWriteByte(pDevice, MESSAGE_REQUEST_BBREG, 0x4D, pDevice->byBBCR4d);//CR77
if (pDevice->wCurrentRate <= RATE_11M) { // CCK
// Set the CR33 Bit2 to disable internal Loopback.
ControlvReadByte (pDevice, MESSAGE_REQUEST_BBREG, 0x21, &byData);//CR33
ControlvWriteByte(pDevice, MESSAGE_REQUEST_BBREG, 0x21, (BYTE)(byData & 0xFE));//CR33
} else { /* OFDM */
ControlvReadByte (pDevice, MESSAGE_REQUEST_BBREG, 0x9A, &byData);//CR154
ControlvWriteByte(pDevice, MESSAGE_REQUEST_BBREG, 0x9A, (BYTE)(byData & 0xFE));//CR154
}
/* Re-enable CR14 bit 7 (cleared during loopback). */
ControlvReadByte (pDevice, MESSAGE_REQUEST_BBREG, 0x0E, &byData);//CR14
ControlvWriteByte(pDevice, MESSAGE_REQUEST_BBREG, 0x0E, (BYTE)(byData | 0x80));//CR14
}
/*
 * Description: Program the short/long slot-time bit (bit 5 of CR10)
 *              from pDevice->bShortSlotTime and write the updated RX
 *              configuration to the baseband.
 *
 * Parameters:
 *  In:
 *      pDevice - Device Structure
 *  Out:
 *      none
 *
 * Return Value: none
 *
 */
void
BBvSetShortSlotTime (PSDevice pDevice)
{
	BYTE byCurrentVGA = 0;

	/* Bit 5 of the cached RX config: cleared for short slot,
	 * set for long slot. */
	pDevice->byBBRxConf = pDevice->bShortSlotTime
		? (BYTE)(pDevice->byBBRxConf & 0xDF)	/* 1101 1111 */
		: (BYTE)(pDevice->byBBRxConf | 0x20);	/* 0010 0000 */

	/* While the VGA gain (CR231) still equals the first table entry,
	 * force the bit set regardless of the slot-time setting. */
	ControlvReadByte (pDevice, MESSAGE_REQUEST_BBREG, 0xE7, &byCurrentVGA);
	if (byCurrentVGA == pDevice->abyBBVGA[0])
		pDevice->byBBRxConf |= 0x20;

	ControlvWriteByte(pDevice, MESSAGE_REQUEST_BBREG, 0x0A, pDevice->byBBRxConf);
}
/*
 * Write a new VGA gain value to baseband register 0xE7 and then refresh
 * the slot-time bit in CR10 (patch for the 3253B0 baseband with the
 * Cardbus module).
 */
void BBvSetVGAGainOffset(PSDevice pDevice, BYTE byData)
{
	ControlvWriteByte(pDevice, MESSAGE_REQUEST_BBREG, 0xE7, byData);

	/* Re-derive bit 5 of the cached RX config from the slot-time
	 * setting and push it back out. */
	pDevice->byBBRxConf = pDevice->bShortSlotTime
		? (BYTE)(pDevice->byBBRxConf & 0xDF)	/* 1101 1111 */
		: (BYTE)(pDevice->byBBRxConf | 0x20);	/* 0010 0000 */

	ControlvWriteByte(pDevice, MESSAGE_REQUEST_BBREG, 0x0A, pDevice->byBBRxConf);//CR10
}
/*
 * Description: Baseband software reset.
 *              Pulses registers 0x50 and 0x9C: writes the reset value,
 *              then clears it again, in the same order as always.
 *
 * Parameters:
 *  In:
 *      pDevice - Device Structure
 *  Out:
 *      none
 *
 * Return Value: none
 *
 */
void
BBvSoftwareReset (PSDevice pDevice)
{
	static const BYTE abyResetSeq[4][2] = {
		{ 0x50, 0x40 },
		{ 0x50, 0x00 },
		{ 0x9C, 0x01 },
		{ 0x9C, 0x00 },
	};
	int ii;

	for (ii = 0; ii < 4; ii++)
		ControlvWriteByte(pDevice, MESSAGE_REQUEST_BBREG,
				  abyResetSeq[ii][0], abyResetSeq[ii][1]);
}
/*
 * Description: Put the baseband into deep sleep by programming CR12 and
 *              CR13; reversed by BBvExitDeepSleep().
 *
 * Parameters:
 *  In:
 *      pDevice - Device Structure
 *  Out:
 *      none
 *
 * Return Value: none
 *
 */
void
BBvSetDeepSleep (PSDevice pDevice)
{
ControlvWriteByte(pDevice, MESSAGE_REQUEST_BBREG, 0x0c, 0x17);//CR12
ControlvWriteByte(pDevice, MESSAGE_REQUEST_BBREG, 0x0D, 0xB9);//CR13
}
/*
 * Wake the baseband from deep sleep: restore CR12/CR13 to the values
 * used during normal operation (see BBvSetDeepSleep for the sleep values).
 */
void
BBvExitDeepSleep (PSDevice pDevice)
{
ControlvWriteByte(pDevice, MESSAGE_REQUEST_BBREG, 0x0C, 0x00);//CR12
ControlvWriteByte(pDevice, MESSAGE_REQUEST_BBREG, 0x0D, 0x01);//CR13
}
/*
 * Return the average SQ3 value of the most-used OFDM rate.
 * Starts from 54M; a lower rate takes over only when it has a strictly
 * larger packet count.  Returns 0 when no qualifying packets were seen.
 */
static unsigned long s_ulGetLowSQ3(PSDevice pDevice)
{
	unsigned long ulBestPktNum = pDevice->aulPktNum[RATE_54M];
	unsigned long ulBestSQ3 = 0;
	int ii;

	if (ulBestPktNum != 0)
		ulBestSQ3 = pDevice->aulSQ3Val[RATE_54M] / ulBestPktNum;

	for (ii = RATE_48M; ii >= RATE_6M; ii--) {
		if (pDevice->aulPktNum[ii] > ulBestPktNum) {
			ulBestPktNum = pDevice->aulPktNum[ii];
			ulBestSQ3 = pDevice->aulSQ3Val[ii] / pDevice->aulPktNum[ii];
		}
	}
	return ulBestSQ3;
}
/*
 * Compute a per-mille reception ratio for the dominant rate, tagged with
 * a TOP_RATE_* marker constant.  For rates below 54M the ratio covers
 * the cumulative packet count of that rate and everything above it.
 * NOTE(review): TOP_RATE_48M is added for every rate below 54M, not just
 * 48M — preserved as-is from the original; confirm whether intentional.
 */
static unsigned long s_ulGetRatio(PSDevice pDevice)
{
	unsigned long ulResult = 0;
	unsigned long ulBestPktNum;
	unsigned long ulAccum;
	int ii, jj;

	/* 54M gets its own (non-cumulative) ratio when any packets exist. */
	ulBestPktNum = pDevice->aulPktNum[RATE_54M];
	if (ulBestPktNum != 0) {
		ulResult = (ulBestPktNum * 1000 / pDevice->uDiversityCnt)
			   + TOP_RATE_54M;
	}

	for (ii = RATE_48M; ii >= RATE_1M; ii--) {
		if (pDevice->aulPktNum[ii] > ulBestPktNum) {
			ulAccum = 0;
			for (jj = RATE_54M; jj >= ii; jj--)
				ulAccum += pDevice->aulPktNum[jj];
			ulResult = (ulAccum * 1000 / pDevice->uDiversityCnt)
				   + TOP_RATE_48M;
			ulBestPktNum = pDevice->aulPktNum[ii];
		}
	}
	return ulResult;
}
/* Reset all antenna-diversity statistics: the sample counter plus the
 * per-rate packet counts and SQ3 accumulators. */
static void s_vClearSQ3Value(PSDevice pDevice)
{
	int ii;

	pDevice->uDiversityCnt = 0;

	for (ii = RATE_1M; ii < MAX_RATE; ii++) {
		pDevice->aulPktNum[ii] = 0;
		pDevice->aulSQ3Val[ii] = 0;
	}
}
/*
 * Description: Antenna Diversity.
 *              Accumulates per-rate packet counts (and SQ3 sums for OFDM
 *              rates) for each received packet, then runs a two-state
 *              antenna evaluation: in state 0, after ulDiversityNValue
 *              samples, it may switch antennas and start the Tmax1 timer;
 *              in state 1, after ulDiversityMValue samples, it compares
 *              the two antennas' SQ3/ratio and switches back if the
 *              original antenna looked better.
 *
 * Parameters:
 *  In:
 *      pDevice  - Device Structure
 *      byRxRate - raw rate field from the received packet
 *                 (in 500 kbit/s units: 2 = 1M ... 108 = 54M)
 *      bySQ3    - SQ3 value from the received packet
 *  Out:
 *      none
 *
 * Return Value: none
 *
 */
void
BBvAntennaDiversity (PSDevice pDevice, BYTE byRxRate, BYTE bySQ3)
{
pDevice->uDiversityCnt++;
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"pDevice->uDiversityCnt = %d\n", (int)pDevice->uDiversityCnt);
/* Map the raw rate field to a RATE_* counter; OFDM rates also
 * accumulate the SQ3 quality metric. */
if (byRxRate == 2) {
pDevice->aulPktNum[RATE_1M]++;
}
else if (byRxRate==4) {
pDevice->aulPktNum[RATE_2M]++;
}
else if (byRxRate==11) {
pDevice->aulPktNum[RATE_5M]++;
}
else if (byRxRate==22) {
pDevice->aulPktNum[RATE_11M]++;
}
else if(byRxRate==12){
pDevice->aulPktNum[RATE_6M]++;
pDevice->aulSQ3Val[RATE_6M] += bySQ3;
}
else if(byRxRate==18){
pDevice->aulPktNum[RATE_9M]++;
pDevice->aulSQ3Val[RATE_9M] += bySQ3;
}
else if(byRxRate==24){
pDevice->aulPktNum[RATE_12M]++;
pDevice->aulSQ3Val[RATE_12M] += bySQ3;
}
else if(byRxRate==36){
pDevice->aulPktNum[RATE_18M]++;
pDevice->aulSQ3Val[RATE_18M] += bySQ3;
}
else if(byRxRate==48){
pDevice->aulPktNum[RATE_24M]++;
pDevice->aulSQ3Val[RATE_24M] += bySQ3;
}
else if(byRxRate==72){
pDevice->aulPktNum[RATE_36M]++;
pDevice->aulSQ3Val[RATE_36M] += bySQ3;
}
else if(byRxRate==96){
pDevice->aulPktNum[RATE_48M]++;
pDevice->aulSQ3Val[RATE_48M] += bySQ3;
}
else if(byRxRate==108){
pDevice->aulPktNum[RATE_54M]++;
pDevice->aulSQ3Val[RATE_54M] += bySQ3;
}
if (pDevice->byAntennaState == 0) {
/* State 0: evaluating the current antenna. */
if (pDevice->uDiversityCnt > pDevice->ulDiversityNValue) {
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"ulDiversityNValue=[%d],54M-[%d]\n",(int)pDevice->ulDiversityNValue, (int)pDevice->aulPktNum[RATE_54M]);
pDevice->ulSQ3_State0 = s_ulGetLowSQ3(pDevice);
pDevice->ulRatio_State0 = s_ulGetRatio(pDevice);
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"SQ3_State0, SQ3= [%08x] rate = [%08x]\n",(int)pDevice->ulSQ3_State0,(int)pDevice->ulRatio_State0);
/* Current antenna looks weak (or no SQ3 data): try the
 * other one for byTMax seconds. */
if ( ((pDevice->aulPktNum[RATE_54M] < pDevice->ulDiversityNValue/2) &&
(pDevice->ulSQ3_State0 > pDevice->ulSQ3TH) ) ||
(pDevice->ulSQ3_State0 == 0 ) ) {
if ( pDevice->byTMax == 0 )
return;
bScheduleCommand((void *) pDevice,
WLAN_CMD_CHANGE_ANTENNA,
NULL);
pDevice->byAntennaState = 1;
del_timer(&pDevice->TimerSQ3Tmax3);
del_timer(&pDevice->TimerSQ3Tmax2);
pDevice->TimerSQ3Tmax1.expires = RUN_AT(pDevice->byTMax * HZ);
add_timer(&pDevice->TimerSQ3Tmax1);
} else {
/* Antenna is fine: re-arm the periodic re-check. */
pDevice->TimerSQ3Tmax3.expires = RUN_AT(pDevice->byTMax3 * HZ);
add_timer(&pDevice->TimerSQ3Tmax3);
}
s_vClearSQ3Value(pDevice);
}
} else { //byAntennaState == 1
/* State 1: trial period on the alternate antenna. */
if (pDevice->uDiversityCnt > pDevice->ulDiversityMValue) {
del_timer(&pDevice->TimerSQ3Tmax1);
pDevice->ulSQ3_State1 = s_ulGetLowSQ3(pDevice);
pDevice->ulRatio_State1 = s_ulGetRatio(pDevice);
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"SQ3_State1, rate0 = %08x,rate1 = %08x\n",(int)pDevice->ulRatio_State0,(int)pDevice->ulRatio_State1);
/* Switch back if the original antenna performed better. */
if ( ((pDevice->ulSQ3_State1 == 0) && (pDevice->ulSQ3_State0 != 0)) ||
((pDevice->ulSQ3_State1 == 0) && (pDevice->ulSQ3_State0 == 0) && (pDevice->ulRatio_State1 < pDevice->ulRatio_State0)) ||
((pDevice->ulSQ3_State1 != 0) && (pDevice->ulSQ3_State0 != 0) && (pDevice->ulSQ3_State0 < pDevice->ulSQ3_State1))
) {
bScheduleCommand((void *) pDevice,
WLAN_CMD_CHANGE_ANTENNA,
NULL);
pDevice->TimerSQ3Tmax3.expires = RUN_AT(pDevice->byTMax3 * HZ);
pDevice->TimerSQ3Tmax2.expires = RUN_AT(pDevice->byTMax2 * HZ);
add_timer(&pDevice->TimerSQ3Tmax3);
add_timer(&pDevice->TimerSQ3Tmax2);
}
pDevice->byAntennaState = 0;
s_vClearSQ3Value(pDevice);
}
} //byAntennaState
}
/*+
 *
 * Description:
 *      Timer callback for SQ3 antenna diversity: forces a switch back to
 *      antenna state 0, clears the accumulated statistics, and re-arms
 *      the Tmax2/Tmax3 evaluation timers.  Runs under the device lock.
 *
 * Parameters:
 *  In:
 *      hDeviceContext  - Pointer to the adapter
 *  Out:
 *      none
 *
 * Return Value: none
 *
-*/
void TimerSQ3CallBack(void *hDeviceContext)
{
PSDevice pDevice = (PSDevice)hDeviceContext;
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"TimerSQ3CallBack...");
spin_lock_irq(&pDevice->lock);
bScheduleCommand((void *) pDevice, WLAN_CMD_CHANGE_ANTENNA, NULL);
pDevice->byAntennaState = 0;
s_vClearSQ3Value(pDevice);
pDevice->TimerSQ3Tmax3.expires = RUN_AT(pDevice->byTMax3 * HZ);
pDevice->TimerSQ3Tmax2.expires = RUN_AT(pDevice->byTMax2 * HZ);
add_timer(&pDevice->TimerSQ3Tmax3);
add_timer(&pDevice->TimerSQ3Tmax2);
spin_unlock_irq(&pDevice->lock);
}
/*+
 *
 * Description:
 *      Tmax3 timer callback for SQ3 antenna diversity: snapshots the
 *      state-0 reception ratio and clears the statistics.  When antenna
 *      switching is enabled (byTMax != 0) it schedules a trial on the
 *      alternate antenna (state 1) guarded by the Tmax1 timer; otherwise
 *      it simply re-arms itself.  Runs under the device lock.
 *
 * Parameters:
 *  In:
 *      hDeviceContext  - Pointer to the adapter
 *  Out:
 *      none
 *
 * Return Value: none
 *
-*/
void TimerSQ3Tmax3CallBack(void *hDeviceContext)
{
PSDevice pDevice = (PSDevice)hDeviceContext;
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"TimerSQ3Tmax3CallBack...");
spin_lock_irq(&pDevice->lock);
pDevice->ulRatio_State0 = s_ulGetRatio(pDevice);
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"SQ3_State0 = [%08x]\n",(int)pDevice->ulRatio_State0);
s_vClearSQ3Value(pDevice);
if ( pDevice->byTMax == 0 ) {
/* Switching disabled: just re-arm this periodic check. */
pDevice->TimerSQ3Tmax3.expires = RUN_AT(pDevice->byTMax3 * HZ);
add_timer(&pDevice->TimerSQ3Tmax3);
spin_unlock_irq(&pDevice->lock);
return;
}
bScheduleCommand((void *) pDevice, WLAN_CMD_CHANGE_ANTENNA, NULL);
pDevice->byAntennaState = 1;
del_timer(&pDevice->TimerSQ3Tmax3);
del_timer(&pDevice->TimerSQ3Tmax2);
pDevice->TimerSQ3Tmax1.expires = RUN_AT(pDevice->byTMax * HZ);
add_timer(&pDevice->TimerSQ3Tmax1);
spin_unlock_irq(&pDevice->lock);
}
void
BBvUpdatePreEDThreshold(
PSDevice pDevice,
BOOL bScanning)
{
switch(pDevice->byRFType)
{
case RF_AL2230:
case RF_AL2230S:
case RF_AIROHA7230:
//RobertYu:20060627, update new table
if( bScanning )
{ // need Max sensitivity //RSSI -69, -70,....
pDevice->byBBPreEDIndex = 0;
ControlvWriteByte(pDevice, MESSAGE_REQUEST_BBREG, 0xC9, 0x00); //CR201(0xC9)
ControlvWriteByte(pDevice, MESSAGE_REQUEST_BBREG, 0xCE, 0x30); //CR206(0xCE)
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO" pDevice->byBBPreEDRSSI -69, -70, -71,...\n");
break;
}
if(pDevice->byBBPreEDRSSI <= 45) { // RSSI 0, -1,-2,....-45
if(pDevice->byBBPreEDIndex == 20) break;
pDevice->byBBPreEDIndex = 20;
ControlvWriteByte(pDevice, MESSAGE_REQUEST_BBREG, 0xC9, 0xFF); //CR201(0xC9)
ControlvWriteByte(pDevice, MESSAGE_REQUEST_BBREG, 0xCE, 0x00); //CR206(0xCE)
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO" pDevice->byBBPreEDRSSI 0, -1,-2,..-45\n");
} else if(pDevice->byBBPreEDRSSI <= 46) { //RSSI -46
if(pDevice->byBBPreEDIndex == 19) break;
pDevice->byBBPreEDIndex = 19;
ControlvWriteByte(pDevice, MESSAGE_REQUEST_BBREG, 0xC9, 0x1A); //CR201(0xC9)
ControlvWriteByte(pDevice, MESSAGE_REQUEST_BBREG, 0xCE, 0x00); //CR206(0xCE)
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO" pDevice->byBBPreEDRSSI -46\n");
} else if(pDevice->byBBPreEDRSSI <= 47) { //RSSI -47
if(pDevice->byBBPreEDIndex == 18) break;
pDevice->byBBPreEDIndex = 18;
ControlvWriteByte(pDevice, MESSAGE_REQUEST_BBREG, 0xC9, 0x15); //CR201(0xC9)
ControlvWriteByte(pDevice, MESSAGE_REQUEST_BBREG, 0xCE, 0x00); //CR206(0xCE)
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO" pDevice->byBBPreEDRSSI -47\n");
} else if(pDevice->byBBPreEDRSSI <= 49) { //RSSI -48, -49
if(pDevice->byBBPreEDIndex == 17) break;
pDevice->byBBPreEDIndex = 17;
ControlvWriteByte(pDevice, MESSAGE_REQUEST_BBREG, 0xC9, 0x0E); //CR201(0xC9)
ControlvWriteByte(pDevice, MESSAGE_REQUEST_BBREG, 0xCE, 0x00); //CR206(0xCE)
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO" pDevice->byBBPreEDRSSI -48,-49\n");
} else if(pDevice->byBBPreEDRSSI <= 51) { //RSSI -50, -51
if(pDevice->byBBPreEDIndex == 16) break;
pDevice->byBBPreEDIndex = 16;
ControlvWriteByte(pDevice, MESSAGE_REQUEST_BBREG, 0xC9, 0x09); //CR201(0xC9)
ControlvWriteByte(pDevice, MESSAGE_REQUEST_BBREG, 0xCE, 0x00); //CR206(0xCE)
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO" pDevice->byBBPreEDRSSI -50,-51\n");
} else if(pDevice->byBBPreEDRSSI <= 53) { //RSSI -52, -53
if(pDevice->byBBPreEDIndex == 15) break;
pDevice->byBBPreEDIndex = 15;
ControlvWriteByte(pDevice, MESSAGE_REQUEST_BBREG, 0xC9, 0x06); //CR201(0xC9)
ControlvWriteByte(pDevice, MESSAGE_REQUEST_BBREG, 0xCE, 0x00); //CR206(0xCE)
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO" pDevice->byBBPreEDRSSI -52,-53\n");
} else if(pDevice->byBBPreEDRSSI <= 55) { //RSSI -54, -55
if(pDevice->byBBPreEDIndex == 14) break;
pDevice->byBBPreEDIndex = 14;
ControlvWriteByte(pDevice, MESSAGE_REQUEST_BBREG, 0xC9, 0x03); //CR201(0xC9)
ControlvWriteByte(pDevice, MESSAGE_REQUEST_BBREG, 0xCE, 0x00); //CR206(0xCE)
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO" pDevice->byBBPreEDRSSI -54,-55\n");
} else if(pDevice->byBBPreEDRSSI <= 56) { //RSSI -56
if(pDevice->byBBPreEDIndex == 13) break;
pDevice->byBBPreEDIndex = 13;
ControlvWriteByte(pDevice, MESSAGE_REQUEST_BBREG, 0xC9, 0x02); //CR201(0xC9)
ControlvWriteByte(pDevice, MESSAGE_REQUEST_BBREG, 0xCE, 0xA0); //CR206(0xCE)
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO" pDevice->byBBPreEDRSSI -56\n");
} else if(pDevice->byBBPreEDRSSI <= 57) { //RSSI -57
if(pDevice->byBBPreEDIndex == 12) break;
pDevice->byBBPreEDIndex = 12;
ControlvWriteByte(pDevice, MESSAGE_REQUEST_BBREG, 0xC9, 0x02); //CR201(0xC9)
ControlvWriteByte(pDevice, MESSAGE_REQUEST_BBREG, 0xCE, 0x20); //CR206(0xCE)
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO" pDevice->byBBPreEDRSSI -57\n");
} else if(pDevice->byBBPreEDRSSI <= 58) { //RSSI -58
if(pDevice->byBBPreEDIndex == 11) break;
pDevice->byBBPreEDIndex = 11;
ControlvWriteByte(pDevice, MESSAGE_REQUEST_BBREG, 0xC9, 0x01); //CR201(0xC9)
ControlvWriteByte(pDevice, MESSAGE_REQUEST_BBREG, 0xCE, 0xA0); //CR206(0xCE)
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO" pDevice->byBBPreEDRSSI -58\n");
} else if(pDevice->byBBPreEDRSSI <= 59) { //RSSI -59
if(pDevice->byBBPreEDIndex == 10) break;
pDevice->byBBPreEDIndex = 10;
ControlvWriteByte(pDevice, MESSAGE_REQUEST_BBREG, 0xC9, 0x01); //CR201(0xC9)
ControlvWriteByte(pDevice, MESSAGE_REQUEST_BBREG, 0xCE, 0x54); //CR206(0xCE)
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO" pDevice->byBBPreEDRSSI -59\n");
} else if(pDevice->byBBPreEDRSSI <= 60) { //RSSI -60
if(pDevice->byBBPreEDIndex == 9) break;
pDevice->byBBPreEDIndex = 9;
ControlvWriteByte(pDevice, MESSAGE_REQUEST_BBREG, 0xC9, 0x01); //CR201(0xC9)
ControlvWriteByte(pDevice, MESSAGE_REQUEST_BBREG, 0xCE, 0x18); //CR206(0xCE)
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO" pDevice->byBBPreEDRSSI -60\n");
} else if(pDevice->byBBPreEDRSSI <= 61) { //RSSI -61
if(pDevice->byBBPreEDIndex == 8) break;
pDevice->byBBPreEDIndex = 8;
ControlvWriteByte(pDevice, MESSAGE_REQUEST_BBREG, 0xC9, 0x00); //CR201(0xC9)
ControlvWriteByte(pDevice, MESSAGE_REQUEST_BBREG, 0xCE, 0xE3); //CR206(0xCE)
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO" pDevice->byBBPreEDRSSI -61\n");
} else if(pDevice->byBBPreEDRSSI <= 62) { //RSSI -62
if(pDevice->byBBPreEDIndex == 7) break;
pDevice->byBBPreEDIndex = 7;
ControlvWriteByte(pDevice, MESSAGE_REQUEST_BBREG, 0xC9, 0x00); //CR201(0xC9)
ControlvWriteByte(pDevice, MESSAGE_REQUEST_BBREG, 0xCE, 0xB9); //CR206(0xCE)
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO" pDevice->byBBPreEDRSSI -62\n");
} else if(pDevice->byBBPreEDRSSI <= 63) { //RSSI -63
if(pDevice->byBBPreEDIndex == 6) break;
pDevice->byBBPreEDIndex = 6;
ControlvWriteByte(pDevice, MESSAGE_REQUEST_BBREG, 0xC9, 0x00); //CR201(0xC9)
ControlvWriteByte(pDevice, MESSAGE_REQUEST_BBREG, 0xCE, 0x93); //CR206(0xCE)
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO" pDevice->byBBPreEDRSSI -63\n");
} else if(pDevice->byBBPreEDRSSI <= 64) { //RSSI -64
if(pDevice->byBBPreEDIndex == 5) break;
pDevice->byBBPreEDIndex = 5;
ControlvWriteByte(pDevice, MESSAGE_REQUEST_BBREG, 0xC9, 0x00); //CR201(0xC9)
ControlvWriteByte(pDevice, MESSAGE_REQUEST_BBREG, 0xCE, 0x79); //CR206(0xCE)
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO" pDevice->byBBPreEDRSSI -64\n");
} else if(pDevice->byBBPreEDRSSI <= 65) { //RSSI -65
if(pDevice->byBBPreEDIndex == 4) break;
pDevice->byBBPreEDIndex = 4;
ControlvWriteByte(pDevice, MESSAGE_REQUEST_BBREG, 0xC9, 0x00); //CR201(0xC9)
ControlvWriteByte(pDevice, MESSAGE_REQUEST_BBREG, 0xCE, 0x62); //CR206(0xCE)
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO" pDevice->byBBPreEDRSSI -65\n");
} else if(pDevice->byBBPreEDRSSI <= 66) { //RSSI -66
if(pDevice->byBBPreEDIndex == 3) break;
pDevice->byBBPreEDIndex = 3;
ControlvWriteByte(pDevice, MESSAGE_REQUEST_BBREG, 0xC9, 0x00); //CR201(0xC9)
ControlvWriteByte(pDevice, MESSAGE_REQUEST_BBREG, 0xCE, 0x51); //CR206(0xCE)
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO" pDevice->byBBPreEDRSSI -66\n");
} else if(pDevice->byBBPreEDRSSI <= 67) { //RSSI -67
if(pDevice->byBBPreEDIndex == 2) break;
pDevice->byBBPreEDIndex = 2;
ControlvWriteByte(pDevice, MESSAGE_REQUEST_BBREG, 0xC9, 0x00); //CR201(0xC9)
ControlvWriteByte(pDevice, MESSAGE_REQUEST_BBREG, 0xCE, 0x43); //CR206(0xCE)
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO" pDevice->byBBPreEDRSSI -67\n");
} else if(pDevice->byBBPreEDRSSI <= 68) { //RSSI -68
if(pDevice->byBBPreEDIndex == 1) break;
pDevice->byBBPreEDIndex = 1;
ControlvWriteByte(pDevice, MESSAGE_REQUEST_BBREG, 0xC9, 0x00); //CR201(0xC9)
ControlvWriteByte(pDevice, MESSAGE_REQUEST_BBREG, 0xCE, 0x36); //CR206(0xCE)
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO" pDevice->byBBPreEDRSSI -68\n");
} else { //RSSI -69, -70,....
if(pDevice->byBBPreEDIndex == 0) break;
pDevice->byBBPreEDIndex = 0;
ControlvWriteByte(pDevice, MESSAGE_REQUEST_BBREG, 0xC9, 0x00); //CR201(0xC9)
ControlvWriteByte(pDevice, MESSAGE_REQUEST_BBREG, 0xCE, 0x30); //CR206(0xCE)
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO" pDevice->byBBPreEDRSSI -69, -70,...\n");
}
break;
case RF_VT3226:
case RF_VT3226D0:
//RobertYu:20060627, update new table
if( bScanning )
{ // need Max sensitivity //RSSI -69, -70, ...
pDevice->byBBPreEDIndex = 0;
ControlvWriteByte(pDevice, MESSAGE_REQUEST_BBREG, 0xC9, 0x00); //CR201(0xC9)
ControlvWriteByte(pDevice, MESSAGE_REQUEST_BBREG, 0xCE, 0x24); //CR206(0xCE)
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO" pDevice->byBBPreEDRSSI -69, -70,..\n");
break;
}
if(pDevice->byBBPreEDRSSI <= 41) { // RSSI 0, -1,-2,....-41
if(pDevice->byBBPreEDIndex == 22) break;
pDevice->byBBPreEDIndex = 22;
ControlvWriteByte(pDevice, MESSAGE_REQUEST_BBREG, 0xC9, 0xFF); //CR201(0xC9)
ControlvWriteByte(pDevice, MESSAGE_REQUEST_BBREG, 0xCE, 0x00); //CR206(0xCE)
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO" pDevice->byBBPreEDRSSI 0, -1,-2,..-41\n");
} else if(pDevice->byBBPreEDRSSI <= 42) { //RSSI -42
if(pDevice->byBBPreEDIndex == 21) break;
pDevice->byBBPreEDIndex = 21;
ControlvWriteByte(pDevice, MESSAGE_REQUEST_BBREG, 0xC9, 0x36); //CR201(0xC9)
ControlvWriteByte(pDevice, MESSAGE_REQUEST_BBREG, 0xCE, 0x00); //CR206(0xCE)
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO" pDevice->byBBPreEDRSSI -42\n");
} else if(pDevice->byBBPreEDRSSI <= 43) { //RSSI -43
if(pDevice->byBBPreEDIndex == 20) break;
pDevice->byBBPreEDIndex = 20;
ControlvWriteByte(pDevice, MESSAGE_REQUEST_BBREG, 0xC9, 0x26); //CR201(0xC9)
ControlvWriteByte(pDevice, MESSAGE_REQUEST_BBREG, 0xCE, 0x00); //CR206(0xCE)
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO" pDevice->byBBPreEDRSSI -43\n");
} else if(pDevice->byBBPreEDRSSI <= 45) { //RSSI -44, -45
if(pDevice->byBBPreEDIndex == 19) break;
pDevice->byBBPreEDIndex = 19;
ControlvWriteByte(pDevice, MESSAGE_REQUEST_BBREG, 0xC9, 0x18); //CR201(0xC9)
ControlvWriteByte(pDevice, MESSAGE_REQUEST_BBREG, 0xCE, 0x00); //CR206(0xCE)
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO" pDevice->byBBPreEDRSSI -44,-45\n");
} else if(pDevice->byBBPreEDRSSI <= 47) { //RSSI -46, -47
if(pDevice->byBBPreEDIndex == 18) break;
pDevice->byBBPreEDIndex = 18;
ControlvWriteByte(pDevice, MESSAGE_REQUEST_BBREG, 0xC9, 0x11); //CR201(0xC9)
ControlvWriteByte(pDevice, MESSAGE_REQUEST_BBREG, 0xCE, 0x00); //CR206(0xCE)
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO" pDevice->byBBPreEDRSSI -46,-47\n");
} else if(pDevice->byBBPreEDRSSI <= 49) { //RSSI -48, -49
if(pDevice->byBBPreEDIndex == 17) break;
pDevice->byBBPreEDIndex = 17;
ControlvWriteByte(pDevice, MESSAGE_REQUEST_BBREG, 0xC9, 0x0a); //CR201(0xC9)
ControlvWriteByte(pDevice, MESSAGE_REQUEST_BBREG, 0xCE, 0x00); //CR206(0xCE)
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO" pDevice->byBBPreEDRSSI -48,-49\n");
} else if(pDevice->byBBPreEDRSSI <= 51) { //RSSI -50, -51
if(pDevice->byBBPreEDIndex == 16) break;
pDevice->byBBPreEDIndex = 16;
ControlvWriteByte(pDevice, MESSAGE_REQUEST_BBREG, 0xC9, 0x07); //CR201(0xC9)
ControlvWriteByte(pDevice, MESSAGE_REQUEST_BBREG, 0xCE, 0x00); //CR206(0xCE)
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO" pDevice->byBBPreEDRSSI -50,-51\n");
} else if(pDevice->byBBPreEDRSSI <= 53) { //RSSI -52, -53
if(pDevice->byBBPreEDIndex == 15) break;
pDevice->byBBPreEDIndex = 15;
ControlvWriteByte(pDevice, MESSAGE_REQUEST_BBREG, 0xC9, 0x04); //CR201(0xC9)
ControlvWriteByte(pDevice, MESSAGE_REQUEST_BBREG, 0xCE, 0x00); //CR206(0xCE)
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO" pDevice->byBBPreEDRSSI -52,-53\n");
} else if(pDevice->byBBPreEDRSSI <= 55) { //RSSI -54, -55
if(pDevice->byBBPreEDIndex == 14) break;
pDevice->byBBPreEDIndex = 14;
ControlvWriteByte(pDevice, MESSAGE_REQUEST_BBREG, 0xC9, 0x02); //CR201(0xC9)
ControlvWriteByte(pDevice, MESSAGE_REQUEST_BBREG, 0xCE, 0xC0); //CR206(0xCE)
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO" pDevice->byBBPreEDRSSI -54,-55\n");
} else if(pDevice->byBBPreEDRSSI <= 56) { //RSSI -56
if(pDevice->byBBPreEDIndex == 13) break;
pDevice->byBBPreEDIndex = 13;
ControlvWriteByte(pDevice, MESSAGE_REQUEST_BBREG, 0xC9, 0x02); //CR201(0xC9)
ControlvWriteByte(pDevice, MESSAGE_REQUEST_BBREG, 0xCE, 0x30); //CR206(0xCE)
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO" pDevice->byBBPreEDRSSI -56\n");
} else if(pDevice->byBBPreEDRSSI <= 57) { //RSSI -57
if(pDevice->byBBPreEDIndex == 12) break;
pDevice->byBBPreEDIndex = 12;
ControlvWriteByte(pDevice, MESSAGE_REQUEST_BBREG, 0xC9, 0x01); //CR201(0xC9)
ControlvWriteByte(pDevice, MESSAGE_REQUEST_BBREG, 0xCE, 0xB0); //CR206(0xCE)
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO" pDevice->byBBPreEDRSSI -57\n");
} else if(pDevice->byBBPreEDRSSI <= 58) { //RSSI -58
if(pDevice->byBBPreEDIndex == 11) break;
pDevice->byBBPreEDIndex = 11;
ControlvWriteByte(pDevice, MESSAGE_REQUEST_BBREG, 0xC9, 0x01); //CR201(0xC9)
ControlvWriteByte(pDevice, MESSAGE_REQUEST_BBREG, 0xCE, 0x70); //CR206(0xCE)
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO" pDevice->byBBPreEDRSSI -58\n");
} else if(pDevice->byBBPreEDRSSI <= 59) { //RSSI -59
if(pDevice->byBBPreEDIndex == 10) break;
pDevice->byBBPreEDIndex = 10;
ControlvWriteByte(pDevice, MESSAGE_REQUEST_BBREG, 0xC9, 0x01); //CR201(0xC9)
ControlvWriteByte(pDevice, MESSAGE_REQUEST_BBREG, 0xCE, 0x30); //CR206(0xCE)
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO" pDevice->byBBPreEDRSSI -59\n");
} else if(pDevice->byBBPreEDRSSI <= 60) { //RSSI -60
if(pDevice->byBBPreEDIndex == 9) break;
pDevice->byBBPreEDIndex = 9;
ControlvWriteByte(pDevice, MESSAGE_REQUEST_BBREG, 0xC9, 0x00); //CR201(0xC9)
ControlvWriteByte(pDevice, MESSAGE_REQUEST_BBREG, 0xCE, 0xEA); //CR206(0xCE)
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO" pDevice->byBBPreEDRSSI -60\n");
} else if(pDevice->byBBPreEDRSSI <= 61) { //RSSI -61
if(pDevice->byBBPreEDIndex == 8) break;
pDevice->byBBPreEDIndex = 8;
ControlvWriteByte(pDevice, MESSAGE_REQUEST_BBREG, 0xC9, 0x00); //CR201(0xC9)
ControlvWriteByte(pDevice, MESSAGE_REQUEST_BBREG, 0xCE, 0xC0); //CR206(0xCE)
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO" pDevice->byBBPreEDRSSI -61\n");
} else if(pDevice->byBBPreEDRSSI <= 62) { //RSSI -62
if(pDevice->byBBPreEDIndex == 7) break;
pDevice->byBBPreEDIndex = 7;
ControlvWriteByte(pDevice, MESSAGE_REQUEST_BBREG, 0xC9, 0x00); //CR201(0xC9)
ControlvWriteByte(pDevice, MESSAGE_REQUEST_BBREG, 0xCE, 0x9C); //CR206(0xCE)
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO" pDevice->byBBPreEDRSSI -62\n");
} else if(pDevice->byBBPreEDRSSI <= 63) { //RSSI -63
if(pDevice->byBBPreEDIndex == 6) break;
pDevice->byBBPreEDIndex = 6;
ControlvWriteByte(pDevice, MESSAGE_REQUEST_BBREG, 0xC9, 0x00); //CR201(0xC9)
ControlvWriteByte(pDevice, MESSAGE_REQUEST_BBREG, 0xCE, 0x80); //CR206(0xCE)
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO" pDevice->byBBPreEDRSSI -63\n");
} else if(pDevice->byBBPreEDRSSI <= 64) { //RSSI -64
if(pDevice->byBBPreEDIndex == 5) break;
pDevice->byBBPreEDIndex = 5;
ControlvWriteByte(pDevice, MESSAGE_REQUEST_BBREG, 0xC9, 0x00); //CR201(0xC9)
ControlvWriteByte(pDevice, MESSAGE_REQUEST_BBREG, 0xCE, 0x68); //CR206(0xCE)
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO" pDevice->byBBPreEDRSSI -64\n");
} else if(pDevice->byBBPreEDRSSI <= 65) { //RSSI -65
if(pDevice->byBBPreEDIndex == 4) break;
pDevice->byBBPreEDIndex = 4;
ControlvWriteByte(pDevice, MESSAGE_REQUEST_BBREG, 0xC9, 0x00); //CR201(0xC9)
ControlvWriteByte(pDevice, MESSAGE_REQUEST_BBREG, 0xCE, 0x52); //CR206(0xCE)
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO" pDevice->byBBPreEDRSSI -65\n");
} else if(pDevice->byBBPreEDRSSI <= 66) { //RSSI -66
if(pDevice->byBBPreEDIndex == 3) break;
pDevice->byBBPreEDIndex = 3;
ControlvWriteByte(pDevice, MESSAGE_REQUEST_BBREG, 0xC9, 0x00); //CR201(0xC9)
ControlvWriteByte(pDevice, MESSAGE_REQUEST_BBREG, 0xCE, 0x43); //CR206(0xCE)
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO" pDevice->byBBPreEDRSSI -66\n");
} else if(pDevice->byBBPreEDRSSI <= 67) { //RSSI -67
if(pDevice->byBBPreEDIndex == 2) break;
pDevice->byBBPreEDIndex = 2;
ControlvWriteByte(pDevice, MESSAGE_REQUEST_BBREG, 0xC9, 0x00); //CR201(0xC9)
ControlvWriteByte(pDevice, MESSAGE_REQUEST_BBREG, 0xCE, 0x36); //CR206(0xCE)
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO" pDevice->byBBPreEDRSSI -67\n");
} else if(pDevice->byBBPreEDRSSI <= 68) { //RSSI -68
if(pDevice->byBBPreEDIndex == 1) break;
pDevice->byBBPreEDIndex = 1;
ControlvWriteByte(pDevice, MESSAGE_REQUEST_BBREG, 0xC9, 0x00); //CR201(0xC9)
ControlvWriteByte(pDevice, MESSAGE_REQUEST_BBREG, 0xCE, 0x2D); //CR206(0xCE)
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO" pDevice->byBBPreEDRSSI -68\n");
} else { //RSSI -69, -70, ...
if(pDevice->byBBPreEDIndex == 0) break;
pDevice->byBBPreEDIndex = 0;
ControlvWriteByte(pDevice, MESSAGE_REQUEST_BBREG, 0xC9, 0x00); //CR201(0xC9)
ControlvWriteByte(pDevice, MESSAGE_REQUEST_BBREG, 0xCE, 0x24); //CR206(0xCE)
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO" pDevice->byBBPreEDRSSI -69, -70,..\n");
}
break;
case RF_VT3342A0: //RobertYu:20060627, testing table
if( bScanning )
{ // need Max sensitivity //RSSI -67, -68, ...
pDevice->byBBPreEDIndex = 0;
ControlvWriteByte(pDevice, MESSAGE_REQUEST_BBREG, 0xC9, 0x00); //CR201(0xC9)
ControlvWriteByte(pDevice, MESSAGE_REQUEST_BBREG, 0xCE, 0x38); //CR206(0xCE)
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO" pDevice->byBBPreEDRSSI -67, -68,..\n");
break;
}
if(pDevice->byBBPreEDRSSI <= 41) { // RSSI 0, -1,-2,....-41
if(pDevice->byBBPreEDIndex == 20) break;
pDevice->byBBPreEDIndex = 20;
ControlvWriteByte(pDevice, MESSAGE_REQUEST_BBREG, 0xC9, 0xFF); //CR201(0xC9)
ControlvWriteByte(pDevice, MESSAGE_REQUEST_BBREG, 0xCE, 0x00); //CR206(0xCE)
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO" pDevice->byBBPreEDRSSI 0, -1,-2,..-41\n");
} else if(pDevice->byBBPreEDRSSI <= 42) { //RSSI -42
if(pDevice->byBBPreEDIndex == 19) break;
pDevice->byBBPreEDIndex = 19;
ControlvWriteByte(pDevice, MESSAGE_REQUEST_BBREG, 0xC9, 0x36); //CR201(0xC9)
ControlvWriteByte(pDevice, MESSAGE_REQUEST_BBREG, 0xCE, 0x00); //CR206(0xCE)
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO" pDevice->byBBPreEDRSSI -42\n");
} else if(pDevice->byBBPreEDRSSI <= 43) { //RSSI -43
if(pDevice->byBBPreEDIndex == 18) break;
pDevice->byBBPreEDIndex = 18;
ControlvWriteByte(pDevice, MESSAGE_REQUEST_BBREG, 0xC9, 0x26); //CR201(0xC9)
ControlvWriteByte(pDevice, MESSAGE_REQUEST_BBREG, 0xCE, 0x00); //CR206(0xCE)
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO" pDevice->byBBPreEDRSSI -43\n");
} else if(pDevice->byBBPreEDRSSI <= 45) { //RSSI -44, -45
if(pDevice->byBBPreEDIndex == 17) break;
pDevice->byBBPreEDIndex = 17;
ControlvWriteByte(pDevice, MESSAGE_REQUEST_BBREG, 0xC9, 0x18); //CR201(0xC9)
ControlvWriteByte(pDevice, MESSAGE_REQUEST_BBREG, 0xCE, 0x00); //CR206(0xCE)
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO" pDevice->byBBPreEDRSSI -44,-45\n");
} else if(pDevice->byBBPreEDRSSI <= 47) { //RSSI -46, -47
if(pDevice->byBBPreEDIndex == 16) break;
pDevice->byBBPreEDIndex = 16;
ControlvWriteByte(pDevice, MESSAGE_REQUEST_BBREG, 0xC9, 0x11); //CR201(0xC9)
ControlvWriteByte(pDevice, MESSAGE_REQUEST_BBREG, 0xCE, 0x00); //CR206(0xCE)
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO" pDevice->byBBPreEDRSSI -46,-47\n");
} else if(pDevice->byBBPreEDRSSI <= 49) { //RSSI -48, -49
if(pDevice->byBBPreEDIndex == 15) break;
pDevice->byBBPreEDIndex = 15;
ControlvWriteByte(pDevice, MESSAGE_REQUEST_BBREG, 0xC9, 0x0a); //CR201(0xC9)
ControlvWriteByte(pDevice, MESSAGE_REQUEST_BBREG, 0xCE, 0x00); //CR206(0xCE)
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO" pDevice->byBBPreEDRSSI -48,-49\n");
} else if(pDevice->byBBPreEDRSSI <= 51) { //RSSI -50, -51
if(pDevice->byBBPreEDIndex == 14) break;
pDevice->byBBPreEDIndex = 14;
ControlvWriteByte(pDevice, MESSAGE_REQUEST_BBREG, 0xC9, 0x07); //CR201(0xC9)
ControlvWriteByte(pDevice, MESSAGE_REQUEST_BBREG, 0xCE, 0x00); //CR206(0xCE)
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO" pDevice->byBBPreEDRSSI -50,-51\n");
} else if(pDevice->byBBPreEDRSSI <= 53) { //RSSI -52, -53
if(pDevice->byBBPreEDIndex == 13) break;
pDevice->byBBPreEDIndex = 13;
ControlvWriteByte(pDevice, MESSAGE_REQUEST_BBREG, 0xC9, 0x04); //CR201(0xC9)
ControlvWriteByte(pDevice, MESSAGE_REQUEST_BBREG, 0xCE, 0x00); //CR206(0xCE)
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO" pDevice->byBBPreEDRSSI -52,-53\n");
} else if(pDevice->byBBPreEDRSSI <= 55) { //RSSI -54, -55
if(pDevice->byBBPreEDIndex == 12) break;
pDevice->byBBPreEDIndex = 12;
ControlvWriteByte(pDevice, MESSAGE_REQUEST_BBREG, 0xC9, 0x02); //CR201(0xC9)
ControlvWriteByte(pDevice, MESSAGE_REQUEST_BBREG, 0xCE, 0xC0); //CR206(0xCE)
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO" pDevice->byBBPreEDRSSI -54,-55\n");
} else if(pDevice->byBBPreEDRSSI <= 56) { //RSSI -56
if(pDevice->byBBPreEDIndex == 11) break;
pDevice->byBBPreEDIndex = 11;
ControlvWriteByte(pDevice, MESSAGE_REQUEST_BBREG, 0xC9, 0x02); //CR201(0xC9)
ControlvWriteByte(pDevice, MESSAGE_REQUEST_BBREG, 0xCE, 0x30); //CR206(0xCE)
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO" pDevice->byBBPreEDRSSI -56\n");
} else if(pDevice->byBBPreEDRSSI <= 57) { //RSSI -57
if(pDevice->byBBPreEDIndex == 10) break;
pDevice->byBBPreEDIndex = 10;
ControlvWriteByte(pDevice, MESSAGE_REQUEST_BBREG, 0xC9, 0x01); //CR201(0xC9)
ControlvWriteByte(pDevice, MESSAGE_REQUEST_BBREG, 0xCE, 0xB0); //CR206(0xCE)
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO" pDevice->byBBPreEDRSSI -57\n");
} else if(pDevice->byBBPreEDRSSI <= 58) { //RSSI -58
if(pDevice->byBBPreEDIndex == 9) break;
pDevice->byBBPreEDIndex = 9;
ControlvWriteByte(pDevice, MESSAGE_REQUEST_BBREG, 0xC9, 0x01); //CR201(0xC9)
ControlvWriteByte(pDevice, MESSAGE_REQUEST_BBREG, 0xCE, 0x70); //CR206(0xCE)
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO" pDevice->byBBPreEDRSSI -58\n");
} else if(pDevice->byBBPreEDRSSI <= 59) { //RSSI -59
if(pDevice->byBBPreEDIndex == 8) break;
pDevice->byBBPreEDIndex = 8;
ControlvWriteByte(pDevice, MESSAGE_REQUEST_BBREG, 0xC9, 0x01); //CR201(0xC9)
ControlvWriteByte(pDevice, MESSAGE_REQUEST_BBREG, 0xCE, 0x30); //CR206(0xCE)
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO" pDevice->byBBPreEDRSSI -59\n");
} else if(pDevice->byBBPreEDRSSI <= 60) { //RSSI -60
if(pDevice->byBBPreEDIndex == 7) break;
pDevice->byBBPreEDIndex = 7;
ControlvWriteByte(pDevice, MESSAGE_REQUEST_BBREG, 0xC9, 0x00); //CR201(0xC9)
ControlvWriteByte(pDevice, MESSAGE_REQUEST_BBREG, 0xCE, 0xEA); //CR206(0xCE)
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO" pDevice->byBBPreEDRSSI -60\n");
} else if(pDevice->byBBPreEDRSSI <= 61) { //RSSI -61
if(pDevice->byBBPreEDIndex == 6) break;
pDevice->byBBPreEDIndex = 6;
ControlvWriteByte(pDevice, MESSAGE_REQUEST_BBREG, 0xC9, 0x00); //CR201(0xC9)
ControlvWriteByte(pDevice, MESSAGE_REQUEST_BBREG, 0xCE, 0xC0); //CR206(0xCE)
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO" pDevice->byBBPreEDRSSI -61\n");
} else if(pDevice->byBBPreEDRSSI <= 62) { //RSSI -62
if(pDevice->byBBPreEDIndex == 5) break;
pDevice->byBBPreEDIndex = 5;
ControlvWriteByte(pDevice, MESSAGE_REQUEST_BBREG, 0xC9, 0x00); //CR201(0xC9)
ControlvWriteByte(pDevice, MESSAGE_REQUEST_BBREG, 0xCE, 0x9C); //CR206(0xCE)
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO" pDevice->byBBPreEDRSSI -62\n");
} else if(pDevice->byBBPreEDRSSI <= 63) { //RSSI -63
if(pDevice->byBBPreEDIndex == 4) break;
pDevice->byBBPreEDIndex = 4;
ControlvWriteByte(pDevice, MESSAGE_REQUEST_BBREG, 0xC9, 0x00); //CR201(0xC9)
ControlvWriteByte(pDevice, MESSAGE_REQUEST_BBREG, 0xCE, 0x80); //CR206(0xCE)
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO" pDevice->byBBPreEDRSSI -63\n");
} else if(pDevice->byBBPreEDRSSI <= 64) { //RSSI -64
if(pDevice->byBBPreEDIndex == 3) break;
pDevice->byBBPreEDIndex = 3;
ControlvWriteByte(pDevice, MESSAGE_REQUEST_BBREG, 0xC9, 0x00); //CR201(0xC9)
ControlvWriteByte(pDevice, MESSAGE_REQUEST_BBREG, 0xCE, 0x68); //CR206(0xCE)
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO" pDevice->byBBPreEDRSSI -64\n");
} else if(pDevice->byBBPreEDRSSI <= 65) { //RSSI -65
if(pDevice->byBBPreEDIndex == 2) break;
pDevice->byBBPreEDIndex = 2;
ControlvWriteByte(pDevice, MESSAGE_REQUEST_BBREG, 0xC9, 0x00); //CR201(0xC9)
ControlvWriteByte(pDevice, MESSAGE_REQUEST_BBREG, 0xCE, 0x52); //CR206(0xCE)
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO" pDevice->byBBPreEDRSSI -65\n");
} else if(pDevice->byBBPreEDRSSI <= 66) { //RSSI -66
if(pDevice->byBBPreEDIndex == 1) break;
pDevice->byBBPreEDIndex = 1;
ControlvWriteByte(pDevice, MESSAGE_REQUEST_BBREG, 0xC9, 0x00); //CR201(0xC9)
ControlvWriteByte(pDevice, MESSAGE_REQUEST_BBREG, 0xCE, 0x43); //CR206(0xCE)
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO" pDevice->byBBPreEDRSSI -66\n");
} else { //RSSI -67, -68, ...
if(pDevice->byBBPreEDIndex == 0) break;
pDevice->byBBPreEDIndex = 0;
ControlvWriteByte(pDevice, MESSAGE_REQUEST_BBREG, 0xC9, 0x00); //CR201(0xC9)
ControlvWriteByte(pDevice, MESSAGE_REQUEST_BBREG, 0xCE, 0x38); //CR206(0xCE)
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO" pDevice->byBBPreEDRSSI -67, -68,..\n");
}
break;
}
}
| gpl-2.0 |
TeamRegular/android_kernel_lge_c50 | drivers/clocksource/nomadik-mtu.c | 1159 | 6939 | /*
* Copyright (C) 2008 STMicroelectronics
* Copyright (C) 2010 Alessandro Rubini
* Copyright (C) 2010 Linus Walleij for ST-Ericsson
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2, as
* published by the Free Software Foundation.
*/
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/io.h>
#include <linux/clockchips.h>
#include <linux/clocksource.h>
#include <linux/clk.h>
#include <linux/jiffies.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/platform_data/clocksource-nomadik-mtu.h>
#include <linux/sched_clock.h>
#include <asm/mach/time.h>
/*
* The MTU device hosts four different counters, with 4 set of
* registers. These are register names.
*/
#define MTU_IMSC 0x00 /* Interrupt mask set/clear */
#define MTU_RIS 0x04 /* Raw interrupt status */
#define MTU_MIS 0x08 /* Masked interrupt status */
#define MTU_ICR 0x0C /* Interrupt clear register */
/* per-timer registers take 0..3 as argument */
#define MTU_LR(x) (0x10 + 0x10 * (x) + 0x00) /* Load value */
#define MTU_VAL(x) (0x10 + 0x10 * (x) + 0x04) /* Current value */
#define MTU_CR(x) (0x10 + 0x10 * (x) + 0x08) /* Control reg */
#define MTU_BGLR(x) (0x10 + 0x10 * (x) + 0x0c) /* At next overflow */
/* bits for the control register */
#define MTU_CRn_ENA 0x80
#define MTU_CRn_PERIODIC 0x40 /* if 0 = free-running */
#define MTU_CRn_PRESCALE_MASK 0x0c
#define MTU_CRn_PRESCALE_1 0x00
#define MTU_CRn_PRESCALE_16 0x04
#define MTU_CRn_PRESCALE_256 0x08
#define MTU_CRn_32BITS 0x02
#define MTU_CRn_ONESHOT 0x01 /* if 0 = wraps reloading from BGLR*/
/* Other registers are usual amba/primecell registers, currently not used */
#define MTU_ITCR 0xff0
#define MTU_ITOP 0xff4
#define MTU_PERIPH_ID0 0xfe0
#define MTU_PERIPH_ID1 0xfe4
#define MTU_PERIPH_ID2 0xfe8
#define MTU_PERIPH_ID3 0xfeC
#define MTU_PCELL0 0xff0
#define MTU_PCELL1 0xff4
#define MTU_PCELL2 0xff8
#define MTU_PCELL3 0xffC
static void __iomem *mtu_base;          /* MMIO base of the MTU block */
static bool clkevt_periodic;            /* clockevent currently in periodic mode */
static u32 clk_prescale;                /* MTU_CRn_PRESCALE_* bits chosen at init */
static u32 nmdk_cycle;                  /* timer cycles per jiffy; write-once */
static struct delay_timer mtu_delay_timer;      /* timer-based udelay backend */
#ifdef CONFIG_CLKSRC_NOMADIK_MTU_SCHED_CLOCK
/*
* Override the global weak sched_clock symbol with this
* local implementation which uses the clocksource to get some
* better resolution when scheduling the kernel.
*/
static u32 notrace nomadik_read_sched_clock(void)
{
	/* May run before nmdk_timer_init(): report 0 until mtu_base is set. */
	if (unlikely(!mtu_base))
		return 0;

	/* Timer 0 counts down; negate so sched_clock sees an up-counter. */
	return -readl(mtu_base + MTU_VAL(0));
}
#endif
static unsigned long nmdk_timer_read_current_timer(void)
{
return ~readl_relaxed(mtu_base + MTU_VAL(0));
}
/* Clockevent device: use one-shot mode */
/* Program a one-shot expiry of 'evt' timer cycles on MTU timer 1. */
static int nmdk_clkevt_next(unsigned long evt, struct clock_event_device *ev)
{
	writel(1 << 1, mtu_base + MTU_IMSC);	/* unmask timer 1 interrupt */
	writel(evt, mtu_base + MTU_LR(1));	/* load the countdown value */
	/* Load highest value, enable device, enable interrupts */
	writel(MTU_CRn_ONESHOT | clk_prescale |
	       MTU_CRn_32BITS | MTU_CRn_ENA,
	       mtu_base + MTU_CR(1));

	return 0;
}
/*
 * Restart the timer-1 clockevent, e.g. after resume.  In periodic mode the
 * timer is reprogrammed directly; in one-shot mode a dummy event is queued
 * so the tick machinery re-arms itself from the interrupt.
 */
void nmdk_clkevt_reset(void)
{
	if (clkevt_periodic) {
		/* Timer: configure load and background-load, and fire it up */
		writel(nmdk_cycle, mtu_base + MTU_LR(1));
		writel(nmdk_cycle, mtu_base + MTU_BGLR(1));	/* reload value at wrap */
		writel(MTU_CRn_PERIODIC | clk_prescale |
		       MTU_CRn_32BITS | MTU_CRn_ENA,
		       mtu_base + MTU_CR(1));
		writel(1 << 1, mtu_base + MTU_IMSC);	/* unmask timer 1 irq */
	} else {
		/* Generate an interrupt to start the clockevent again */
		(void) nmdk_clkevt_next(nmdk_cycle, NULL);
	}
}
/*
 * Mode-switch handler for the timer-1 clockevent.  Periodic mode records
 * the mode and (re)starts the timer; one-shot only records the mode;
 * shutdown/unused mask the interrupt and stop the counter; resume needs
 * no action here (handled by nmdk_clkevt_resume()).
 */
static void nmdk_clkevt_mode(enum clock_event_mode mode,
			     struct clock_event_device *dev)
{
	if (mode == CLOCK_EVT_MODE_PERIODIC) {
		clkevt_periodic = true;
		nmdk_clkevt_reset();
	} else if (mode == CLOCK_EVT_MODE_ONESHOT) {
		clkevt_periodic = false;
	} else if (mode == CLOCK_EVT_MODE_SHUTDOWN ||
		   mode == CLOCK_EVT_MODE_UNUSED) {
		writel(0, mtu_base + MTU_IMSC);		/* mask the interrupt */
		writel(0, mtu_base + MTU_CR(1));	/* disable timer */
		writel(0xffffffff, mtu_base + MTU_LR(1));	/* park at a high value */
	}
}
/* (Re)start timer 0, the free-running clocksource, from scratch. */
void nmdk_clksrc_reset(void)
{
	/* Disable */
	writel(0, mtu_base + MTU_CR(0));

	/* ClockSource: configure load and background-load, and fire it up */
	writel(nmdk_cycle, mtu_base + MTU_LR(0));
	writel(nmdk_cycle, mtu_base + MTU_BGLR(0));

	writel(clk_prescale | MTU_CRn_32BITS | MTU_CRn_ENA,
	       mtu_base + MTU_CR(0));
}
/* Resume hook: reprogram both the clockevent and the clocksource, since
 * the MTU block loses its state across suspend. */
static void nmdk_clkevt_resume(struct clock_event_device *cedev)
{
	nmdk_clkevt_reset();
	nmdk_clksrc_reset();
}
/* Clockevent device backed by MTU timer 1; supports both tick modes. */
static struct clock_event_device nmdk_clkevt = {
	.name		= "mtu_1",
	.features	= CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_PERIODIC,
	.rating		= 200,
	.set_mode	= nmdk_clkevt_mode,
	.set_next_event	= nmdk_clkevt_next,
	.resume		= nmdk_clkevt_resume,
};
/*
* IRQ Handler for timer 1 of the MTU block.
*/
/* IRQ handler for MTU timer 1: ack the interrupt and run the tick handler.
 * dev_id is &nmdk_clkevt (see nmdk_timer_irq below). */
static irqreturn_t nmdk_timer_interrupt(int irq, void *dev_id)
{
	struct clock_event_device *evdev = dev_id;

	writel(1 << 1, mtu_base + MTU_ICR); /* Interrupt clear reg */
	evdev->event_handler(evdev);
	return IRQ_HANDLED;
}
/* irqaction wired up in nmdk_timer_init(); passes the clockevent as dev_id. */
static struct irqaction nmdk_timer_irq = {
	.name		= "Nomadik Timer Tick",
	.flags		= IRQF_DISABLED | IRQF_TIMER,
	.handler	= nmdk_timer_interrupt,
	.dev_id		= &nmdk_clkevt,
};
/*
 * Board-level entry point: map/claim the MTU block, pick a prescaler,
 * register timer 0 as clocksource (and optionally sched_clock) and timer 1
 * as the tick clockevent, and hook up the timer-based delay loop.
 *
 * base - already-ioremapped MTU registers; irq - timer 1 interrupt line.
 */
void __init nmdk_timer_init(void __iomem *base, int irq)
{
	unsigned long rate;
	struct clk *clk0, *pclk0;

	mtu_base = base;

	/* Bus (apb_pclk) and functional clocks must both be running;
	 * failures here are unrecoverable at early boot, hence BUG_ON. */
	pclk0 = clk_get_sys("mtu0", "apb_pclk");
	BUG_ON(IS_ERR(pclk0));
	BUG_ON(clk_prepare(pclk0) < 0);
	BUG_ON(clk_enable(pclk0) < 0);

	clk0 = clk_get_sys("mtu0", NULL);
	BUG_ON(IS_ERR(clk0));
	BUG_ON(clk_prepare(clk0) < 0);
	BUG_ON(clk_enable(clk0) < 0);

	/*
	 * Tick rate is 2.4MHz for Nomadik and 2.4Mhz, 100MHz or 133 MHz
	 * for ux500.
	 * Use a divide-by-16 counter if the tick rate is more than 32MHz.
	 * At 32 MHz, the timer (with 32 bit counter) can be programmed
	 * to wake-up at a max 127s a head in time. Dividing a 2.4 MHz timer
	 * with 16 gives too low timer resolution.
	 */
	rate = clk_get_rate(clk0);
	if (rate > 32000000) {
		rate /= 16;
		clk_prescale = MTU_CRn_PRESCALE_16;
	} else {
		clk_prescale = MTU_CRn_PRESCALE_1;
	}

	/* Cycles for periodic mode */
	nmdk_cycle = DIV_ROUND_CLOSEST(rate, HZ);


	/* Timer 0 is the free running clocksource */
	nmdk_clksrc_reset();

	if (clocksource_mmio_init(mtu_base + MTU_VAL(0), "mtu_0",
			rate, 200, 32, clocksource_mmio_readl_down))
		pr_err("timer: failed to initialize clock source %s\n",
		       "mtu_0");

#ifdef CONFIG_CLKSRC_NOMADIK_MTU_SCHED_CLOCK
	setup_sched_clock(nomadik_read_sched_clock, 32, rate);
#endif

	/* Timer 1 is used for events, register irq and clockevents */
	setup_irq(irq, &nmdk_timer_irq);
	nmdk_clkevt.cpumask = cpumask_of(0);
	nmdk_clkevt.irq = irq;
	clockevents_config_and_register(&nmdk_clkevt, rate, 2, 0xffffffffU);

	/* Use the MTU as backend for udelay() instead of the CPU loop. */
	mtu_delay_timer.read_current_timer = &nmdk_timer_read_current_timer;
	mtu_delay_timer.freq = rate;
	register_current_timer_delay(&mtu_delay_timer);
}
| gpl-2.0 |
xcaliburinhand/I9000-Reoriented-for-I897-Ginger | net/rds/transport.c | 1415 | 3475 | /*
* Copyright (c) 2006 Oracle. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/in.h>
#include "rds.h"
#include "loop.h"
static struct rds_transport *transports[RDS_TRANS_COUNT];
static DECLARE_RWSEM(rds_trans_sem);
/*
 * Register a transport in its slot of the transports[] table, keyed by
 * trans->t_type.  A duplicate registration is logged but not rejected:
 * the function always returns 0.
 */
int rds_trans_register(struct rds_transport *trans)
{
	BUG_ON(strlen(trans->t_name) + 1 > TRANSNAMSIZ);

	down_write(&rds_trans_sem);

	if (!transports[trans->t_type]) {
		transports[trans->t_type] = trans;
		printk(KERN_INFO "Registered RDS/%s transport\n", trans->t_name);
	} else {
		printk(KERN_ERR "RDS Transport type %d already registered\n",
		       trans->t_type);
	}

	up_write(&rds_trans_sem);

	return 0;
}
EXPORT_SYMBOL_GPL(rds_trans_register);
/* Remove a transport from its transports[] slot and log the event. */
void rds_trans_unregister(struct rds_transport *trans)
{
	down_write(&rds_trans_sem);

	printk(KERN_INFO "Unregistered RDS/%s transport\n", trans->t_name);
	transports[trans->t_type] = NULL;

	up_write(&rds_trans_sem);
}
/*
 * Pick the transport for a local address: loopback addresses always use
 * the loop transport; otherwise the first registered transport whose
 * laddr_check() accepts the address (returns 0) wins.  Returns NULL if
 * no transport claims the address.
 */
struct rds_transport *rds_trans_get_preferred(__be32 addr)
{
	struct rds_transport *found = NULL;
	int i;

	if (IN_LOOPBACK(ntohl(addr)))
		return &rds_loop_transport;

	down_read(&rds_trans_sem);
	for (i = 0; i < RDS_TRANS_COUNT && !found; i++) {
		struct rds_transport *trans = transports[i];

		if (trans && trans->laddr_check(addr) == 0)
			found = trans;
	}
	up_read(&rds_trans_sem);

	return found;
}
/*
* This returns the number of stats entries in the snapshot and only
* copies them using the iter if there is enough space for them. The
* caller passes in the global stats so that we can size and copy while
* holding the lock.
*/
unsigned int rds_trans_stats_info_copy(struct rds_info_iterator *iter,
unsigned int avail)
{
struct rds_transport *trans;
unsigned int total = 0;
unsigned int part;
int i;
rds_info_iter_unmap(iter);
down_read(&rds_trans_sem);
for (i = 0; i < RDS_TRANS_COUNT; i++)
{
trans = transports[i];
if (!trans || !trans->stats_info_copy)
continue;
part = trans->stats_info_copy(iter, avail);
avail -= min(avail, part);
total += part;
}
up_read(&rds_trans_sem);
return total;
}
| gpl-2.0 |
Dm47021/Android_kernel_f6mt_aosp_jb-rebase | drivers/net/wireless/bcm4329/dhd_linux_sched.c | 4487 | 1593 | /*
* Expose some of the kernel scheduler routines
*
* Copyright (C) 1999-2010, Broadcom Corporation
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
* under the terms of the GNU General Public License version 2 (the "GPL"),
* available at http://www.broadcom.com/licenses/GPLv2.php, with the
* following added to such license:
*
* As a special exception, the copyright holders of this software give you
* permission to link this software with independent modules, and to copy and
* distribute the resulting executable under terms of your choice, provided that
* you also meet, for each linked independent module, the terms and conditions of
* the license of that module. An independent module is a module which is not
* derived from this software. The special exception does not apply to any
* modifications of the software.
*
* Notwithstanding the above, under no circumstances may you combine this
* software in any way with any other Broadcom software provided under a license
* other than the GPL, without Broadcom's express prior written consent.
*
* $Id: dhd_linux_sched.c,v 1.1.34.1.6.1 2009/01/16 01:17:40 Exp $
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linuxver.h>
/*
 * Thin wrapper around sched_setscheduler() for kernels >= 2.6.0; on
 * older kernels it is a no-op that reports success (rc == 0).
 */
int setScheduler(struct task_struct *p, int policy, struct sched_param *param)
{
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0))
	return sched_setscheduler(p, policy, param);
#else
	return 0;
#endif /* LinuxVer */
}
| gpl-2.0 |
badscimmia/Ronflex-Kernel | fs/reiserfs/prints.c | 7303 | 21190 | /*
* Copyright 2000 by Hans Reiser, licensing governed by reiserfs/README
*/
#include <linux/time.h>
#include <linux/fs.h>
#include "reiserfs.h"
#include <linux/string.h>
#include <linux/buffer_head.h>
#include <stdarg.h>
static char error_buf[1024];
static char fmt_buf[1024];
static char off_buf[80];
/*
 * Render a cpu_key's offset into the shared static off_buf.  Directory
 * keys print as hash(generation); all other types print the raw offset
 * in hex.  Not reentrant — callers are serialized elsewhere.
 */
static char *reiserfs_cpu_offset(struct cpu_key *key)
{
	if (cpu_key_k_type(key) != TYPE_DIRENTRY) {
		sprintf(off_buf, "0x%Lx",
			(unsigned long long)cpu_key_k_offset(key));
	} else {
		sprintf(off_buf, "%Lu(%Lu)",
			(unsigned long long)
			GET_HASH_VALUE(cpu_key_k_offset(key)),
			(unsigned long long)
			GET_GENERATION_NUMBER(cpu_key_k_offset(key)));
	}
	return off_buf;
}
/*
 * Little-endian counterpart of reiserfs_cpu_offset(): render the offset
 * of an on-disk key into the shared static off_buf.
 */
static char *le_offset(struct reiserfs_key *key)
{
	int version = le_key_version(key);

	if (le_key_k_type(version, key) != TYPE_DIRENTRY) {
		sprintf(off_buf, "0x%Lx",
			(unsigned long long)le_key_k_offset(version, key));
	} else {
		sprintf(off_buf, "%Lu(%Lu)",
			(unsigned long long)
			GET_HASH_VALUE(le_key_k_offset(version, key)),
			(unsigned long long)
			GET_GENERATION_NUMBER(le_key_k_offset(version, key)));
	}
	return off_buf;
}
/* Human-readable name for a cpu_key's item type. */
static char *cpu_type(struct cpu_key *key)
{
	switch (cpu_key_k_type(key)) {
	case TYPE_STAT_DATA:
		return "SD";
	case TYPE_DIRENTRY:
		return "DIR";
	case TYPE_DIRECT:
		return "DIRECT";
	case TYPE_INDIRECT:
		return "IND";
	default:
		return "UNKNOWN";
	}
}
/* Human-readable name for an on-disk (little-endian) key's item type. */
static char *le_type(struct reiserfs_key *key)
{
	int version = le_key_version(key);

	switch (le_key_k_type(version, key)) {
	case TYPE_STAT_DATA:
		return "SD";
	case TYPE_DIRENTRY:
		return "DIR";
	case TYPE_DIRECT:
		return "DIRECT";
	case TYPE_INDIRECT:
		return "IND";
	default:
		return "UNKNOWN";
	}
}
/* %k */
/* %k handler: format an on-disk key as [dirid objectid offset type]. */
static void sprintf_le_key(char *buf, struct reiserfs_key *key)
{
	if (!key) {
		sprintf(buf, "[NULL]");
		return;
	}
	sprintf(buf, "[%d %d %s %s]", le32_to_cpu(key->k_dir_id),
		le32_to_cpu(key->k_objectid), le_offset(key), le_type(key));
}
/* %K */
/* %K handler: format a cpu key as [dirid objectid offset type]. */
static void sprintf_cpu_key(char *buf, struct cpu_key *key)
{
	if (!key) {
		sprintf(buf, "[NULL]");
		return;
	}
	sprintf(buf, "[%d %d %s %s]", key->on_disk_key.k_dir_id,
		key->on_disk_key.k_objectid, reiserfs_cpu_offset(key),
		cpu_type(key));
}
/* %a handler: format a directory-entry head's fields. */
static void sprintf_de_head(char *buf, struct reiserfs_de_head *deh)
{
	if (!deh) {
		sprintf(buf, "[NULL]");
		return;
	}
	sprintf(buf,
		"[offset=%d dir_id=%d objectid=%d location=%d state=%04x]",
		deh_offset(deh), deh_dir_id(deh), deh_objectid(deh),
		deh_location(deh), deh_state(deh));
}
/* %h handler: format an item head (version tag, key, length/location). */
static void sprintf_item_head(char *buf, struct item_head *ih)
{
	if (!ih) {
		sprintf(buf, "[NULL]");
		return;
	}
	strcpy(buf, (ih_version(ih) == KEY_FORMAT_3_6) ? "*3.6* " : "*3.5*");
	sprintf_le_key(buf + strlen(buf), &(ih->ih_key));
	sprintf(buf + strlen(buf), ", item_len %d, item_location %d, "
		"free_space(entry_count) %d",
		ih_item_len(ih), ih_location(ih), ih_free_space(ih));
}
/* %t handler: format a directory entry, truncating the name to 19 chars. */
static void sprintf_direntry(char *buf, struct reiserfs_dir_entry *de)
{
	char name[20];
	int len = de->de_namelen > 19 ? 19 : de->de_namelen;

	memcpy(name, de->de_name, len);
	name[len] = 0;
	sprintf(buf, "\"%s\"==>[%d %d]", name, de->de_dir_id, de->de_objectid);
}
/* %z handler: one-line summary of a tree node's block head. */
static void sprintf_block_head(char *buf, struct buffer_head *bh)
{
	sprintf(buf, "level=%d, nr_items=%d, free_space=%d rdkey ",
		B_LEVEL(bh), B_NR_ITEMS(bh), B_FREE_SPACE(bh));
}
/* %b handler: dump a buffer_head's device, geometry and state flags. */
static void sprintf_buffer_head(char *buf, struct buffer_head *bh)
{
	char b[BDEVNAME_SIZE];
	sprintf(buf,
		"dev %s, size %zd, blocknr %llu, count %d, state 0x%lx, page %p, (%s, %s, %s)",
		bdevname(bh->b_bdev, b), bh->b_size,
		(unsigned long long)bh->b_blocknr, atomic_read(&(bh->b_count)),
		bh->b_state, bh->b_page,
		buffer_uptodate(bh) ? "UPTODATE" : "!UPTODATE",
		buffer_dirty(bh) ? "DIRTY" : "CLEAN",
		buffer_locked(bh) ? "LOCKED" : "UNLOCKED");
}
/* %y handler: format a disk_child (child block number and size). */
static void sprintf_disk_child(char *buf, struct disk_child *dc)
{
	sprintf(buf, "[dc_number=%d, dc_size=%u]", dc_block_number(dc),
		dc_size(dc));
}
/*
 * Scan `fmt` for the first reiserfs-specific conversion specifier
 * (%k %K %h %t %z %b %y %a).  On a hit, store the specifier character
 * in *what and return a pointer to its '%'; return NULL when none is
 * found.  Note: "%%" is not treated specially, matching the original.
 */
static char *is_there_reiserfs_struct(char *fmt, int *what)
{
	char *p;

	for (p = strchr(fmt, '%'); p != NULL; p = strchr(p + 1, '%')) {
		switch (p[1]) {
		case 'k':
		case 'K':
		case 'h':
		case 't':
		case 'z':
		case 'b':
		case 'y':
		case 'a':
			*what = p[1];
			return p;
		}
	}
	return NULL;
}
/* debugging reiserfs we used to print out a lot of different
variables, like keys, item headers, buffer heads etc. Values of
most fields matter. So it took a long time just to write
appropriative printk. With this reiserfs_warning you can use format
specification for complex structures like you used to do with
printfs for integers, doubles and pointers. For instance, to print
out key structure you have to write just:
reiserfs_warning ("bad key %k", key);
instead of
printk ("bad key %lu %lu %lu %lu", key->k_dir_id, key->k_objectid,
key->k_offset, key->k_uniqueness);
*/
static DEFINE_SPINLOCK(error_lock);
/*
 * Expand `fmt` plus its va_list into the static error_buf, handling the
 * reiserfs-specific %k/%K/%h/%t/%y/%z/%b/%a specifiers by splicing the
 * corresponding sprintf_* helper output between vsprintf'd segments.
 * The static buffers are guarded by error_lock; note there is no bounds
 * checking on error_buf (1024 bytes), so callers must keep messages short.
 */
static void prepare_error_buf(const char *fmt, va_list args)
{
	char *fmt1 = fmt_buf;
	char *k;
	char *p = error_buf;
	int what;
	spin_lock(&error_lock);
	/* Work on a private copy: the custom specifiers are cut out of it. */
	strcpy(fmt1, fmt);
	while ((k = is_there_reiserfs_struct(fmt1, &what)) != NULL) {
		/* Terminate before the custom %X, print the plain prefix. */
		*k = 0;
		p += vsprintf(p, fmt1, args);
		/* vsprintf above consumed the args for the prefix; the next
		   va_arg therefore belongs to this custom specifier. */
		switch (what) {
		case 'k':
			sprintf_le_key(p, va_arg(args, struct reiserfs_key *));
			break;
		case 'K':
			sprintf_cpu_key(p, va_arg(args, struct cpu_key *));
			break;
		case 'h':
			sprintf_item_head(p, va_arg(args, struct item_head *));
			break;
		case 't':
			sprintf_direntry(p,
					 va_arg(args,
						struct reiserfs_dir_entry *));
			break;
		case 'y':
			sprintf_disk_child(p,
					   va_arg(args, struct disk_child *));
			break;
		case 'z':
			sprintf_block_head(p,
					   va_arg(args, struct buffer_head *));
			break;
		case 'b':
			sprintf_buffer_head(p,
					    va_arg(args, struct buffer_head *));
			break;
		case 'a':
			sprintf_de_head(p,
					va_arg(args,
					       struct reiserfs_de_head *));
			break;
		}
		p += strlen(p);
		/* Skip past the two-character "%X" we just handled. */
		fmt1 = k + 2;
	}
	/* Print whatever plain format text remains after the last %X. */
	vsprintf(p, fmt1, args);
	spin_unlock(&error_lock);
}
/* in addition to usual conversion specifiers this accepts reiserfs
specific conversion specifiers:
%k to print little endian key,
%K to print cpu key,
%h to print item_head,
%t to print directory entry
%z to print block head (arg must be struct buffer_head *
%b to print buffer_head
*/
/*
 * Capture the calling function's trailing varargs into error_buf via
 * prepare_error_buf().  Only usable inside a varargs function whose
 * last named parameter is `fmt`, since va_start() refers to it.
 */
#define do_reiserfs_warning(fmt)\
{\
	va_list args;\
	va_start( args, fmt );\
	prepare_error_buf( fmt, args );\
	va_end( args );\
}
/*
 * Log a REISERFS warning; `sb` may be NULL when no superblock context
 * is available, and `id` (the maintainer error id) is optional.
 */
void __reiserfs_warning(struct super_block *sb, const char *id,
			const char *function, const char *fmt, ...)
{
	do_reiserfs_warning(fmt);
	if (!sb) {
		printk(KERN_WARNING "REISERFS warning: %s%s%s: %s\n",
		       id ? id : "", id ? " " : "", function, error_buf);
		return;
	}
	printk(KERN_WARNING "REISERFS warning (device %s): %s%s%s: %s\n",
	       sb->s_id, id ? id : "", id ? " " : "", function, error_buf);
}
/* No newline.. reiserfs_info calls can be followed by printk's */
/* Informational message; no trailing newline so printk's may follow. */
void reiserfs_info(struct super_block *sb, const char *fmt, ...)
{
	do_reiserfs_warning(fmt);
	if (!sb) {
		printk(KERN_NOTICE "REISERFS %s:", error_buf);
		return;
	}
	printk(KERN_NOTICE "REISERFS (device %s): %s", sb->s_id, error_buf);
}
/* No newline.. reiserfs_printk calls can be followed by printk's */
/*
 * Print the expanded message; no trailing newline so printk's may follow.
 *
 * Fix: the old code did printk(error_buf), using already-formatted data
 * as the printk format string.  Any '%' surviving in the message (e.g.
 * from a file name or key dump) would be re-interpreted as a conversion
 * specifier and read garbage varargs — a classic format-string bug
 * (CERT FIO30-C).  Print it through a literal "%s" instead.
 */
static void reiserfs_printk(const char *fmt, ...)
{
	do_reiserfs_warning(fmt);
	printk("%s", error_buf);
}
/*
 * Debug message, compiled to a no-op unless CONFIG_REISERFS_CHECK is
 * set.  The `level` argument is accepted but not used here.
 */
void reiserfs_debug(struct super_block *s, int level, const char *fmt, ...)
{
#ifdef CONFIG_REISERFS_CHECK
	do_reiserfs_warning(fmt);
	if (!s) {
		printk(KERN_DEBUG "REISERFS debug: %s\n", error_buf);
		return;
	}
	printk(KERN_DEBUG "REISERFS debug (device %s): %s\n",
	       s->s_id, error_buf);
#endif
}
/* The format:
maintainer-errorid: [function-name:] message
where errorid is unique to the maintainer and function-name is
optional, is recommended, so that anyone can easily find the bug
with a simple grep for the short to type string
maintainer-errorid. Don't bother with reusing errorids, there are
lots of numbers out there.
Example:
reiserfs_panic(
p_sb, "reiser-29: reiserfs_new_blocknrs: "
"one of search_start or rn(%d) is equal to MAX_B_NUM,"
"which means that we are optimizing location based on the bogus location of a temp buffer (%p).",
rn, bh
);
Regular panic()s sometimes clear the screen before the message can
be read, thus the need for the while loop.
Numbering scheme for panic used by Vladimir and Anatoly( Hans completely ignores this scheme, and considers it
pointless complexity):
panics in reiserfs.h have numbers from 1000 to 1999
super.c 2000 to 2999
preserve.c (unused) 3000 to 3999
bitmap.c 4000 to 4999
stree.c 5000 to 5999
prints.c 6000 to 6999
namei.c 7000 to 7999
fix_nodes.c 8000 to 8999
dir.c 9000 to 9999
lbalance.c 10000 to 10999
ibalance.c 11000 to 11999 not ready
do_balan.c 12000 to 12999
inode.c 13000 to 13999
file.c 14000 to 14999
objectid.c 15000 - 15999
buffer.c 16000 - 16999
symlink.c 17000 - 17999
. */
/*
 * Format the message into error_buf and panic the machine.  With
 * CONFIG_REISERFS_CHECK a stack trace is dumped first.  Never returns.
 */
void __reiserfs_panic(struct super_block *sb, const char *id,
		      const char *function, const char *fmt, ...)
{
	do_reiserfs_warning(fmt);
#ifdef CONFIG_REISERFS_CHECK
	dump_stack();
#endif
	if (!sb)
		panic(KERN_WARNING "REISERFS panic: %s%s%s: %s\n",
		      id ? id : "", id ? " " : "", function, error_buf);
	panic(KERN_WARNING "REISERFS panic (device %s): %s%s%s: %s\n",
	      sb->s_id, id ? id : "", id ? " " : "", function, error_buf);
}
/*
 * Report a filesystem error.  Depending on mount options this either
 * panics (reiserfs_error_panic) or logs the error, remounts the fs
 * read-only and aborts the journal.  A NULL sb is a caller bug.
 */
void __reiserfs_error(struct super_block *sb, const char *id,
		      const char *function, const char *fmt, ...)
{
	do_reiserfs_warning(fmt);
	BUG_ON(sb == NULL);
	/* NOTE(review): reiserfs_error_panic() presumably reflects an
	   "errors=panic"-style mount option — confirm in reiserfs.h. */
	if (reiserfs_error_panic(sb))
		__reiserfs_panic(sb, id, function, error_buf);
	if (id && id[0])
		printk(KERN_CRIT "REISERFS error (device %s): %s %s: %s\n",
		       sb->s_id, id, function, error_buf);
	else
		printk(KERN_CRIT "REISERFS error (device %s): %s: %s\n",
		       sb->s_id, function, error_buf);
	/* Already read-only: nothing more to protect. */
	if (sb->s_flags & MS_RDONLY)
		return;
	reiserfs_info(sb, "Remounting filesystem read-only\n");
	sb->s_flags |= MS_RDONLY;
	reiserfs_abort_journal(sb, -EIO);
}
/*
 * Abort the filesystem: panic if the mount options demand it, otherwise
 * log, force read-only and abort the journal with `errno`.  A second
 * abort on an already-aborted journal is silently ignored.
 */
void reiserfs_abort(struct super_block *sb, int errno, const char *fmt, ...)
{
	do_reiserfs_warning(fmt);
	if (reiserfs_error_panic(sb)) {
		panic(KERN_CRIT "REISERFS panic (device %s): %s\n", sb->s_id,
		      error_buf);
	}
	if (reiserfs_is_journal_aborted(SB_JOURNAL(sb)))
		return;
	printk(KERN_CRIT "REISERFS abort (device %s): %s\n", sb->s_id,
	       error_buf);
	sb->s_flags |= MS_RDONLY;
	reiserfs_abort_journal(sb, errno);
}
/* this prints internal nodes (4 keys/items in line) (dc_number,
dc_size)[k_dirid, k_objectid, k_offset, k_uniqueness](dc_number,
dc_size)...*/
/*
 * Print an internal (non-leaf) node: its block head, then alternating
 * child pointers and delimiting keys in the range [first, last).
 * first == -1 means "print everything".  Returns 1 (and prints nothing)
 * when the buffer is not an internal node, 0 otherwise.
 */
static int print_internal(struct buffer_head *bh, int first, int last)
{
	struct reiserfs_key *key;
	struct disk_child *dc;
	int i;
	int from, to;
	if (!B_IS_KEYS_LEVEL(bh))
		return 1;
	check_internal(bh);
	if (first == -1) {
		from = 0;
		to = B_NR_ITEMS(bh);
	} else {
		from = first;
		/* Clamp the upper bound to the node's item count. */
		to = last < B_NR_ITEMS(bh) ? last : B_NR_ITEMS(bh);
	}
	reiserfs_printk("INTERNAL NODE (%ld) contains %z\n", bh->b_blocknr, bh);
	/* An internal node has one more child pointer than keys, so the
	   first pointer is printed before the key/pointer loop. */
	dc = B_N_CHILD(bh, from);
	reiserfs_printk("PTR %d: %y ", from, dc);
	for (i = from, key = B_N_PDELIM_KEY(bh, from), dc++; i < to;
	     i++, key++, dc++) {
		reiserfs_printk("KEY %d: %k PTR %d: %y ", i, key, i + 1, dc);
		if (i && i % 4 == 0)
			printk("\n");
	}
	printk("\n");
	return 0;
}
/*
 * Print a leaf node.  Without PRINT_LEAF_ITEMS in print_mode only the
 * first and last item keys are shown; with it, every item head in the
 * clamped [first, last) range is printed (and its body, via
 * op_print_item).  Returns 1 when the buffer is not a leaf, 0 otherwise.
 */
static int print_leaf(struct buffer_head *bh, int print_mode, int first,
		      int last)
{
	struct block_head *blkh;
	struct item_head *ih;
	int i, nr;
	int from, to;
	if (!B_IS_ITEMS_LEVEL(bh))
		return 1;
	check_leaf(bh);
	blkh = B_BLK_HEAD(bh);
	ih = B_N_PITEM_HEAD(bh, 0);
	nr = blkh_nr_item(blkh);
	printk
	    ("\n===================================================================\n");
	reiserfs_printk("LEAF NODE (%ld) contains %z\n", bh->b_blocknr, bh);
	if (!(print_mode & PRINT_LEAF_ITEMS)) {
		reiserfs_printk("FIRST ITEM_KEY: %k, LAST ITEM KEY: %k\n",
				&(ih->ih_key), &((ih + nr - 1)->ih_key));
		return 0;
	}
	/* Clamp [first, last) to the valid item range; out-of-range
	   bounds fall back to the whole node. */
	if (first < 0 || first > nr - 1)
		from = 0;
	else
		from = first;
	if (last < 0 || last > nr)
		to = nr;
	else
		to = last;
	ih += from;
	printk
	    ("-------------------------------------------------------------------------------\n");
	printk
	    ("|##| type | key | ilen | free_space | version | loc |\n");
	for (i = from; i < to; i++, ih++) {
		printk
		    ("-------------------------------------------------------------------------------\n");
		reiserfs_printk("|%2d| %h |\n", i, ih);
		if (print_mode & PRINT_LEAF_ITEMS)
			op_print_item(ih, B_I_PITEM(bh, ih));
	}
	printk
	    ("===================================================================\n");
	return 0;
}
/* Map a hash-function code from the super block to its name. */
char *reiserfs_hashname(int code)
{
	switch (code) {
	case YURA_HASH:
		return "rupasov";
	case TEA_HASH:
		return "tea";
	case R5_HASH:
		return "r5";
	default:
		return "unknown";
	}
}
/* return 1 if this is not super block */
/*
 * Dump a reiserfs super block in human-readable form.  Returns 1 (and
 * prints nothing) when the buffer does not contain a recognizable 3.5,
 * 3.6 or "jr" (relocated-journal) super block, 0 otherwise.
 */
static int print_super_block(struct buffer_head *bh)
{
	struct reiserfs_super_block *rs =
	    (struct reiserfs_super_block *)(bh->b_data);
	int skipped, data_blocks;
	char *version;
	char b[BDEVNAME_SIZE];
	if (is_reiserfs_3_5(rs)) {
		version = "3.5";
	} else if (is_reiserfs_3_6(rs)) {
		version = "3.6";
	} else if (is_reiserfs_jr(rs)) {
		version = ((sb_version(rs) == REISERFS_VERSION_2) ?
			   "3.6" : "3.5");
	} else {
		return 1;
	}
	printk("%s\'s super block is in block %llu\n", bdevname(bh->b_bdev, b),
	       (unsigned long long)bh->b_blocknr);
	printk("Reiserfs version %s\n", version);
	printk("Block count %u\n", sb_block_count(rs));
	printk("Blocksize %d\n", sb_blocksize(rs));
	printk("Free blocks %u\n", sb_free_blocks(rs));
	// FIXME: this would be confusing if
	// someone stores reiserfs super block in some data block ;)
	// skipped = (bh->b_blocknr * bh->b_size) / sb_blocksize(rs);
	skipped = bh->b_blocknr;
	/* Everything that is neither skipped, bitmap, journal/reserved,
	   super block nor free is accounted as data. */
	data_blocks = sb_block_count(rs) - skipped - 1 - sb_bmap_nr(rs) -
	    (!is_reiserfs_jr(rs) ? sb_jp_journal_size(rs) +
	     1 : sb_reserved_for_journal(rs)) - sb_free_blocks(rs);
	printk
	    ("Busy blocks (skipped %d, bitmaps - %d, journal (or reserved) blocks - %d\n"
	     "1 super block, %d data blocks\n", skipped, sb_bmap_nr(rs),
	     (!is_reiserfs_jr(rs) ? (sb_jp_journal_size(rs) + 1) :
	      sb_reserved_for_journal(rs)), data_blocks);
	printk("Root block %u\n", sb_root_block(rs));
	printk("Journal block (first) %d\n", sb_jp_journal_1st_block(rs));
	printk("Journal dev %d\n", sb_jp_journal_dev(rs));
	printk("Journal orig size %d\n", sb_jp_journal_size(rs));
	printk("FS state %d\n", sb_fs_state(rs));
	printk("Hash function \"%s\"\n",
	       reiserfs_hashname(sb_hash_function_code(rs)));
	printk("Tree height %d\n", sb_tree_height(rs));
	return 0;
}
/*
 * Dump a journal descriptor block.  Returns 1 when the block lacks the
 * journal descriptor magic, 0 after printing its transaction fields.
 */
static int print_desc_block(struct buffer_head *bh)
{
	struct reiserfs_journal_desc *desc;
	if (memcmp(get_journal_desc_magic(bh), JOURNAL_DESC_MAGIC, 8))
		return 1;
	desc = (struct reiserfs_journal_desc *)(bh->b_data);
	printk("Desc block %llu (j_trans_id %d, j_mount_id %d, j_len %d)",
	       (unsigned long long)bh->b_blocknr, get_desc_trans_id(desc),
	       get_desc_mount_id(desc), get_desc_trans_len(desc));
	return 0;
}
/*
 * print_block(bh, print_mode, first, last): despite the varargs
 * prototype, exactly three ints are read via va_arg, in that order.
 * Each interpreter (leaf, internal, super block, journal descriptor)
 * returns non-zero when the block is not of its kind, so the chain of
 * ifs falls through until one claims the block or all decline.
 */
void print_block(struct buffer_head *bh, ...)	//int print_mode, int first, int last)
{
	va_list args;
	int mode, first, last;
	if (!bh) {
		printk("print_block: buffer is NULL\n");
		return;
	}
	va_start(args, bh);
	mode = va_arg(args, int);
	first = va_arg(args, int);
	last = va_arg(args, int);
	if (print_leaf(bh, mode, first, last))
		if (print_internal(bh, first, last))
			if (print_super_block(bh))
				if (print_desc_block(bh))
					printk
					    ("Block %llu contains unformatted data\n",
					     (unsigned long long)bh->b_blocknr);
	va_end(args);
}
static char print_tb_buf[2048];
/* this stores initial state of tree balance in the print_tb_buf */
/*
 * Snapshot the initial state of a tree balance into the static
 * print_tb_buf so print_cur_tb() can dump it later: per-level buffer
 * table (S/L/R/F/FL/FR/CFL/CFR with block numbers and refcounts),
 * the balance parameters, and the FEB (free buffer) list.
 * Note: appends with sprintf into a fixed 2048-byte buffer — no bounds
 * checking, and no locking on print_tb_buf.
 */
void store_print_tb(struct tree_balance *tb)
{
	int h = 0;
	int i;
	struct buffer_head *tbSh, *tbFh;
	if (!tb)
		return;
	sprintf(print_tb_buf, "\n"
		"BALANCING %d\n"
		"MODE=%c, ITEM_POS=%d POS_IN_ITEM=%d\n"
		"=====================================================================\n"
		"* h * S * L * R * F * FL * FR * CFL * CFR *\n",
		REISERFS_SB(tb->tb_sb)->s_do_balance,
		tb->tb_mode, PATH_LAST_POSITION(tb->tb_path),
		tb->tb_path->pos_in_item);
	for (h = 0; h < ARRAY_SIZE(tb->insert_size); h++) {
		/* Only levels actually present in the path have buffers. */
		if (PATH_H_PATH_OFFSET(tb->tb_path, h) <=
		    tb->tb_path->path_length
		    && PATH_H_PATH_OFFSET(tb->tb_path,
					  h) > ILLEGAL_PATH_ELEMENT_OFFSET) {
			tbSh = PATH_H_PBUFFER(tb->tb_path, h);
			tbFh = PATH_H_PPARENT(tb->tb_path, h);
		} else {
			tbSh = NULL;
			tbFh = NULL;
		}
		sprintf(print_tb_buf + strlen(print_tb_buf),
			"* %d * %3lld(%2d) * %3lld(%2d) * %3lld(%2d) * %5lld * %5lld * %5lld * %5lld * %5lld *\n",
			h,
			(tbSh) ? (long long)(tbSh->b_blocknr) : (-1LL),
			(tbSh) ? atomic_read(&(tbSh->b_count)) : -1,
			(tb->L[h]) ? (long long)(tb->L[h]->b_blocknr) : (-1LL),
			(tb->L[h]) ? atomic_read(&(tb->L[h]->b_count)) : -1,
			(tb->R[h]) ? (long long)(tb->R[h]->b_blocknr) : (-1LL),
			(tb->R[h]) ? atomic_read(&(tb->R[h]->b_count)) : -1,
			(tbFh) ? (long long)(tbFh->b_blocknr) : (-1LL),
			(tb->FL[h]) ? (long long)(tb->FL[h]->
						  b_blocknr) : (-1LL),
			(tb->FR[h]) ? (long long)(tb->FR[h]->
						  b_blocknr) : (-1LL),
			(tb->CFL[h]) ? (long long)(tb->CFL[h]->
						   b_blocknr) : (-1LL),
			(tb->CFR[h]) ? (long long)(tb->CFR[h]->
						   b_blocknr) : (-1LL));
	}
	sprintf(print_tb_buf + strlen(print_tb_buf),
		"=====================================================================\n"
		"* h * size * ln * lb * rn * rb * blkn * s0 * s1 * s1b * s2 * s2b * curb * lk * rk *\n"
		"* 0 * %4d * %2d * %2d * %2d * %2d * %4d * %2d * %2d * %3d * %2d * %3d * %4d * %2d * %2d *\n",
		tb->insert_size[0], tb->lnum[0], tb->lbytes, tb->rnum[0],
		tb->rbytes, tb->blknum[0], tb->s0num, tb->s1num, tb->s1bytes,
		tb->s2num, tb->s2bytes, tb->cur_blknum, tb->lkey[0],
		tb->rkey[0]);
	/* this prints balance parameters for non-leaf levels */
	h = 0;
	do {
		h++;
		sprintf(print_tb_buf + strlen(print_tb_buf),
			"* %d * %4d * %2d *     * %2d *     * %2d *\n",
			h, tb->insert_size[h], tb->lnum[h], tb->rnum[h],
			tb->blknum[h]);
	} while (tb->insert_size[h]);
	sprintf(print_tb_buf + strlen(print_tb_buf),
		"=====================================================================\n"
		"FEB list: ");
	/* print FEB list (list of buffers in form (bh (b_blocknr, b_count), that will be used for new nodes) */
	h = 0;
	for (i = 0; i < ARRAY_SIZE(tb->FEB); i++)
		sprintf(print_tb_buf + strlen(print_tb_buf),
			"%p (%llu %d)%s", tb->FEB[i],
			tb->FEB[i] ? (unsigned long long)tb->FEB[i]->
			b_blocknr : 0ULL,
			tb->FEB[i] ? atomic_read(&(tb->FEB[i]->b_count)) : 0,
			(i == ARRAY_SIZE(tb->FEB) - 1) ? "\n" : ", ");
	sprintf(print_tb_buf + strlen(print_tb_buf),
		"======================== the end ====================================\n");
}
/* Print a caller message followed by the state saved by store_print_tb(). */
void print_cur_tb(char *mes)
{
	printk("%s\n%s", mes, print_tb_buf);
}
/*
 * Sanity-check a leaf node's block head: the item count must fit in the
 * block and the recorded free space must not exceed what is left after
 * the block head and item headers.  Panics on violation.
 */
static void check_leaf_block_head(struct buffer_head *bh)
{
	struct block_head *blkh;
	int nr;
	blkh = B_BLK_HEAD(bh);
	nr = blkh_nr_item(blkh);
	if (nr > (bh->b_size - BLKH_SIZE) / IH_SIZE)
		reiserfs_panic(NULL, "vs-6010", "invalid item number %z",
			       bh);
	if (blkh_free_space(blkh) > bh->b_size - BLKH_SIZE - IH_SIZE * nr)
		reiserfs_panic(NULL, "vs-6020", "invalid free space %z",
			       bh);
}
/*
 * Sanity-check an internal node's block head: level in the valid
 * internal range, item count within the block, and free space exactly
 * matching what keys plus child pointers leave over.  Panics on
 * violation.
 */
static void check_internal_block_head(struct buffer_head *bh)
{
	struct block_head *blkh;
	blkh = B_BLK_HEAD(bh);
	if (!(B_LEVEL(bh) > DISK_LEAF_NODE_LEVEL && B_LEVEL(bh) <= MAX_HEIGHT))
		reiserfs_panic(NULL, "vs-6025", "invalid level %z", bh);
	if (B_NR_ITEMS(bh) > (bh->b_size - BLKH_SIZE) / IH_SIZE)
		reiserfs_panic(NULL, "vs-6030", "invalid item number %z", bh);
	if (B_FREE_SPACE(bh) !=
	    bh->b_size - BLKH_SIZE - KEY_SIZE * B_NR_ITEMS(bh) -
	    DC_SIZE * (B_NR_ITEMS(bh) + 1))
		reiserfs_panic(NULL, "vs-6040", "invalid free space %z", bh);
}
/*
 * Validate a leaf buffer: its block head first, then every item via the
 * per-item-type check operation.  A NULL buffer is silently accepted.
 */
void check_leaf(struct buffer_head *bh)
{
	struct item_head *ih;
	int i;

	if (!bh)
		return;
	check_leaf_block_head(bh);
	ih = B_N_PITEM_HEAD(bh, 0);
	for (i = 0; i < B_NR_ITEMS(bh); i++, ih++)
		op_check_item(ih, B_I_PITEM(bh, ih));
}
/* Validate an internal buffer's block head; NULL is silently accepted. */
void check_internal(struct buffer_head *bh)
{
	if (bh)
		check_internal_block_head(bh);
}
/* Session-statistics dump; the body is intentionally compiled out. */
void print_statistics(struct super_block *s)
{
	/*
	   printk ("reiserfs_put_super: session statistics: balances %d, fix_nodes %d, \
	   bmap with search %d, without %d, dir2ind %d, ind2dir %d\n",
	   REISERFS_SB(s)->s_do_balance, REISERFS_SB(s)->s_fix_nodes,
	   REISERFS_SB(s)->s_bmaps, REISERFS_SB(s)->s_bmaps_without_search,
	   REISERFS_SB(s)->s_direct2indirect, REISERFS_SB(s)->s_indirect2direct);
	 */
}
| gpl-2.0 |
savoca/kernel-msm | fs/reiserfs/prints.c | 7303 | 21190 | /*
* Copyright 2000 by Hans Reiser, licensing governed by reiserfs/README
*/
#include <linux/time.h>
#include <linux/fs.h>
#include "reiserfs.h"
#include <linux/string.h>
#include <linux/buffer_head.h>
#include <stdarg.h>
static char error_buf[1024];
static char fmt_buf[1024];
static char off_buf[80];
/*
 * Render a cpu_key's offset into the shared static off_buf.  Directory
 * keys print as hash(generation); all other types print the raw offset
 * in hex.  Not reentrant — callers are serialized elsewhere.
 */
static char *reiserfs_cpu_offset(struct cpu_key *key)
{
	if (cpu_key_k_type(key) != TYPE_DIRENTRY) {
		sprintf(off_buf, "0x%Lx",
			(unsigned long long)cpu_key_k_offset(key));
	} else {
		sprintf(off_buf, "%Lu(%Lu)",
			(unsigned long long)
			GET_HASH_VALUE(cpu_key_k_offset(key)),
			(unsigned long long)
			GET_GENERATION_NUMBER(cpu_key_k_offset(key)));
	}
	return off_buf;
}
/*
 * Little-endian counterpart of reiserfs_cpu_offset(): render the offset
 * of an on-disk key into the shared static off_buf.
 */
static char *le_offset(struct reiserfs_key *key)
{
	int version = le_key_version(key);

	if (le_key_k_type(version, key) != TYPE_DIRENTRY) {
		sprintf(off_buf, "0x%Lx",
			(unsigned long long)le_key_k_offset(version, key));
	} else {
		sprintf(off_buf, "%Lu(%Lu)",
			(unsigned long long)
			GET_HASH_VALUE(le_key_k_offset(version, key)),
			(unsigned long long)
			GET_GENERATION_NUMBER(le_key_k_offset(version, key)));
	}
	return off_buf;
}
/* Human-readable name for a cpu_key's item type. */
static char *cpu_type(struct cpu_key *key)
{
	switch (cpu_key_k_type(key)) {
	case TYPE_STAT_DATA:
		return "SD";
	case TYPE_DIRENTRY:
		return "DIR";
	case TYPE_DIRECT:
		return "DIRECT";
	case TYPE_INDIRECT:
		return "IND";
	default:
		return "UNKNOWN";
	}
}
/* Human-readable name for an on-disk (little-endian) key's item type. */
static char *le_type(struct reiserfs_key *key)
{
	int version = le_key_version(key);

	switch (le_key_k_type(version, key)) {
	case TYPE_STAT_DATA:
		return "SD";
	case TYPE_DIRENTRY:
		return "DIR";
	case TYPE_DIRECT:
		return "DIRECT";
	case TYPE_INDIRECT:
		return "IND";
	default:
		return "UNKNOWN";
	}
}
/* %k */
/* %k handler: format an on-disk key as [dirid objectid offset type]. */
static void sprintf_le_key(char *buf, struct reiserfs_key *key)
{
	if (!key) {
		sprintf(buf, "[NULL]");
		return;
	}
	sprintf(buf, "[%d %d %s %s]", le32_to_cpu(key->k_dir_id),
		le32_to_cpu(key->k_objectid), le_offset(key), le_type(key));
}
/* %K */
/* %K handler: format a cpu key as [dirid objectid offset type]. */
static void sprintf_cpu_key(char *buf, struct cpu_key *key)
{
	if (!key) {
		sprintf(buf, "[NULL]");
		return;
	}
	sprintf(buf, "[%d %d %s %s]", key->on_disk_key.k_dir_id,
		key->on_disk_key.k_objectid, reiserfs_cpu_offset(key),
		cpu_type(key));
}
/* %a handler: format a directory-entry head's fields. */
static void sprintf_de_head(char *buf, struct reiserfs_de_head *deh)
{
	if (!deh) {
		sprintf(buf, "[NULL]");
		return;
	}
	sprintf(buf,
		"[offset=%d dir_id=%d objectid=%d location=%d state=%04x]",
		deh_offset(deh), deh_dir_id(deh), deh_objectid(deh),
		deh_location(deh), deh_state(deh));
}
/* %h handler: format an item head (version tag, key, length/location). */
static void sprintf_item_head(char *buf, struct item_head *ih)
{
	if (!ih) {
		sprintf(buf, "[NULL]");
		return;
	}
	strcpy(buf, (ih_version(ih) == KEY_FORMAT_3_6) ? "*3.6* " : "*3.5*");
	sprintf_le_key(buf + strlen(buf), &(ih->ih_key));
	sprintf(buf + strlen(buf), ", item_len %d, item_location %d, "
		"free_space(entry_count) %d",
		ih_item_len(ih), ih_location(ih), ih_free_space(ih));
}
/* %t handler: format a directory entry, truncating the name to 19 chars. */
static void sprintf_direntry(char *buf, struct reiserfs_dir_entry *de)
{
	char name[20];
	int len = de->de_namelen > 19 ? 19 : de->de_namelen;

	memcpy(name, de->de_name, len);
	name[len] = 0;
	sprintf(buf, "\"%s\"==>[%d %d]", name, de->de_dir_id, de->de_objectid);
}
/* %z handler: one-line summary of a tree node's block head. */
static void sprintf_block_head(char *buf, struct buffer_head *bh)
{
	sprintf(buf, "level=%d, nr_items=%d, free_space=%d rdkey ",
		B_LEVEL(bh), B_NR_ITEMS(bh), B_FREE_SPACE(bh));
}
/* %b handler: dump a buffer_head's device, geometry and state flags. */
static void sprintf_buffer_head(char *buf, struct buffer_head *bh)
{
	char b[BDEVNAME_SIZE];
	sprintf(buf,
		"dev %s, size %zd, blocknr %llu, count %d, state 0x%lx, page %p, (%s, %s, %s)",
		bdevname(bh->b_bdev, b), bh->b_size,
		(unsigned long long)bh->b_blocknr, atomic_read(&(bh->b_count)),
		bh->b_state, bh->b_page,
		buffer_uptodate(bh) ? "UPTODATE" : "!UPTODATE",
		buffer_dirty(bh) ? "DIRTY" : "CLEAN",
		buffer_locked(bh) ? "LOCKED" : "UNLOCKED");
}
/* %y handler: format a disk_child (child block number and size). */
static void sprintf_disk_child(char *buf, struct disk_child *dc)
{
	sprintf(buf, "[dc_number=%d, dc_size=%u]", dc_block_number(dc),
		dc_size(dc));
}
/*
 * Scan `fmt` for the first reiserfs-specific conversion specifier
 * (%k %K %h %t %z %b %y %a).  On a hit, store the specifier character
 * in *what and return a pointer to its '%'; return NULL when none is
 * found.  Note: "%%" is not treated specially, matching the original.
 */
static char *is_there_reiserfs_struct(char *fmt, int *what)
{
	char *p;

	for (p = strchr(fmt, '%'); p != NULL; p = strchr(p + 1, '%')) {
		switch (p[1]) {
		case 'k':
		case 'K':
		case 'h':
		case 't':
		case 'z':
		case 'b':
		case 'y':
		case 'a':
			*what = p[1];
			return p;
		}
	}
	return NULL;
}
/* debugging reiserfs we used to print out a lot of different
variables, like keys, item headers, buffer heads etc. Values of
most fields matter. So it took a long time just to write
appropriative printk. With this reiserfs_warning you can use format
specification for complex structures like you used to do with
printfs for integers, doubles and pointers. For instance, to print
out key structure you have to write just:
reiserfs_warning ("bad key %k", key);
instead of
printk ("bad key %lu %lu %lu %lu", key->k_dir_id, key->k_objectid,
key->k_offset, key->k_uniqueness);
*/
static DEFINE_SPINLOCK(error_lock);
/*
 * Expand `fmt` plus its va_list into the static error_buf, handling the
 * reiserfs-specific %k/%K/%h/%t/%y/%z/%b/%a specifiers by splicing the
 * corresponding sprintf_* helper output between vsprintf'd segments.
 * The static buffers are guarded by error_lock; note there is no bounds
 * checking on error_buf (1024 bytes), so callers must keep messages short.
 */
static void prepare_error_buf(const char *fmt, va_list args)
{
	char *fmt1 = fmt_buf;
	char *k;
	char *p = error_buf;
	int what;
	spin_lock(&error_lock);
	/* Work on a private copy: the custom specifiers are cut out of it. */
	strcpy(fmt1, fmt);
	while ((k = is_there_reiserfs_struct(fmt1, &what)) != NULL) {
		/* Terminate before the custom %X, print the plain prefix. */
		*k = 0;
		p += vsprintf(p, fmt1, args);
		/* The next va_arg now belongs to this custom specifier. */
		switch (what) {
		case 'k':
			sprintf_le_key(p, va_arg(args, struct reiserfs_key *));
			break;
		case 'K':
			sprintf_cpu_key(p, va_arg(args, struct cpu_key *));
			break;
		case 'h':
			sprintf_item_head(p, va_arg(args, struct item_head *));
			break;
		case 't':
			sprintf_direntry(p,
					 va_arg(args,
						struct reiserfs_dir_entry *));
			break;
		case 'y':
			sprintf_disk_child(p,
					   va_arg(args, struct disk_child *));
			break;
		case 'z':
			sprintf_block_head(p,
					   va_arg(args, struct buffer_head *));
			break;
		case 'b':
			sprintf_buffer_head(p,
					    va_arg(args, struct buffer_head *));
			break;
		case 'a':
			sprintf_de_head(p,
					va_arg(args,
					       struct reiserfs_de_head *));
			break;
		}
		p += strlen(p);
		/* Skip past the two-character "%X" we just handled. */
		fmt1 = k + 2;
	}
	/* Print whatever plain format text remains after the last %X. */
	vsprintf(p, fmt1, args);
	spin_unlock(&error_lock);
}
/* in addition to usual conversion specifiers this accepts reiserfs
specific conversion specifiers:
%k to print little endian key,
%K to print cpu key,
%h to print item_head,
%t to print directory entry
%z to print block head (arg must be struct buffer_head *
%b to print buffer_head
*/
/*
 * Capture the calling function's trailing varargs into error_buf via
 * prepare_error_buf().  Only usable inside a varargs function whose
 * last named parameter is `fmt`, since va_start() refers to it.
 */
#define do_reiserfs_warning(fmt)\
{\
	va_list args;\
	va_start( args, fmt );\
	prepare_error_buf( fmt, args );\
	va_end( args );\
}
/*
 * Log a REISERFS warning; `sb` may be NULL when no superblock context
 * is available, and `id` (the maintainer error id) is optional.
 */
void __reiserfs_warning(struct super_block *sb, const char *id,
			const char *function, const char *fmt, ...)
{
	do_reiserfs_warning(fmt);
	if (!sb) {
		printk(KERN_WARNING "REISERFS warning: %s%s%s: %s\n",
		       id ? id : "", id ? " " : "", function, error_buf);
		return;
	}
	printk(KERN_WARNING "REISERFS warning (device %s): %s%s%s: %s\n",
	       sb->s_id, id ? id : "", id ? " " : "", function, error_buf);
}
/* No newline.. reiserfs_info calls can be followed by printk's */
/* Informational message; no trailing newline so printk's may follow. */
void reiserfs_info(struct super_block *sb, const char *fmt, ...)
{
	do_reiserfs_warning(fmt);
	if (!sb) {
		printk(KERN_NOTICE "REISERFS %s:", error_buf);
		return;
	}
	printk(KERN_NOTICE "REISERFS (device %s): %s", sb->s_id, error_buf);
}
/* No newline.. reiserfs_printk calls can be followed by printk's */
/*
 * Print the expanded message; no trailing newline so printk's may follow.
 *
 * Fix: the old code did printk(error_buf), using already-formatted data
 * as the printk format string.  Any '%' surviving in the message would
 * be re-interpreted as a conversion specifier and read garbage varargs
 * — a format-string bug (CERT FIO30-C).  Print through a literal "%s".
 */
static void reiserfs_printk(const char *fmt, ...)
{
	do_reiserfs_warning(fmt);
	printk("%s", error_buf);
}
/*
 * Debug message, compiled to a no-op unless CONFIG_REISERFS_CHECK is
 * set.  The `level` argument is accepted but not used here.
 */
void reiserfs_debug(struct super_block *s, int level, const char *fmt, ...)
{
#ifdef CONFIG_REISERFS_CHECK
	do_reiserfs_warning(fmt);
	if (!s) {
		printk(KERN_DEBUG "REISERFS debug: %s\n", error_buf);
		return;
	}
	printk(KERN_DEBUG "REISERFS debug (device %s): %s\n",
	       s->s_id, error_buf);
#endif
}
/* The format:
maintainer-errorid: [function-name:] message
where errorid is unique to the maintainer and function-name is
optional, is recommended, so that anyone can easily find the bug
with a simple grep for the short to type string
maintainer-errorid. Don't bother with reusing errorids, there are
lots of numbers out there.
Example:
reiserfs_panic(
p_sb, "reiser-29: reiserfs_new_blocknrs: "
"one of search_start or rn(%d) is equal to MAX_B_NUM,"
"which means that we are optimizing location based on the bogus location of a temp buffer (%p).",
rn, bh
);
Regular panic()s sometimes clear the screen before the message can
be read, thus the need for the while loop.
Numbering scheme for panic used by Vladimir and Anatoly( Hans completely ignores this scheme, and considers it
pointless complexity):
panics in reiserfs.h have numbers from 1000 to 1999
super.c 2000 to 2999
preserve.c (unused) 3000 to 3999
bitmap.c 4000 to 4999
stree.c 5000 to 5999
prints.c 6000 to 6999
namei.c 7000 to 7999
fix_nodes.c 8000 to 8999
dir.c 9000 to 9999
lbalance.c 10000 to 10999
ibalance.c 11000 to 11999 not ready
do_balan.c 12000 to 12999
inode.c 13000 to 13999
file.c 14000 to 14999
objectid.c 15000 - 15999
buffer.c 16000 - 16999
symlink.c 17000 - 17999
. */
/*
 * Format the message into error_buf and panic the machine.  With
 * CONFIG_REISERFS_CHECK a stack trace is dumped first.  Never returns.
 */
void __reiserfs_panic(struct super_block *sb, const char *id,
		      const char *function, const char *fmt, ...)
{
	do_reiserfs_warning(fmt);
#ifdef CONFIG_REISERFS_CHECK
	dump_stack();
#endif
	if (!sb)
		panic(KERN_WARNING "REISERFS panic: %s%s%s: %s\n",
		      id ? id : "", id ? " " : "", function, error_buf);
	panic(KERN_WARNING "REISERFS panic (device %s): %s%s%s: %s\n",
	      sb->s_id, id ? id : "", id ? " " : "", function, error_buf);
}
/*
 * Report a filesystem error.  Depending on the "panic on error" mount
 * option this either escalates to a panic or logs the message, forces
 * the filesystem read-only and aborts the journal so nothing further
 * is committed.  @sb must not be NULL.
 */
void __reiserfs_error(struct super_block *sb, const char *id,
		      const char *function, const char *fmt, ...)
{
	/* expand varargs into the static error_buf first */
	do_reiserfs_warning(fmt);

	BUG_ON(sb == NULL);

	/* mount option errors=panic: __reiserfs_panic() never returns */
	if (reiserfs_error_panic(sb))
		__reiserfs_panic(sb, id, function, error_buf);

	if (id && id[0])
		printk(KERN_CRIT "REISERFS error (device %s): %s %s: %s\n",
		       sb->s_id, id, function, error_buf);
	else
		printk(KERN_CRIT "REISERFS error (device %s): %s: %s\n",
		       sb->s_id, function, error_buf);

	/* already read-only: nothing left to protect */
	if (sb->s_flags & MS_RDONLY)
		return;

	reiserfs_info(sb, "Remounting filesystem read-only\n");
	sb->s_flags |= MS_RDONLY;
	reiserfs_abort_journal(sb, -EIO);
}

/*
 * Abort the filesystem with error @errno: log (or panic, per mount
 * options), mark the superblock read-only and abort the journal.
 * Returns silently if the journal is already aborted.
 */
void reiserfs_abort(struct super_block *sb, int errno, const char *fmt, ...)
{
	do_reiserfs_warning(fmt);

	if (reiserfs_error_panic(sb)) {
		panic(KERN_CRIT "REISERFS panic (device %s): %s\n", sb->s_id,
		      error_buf);
	}

	if (reiserfs_is_journal_aborted(SB_JOURNAL(sb)))
		return;

	printk(KERN_CRIT "REISERFS abort (device %s): %s\n", sb->s_id,
	       error_buf);

	sb->s_flags |= MS_RDONLY;
	reiserfs_abort_journal(sb, errno);
}
/* this prints internal nodes (4 keys/items in line) (dc_number,
dc_size)[k_dirid, k_objectid, k_offset, k_uniqueness](dc_number,
dc_size)...*/
/*
 * Dump an internal (non-leaf) tree node.  Returns 1 (and prints
 * nothing) if @bh is not an internal node, 0 otherwise.  @first/@last
 * select a key range; first == -1 means "whole node".
 *
 * The %z / %y / %k conversions are reiserfs-private extensions
 * expanded by do_reiserfs_warning() inside reiserfs_printk()
 * (presumably block head, disk_child and key respectively — the
 * expansion code lives elsewhere in prints.c).
 */
static int print_internal(struct buffer_head *bh, int first, int last)
{
	struct reiserfs_key *key;
	struct disk_child *dc;
	int i;
	int from, to;

	if (!B_IS_KEYS_LEVEL(bh))
		return 1;

	check_internal(bh);

	if (first == -1) {
		from = 0;
		to = B_NR_ITEMS(bh);
	} else {
		from = first;
		/* clamp the requested range to the node's item count */
		to = last < B_NR_ITEMS(bh) ? last : B_NR_ITEMS(bh);
	}

	reiserfs_printk("INTERNAL NODE (%ld) contains %z\n", bh->b_blocknr, bh);

	/* leading child pointer, then alternating KEY/PTR pairs */
	dc = B_N_CHILD(bh, from);
	reiserfs_printk("PTR %d: %y ", from, dc);

	for (i = from, key = B_N_PDELIM_KEY(bh, from), dc++; i < to;
	     i++, key++, dc++) {
		reiserfs_printk("KEY %d: %k PTR %d: %y ", i, key, i + 1, dc);
		if (i && i % 4 == 0)
			printk("\n");
	}
	printk("\n");
	return 0;
}

/*
 * Dump a leaf node.  Returns 1 if @bh is not a leaf, 0 otherwise.
 * Without PRINT_LEAF_ITEMS in @print_mode only the first/last item
 * keys are shown; otherwise each item header (and, item by item, the
 * item body via op_print_item) in the clamped [first, last) range.
 */
static int print_leaf(struct buffer_head *bh, int print_mode, int first,
		      int last)
{
	struct block_head *blkh;
	struct item_head *ih;
	int i, nr;
	int from, to;

	if (!B_IS_ITEMS_LEVEL(bh))
		return 1;

	check_leaf(bh);

	blkh = B_BLK_HEAD(bh);
	ih = B_N_PITEM_HEAD(bh, 0);
	nr = blkh_nr_item(blkh);

	printk
	    ("\n===================================================================\n");
	reiserfs_printk("LEAF NODE (%ld) contains %z\n", bh->b_blocknr, bh);

	if (!(print_mode & PRINT_LEAF_ITEMS)) {
		reiserfs_printk("FIRST ITEM_KEY: %k, LAST ITEM KEY: %k\n",
				&(ih->ih_key), &((ih + nr - 1)->ih_key));
		return 0;
	}

	/* clamp [first, last) to [0, nr) */
	if (first < 0 || first > nr - 1)
		from = 0;
	else
		from = first;

	if (last < 0 || last > nr)
		to = nr;
	else
		to = last;

	ih += from;
	printk
	    ("-------------------------------------------------------------------------------\n");
	printk
	    ("|##|   type    |           key           | ilen | free_space | version | loc  |\n");
	for (i = from; i < to; i++, ih++) {
		printk
		    ("-------------------------------------------------------------------------------\n");
		/* %h is the reiserfs-private item_head conversion */
		reiserfs_printk("|%2d| %h |\n", i, ih);
		if (print_mode & PRINT_LEAF_ITEMS)
			op_print_item(ih, B_I_PITEM(bh, ih));
	}

	printk
	    ("===================================================================\n");

	return 0;
}
/*
 * Map an on-disk hash function code to the name used in mount
 * messages; "unknown" for any unrecognized code.
 */
char *reiserfs_hashname(int code)
{
	switch (code) {
	case YURA_HASH:
		return "rupasov";
	case TEA_HASH:
		return "tea";
	case R5_HASH:
		return "r5";
	default:
		return "unknown";
	}
}
/* return 1 if this is not super block */
/*
 * Dump the fields of a reiserfs superblock found in @bh; recognizes
 * 3.5, 3.6 and "jr" (relocated-journal) layouts.  Returns 1 if @bh
 * does not contain a reiserfs superblock.
 */
static int print_super_block(struct buffer_head *bh)
{
	struct reiserfs_super_block *rs =
	    (struct reiserfs_super_block *)(bh->b_data);
	int skipped, data_blocks;
	char *version;
	char b[BDEVNAME_SIZE];

	if (is_reiserfs_3_5(rs)) {
		version = "3.5";
	} else if (is_reiserfs_3_6(rs)) {
		version = "3.6";
	} else if (is_reiserfs_jr(rs)) {
		version = ((sb_version(rs) == REISERFS_VERSION_2) ?
			   "3.6" : "3.5");
	} else {
		return 1;
	}

	printk("%s\'s super block is in block %llu\n", bdevname(bh->b_bdev, b),
	       (unsigned long long)bh->b_blocknr);
	printk("Reiserfs version %s\n", version);
	printk("Block count %u\n", sb_block_count(rs));
	printk("Blocksize %d\n", sb_blocksize(rs));
	printk("Free blocks %u\n", sb_free_blocks(rs));
	// FIXME: this would be confusing if
	// someone stores reiserfs super block in some data block ;)
//    skipped = (bh->b_blocknr * bh->b_size) / sb_blocksize(rs);
	skipped = bh->b_blocknr;
	/* busy = skipped + bitmaps + journal (or reservation) + 1 super */
	data_blocks = sb_block_count(rs) - skipped - 1 - sb_bmap_nr(rs) -
	    (!is_reiserfs_jr(rs) ? sb_jp_journal_size(rs) +
	     1 : sb_reserved_for_journal(rs)) - sb_free_blocks(rs);
	printk
	    ("Busy blocks (skipped %d, bitmaps - %d, journal (or reserved) blocks - %d\n"
	     "1 super block, %d data blocks\n", skipped, sb_bmap_nr(rs),
	     (!is_reiserfs_jr(rs) ? (sb_jp_journal_size(rs) + 1) :
	      sb_reserved_for_journal(rs)), data_blocks);
	printk("Root block %u\n", sb_root_block(rs));
	printk("Journal block (first) %d\n", sb_jp_journal_1st_block(rs));
	printk("Journal dev %d\n", sb_jp_journal_dev(rs));
	printk("Journal orig size %d\n", sb_jp_journal_size(rs));
	printk("FS state %d\n", sb_fs_state(rs));
	printk("Hash function \"%s\"\n",
	       reiserfs_hashname(sb_hash_function_code(rs)));
	printk("Tree height %d\n", sb_tree_height(rs));
	return 0;
}

/*
 * Dump a journal descriptor block.  Returns 1 if @bh does not carry
 * the JOURNAL_DESC_MAGIC signature.
 */
static int print_desc_block(struct buffer_head *bh)
{
	struct reiserfs_journal_desc *desc;

	if (memcmp(get_journal_desc_magic(bh), JOURNAL_DESC_MAGIC, 8))
		return 1;

	desc = (struct reiserfs_journal_desc *)(bh->b_data);
	printk("Desc block %llu (j_trans_id %d, j_mount_id %d, j_len %d)",
	       (unsigned long long)bh->b_blocknr, get_desc_trans_id(desc),
	       get_desc_mount_id(desc), get_desc_trans_len(desc));

	return 0;
}

/*
 * Print any kind of block: tries leaf, internal, superblock and
 * journal-descriptor interpretations in turn, falling back to
 * "unformatted data".  The variadic tail is (int print_mode,
 * int first, int last), consumed via va_arg below.
 */
void print_block(struct buffer_head *bh, ...)	//int print_mode, int first, int last)
{
	va_list args;
	int mode, first, last;

	if (!bh) {
		printk("print_block: buffer is NULL\n");
		return;
	}

	va_start(args, bh);
	mode = va_arg(args, int);
	first = va_arg(args, int);
	last = va_arg(args, int);
	/* each printer returns non-zero when the block is not its kind */
	if (print_leaf(bh, mode, first, last))
		if (print_internal(bh, first, last))
			if (print_super_block(bh))
				if (print_desc_block(bh))
					printk
					    ("Block %llu contains unformatted data\n",
					     (unsigned long long)bh->b_blocknr);

	va_end(args);
}
/* buffer for the tree-balance snapshot printed by print_cur_tb() */
static char print_tb_buf[2048];

/* this stores initial state of tree balance in the print_tb_buf */
/*
 * Snapshot the state of a tree_balance into print_tb_buf as a set of
 * ASCII tables (per-level buffers, balance parameters, FEB list).
 *
 * NOTE(review): every line is appended with sprintf() at
 * strlen(print_tb_buf) with no bound against the 2048-byte buffer —
 * a sufficiently tall tree could overflow it; confirm whether the
 * worst-case output is provably below 2048 bytes.
 */
void store_print_tb(struct tree_balance *tb)
{
	int h = 0;
	int i;
	struct buffer_head *tbSh, *tbFh;

	if (!tb)
		return;

	sprintf(print_tb_buf, "\n"
		"BALANCING %d\n"
		"MODE=%c, ITEM_POS=%d POS_IN_ITEM=%d\n"
		"=====================================================================\n"
		"* h *    S    *    L    *    R    *   F   *   FL  *   FR  *  CFL  *  CFR  *\n",
		REISERFS_SB(tb->tb_sb)->s_do_balance,
		tb->tb_mode, PATH_LAST_POSITION(tb->tb_path),
		tb->tb_path->pos_in_item);

	for (h = 0; h < ARRAY_SIZE(tb->insert_size); h++) {
		/* resolve the path buffer and its parent for this level,
		 * if the path actually reaches that high */
		if (PATH_H_PATH_OFFSET(tb->tb_path, h) <=
		    tb->tb_path->path_length
		    && PATH_H_PATH_OFFSET(tb->tb_path,
					  h) > ILLEGAL_PATH_ELEMENT_OFFSET) {
			tbSh = PATH_H_PBUFFER(tb->tb_path, h);
			tbFh = PATH_H_PPARENT(tb->tb_path, h);
		} else {
			tbSh = NULL;
			tbFh = NULL;
		}
		sprintf(print_tb_buf + strlen(print_tb_buf),
			"* %d * %3lld(%2d) * %3lld(%2d) * %3lld(%2d) * %5lld * %5lld * %5lld * %5lld * %5lld *\n",
			h,
			(tbSh) ? (long long)(tbSh->b_blocknr) : (-1LL),
			(tbSh) ? atomic_read(&(tbSh->b_count)) : -1,
			(tb->L[h]) ? (long long)(tb->L[h]->b_blocknr) : (-1LL),
			(tb->L[h]) ? atomic_read(&(tb->L[h]->b_count)) : -1,
			(tb->R[h]) ? (long long)(tb->R[h]->b_blocknr) : (-1LL),
			(tb->R[h]) ? atomic_read(&(tb->R[h]->b_count)) : -1,
			(tbFh) ? (long long)(tbFh->b_blocknr) : (-1LL),
			(tb->FL[h]) ? (long long)(tb->FL[h]->
						  b_blocknr) : (-1LL),
			(tb->FR[h]) ? (long long)(tb->FR[h]->
						  b_blocknr) : (-1LL),
			(tb->CFL[h]) ? (long long)(tb->CFL[h]->
						   b_blocknr) : (-1LL),
			(tb->CFR[h]) ? (long long)(tb->CFR[h]->
						   b_blocknr) : (-1LL));
	}

	/* leaf-level balance parameters */
	sprintf(print_tb_buf + strlen(print_tb_buf),
		"=====================================================================\n"
		"* h * size * ln * lb * rn * rb * blkn * s0 * s1 * s1b * s2 * s2b * curb * lk * rk *\n"
		"* 0 * %4d * %2d * %2d * %2d * %2d * %4d * %2d * %2d * %3d * %2d * %3d * %4d * %2d * %2d *\n",
		tb->insert_size[0], tb->lnum[0], tb->lbytes, tb->rnum[0],
		tb->rbytes, tb->blknum[0], tb->s0num, tb->s1num, tb->s1bytes,
		tb->s2num, tb->s2bytes, tb->cur_blknum, tb->lkey[0],
		tb->rkey[0]);

	/* this prints balance parameters for non-leaf levels */
	h = 0;
	do {
		h++;
		sprintf(print_tb_buf + strlen(print_tb_buf),
			"* %d * %4d * %2d *    * %2d *    * %2d *\n",
			h, tb->insert_size[h], tb->lnum[h], tb->rnum[h],
			tb->blknum[h]);
	} while (tb->insert_size[h]);

	sprintf(print_tb_buf + strlen(print_tb_buf),
		"=====================================================================\n"
		"FEB list: ");

	/* print FEB list (list of buffers in form (bh (b_blocknr, b_count), that will be used for new nodes) */
	h = 0;
	for (i = 0; i < ARRAY_SIZE(tb->FEB); i++)
		sprintf(print_tb_buf + strlen(print_tb_buf),
			"%p (%llu %d)%s", tb->FEB[i],
			tb->FEB[i] ? (unsigned long long)tb->FEB[i]->
			b_blocknr : 0ULL,
			tb->FEB[i] ? atomic_read(&(tb->FEB[i]->b_count)) : 0,
			(i == ARRAY_SIZE(tb->FEB) - 1) ? "\n" : ", ");

	sprintf(print_tb_buf + strlen(print_tb_buf),
		"======================== the end ====================================\n");
}

/* Print @mes followed by the last snapshot taken by store_print_tb(). */
void print_cur_tb(char *mes)
{
	printk("%s\n%s", mes, print_tb_buf);
}
/*
 * Sanity-check a leaf block header: the item count must fit in the
 * block and the recorded free space must not exceed what the item
 * headers leave available.  Panics on inconsistency.
 */
static void check_leaf_block_head(struct buffer_head *bh)
{
	struct block_head *blkh;
	int nr;

	blkh = B_BLK_HEAD(bh);
	nr = blkh_nr_item(blkh);
	if (nr > (bh->b_size - BLKH_SIZE) / IH_SIZE)
		reiserfs_panic(NULL, "vs-6010", "invalid item number %z",
			       bh);
	if (blkh_free_space(blkh) > bh->b_size - BLKH_SIZE - IH_SIZE * nr)
		reiserfs_panic(NULL, "vs-6020", "invalid free space %z",
			       bh);

}

/*
 * Sanity-check an internal block header: level within (leaf, MAX],
 * item count in range, and free space exactly accounted for by keys
 * plus child pointers (note: an exact "!=" check, unlike the leaf
 * variant's ">" check, because internal free space is fully derived).
 */
static void check_internal_block_head(struct buffer_head *bh)
{
	struct block_head *blkh;

	blkh = B_BLK_HEAD(bh);
	if (!(B_LEVEL(bh) > DISK_LEAF_NODE_LEVEL && B_LEVEL(bh) <= MAX_HEIGHT))
		reiserfs_panic(NULL, "vs-6025", "invalid level %z", bh);

	if (B_NR_ITEMS(bh) > (bh->b_size - BLKH_SIZE) / IH_SIZE)
		reiserfs_panic(NULL, "vs-6030", "invalid item number %z", bh);

	if (B_FREE_SPACE(bh) !=
	    bh->b_size - BLKH_SIZE - KEY_SIZE * B_NR_ITEMS(bh) -
	    DC_SIZE * (B_NR_ITEMS(bh) + 1))
		reiserfs_panic(NULL, "vs-6040", "invalid free space %z", bh);

}

/* Check a leaf's header and every item body; NULL @bh is a no-op. */
void check_leaf(struct buffer_head *bh)
{
	int i;
	struct item_head *ih;

	if (!bh)
		return;
	check_leaf_block_head(bh);
	for (i = 0, ih = B_N_PITEM_HEAD(bh, 0); i < B_NR_ITEMS(bh); i++, ih++)
		op_check_item(ih, B_I_PITEM(bh, ih));
}

/* Check an internal node's header; NULL @bh is a no-op. */
void check_internal(struct buffer_head *bh)
{
	if (!bh)
		return;
	check_internal_block_head(bh);
}

/*
 * Session statistics dump — intentionally disabled; the commented
 * printk below is kept as reference for the counters available in
 * struct reiserfs_sb_info.
 */
void print_statistics(struct super_block *s)
{

	/*
	   printk ("reiserfs_put_super: session statistics: balances %d, fix_nodes %d, \
	   bmap with search %d, without %d, dir2ind %d, ind2dir %d\n",
	   REISERFS_SB(s)->s_do_balance, REISERFS_SB(s)->s_fix_nodes,
	   REISERFS_SB(s)->s_bmaps, REISERFS_SB(s)->s_bmaps_without_search,
	   REISERFS_SB(s)->s_direct2indirect, REISERFS_SB(s)->s_indirect2direct);
	 */

}
| gpl-2.0 |
joryb/android_kernel_samsung_jf | drivers/usb/misc/uss720.c | 8071 | 22868 | /*****************************************************************************/
/*
* uss720.c -- USS720 USB Parport Cable.
*
* Copyright (C) 1999, 2005, 2010
* Thomas Sailer (t.sailer@alumni.ethz.ch)
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*
* Based on parport_pc.c
*
* History:
* 0.1 04.08.1999 Created
* 0.2 07.08.1999 Some fixes mainly suggested by Tim Waugh
* Interrupt handling currently disabled because
* usb_request_irq crashes somewhere within ohci.c
* for no apparent reason (that is for me, anyway)
* ECP currently untested
* 0.3 10.08.1999 fixing merge errors
* 0.4 13.08.1999 Added Vendor/Product ID of Brad Hard's cable
* 0.5 20.09.1999 usb_control_msg wrapper used
* Nov01.2000 usb_device_table support by Adam J. Richter
* 08.04.2001 Identify version on module load. gb
* 0.6 02.09.2005 Fix "scheduling in interrupt" problem by making save/restore
* context asynchronous
*
*/
/*****************************************************************************/
#include <linux/module.h>
#include <linux/socket.h>
#include <linux/parport.h>
#include <linux/init.h>
#include <linux/usb.h>
#include <linux/delay.h>
#include <linux/completion.h>
#include <linux/kref.h>
#include <linux/slab.h>
/*
* Version Information
*/
#define DRIVER_VERSION "v0.6"
#define DRIVER_AUTHOR "Thomas M. Sailer, t.sailer@alumni.ethz.ch"
#define DRIVER_DESC "USB Parport Cable driver for Cables using the Lucent Technologies USS720 Chip"
/* --------------------------------------------------------------------- */
struct parport_uss720_private {
struct usb_device *usbdev;
struct parport *pp;
struct kref ref_count;
__u8 reg[7]; /* USB registers */
struct list_head asynclist;
spinlock_t asynclock;
};
struct uss720_async_request {
struct parport_uss720_private *priv;
struct kref ref_count;
struct list_head asynclist;
struct completion compl;
struct urb *urb;
struct usb_ctrlrequest dr;
__u8 reg[7];
};
/* --------------------------------------------------------------------- */
/*
 * kref release for struct parport_uss720_private: drop the USB device
 * reference taken at probe time and free the structure.
 */
static void destroy_priv(struct kref *kref)
{
	struct parport_uss720_private *priv = container_of(kref, struct parport_uss720_private, ref_count);

	usb_put_dev(priv->usbdev);
	kfree(priv);
	dbg("destroying priv datastructure");
}

/*
 * kref release for an async control request: free its URB, unlink it
 * from the owner's asynclist under the lock, free it, and drop the
 * reference it held on the owning priv.
 * (usb_free_urb(NULL) is a no-op, so the likely() guard is belt and
 * braces only.)
 */
static void destroy_async(struct kref *kref)
{
	struct uss720_async_request *rq = container_of(kref, struct uss720_async_request, ref_count);
	struct parport_uss720_private *priv = rq->priv;
	unsigned long flags;

	if (likely(rq->urb))
		usb_free_urb(rq->urb);
	spin_lock_irqsave(&priv->asynclock, flags);
	list_del_init(&rq->asynclist);
	spin_unlock_irqrestore(&priv->asynclock, flags);
	kfree(rq);
	kref_put(&priv->ref_count, destroy_priv);
}

/* --------------------------------------------------------------------- */

/*
 * URB completion handler.  For a register-read request (bRequest == 3)
 * copy the returned register block into priv->reg and, if the nAck
 * interrupt is enabled and pending (reg bit masks per USS720 layout),
 * forward it to the parport core.  Finally signal the waiter and drop
 * the completion's reference on the request.
 */
static void async_complete(struct urb *urb)
{
	struct uss720_async_request *rq;
	struct parport *pp;
	struct parport_uss720_private *priv;
	int status = urb->status;

	rq = urb->context;
	priv = rq->priv;
	pp = priv->pp;

	if (status) {
		err("async_complete: urb error %d", status);
	} else if (rq->dr.bRequest == 3) {
		memcpy(priv->reg, rq->reg, sizeof(priv->reg));
#if 0
		dbg("async_complete regs %02x %02x %02x %02x %02x %02x %02x",
		    (unsigned int)priv->reg[0], (unsigned int)priv->reg[1], (unsigned int)priv->reg[2],
		    (unsigned int)priv->reg[3], (unsigned int)priv->reg[4], (unsigned int)priv->reg[5],
		    (unsigned int)priv->reg[6]);
#endif
		/* if nAck interrupts are enabled and we have an interrupt, call the interrupt procedure */
		if (rq->reg[2] & rq->reg[1] & 0x10 && pp)
			parport_generic_irq(pp);
	}
	complete(&rq->compl);
	kref_put(&rq->ref_count, destroy_async);
}
/*
 * Build and submit an asynchronous control request to the USS720.
 * For request 3 (register read) a 7-byte IN stage into rq->reg is set
 * up; other requests have no data stage.  On success the request is
 * queued on priv->asynclist and returned with an extra reference held
 * for the caller; the completion handler drops its own reference.
 * Returns NULL on any failure.
 *
 * Reference accounting: kref_init gives the completion's reference,
 * kref_get before submit gives the caller's.  NOTE(review): the
 * failure path calls destroy_async() directly (bypassing kref_put)
 * while both references are nominally held — presumably deliberate
 * because the completion will never run, but worth confirming.
 */
static struct uss720_async_request *submit_async_request(struct parport_uss720_private *priv,
							 __u8 request, __u8 requesttype, __u16 value, __u16 index,
							 gfp_t mem_flags)
{
	struct usb_device *usbdev;
	struct uss720_async_request *rq;
	unsigned long flags;
	int ret;

	if (!priv)
		return NULL;
	usbdev = priv->usbdev;
	if (!usbdev)
		return NULL;
	rq = kmalloc(sizeof(struct uss720_async_request), mem_flags);
	if (!rq) {
		err("submit_async_request out of memory");
		return NULL;
	}
	kref_init(&rq->ref_count);
	INIT_LIST_HEAD(&rq->asynclist);
	init_completion(&rq->compl);
	kref_get(&priv->ref_count);	/* request holds priv alive */
	rq->priv = priv;
	rq->urb = usb_alloc_urb(0, mem_flags);
	if (!rq->urb) {
		kref_put(&rq->ref_count, destroy_async);
		err("submit_async_request out of memory");
		return NULL;
	}
	rq->dr.bRequestType = requesttype;
	rq->dr.bRequest = request;
	rq->dr.wValue = cpu_to_le16(value);
	rq->dr.wIndex = cpu_to_le16(index);
	rq->dr.wLength = cpu_to_le16((request == 3) ? sizeof(rq->reg) : 0);
	usb_fill_control_urb(rq->urb, usbdev, (requesttype & 0x80) ? usb_rcvctrlpipe(usbdev, 0) : usb_sndctrlpipe(usbdev, 0),
			     (unsigned char *)&rq->dr,
			     (request == 3) ? rq->reg : NULL, (request == 3) ? sizeof(rq->reg) : 0, async_complete, rq);
	/* rq->urb->transfer_flags |= URB_ASYNC_UNLINK; */
	spin_lock_irqsave(&priv->asynclock, flags);
	list_add_tail(&rq->asynclist, &priv->asynclist);
	spin_unlock_irqrestore(&priv->asynclock, flags);
	kref_get(&rq->ref_count);	/* caller's reference */
	ret = usb_submit_urb(rq->urb, mem_flags);
	if (!ret)
		return rq;
	destroy_async(&rq->ref_count);
	err("submit_async_request submit_urb failed with %d", ret);
	return NULL;
}

/*
 * Unlink the URB of every request still queued on priv->asynclist.
 * Returns the number of requests touched; each one will be reaped by
 * its completion handler.
 */
static unsigned int kill_all_async_requests_priv(struct parport_uss720_private *priv)
{
	struct uss720_async_request *rq;
	unsigned long flags;
	unsigned int ret = 0;

	spin_lock_irqsave(&priv->asynclock, flags);
	list_for_each_entry(rq, &priv->asynclist, asynclist) {
		usb_unlink_urb(rq->urb);
		ret++;
	}
	spin_unlock_irqrestore(&priv->asynclock, flags);
	return ret;
}
/* --------------------------------------------------------------------- */
/*
 * Read a virtual IEEE-1284 register: request 3 returns all seven chip
 * registers at once into priv->reg; regindex maps the 1284 register
 * number onto an index into that block.  With @val == NULL the read is
 * fire-and-forget.  Waits up to 1s (HZ) for the transfer.
 *
 * Returns 0 on success, the urb status on USB error, -EIO otherwise.
 * Note: *val is written from the cached register block even when the
 * urb reported an error, so it may be stale in that case.
 * NOTE(review): on the timeout path the caller's reference on rq does
 * not appear to be dropped — confirm against the kref accounting in
 * submit_async_request()/async_complete().
 */
static int get_1284_register(struct parport *pp, unsigned char reg, unsigned char *val, gfp_t mem_flags)
{
	struct parport_uss720_private *priv;
	struct uss720_async_request *rq;
	static const unsigned char regindex[9] = {
		4, 0, 1, 5, 5, 0, 2, 3, 6
	};
	int ret;

	if (!pp)
		return -EIO;
	priv = pp->private_data;
	rq = submit_async_request(priv, 3, 0xc0, ((unsigned int)reg) << 8, 0, mem_flags);
	if (!rq) {
		err("get_1284_register(%u) failed", (unsigned int)reg);
		return -EIO;
	}
	if (!val) {
		kref_put(&rq->ref_count, destroy_async);
		return 0;
	}
	if (wait_for_completion_timeout(&rq->compl, HZ)) {
		ret = rq->urb->status;
		*val = priv->reg[(reg >= 9) ? 0 : regindex[reg]];
		if (ret)
			printk(KERN_WARNING "get_1284_register: "
			       "usb error %d\n", ret);
		kref_put(&rq->ref_count, destroy_async);
		return ret;
	}
	printk(KERN_WARNING "get_1284_register timeout\n");
	kill_all_async_requests_priv(priv);
	return -EIO;
}

/*
 * Write a virtual IEEE-1284 register (request 4, value in the low
 * byte, register number in the high byte).  Does not wait for
 * completion.  Returns 0 on success, -EIO on submission failure.
 */
static int set_1284_register(struct parport *pp, unsigned char reg, unsigned char val, gfp_t mem_flags)
{
	struct parport_uss720_private *priv;
	struct uss720_async_request *rq;

	if (!pp)
		return -EIO;
	priv = pp->private_data;
	rq = submit_async_request(priv, 4, 0x40, (((unsigned int)reg) << 8) | val, 0, mem_flags);
	if (!rq) {
		err("set_1284_register(%u,%u) failed", (unsigned int)reg, (unsigned int)val);
		return -EIO;
	}
	kref_put(&rq->ref_count, destroy_async);
	return 0;
}

/* --------------------------------------------------------------------- */

/* ECR modes */
/* Values for the ECR mode field in bits <7:5>; written in (redundant)
 * octal, numerically 0..4. */
#define ECR_SPP 00
#define ECR_PS2 01
#define ECR_PPF 02
#define ECR_ECP 03
#define ECR_EPP 04
/* Safely change the mode bits in the ECR */
/*
 * Switch the ECR mode field <7:5> to @m.  Direct transitions between
 * the extended modes are not allowed, so go through PS/2 first, and
 * wait (bounded by the current device's timeout) for the FIFO to drain
 * before leaving a FIFO-based mode.  Returns 0 on success, -EIO on a
 * register access failure, -EBUSY if the FIFO never empties.
 *
 * (This also repairs an encoding corruption: "&reg" had been mangled
 * into a registered-trademark character, which does not compile.)
 */
static int change_mode(struct parport *pp, int m)
{
	struct parport_uss720_private *priv = pp->private_data;
	int mode;
	__u8 reg;

	if (get_1284_register(pp, 6, &reg, GFP_KERNEL))
		return -EIO;
	/* Bits <7:5> contain the mode. */
	mode = (priv->reg[2] >> 5) & 0x7;
	if (mode == m)
		return 0;

	/* We have to go through mode 000 or 001 */
	if (mode > ECR_PS2 && m > ECR_PS2)
		if (change_mode(pp, ECR_PS2))
			return -EIO;

	if (m <= ECR_PS2 && !(priv->reg[1] & 0x20)) {
		/* This mode resets the FIFO, so we may
		 * have to wait for it to drain first. */
		unsigned long expire = jiffies + pp->physport->cad->timeout;
		switch (mode) {
		case ECR_PPF: /* Parallel Port FIFO mode */
		case ECR_ECP: /* ECP Parallel Port mode */
			/* Poll slowly. */
			for (;;) {
				if (get_1284_register(pp, 6, &reg, GFP_KERNEL))
					return -EIO;
				if (priv->reg[2] & 0x01)
					break;
				if (time_after_eq (jiffies, expire))
					/* The FIFO is stuck. */
					return -EBUSY;
				msleep_interruptible(10);
				if (signal_pending (current))
					break;
			}
		}
	}

	/* Set the mode. */
	if (set_1284_register(pp, 6, m << 5, GFP_KERNEL))
		return -EIO;
	if (get_1284_register(pp, 6, &reg, GFP_KERNEL))
		return -EIO;
	return 0;
}
/*
 * Clear the EPP TIMEOUT bit by reading the status register.
 * Returns the (pre-clear) timeout bit, or 1 if the register read
 * itself failed.
 */
static int clear_epp_timeout(struct parport *pp)
{
	unsigned char status;

	if (get_1284_register(pp, 1, &status, GFP_KERNEL))
		return 1;
	return status & 1;
}
/*
 * Access functions.
 */

#if 0
/*
 * Historic interrupt-endpoint handler — compiled out because interrupt
 * handling is disabled (see the history note in the file header).
 * Mirrors the nAck forwarding done in async_complete().
 */
static int uss720_irq(int usbstatus, void *buffer, int len, void *dev_id)
{
	struct parport *pp = (struct parport *)dev_id;
	struct parport_uss720_private *priv = pp->private_data;

	if (usbstatus != 0 || len < 4 || !buffer)
		return 1;
	memcpy(priv->reg, buffer, 4);
	/* if nAck interrupts are enabled and we have an interrupt, call the interrupt procedure */
	if (priv->reg[2] & priv->reg[1] & 0x10)
		parport_generic_irq(pp);
	return 1;
}
#endif
/* Write the data register (1284 register 0). */
static void parport_uss720_write_data(struct parport *pp, unsigned char d)
{
	set_1284_register(pp, 0, d, GFP_KERNEL);
}

/* Read the data register; 0 on communication failure. */
static unsigned char parport_uss720_read_data(struct parport *pp)
{
	unsigned char ret;

	if (get_1284_register(pp, 0, &ret, GFP_KERNEL))
		return 0;
	return ret;
}

/*
 * Write the control register: callers may only set the low nibble,
 * the high nibble (irq enable, direction) is preserved from the soft
 * copy in priv->reg[1], which is updated on success.
 */
static void parport_uss720_write_control(struct parport *pp, unsigned char d)
{
	struct parport_uss720_private *priv = pp->private_data;

	d = (d & 0xf) | (priv->reg[1] & 0xf0);
	if (set_1284_register(pp, 2, d, GFP_KERNEL))
		return;
	priv->reg[1] = d;
}

/* Control register read comes from the soft copy, not the hardware. */
static unsigned char parport_uss720_read_control(struct parport *pp)
{
	struct parport_uss720_private *priv = pp->private_data;
	return priv->reg[1] & 0xf; /* Use soft copy */
}

/*
 * Read-modify-write of the control low nibble:
 * new = (soft_copy & ~mask) ^ val, per parport frob semantics.
 * Returns the resulting low nibble, or 0 on write failure.
 */
static unsigned char parport_uss720_frob_control(struct parport *pp, unsigned char mask, unsigned char val)
{
	struct parport_uss720_private *priv = pp->private_data;
	unsigned char d;

	mask &= 0x0f;
	val &= 0x0f;
	d = (priv->reg[1] & (~mask)) ^ val;
	if (set_1284_register(pp, 2, d, GFP_KERNEL))
		return 0;
	priv->reg[1] = d;
	return d & 0xf;
}

/* Read the status register, masked to the defined status bits. */
static unsigned char parport_uss720_read_status(struct parport *pp)
{
	unsigned char ret;

	if (get_1284_register(pp, 1, &ret, GFP_KERNEL))
		return 0;
	return ret & 0xf8;
}

/* Clear the nAck interrupt-enable bit (0x10) in the control register. */
static void parport_uss720_disable_irq(struct parport *pp)
{
	struct parport_uss720_private *priv = pp->private_data;
	unsigned char d;

	d = priv->reg[1] & ~0x10;
	if (set_1284_register(pp, 2, d, GFP_KERNEL))
		return;
	priv->reg[1] = d;
}

/* Set the nAck interrupt-enable bit (0x10) in the control register. */
static void parport_uss720_enable_irq(struct parport *pp)
{
	struct parport_uss720_private *priv = pp->private_data;
	unsigned char d;

	d = priv->reg[1] | 0x10;
	if (set_1284_register(pp, 2, d, GFP_KERNEL))
		return;
	priv->reg[1] = d;
}

/* Clear the direction bit (0x20): data lines driven by the host. */
static void parport_uss720_data_forward (struct parport *pp)
{
	struct parport_uss720_private *priv = pp->private_data;
	unsigned char d;

	d = priv->reg[1] & ~0x20;
	if (set_1284_register(pp, 2, d, GFP_KERNEL))
		return;
	priv->reg[1] = d;
}

/* Set the direction bit (0x20): data lines tristated for reading. */
static void parport_uss720_data_reverse (struct parport *pp)
{
	struct parport_uss720_private *priv = pp->private_data;
	unsigned char d;

	d = priv->reg[1] | 0x20;
	if (set_1284_register(pp, 2, d, GFP_KERNEL))
		return;
	priv->reg[1] = d;
}

/* Default PC-style state: ctr 0xc (+irq enable if used), ecr 0x24. */
static void parport_uss720_init_state(struct pardevice *dev, struct parport_state *s)
{
	s->u.pc.ctr = 0xc | (dev->irq_func ? 0x10 : 0x0);
	s->u.pc.ecr = 0x24;
}

/* Save ctr/ecr from the soft copies (no hardware access). */
static void parport_uss720_save_state(struct parport *pp, struct parport_state *s)
{
	struct parport_uss720_private *priv = pp->private_data;

#if 0
	if (get_1284_register(pp, 2, NULL, GFP_ATOMIC))
		return;
#endif
	s->u.pc.ctr = priv->reg[1];
	s->u.pc.ecr = priv->reg[2];
}

/*
 * Restore ctr/ecr to the hardware (GFP_ATOMIC: may run in atomic
 * context), trigger an async register refresh, and update the soft
 * copies.
 */
static void parport_uss720_restore_state(struct parport *pp, struct parport_state *s)
{
	struct parport_uss720_private *priv = pp->private_data;

	set_1284_register(pp, 2, s->u.pc.ctr, GFP_ATOMIC);
	set_1284_register(pp, 6, s->u.pc.ecr, GFP_ATOMIC);
	get_1284_register(pp, 2, NULL, GFP_ATOMIC);
	priv->reg[1] = s->u.pc.ctr;
	priv->reg[2] = s->u.pc.ecr;
}
/*
 * EPP data read, one byte per control transfer (register 4).  Stops
 * early on a register-access failure or when the chip reports an EPP
 * timeout (status bit 0, cleared before returning).  Always drops
 * back to PS/2 mode; returns the number of bytes read.
 */
static size_t parport_uss720_epp_read_data(struct parport *pp, void *buf, size_t length, int flags)
{
	struct parport_uss720_private *priv = pp->private_data;
	size_t got = 0;

	if (change_mode(pp, ECR_EPP))
		return 0;
	for (; got < length; got++) {
		if (get_1284_register(pp, 4, (char *)buf, GFP_KERNEL))
			break;
		buf++;
		if (priv->reg[0] & 0x01) {
			clear_epp_timeout(pp);
			break;
		}
	}
	change_mode(pp, ECR_PS2);
	return got;
}
/*
 * EPP data write: stream @length bytes over bulk endpoint 1 after
 * switching to EPP mode; returns the number of bytes the USB stack
 * actually transferred.  (A per-byte register-write fallback used to
 * live here under "#if 0"; it was dead code and contained invalid C —
 * "((char*)buf)++" — so it has been removed.)
 */
static size_t parport_uss720_epp_write_data(struct parport *pp, const void *buf, size_t length, int flags)
{
	struct parport_uss720_private *priv = pp->private_data;
	struct usb_device *usbdev = priv->usbdev;
	/* usb_bulk_msg() can fail before writing the actual length;
	 * initialize so the error printk and return value are defined */
	int rlen = 0;
	int i;

	if (!usbdev)
		return 0;
	if (change_mode(pp, ECR_EPP))
		return 0;
	i = usb_bulk_msg(usbdev, usb_sndbulkpipe(usbdev, 1), (void *)buf, length, &rlen, 20000);
	if (i)
		printk(KERN_ERR "uss720: sendbulk ep 1 buf %p len %Zu rlen %u\n", buf, length, rlen);
	change_mode(pp, ECR_PS2);
	return rlen;
}
/*
 * EPP address read, one byte per control transfer (register 3).
 * Same early-exit and EPP-timeout handling as the data variant;
 * returns the number of bytes read.
 */
static size_t parport_uss720_epp_read_addr(struct parport *pp, void *buf, size_t length, int flags)
{
	struct parport_uss720_private *priv = pp->private_data;
	size_t got = 0;

	if (change_mode(pp, ECR_EPP))
		return 0;
	for (; got < length; got++) {
		if (get_1284_register(pp, 3, (char *)buf, GFP_KERNEL))
			break;
		buf++;
		if (priv->reg[0] & 0x01) {
			clear_epp_timeout(pp);
			break;
		}
	}
	change_mode(pp, ECR_PS2);
	return got;
}

/*
 * EPP address write, one byte per control transfer (register 3), with
 * a status read after every byte to detect the EPP timeout condition.
 * Returns the number of bytes written.
 */
static size_t parport_uss720_epp_write_addr(struct parport *pp, const void *buf, size_t length, int flags)
{
	struct parport_uss720_private *priv = pp->private_data;
	size_t written = 0;

	if (change_mode(pp, ECR_EPP))
		return 0;
	for (; written < length; written++) {
		if (set_1284_register(pp, 3, *(char *)buf, GFP_KERNEL))
			break;
		buf++;
		if (get_1284_register(pp, 1, NULL, GFP_KERNEL))
			break;
		if (priv->reg[0] & 0x01) {
			clear_epp_timeout(pp);
			break;
		}
	}
	change_mode(pp, ECR_PS2);
	return written;
}
/*
 * ECP data write over bulk endpoint 1; returns the number of bytes
 * transferred and drops back to PS/2 mode afterwards.
 */
static size_t parport_uss720_ecp_write_data(struct parport *pp, const void *buffer, size_t len, int flags)
{
	struct parport_uss720_private *priv = pp->private_data;
	struct usb_device *usbdev = priv->usbdev;
	/* initialized: usb_bulk_msg() may fail before writing it */
	int rlen = 0;
	int i;

	if (!usbdev)
		return 0;
	if (change_mode(pp, ECR_ECP))
		return 0;
	i = usb_bulk_msg(usbdev, usb_sndbulkpipe(usbdev, 1), (void *)buffer, len, &rlen, 20000);
	if (i)
		printk(KERN_ERR "uss720: sendbulk ep 1 buf %p len %Zu rlen %u\n", buffer, len, rlen);
	change_mode(pp, ECR_PS2);
	return rlen;
}
/*
 * ECP data read over bulk endpoint 2; returns the number of bytes
 * received and drops back to PS/2 mode afterwards.
 */
static size_t parport_uss720_ecp_read_data(struct parport *pp, void *buffer, size_t len, int flags)
{
	struct parport_uss720_private *priv = pp->private_data;
	struct usb_device *usbdev = priv->usbdev;
	/* initialized: usb_bulk_msg() may fail before writing it */
	int rlen = 0;
	int i;

	if (!usbdev)
		return 0;
	if (change_mode(pp, ECR_ECP))
		return 0;
	i = usb_bulk_msg(usbdev, usb_rcvbulkpipe(usbdev, 2), buffer, len, &rlen, 20000);
	if (i)
		printk(KERN_ERR "uss720: recvbulk ep 2 buf %p len %Zu rlen %u\n", buffer, len, rlen);
	change_mode(pp, ECR_PS2);
	return rlen;
}
/*
 * ECP address (channel command) write, one byte per control transfer
 * (register 5).  Returns the number of bytes written.
 */
static size_t parport_uss720_ecp_write_addr(struct parport *pp, const void *buffer, size_t len, int flags)
{
	size_t written = 0;

	if (change_mode(pp, ECR_ECP))
		return 0;
	for (; written < len; written++) {
		if (set_1284_register(pp, 5, *(char *)buffer, GFP_KERNEL))
			break;
		buffer++;
	}
	change_mode(pp, ECR_PS2);
	return written;
}
/*
 * Compatibility (SPP FIFO) write over bulk endpoint 1; returns the
 * number of bytes transferred and drops back to PS/2 mode afterwards.
 */
static size_t parport_uss720_write_compat(struct parport *pp, const void *buffer, size_t len, int flags)
{
	struct parport_uss720_private *priv = pp->private_data;
	struct usb_device *usbdev = priv->usbdev;
	/* initialized: usb_bulk_msg() may fail before writing it */
	int rlen = 0;
	int i;

	if (!usbdev)
		return 0;
	if (change_mode(pp, ECR_PPF))
		return 0;
	i = usb_bulk_msg(usbdev, usb_sndbulkpipe(usbdev, 1), (void *)buffer, len, &rlen, 20000);
	if (i)
		printk(KERN_ERR "uss720: sendbulk ep 1 buf %p len %Zu rlen %u\n", buffer, len, rlen);
	change_mode(pp, ECR_PS2);
	return rlen;
}
/* --------------------------------------------------------------------- */
/*
 * parport operations table; nibble/byte reads use the generic
 * IEEE-1284 software implementations, everything else the USS720
 * register/bulk accessors above.
 */
static struct parport_operations parport_uss720_ops = 
{
	.owner =		THIS_MODULE,
	.write_data =		parport_uss720_write_data,
	.read_data =		parport_uss720_read_data,

	.write_control =	parport_uss720_write_control,
	.read_control =		parport_uss720_read_control,
	.frob_control =		parport_uss720_frob_control,

	.read_status =		parport_uss720_read_status,

	.enable_irq =		parport_uss720_enable_irq,
	.disable_irq =		parport_uss720_disable_irq,

	.data_forward =		parport_uss720_data_forward,
	.data_reverse =		parport_uss720_data_reverse,

	.init_state =		parport_uss720_init_state,
	.save_state =		parport_uss720_save_state,
	.restore_state =	parport_uss720_restore_state,

	.epp_write_data =	parport_uss720_epp_write_data,
	.epp_read_data =	parport_uss720_epp_read_data,
	.epp_write_addr =	parport_uss720_epp_write_addr,
	.epp_read_addr =	parport_uss720_epp_read_addr,

	.ecp_write_data =	parport_uss720_ecp_write_data,
	.ecp_read_data =	parport_uss720_ecp_read_data,
	.ecp_write_addr =	parport_uss720_ecp_write_addr,

	.compat_write_data =	parport_uss720_write_compat,
	.nibble_read_data =	parport_ieee1284_read_nibble,
	.byte_read_data =	parport_ieee1284_read_byte,
};
/* --------------------------------------------------------------------- */
static int uss720_probe(struct usb_interface *intf,
const struct usb_device_id *id)
{
struct usb_device *usbdev = usb_get_dev(interface_to_usbdev(intf));
struct usb_host_interface *interface;
struct usb_host_endpoint *endpoint;
struct parport_uss720_private *priv;
struct parport *pp;
unsigned char reg;
int i;
dbg("probe: vendor id 0x%x, device id 0x%x\n",
le16_to_cpu(usbdev->descriptor.idVendor),
le16_to_cpu(usbdev->descriptor.idProduct));
/* our known interfaces have 3 alternate settings */
if (intf->num_altsetting != 3) {
usb_put_dev(usbdev);
return -ENODEV;
}
i = usb_set_interface(usbdev, intf->altsetting->desc.bInterfaceNumber, 2);
dbg("set inteface result %d", i);
interface = intf->cur_altsetting;
/*
* Allocate parport interface
*/
if (!(priv = kzalloc(sizeof(struct parport_uss720_private), GFP_KERNEL))) {
usb_put_dev(usbdev);
return -ENOMEM;
}
priv->pp = NULL;
priv->usbdev = usbdev;
kref_init(&priv->ref_count);
spin_lock_init(&priv->asynclock);
INIT_LIST_HEAD(&priv->asynclist);
if (!(pp = parport_register_port(0, PARPORT_IRQ_NONE, PARPORT_DMA_NONE, &parport_uss720_ops))) {
printk(KERN_WARNING "uss720: could not register parport\n");
goto probe_abort;
}
priv->pp = pp;
pp->private_data = priv;
pp->modes = PARPORT_MODE_PCSPP | PARPORT_MODE_TRISTATE | PARPORT_MODE_EPP | PARPORT_MODE_ECP | PARPORT_MODE_COMPAT;
/* set the USS720 control register to manual mode, no ECP compression, enable all ints */
set_1284_register(pp, 7, 0x00, GFP_KERNEL);
set_1284_register(pp, 6, 0x30, GFP_KERNEL); /* PS/2 mode */
set_1284_register(pp, 2, 0x0c, GFP_KERNEL);
/* debugging */
get_1284_register(pp, 0, ®, GFP_KERNEL);
dbg("reg: %02x %02x %02x %02x %02x %02x %02x",
priv->reg[0], priv->reg[1], priv->reg[2], priv->reg[3], priv->reg[4], priv->reg[5], priv->reg[6]);
endpoint = &interface->endpoint[2];
dbg("epaddr %d interval %d", endpoint->desc.bEndpointAddress, endpoint->desc.bInterval);
parport_announce_port(pp);
usb_set_intfdata(intf, pp);
return 0;
probe_abort:
kill_all_async_requests_priv(priv);
kref_put(&priv->ref_count, destroy_priv);
return -ENODEV;
}
/*
 * Disconnect: detach the parport, clear priv->usbdev so in-flight
 * accessors bail out, cancel pending async requests and drop the
 * probe reference on priv (destroy_priv releases the USB device).
 */
static void uss720_disconnect(struct usb_interface *intf)
{
	struct parport *pp = usb_get_intfdata(intf);
	struct parport_uss720_private *priv;
	struct usb_device *usbdev;

	dbg("disconnect");
	usb_set_intfdata(intf, NULL);
	if (pp) {
		priv = pp->private_data;
		usbdev = priv->usbdev;
		priv->usbdev = NULL;
		priv->pp = NULL;
		dbg("parport_remove_port");
		parport_remove_port(pp);
		parport_put_port(pp);
		kill_all_async_requests_priv(priv);
		kref_put(&priv->ref_count, destroy_priv);
	}
	dbg("disconnect done");
}

/* table of cables that work through this driver */
static const struct usb_device_id uss720_table [] = {
	{ USB_DEVICE(0x047e, 0x1001) },
	{ USB_DEVICE(0x0557, 0x2001) },
	{ USB_DEVICE(0x0729, 0x1284) },
	{ USB_DEVICE(0x1293, 0x0002) },
	{ USB_DEVICE(0x050d, 0x0002) },
	{ }						/* Terminating entry */
};

MODULE_DEVICE_TABLE (usb, uss720_table);


static struct usb_driver uss720_driver = {
	.name =		"uss720",
	.probe =	uss720_probe,
	.disconnect =	uss720_disconnect,
	.id_table =	uss720_table,
};

/* --------------------------------------------------------------------- */

MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");
/*
 * Module init: register the USB driver, then print the version and
 * usage banner.  Returns usb_register()'s error code on failure.
 */
static int __init uss720_init(void)
{
	int retval = usb_register(&uss720_driver);

	if (retval)
		return retval;

	printk(KERN_INFO KBUILD_MODNAME ": " DRIVER_VERSION ":"
	       DRIVER_DESC "\n");
	printk(KERN_INFO KBUILD_MODNAME ": NOTE: this is a special purpose "
	       "driver to allow nonstandard\n");
	printk(KERN_INFO KBUILD_MODNAME ": protocols (eg. bitbang) over "
	       "USS720 usb to parallel cables\n");
	printk(KERN_INFO KBUILD_MODNAME ": If you just want to connect to a "
	       "printer, use usblp instead\n");

	return retval;
}
/* Module exit: unregister the USB driver (disconnect runs per device). */
static void __exit uss720_cleanup(void)
{
	usb_deregister(&uss720_driver);
}

module_init(uss720_init);
module_exit(uss720_cleanup);

/* --------------------------------------------------------------------- */
| gpl-2.0 |
classicsong/Range-Lock | drivers/media/dvb/frontends/stv0900_sw.c | 9095 | 51504 | /*
* stv0900_sw.c
*
* Driver for ST STV0900 satellite demodulator IC.
*
* Copyright (C) ST Microelectronics.
* Copyright (C) 2009 NetUP Inc.
* Copyright (C) 2009 Igor M. Liplianin <liplianin@netup.ru>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
*
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include "stv0900.h"
#include "stv0900_reg.h"
#include "stv0900_priv.h"
/*
 * Subtract @shift from @x when @demod is 1; otherwise return @x unchanged.
 * Used to adjust values for the second demodulator path.
 */
s32 shiftx(s32 x, int demod, s32 shift)
{
	return (demod == 1) ? (x - shift) : x;
}
/*
 * Heuristic "no signal" check: returns TRUE when the AGC2 integrator is
 * too high (0x2000) or the measured carrier offset lies outside twice the
 * configured search range; FALSE otherwise.
 */
int stv0900_check_signal_presence(struct stv0900_internal *intp,
					enum fe_stv0900_demod_num demod)
{
	s32 offset, agc2, limit;

	/* 16-bit signed carrier offset from CFR2/CFR1 */
	offset = (stv0900_read_reg(intp, CFR2) << 8) |
		 stv0900_read_reg(intp, CFR1);
	offset = ge2comp(offset, 16);

	agc2 = (stv0900_read_reg(intp, AGC2I1) << 8) |
	       stv0900_read_reg(intp, AGC2I0);

	/* search range (kHz) + 10% margin, converted to CFR register units
	 * and clamped to the hardware maximum */
	limit = intp->srch_range[demod] / 1000;
	limit += (limit / 10);
	limit = 65536 * (limit / 2);
	limit /= intp->mclk / 1000;
	if (limit > 0x4000)
		limit = 0x4000;

	if ((agc2 > 0x2000) || (offset > (2 * limit)) ||
	    (offset < (-2 * limit)))
		return TRUE;

	return FALSE;
}
/*
 * Compute the parameters for the software zig-zag carrier search:
 * the per-step frequency increment, the per-step lock timeout (ms),
 * and the maximum number of steps.  All clamped to sane bounds.
 */
static void stv0900_get_sw_loop_params(struct stv0900_internal *intp,
				s32 *frequency_inc, s32 *sw_timeout,
				s32 *steps,
				enum fe_stv0900_demod_num demod)
{
	s32 step_tout, finc, step_max, sr, carr_max;

	sr = intp->symbol_rate[demod];

	/* search range (kHz) + 10% margin in CFR register units, clamped */
	carr_max = intp->srch_range[demod] / 1000;
	carr_max += carr_max / 10;
	carr_max = 65536 * (carr_max / 2);
	carr_max /= intp->mclk / 1000;
	if (carr_max > 0x4000)
		carr_max = 0x4000;

	/* base increment proportional to symbol rate / mclk */
	finc = sr;
	finc /= intp->mclk >> 10;
	finc = finc << 6;

	/* standard-specific scaling of increment and timeout */
	switch (intp->srch_standard[demod]) {
	case STV0900_SEARCH_DVBS2:
		finc *= 4;
		step_tout = 25;
		break;
	case STV0900_SEARCH_DVBS1:
	case STV0900_SEARCH_DSS:
		finc *= 3;
		step_tout = 20;
		break;
	case STV0900_AUTO_SEARCH:
	default:
		finc *= 3;
		step_tout = 25;
		break;
	}

	finc /= 100;
	if ((finc > carr_max) || (finc < 0))
		finc = carr_max / 2;

	/* timeout scaled by 27.5M / srate, clamped to [0, 100] ms */
	step_tout *= 27500;
	if (sr > 0)
		step_tout /= sr / 1000;
	if ((step_tout > 100) || (step_tout < 0))
		step_tout = 100;

	step_max = (carr_max / finc) + 1;
	if ((step_max > 100) || (step_max < 0)) {
		step_max = 100;
		finc = carr_max / step_max;
	}

	*frequency_inc = finc;
	*sw_timeout = step_tout;
	*steps = step_max;
}
/*
 * Step the carrier-offset init value (CFRINIT) across the search range,
 * restarting the demodulator at each step, until it locks, a no-signal
 * condition is detected, the range is exhausted, or MaxStep is reached.
 * Returns the final lock flag.
 *
 * zigzag == TRUE alternates the offset around 0, widening each pass;
 * otherwise the scan sweeps linearly from -max_carrier upwards.
 */
static int stv0900_search_carr_sw_loop(struct stv0900_internal *intp,
				s32 FreqIncr, s32 Timeout, int zigzag,
				s32 MaxStep, enum fe_stv0900_demod_num demod)
{
	int no_signal,
	lock = FALSE;
	s32 stepCpt,
	freqOffset,
	max_carrier;

	/* search range (kHz) + 10% margin, converted to CFR register units
	 * (65536 / mclk-in-kHz), clamped to the hardware maximum */
	max_carrier = intp->srch_range[demod] / 1000;
	max_carrier += (max_carrier / 10);
	max_carrier = 65536 * (max_carrier / 2);
	max_carrier /= intp->mclk / 1000;
	if (max_carrier > 0x4000)
		max_carrier = 0x4000;

	if (zigzag == TRUE)
		freqOffset = 0;
	else
		freqOffset = -max_carrier + FreqIncr;

	stepCpt = 0;

	do {
		/* stop the demod, program the new carrier offset, restart */
		stv0900_write_reg(intp, DMDISTATE, 0x1c);
		stv0900_write_reg(intp, CFRINIT1, (freqOffset / 256) & 0xff);
		stv0900_write_reg(intp, CFRINIT0, freqOffset & 0xff);
		stv0900_write_reg(intp, DMDISTATE, 0x18);
		stv0900_write_bits(intp, ALGOSWRST, 1);

		if (intp->chip_id == 0x12) {
			/* chip cut 0x12 apparently needs an extra hardware
			 * reset pulse here — kept as in the original */
			stv0900_write_bits(intp, RST_HWARE, 1);
			stv0900_write_bits(intp, RST_HWARE, 0);
		}

		if (zigzag == TRUE) {
			/* alternate +/- around zero, widening each pass */
			if (freqOffset >= 0)
				freqOffset = -freqOffset - 2 * FreqIncr;
			else
				freqOffset = -freqOffset;
		} else
			freqOffset += + 2 * FreqIncr;

		stepCpt++;

		lock = stv0900_get_demod_lock(intp, demod, Timeout);
		no_signal = stv0900_check_signal_presence(intp, demod);

	} while ((lock == FALSE)
		&& (no_signal == FALSE)
		&& ((freqOffset - FreqIncr) < max_carrier)
		&& ((freqOffset + FreqIncr) > -max_carrier)
		&& (stepCpt < MaxStep));

	stv0900_write_bits(intp, ALGOSWRST, 0);

	return lock;
}
/*
 * Software (zig-zag) acquisition algorithm: configure carrier/correlator
 * registers for the requested standard, then run up to two trials of the
 * stepped carrier search.  For a DVB-S2 hit, the FLYWHEEL_CPT counter is
 * re-checked to confirm the lock is genuine; a weak flywheel count forces
 * a second trial.  Returns the lock flag.
 */
static int stv0900_sw_algo(struct stv0900_internal *intp,
				enum fe_stv0900_demod_num demod)
{
	int lock = FALSE,
	no_signal,
	zigzag;
	s32 s2fw,
	fqc_inc,
	sft_stp_tout,
	trial_cntr,
	max_steps;

	stv0900_get_sw_loop_params(intp, &fqc_inc, &sft_stp_tout,
					&max_steps, demod);
	/* register setup per standard; values differ for chip cuts >= 0x20 */
	switch (intp->srch_standard[demod]) {
	case STV0900_SEARCH_DVBS1:
	case STV0900_SEARCH_DSS:
		if (intp->chip_id >= 0x20)
			stv0900_write_reg(intp, CARFREQ, 0x3b);
		else
			stv0900_write_reg(intp, CARFREQ, 0xef);

		stv0900_write_reg(intp, DMDCFGMD, 0x49);
		zigzag = FALSE;	/* linear sweep for DVB-S1/DSS */
		break;
	case STV0900_SEARCH_DVBS2:
		if (intp->chip_id >= 0x20)
			stv0900_write_reg(intp, CORRELABS, 0x79);
		else
			stv0900_write_reg(intp, CORRELABS, 0x68);

		stv0900_write_reg(intp, DMDCFGMD, 0x89);
		zigzag = TRUE;	/* zig-zag sweep for DVB-S2 */
		break;
	case STV0900_AUTO_SEARCH:
	default:
		if (intp->chip_id >= 0x20) {
			stv0900_write_reg(intp, CARFREQ, 0x3b);
			stv0900_write_reg(intp, CORRELABS, 0x79);
		} else {
			stv0900_write_reg(intp, CARFREQ, 0xef);
			stv0900_write_reg(intp, CORRELABS, 0x68);
		}

		stv0900_write_reg(intp, DMDCFGMD, 0xc9);
		zigzag = FALSE;
		break;
	}

	trial_cntr = 0;
	do {
		lock = stv0900_search_carr_sw_loop(intp,
						fqc_inc,
						sft_stp_tout,
						zigzag,
						max_steps,
						demod);
		no_signal = stv0900_check_signal_presence(intp, demod);
		trial_cntr++;
		if ((lock == TRUE)
				|| (no_signal == TRUE)
				|| (trial_cntr == 2)) {
			/* restore tracking register values */
			if (intp->chip_id >= 0x20) {
				stv0900_write_reg(intp, CARFREQ, 0x49);
				stv0900_write_reg(intp, CORRELABS, 0x9e);
			} else {
				stv0900_write_reg(intp, CARFREQ, 0xed);
				stv0900_write_reg(intp, CORRELABS, 0x88);
			}

			if ((stv0900_get_bits(intp, HEADER_MODE) ==
						STV0900_DVBS2_FOUND) &&
					(lock == TRUE)) {
				/* verify a DVB-S2 lock via the frame
				 * flywheel counter; re-read once after a
				 * second wait before giving up */
				msleep(sft_stp_tout);
				s2fw = stv0900_get_bits(intp, FLYWHEEL_CPT);

				if (s2fw < 0xd) {
					msleep(sft_stp_tout);
					s2fw = stv0900_get_bits(intp,
								FLYWHEEL_CPT);
				}

				if (s2fw < 0xd) {
					/* false lock: re-arm DVB-S2 search
					 * registers for the second trial */
					lock = FALSE;

					if (trial_cntr < 2) {
						if (intp->chip_id >= 0x20)
							stv0900_write_reg(intp,
								CORRELABS,
								0x79);
						else
							stv0900_write_reg(intp,
								CORRELABS,
								0x68);

						stv0900_write_reg(intp,
								DMDCFGMD,
								0x89);
					}
				}
			}
		}

	} while ((lock == FALSE)
		&& (trial_cntr < 2)
		&& (no_signal == FALSE));

	return lock;
}
/*
 * Read the found symbol rate from SYMB_FREQ3..0 and convert it to
 * symbols/s: the register value is a fraction of mclk, so the result is
 * (mclk * reg) / 2^32, computed with a 16x16 split multiply to stay
 * within 32-bit arithmetic.
 */
static u32 stv0900_get_symbol_rate(struct stv0900_internal *intp,
					u32 mclk,
					enum fe_stv0900_demod_num demod)
{
	s32 sfr, mclk_hi, mclk_lo, sfr_hi, sfr_lo;

	sfr = (stv0900_get_bits(intp, SYMB_FREQ3) << 24) +
	      (stv0900_get_bits(intp, SYMB_FREQ2) << 16) +
	      (stv0900_get_bits(intp, SYMB_FREQ1) << 8) +
	      (stv0900_get_bits(intp, SYMB_FREQ0));
	dprintk("lock: srate=%d r0=0x%x r1=0x%x r2=0x%x r3=0x%x \n",
		sfr, stv0900_get_bits(intp, SYMB_FREQ0),
		stv0900_get_bits(intp, SYMB_FREQ1),
		stv0900_get_bits(intp, SYMB_FREQ2),
		stv0900_get_bits(intp, SYMB_FREQ3));

	mclk_hi = (mclk) >> 16;
	sfr_hi = (sfr) >> 16;
	mclk_lo = (mclk) % 0x10000;
	sfr_lo = (sfr) % 0x10000;

	return (mclk_hi * sfr_hi) +
	       ((mclk_hi * sfr_lo) >> 16) +
	       ((sfr_hi * mclk_lo) >> 16);
}
/*
 * Program the initial symbol rate (SFRINIT): the register takes
 * srate * 2^16 / mclk, with the pre/post shift pair chosen per rate band
 * so the intermediate value fits in 32 bits.
 */
static void stv0900_set_symbol_rate(struct stv0900_internal *intp,
					u32 mclk, u32 srate,
					enum fe_stv0900_demod_num demod)
{
	u32 sfr;

	dprintk("%s: Mclk %d, SR %d, Dmd %d\n", __func__, mclk,
							srate, demod);

	if (srate > 60000000)
		sfr = (srate << 4) / (mclk >> 12);
	else if (srate > 6000000)
		sfr = (srate << 6) / (mclk >> 10);
	else
		sfr = (srate << 9) / (mclk >> 7);

	stv0900_write_reg(intp, SFRINIT1, (sfr >> 8) & 0x7f);
	stv0900_write_reg(intp, SFRINIT1 + 1, (sfr & 0xff));
}
/*
 * Program the upper symbol-rate limit (SFRUP) to srate + 5%, saturating
 * at the register maximum (0x7fff).
 */
static void stv0900_set_max_symbol_rate(struct stv0900_internal *intp,
					u32 mclk, u32 srate,
					enum fe_stv0900_demod_num demod)
{
	u32 sfr;

	srate = 105 * (srate / 100);	/* +5% margin */

	if (srate > 60000000)
		sfr = (srate << 4) / (mclk >> 12);
	else if (srate > 6000000)
		sfr = (srate << 6) / (mclk >> 10);
	else
		sfr = (srate << 9) / (mclk >> 7);

	if (sfr < 0x7fff) {
		stv0900_write_reg(intp, SFRUP1, (sfr >> 8) & 0x7f);
		stv0900_write_reg(intp, SFRUP1 + 1, (sfr & 0xff));
	} else {
		stv0900_write_reg(intp, SFRUP1, 0x7f);
		stv0900_write_reg(intp, SFRUP1 + 1, 0xff);
	}
}
/*
 * Program the lower symbol-rate limit (SFRLOW) to srate - 5%, using the
 * same banded fixed-point conversion as the other SFR registers.
 */
static void stv0900_set_min_symbol_rate(struct stv0900_internal *intp,
					u32 mclk, u32 srate,
					enum fe_stv0900_demod_num demod)
{
	u32 sfr;

	srate = 95 * (srate / 100);	/* -5% margin */

	if (srate > 60000000)
		sfr = (srate << 4) / (mclk >> 12);
	else if (srate > 6000000)
		sfr = (srate << 6) / (mclk >> 10);
	else
		sfr = (srate << 9) / (mclk >> 7);

	stv0900_write_reg(intp, SFRLOW1, (sfr >> 8) & 0xff);
	stv0900_write_reg(intp, SFRLOW1 + 1, (sfr & 0xff));
}
/*
 * Read the 24-bit signed timing-loop offset (TMGREG2..TMGREG2+2) and
 * scale it to a symbol-rate correction value for @srate.
 */
static s32 stv0900_get_timing_offst(struct stv0900_internal *intp,
					u32 srate,
					enum fe_stv0900_demod_num demod)
{
	s32 tmg_ofs;

	tmg_ofs = (stv0900_read_reg(intp, TMGREG2) << 16) +
		  (stv0900_read_reg(intp, TMGREG2 + 1) << 8) +
		  (stv0900_read_reg(intp, TMGREG2 + 2));
	tmg_ofs = ge2comp(tmg_ofs, 24);

	if (tmg_ofs == 0)
		tmg_ofs = 1;	/* avoid division by zero below */

	tmg_ofs = ((s32)srate * 10) / ((s32)0x1000000 / tmg_ofs);
	tmg_ofs /= 320;

	return tmg_ofs;
}
/*
 * Configure roll-off handling for DVB-S2 according to the chip cut:
 * cut 1.0 forces manual roll-off taken from MATSTR1, later cuts let the
 * hardware derive it automatically.
 */
static void stv0900_set_dvbs2_rolloff(struct stv0900_internal *intp,
					enum fe_stv0900_demod_num demod)
{
	s32 rolloff;

	if (intp->chip_id == 0x10) {
		stv0900_write_bits(intp, MANUALSX_ROLLOFF, 1);
		rolloff = stv0900_read_reg(intp, MATSTR1) & 0x03;
		stv0900_write_bits(intp, ROLLOFF_CONTROL, rolloff);
		return;
	}

	if (intp->chip_id <= 0x20)
		stv0900_write_bits(intp, MANUALSX_ROLLOFF, 0);
	else /* cut 3.0 */
		stv0900_write_bits(intp, MANUALS2_ROLLOFF, 0);
}
/*
 * Occupied carrier bandwidth for @srate with roll-off @ro:
 * srate * (1 + rolloff), where rolloff is 20%, 25% or 35% (default).
 */
static u32 stv0900_carrier_width(u32 srate, enum fe_stv0900_rolloff ro)
{
	u32 ro_pct;

	if (ro == STV0900_20)
		ro_pct = 20;
	else if (ro == STV0900_25)
		ro_pct = 25;
	else
		ro_pct = 35;	/* STV0900_35 and anything unexpected */

	return srate + (srate * ro_pct) / 100;
}
/*
 * Probe whether the timing loop can lock with the carrier loop frozen:
 * temporarily reprogram the timing thresholds and carrier registers,
 * sample TMGLOCK_QUALITY ten times, then restore everything.  Returns
 * TRUE when at least 3 of the 10 samples report quality >= 2.
 */
static int stv0900_check_timing_lock(struct stv0900_internal *intp,
				enum fe_stv0900_demod_num demod)
{
	int timingLock = FALSE;
	s32 i,
	timingcpt = 0;
	u8 car_freq,
	tmg_th_high,
	tmg_th_low;

	/* save the registers this probe clobbers */
	car_freq = stv0900_read_reg(intp, CARFREQ);
	tmg_th_high = stv0900_read_reg(intp, TMGTHRISE);
	tmg_th_low = stv0900_read_reg(intp, TMGTHFALL);

	/* widen timing thresholds, disable carrier scan, zero the carrier
	 * loop and offset so only the timing loop is exercised */
	stv0900_write_reg(intp, TMGTHRISE, 0x20);
	stv0900_write_reg(intp, TMGTHFALL, 0x0);
	stv0900_write_bits(intp, CFR_AUTOSCAN, 0);
	stv0900_write_reg(intp, RTC, 0x80);
	stv0900_write_reg(intp, RTCS2, 0x40);
	stv0900_write_reg(intp, CARFREQ, 0x0);
	stv0900_write_reg(intp, CFRINIT1, 0x0);
	stv0900_write_reg(intp, CFRINIT0, 0x0);
	stv0900_write_reg(intp, AGC2REF, 0x65);
	stv0900_write_reg(intp, DMDISTATE, 0x18);	/* restart acquisition */
	msleep(7);

	for (i = 0; i < 10; i++) {
		if (stv0900_get_bits(intp, TMGLOCK_QUALITY) >= 2)
			timingcpt++;

		msleep(1);
	}

	if (timingcpt >= 3)
		timingLock = TRUE;

	/* restore normal operation */
	stv0900_write_reg(intp, AGC2REF, 0x38);
	stv0900_write_reg(intp, RTC, 0x88);
	stv0900_write_reg(intp, RTCS2, 0x68);
	stv0900_write_reg(intp, CARFREQ, car_freq);
	stv0900_write_reg(intp, TMGTHRISE, tmg_th_high);
	stv0900_write_reg(intp, TMGTHFALL, tmg_th_low);

	return timingLock;
}
/*
 * Cold-start lock: first wait part of @demod_timeout for a direct lock.
 * If that fails and the symbol rate is high (>= 10 Msym/s), verify timing
 * lock and retry once.  For low symbol rates, perform a zig-zag frequency
 * scan around the nominal frequency — retuning the tuner on cuts <= 0x20,
 * or stepping the demod carrier init registers on cut 3.0 — until the
 * demod locks or the step budget runs out.  Returns the lock flag.
 */
static int stv0900_get_demod_cold_lock(struct dvb_frontend *fe,
					s32 demod_timeout)
{
	struct stv0900_state *state = fe->demodulator_priv;
	struct stv0900_internal *intp = state->internal;
	enum fe_stv0900_demod_num demod = state->demod;
	int lock = FALSE,
	d = demod;
	s32 srate,
	search_range,
	locktimeout,
	currier_step,	/* carrier step size in kHz (sic: "currier") */
	nb_steps,
	current_step,
	direction,
	tuner_freq,
	timeout,
	freq;

	srate = intp->symbol_rate[d];
	search_range = intp->srch_range[d];

	/* initial wait: shorter fraction of the budget for fast rates */
	if (srate >= 10000000)
		locktimeout = demod_timeout / 3;
	else
		locktimeout = demod_timeout / 2;

	lock = stv0900_get_demod_lock(intp, d, locktimeout);

	if (lock != FALSE)
		return lock;

	if (srate >= 10000000) {
		/* high rates: no frequency scan — if timing locks, restart
		 * the demod once and wait the full budget */
		if (stv0900_check_timing_lock(intp, d) == TRUE) {
			stv0900_write_reg(intp, DMDISTATE, 0x1f);
			stv0900_write_reg(intp, DMDISTATE, 0x15);
			lock = stv0900_get_demod_lock(intp, d, demod_timeout);
		} else
			lock = FALSE;

		return lock;
	}

	/* choose step size and per-step timeout by chip cut and rate */
	if (intp->chip_id <= 0x20) {
		if (srate <= 1000000)
			currier_step = 500;
		else if (srate <= 4000000)
			currier_step = 1000;
		else if (srate <= 7000000)
			currier_step = 2000;
		else if (srate <= 10000000)
			currier_step = 3000;
		else
			currier_step = 5000;

		if (srate >= 2000000) {
			timeout = (demod_timeout / 3);
			if (timeout > 1000)
				timeout = 1000;
		} else
			timeout = (demod_timeout / 2);
	} else {
		/*cut 3.0 */
		currier_step = srate / 4000;
		timeout = (demod_timeout * 3) / 4;
	}

	/* even number of steps, clamped to [2, 12] */
	nb_steps = ((search_range / 1000) / currier_step);

	if ((nb_steps % 2) != 0)
		nb_steps += 1;

	if (nb_steps <= 0)
		nb_steps = 2;
	else if (nb_steps > 12)
		nb_steps = 12;

	current_step = 1;
	direction = 1;

	if (intp->chip_id <= 0x20) {
		tuner_freq = intp->freq[d];
		intp->bw[d] = stv0900_carrier_width(intp->symbol_rate[d],
				intp->rolloff) + intp->symbol_rate[d];
	} else
		tuner_freq = 0;	/* cut 3.0 steps the demod, not the tuner */

	/* zig-zag: +1, -2, +3, ... steps around the nominal frequency */
	while ((current_step <= nb_steps) && (lock == FALSE)) {
		if (direction > 0)
			tuner_freq += (current_step * currier_step);
		else
			tuner_freq -= (current_step * currier_step);

		if (intp->chip_id <= 0x20) {
			/* retune the tuner, then restart acquisition */
			if (intp->tuner_type[d] == 3)
				stv0900_set_tuner_auto(intp, tuner_freq,
						intp->bw[d], demod);
			else
				stv0900_set_tuner(fe, tuner_freq, intp->bw[d]);

			stv0900_write_reg(intp, DMDISTATE, 0x1c);
			stv0900_write_reg(intp, CFRINIT1, 0);
			stv0900_write_reg(intp, CFRINIT0, 0);
			stv0900_write_reg(intp, DMDISTATE, 0x1f);
			stv0900_write_reg(intp, DMDISTATE, 0x15);
		} else {
			/* cut 3.0: program the carrier offset directly */
			stv0900_write_reg(intp, DMDISTATE, 0x1c);
			freq = (tuner_freq * 65536) / (intp->mclk / 1000);
			stv0900_write_bits(intp, CFR_INIT1, MSB(freq));
			stv0900_write_bits(intp, CFR_INIT0, LSB(freq));
			stv0900_write_reg(intp, DMDISTATE, 0x1f);
			stv0900_write_reg(intp, DMDISTATE, 0x05);
		}

		lock = stv0900_get_demod_lock(intp, d, timeout);
		direction *= -1;
		current_step++;
	}

	return lock;
}
/*
 * Pick demod and FEC lock timeouts (ms) as a function of symbol rate and
 * search algorithm; warm start halves the demod timeout.
 */
static void stv0900_get_lock_timeout(s32 *demod_timeout, s32 *fec_timeout,
					s32 srate,
					enum fe_stv0900_search_algo algo)
{
	s32 dmd_tout, fec_tout;

	if (algo == STV0900_BLIND_SEARCH) {
		if (srate <= 1500000) {
			dmd_tout = 1500;
			fec_tout = 400;
		} else if (srate <= 5000000) {
			dmd_tout = 1000;
			fec_tout = 300;
		} else {
			dmd_tout = 700;
			fec_tout = 100;
		}
	} else {
		/* STV0900_COLD_START, STV0900_WARM_START and anything else */
		if (srate <= 1000000) {
			dmd_tout = 3000;
			fec_tout = 1700;
		} else if (srate <= 2000000) {
			dmd_tout = 2500;
			fec_tout = 1100;
		} else if (srate <= 5000000) {
			dmd_tout = 1000;
			fec_tout = 550;
		} else if (srate <= 10000000) {
			dmd_tout = 700;
			fec_tout = 250;
		} else if (srate <= 20000000) {
			dmd_tout = 400;
			fec_tout = 130;
		} else {
			dmd_tout = 300;
			fec_tout = 100;
		}
	}

	if (algo == STV0900_WARM_START)
		dmd_tout /= 2;

	*demod_timeout = dmd_tout;
	*fec_timeout = fec_tout;
}
static void stv0900_set_viterbi_tracq(struct stv0900_internal *intp,
enum fe_stv0900_demod_num demod)
{
s32 vth_reg = VTH12;
dprintk("%s\n", __func__);
stv0900_write_reg(intp, vth_reg++, 0xd0);
stv0900_write_reg(intp, vth_reg++, 0x7d);
stv0900_write_reg(intp, vth_reg++, 0x53);
stv0900_write_reg(intp, vth_reg++, 0x2f);
stv0900_write_reg(intp, vth_reg++, 0x24);
stv0900_write_reg(intp, vth_reg++, 0x1f);
}
/*
 * Configure the Viterbi decoder for the requested search standard:
 * FECM selects the framing (auto / DVB-S1 / DSS) and PRVIT is a bitmask
 * of the puncture rates the decoder may try; a known FEC narrows PRVIT
 * to that single rate.
 */
static void stv0900_set_viterbi_standard(struct stv0900_internal *intp,
				enum fe_stv0900_search_standard standard,
				enum fe_stv0900_fec fec,
				enum fe_stv0900_demod_num demod)
{
	dprintk("%s: ViterbiStandard = ", __func__);

	switch (standard) {
	case STV0900_AUTO_SEARCH:
		dprintk("Auto\n");
		stv0900_write_reg(intp, FECM, 0x10);	/* auto DVB-S1/DSS */
		stv0900_write_reg(intp, PRVIT, 0x3f);	/* all rates enabled */
		break;
	case STV0900_SEARCH_DVBS1:
		dprintk("DVBS1\n");
		stv0900_write_reg(intp, FECM, 0x00);	/* DVB-S1 framing */
		switch (fec) {
		case STV0900_FEC_UNKNOWN:
		default:
			stv0900_write_reg(intp, PRVIT, 0x2f);
			break;
		case STV0900_FEC_1_2:
			stv0900_write_reg(intp, PRVIT, 0x01);
			break;
		case STV0900_FEC_2_3:
			stv0900_write_reg(intp, PRVIT, 0x02);
			break;
		case STV0900_FEC_3_4:
			stv0900_write_reg(intp, PRVIT, 0x04);
			break;
		case STV0900_FEC_5_6:
			stv0900_write_reg(intp, PRVIT, 0x08);
			break;
		case STV0900_FEC_7_8:
			stv0900_write_reg(intp, PRVIT, 0x20);
			break;
		}

		break;
	case STV0900_SEARCH_DSS:
		dprintk("DSS\n");
		stv0900_write_reg(intp, FECM, 0x80);	/* DSS framing */
		switch (fec) {
		case STV0900_FEC_UNKNOWN:
		default:
			stv0900_write_reg(intp, PRVIT, 0x13);
			break;
		case STV0900_FEC_1_2:
			stv0900_write_reg(intp, PRVIT, 0x01);
			break;
		case STV0900_FEC_2_3:
			stv0900_write_reg(intp, PRVIT, 0x02);
			break;
		case STV0900_FEC_6_7:
			stv0900_write_reg(intp, PRVIT, 0x10);
			break;
		}

		break;
	default:
		break;
	}
}
/*
 * Map the Viterbi current-puncture-rate field (VIT_CURPUN) to the
 * corresponding FEC enum; unknown field values yield STV0900_FEC_UNKNOWN.
 */
static enum fe_stv0900_fec stv0900_get_vit_fec(struct stv0900_internal *intp,
						enum fe_stv0900_demod_num demod)
{
	s32 rate_fld = stv0900_get_bits(intp, VIT_CURPUN);

	switch (rate_fld) {
	case 13:
		return STV0900_FEC_1_2;
	case 18:
		return STV0900_FEC_2_3;
	case 21:
		return STV0900_FEC_3_4;
	case 24:
		return STV0900_FEC_5_6;
	case 25:
		return STV0900_FEC_6_7;
	case 26:
		return STV0900_FEC_7_8;
	default:
		return STV0900_FEC_UNKNOWN;
	}
}
/*
 * Set the DVB-S1 carrier-loop coefficients (ACLC/BCLC): cut 3.0 selects
 * them per symbol-rate band, earlier cuts use one fixed pair.
 */
static void stv0900_set_dvbs1_track_car_loop(struct stv0900_internal *intp,
					enum fe_stv0900_demod_num demod,
					u32 srate)
{
	u8 aclc_val, bclc_val;

	if (intp->chip_id >= 0x30) {
		if (srate >= 15000000) {
			aclc_val = 0x2b;
			bclc_val = 0x1a;
		} else if (srate >= 7000000) {
			aclc_val = 0x0c;
			bclc_val = 0x1b;
		} else {
			aclc_val = 0x2c;
			bclc_val = 0x1c;
		}
	} else { /*cut 2.0 and 1.x*/
		aclc_val = 0x1a;
		bclc_val = 0x09;
	}

	stv0900_write_reg(intp, ACLC, aclc_val);
	stv0900_write_reg(intp, BCLC, bclc_val);
}
/*
 * After acquisition, reprogram the demodulator for optimal tracking of
 * the standard that was found (DVB-S1/DSS, DVB-S2 or unknown): restrict
 * the enabled standards, set carrier-loop coefficients (per modcode for
 * long-frame DVB-S2), re-center the carrier frequency, and — for blind
 * search — freeze scanning on the measured symbol rate.  If the demod
 * then fails to re-lock, the carrier init is retried a few times.
 */
static void stv0900_track_optimization(struct dvb_frontend *fe)
{
	struct stv0900_state *state = fe->demodulator_priv;
	struct stv0900_internal *intp = state->internal;
	enum fe_stv0900_demod_num demod = state->demod;

	s32 srate,
	pilots,
	aclc,
	freq1,
	freq0,
	i = 0,
	timed,
	timef,
	blind_tun_sw = 0,
	modulation;

	enum fe_stv0900_rolloff rolloff;
	enum fe_stv0900_modcode foundModcod;

	dprintk("%s\n", __func__);

	/* measured symbol rate corrected by the residual timing offset */
	srate = stv0900_get_symbol_rate(intp, intp->mclk, demod);
	srate += stv0900_get_timing_offst(intp, srate, demod);

	switch (intp->result[demod].standard) {
	case STV0900_DVBS1_STANDARD:
	case STV0900_DSS_STANDARD:
		dprintk("%s: found DVB-S or DSS\n", __func__);
		if (intp->srch_standard[demod] == STV0900_AUTO_SEARCH) {
			/* lock onto DVB-S1 only from now on */
			stv0900_write_bits(intp, DVBS1_ENABLE, 1);
			stv0900_write_bits(intp, DVBS2_ENABLE, 0);
		}

		stv0900_write_bits(intp, ROLLOFF_CONTROL, intp->rolloff);
		stv0900_write_bits(intp, MANUALSX_ROLLOFF, 1);

		if (intp->chip_id < 0x30) {
			stv0900_write_reg(intp, ERRCTRL1, 0x75);
			break;
		}

		/* cut 3.0: extra filter setup depending on Viterbi rate */
		if (stv0900_get_vit_fec(intp, demod) == STV0900_FEC_1_2) {
			stv0900_write_reg(intp, GAUSSR0, 0x98);
			stv0900_write_reg(intp, CCIR0, 0x18);
		} else {
			stv0900_write_reg(intp, GAUSSR0, 0x18);
			stv0900_write_reg(intp, CCIR0, 0x18);
		}

		stv0900_write_reg(intp, ERRCTRL1, 0x75);
		break;
	case STV0900_DVBS2_STANDARD:
		dprintk("%s: found DVB-S2\n", __func__);
		stv0900_write_bits(intp, DVBS1_ENABLE, 0);
		stv0900_write_bits(intp, DVBS2_ENABLE, 1);
		stv0900_write_reg(intp, ACLC, 0);
		stv0900_write_reg(intp, BCLC, 0);
		if (intp->result[demod].frame_len == STV0900_LONG_FRAME) {
			/* long frame: carrier loop per modcode/pilots; the
			 * ACLC2S2* register written depends on the
			 * constellation family */
			foundModcod = stv0900_get_bits(intp, DEMOD_MODCOD);
			pilots = stv0900_get_bits(intp, DEMOD_TYPE) & 0x01;
			aclc = stv0900_get_optim_carr_loop(srate,
							foundModcod,
							pilots,
							intp->chip_id);
			if (foundModcod <= STV0900_QPSK_910)
				stv0900_write_reg(intp, ACLC2S2Q, aclc);
			else if (foundModcod <= STV0900_8PSK_910) {
				stv0900_write_reg(intp, ACLC2S2Q, 0x2a);
				stv0900_write_reg(intp, ACLC2S28, aclc);
			}

			if ((intp->demod_mode == STV0900_SINGLE) &&
					(foundModcod > STV0900_8PSK_910)) {
				/* 16/32APSK only supported in single mode */
				if (foundModcod <= STV0900_16APSK_910) {
					stv0900_write_reg(intp, ACLC2S2Q, 0x2a);
					stv0900_write_reg(intp, ACLC2S216A,
									aclc);
				} else if (foundModcod <= STV0900_32APSK_910) {
					stv0900_write_reg(intp, ACLC2S2Q, 0x2a);
					stv0900_write_reg(intp, ACLC2S232A,
									aclc);
				}
			}

		} else {
			/* short frame: carrier loop per modulation only */
			modulation = intp->result[demod].modulation;
			aclc = stv0900_get_optim_short_carr_loop(srate,
					modulation, intp->chip_id);

			if (modulation == STV0900_QPSK)
				stv0900_write_reg(intp, ACLC2S2Q, aclc);
			else if (modulation == STV0900_8PSK) {
				stv0900_write_reg(intp, ACLC2S2Q, 0x2a);
				stv0900_write_reg(intp, ACLC2S28, aclc);
			} else if (modulation == STV0900_16APSK) {
				stv0900_write_reg(intp, ACLC2S2Q, 0x2a);
				stv0900_write_reg(intp, ACLC2S216A, aclc);
			} else if (modulation == STV0900_32APSK) {
				stv0900_write_reg(intp, ACLC2S2Q, 0x2a);
				stv0900_write_reg(intp, ACLC2S232A, aclc);
			}

		}

		if (intp->chip_id <= 0x11) {
			if (intp->demod_mode != STV0900_SINGLE)
				stv0900_activate_s2_modcod(intp, demod);

		}

		stv0900_write_reg(intp, ERRCTRL1, 0x67);
		break;
	case STV0900_UNKNOWN_STANDARD:
	default:
		dprintk("%s: found unknown standard\n", __func__);
		stv0900_write_bits(intp, DVBS1_ENABLE, 1);
		stv0900_write_bits(intp, DVBS2_ENABLE, 1);
		break;
	}

	freq1 = stv0900_read_reg(intp, CFR2);
	freq0 = stv0900_read_reg(intp, CFR1);
	/* NOTE(review): rolloff is read here but not used again below */
	rolloff = stv0900_get_bits(intp, ROLLOFF_STATUS);
	if (intp->srch_algo[demod] == STV0900_BLIND_SEARCH) {
		/* blind search: stop scanning and pin the found rate */
		stv0900_write_reg(intp, SFRSTEP, 0x00);
		stv0900_write_bits(intp, SCAN_ENABLE, 0);
		stv0900_write_bits(intp, CFR_AUTOSCAN, 0);
		stv0900_write_reg(intp, TMGCFG2, 0xc1);
		stv0900_set_symbol_rate(intp, intp->mclk, srate, demod);
		blind_tun_sw = 1;
		if (intp->result[demod].standard != STV0900_DVBS2_STANDARD)
			stv0900_set_dvbs1_track_car_loop(intp, demod, srate);

	}

	if (intp->chip_id >= 0x20) {
		if ((intp->srch_standard[demod] == STV0900_SEARCH_DVBS1) ||
				(intp->srch_standard[demod] ==
							STV0900_SEARCH_DSS) ||
				(intp->srch_standard[demod] ==
							STV0900_AUTO_SEARCH)) {
			stv0900_write_reg(intp, VAVSRVIT, 0x0a);
			stv0900_write_reg(intp, VITSCALE, 0x0);
		}
	}

	if (intp->chip_id < 0x20)
		stv0900_write_reg(intp, CARHDR, 0x08);

	if (intp->chip_id == 0x10)
		stv0900_write_reg(intp, CORRELEXP, 0x0a);

	stv0900_write_reg(intp, AGC2REF, 0x38);

	if ((intp->chip_id >= 0x20) ||
			(blind_tun_sw == 1) ||
			(intp->symbol_rate[demod] < 10000000)) {
		/* re-center the carrier on the measured offset and, when
		 * needed, retune the tuner to the matching bandwidth */
		stv0900_write_reg(intp, CFRINIT1, freq1);
		stv0900_write_reg(intp, CFRINIT0, freq0);
		intp->bw[demod] = stv0900_carrier_width(srate,
					intp->rolloff) + 10000000;

		if ((intp->chip_id >= 0x20) || (blind_tun_sw == 1)) {
			if (intp->srch_algo[demod] != STV0900_WARM_START) {
				if (intp->tuner_type[demod] == 3)
					stv0900_set_tuner_auto(intp,
							intp->freq[demod],
							intp->bw[demod],
							demod);
				else
					stv0900_set_bandwidth(fe,
							intp->bw[demod]);
			}
		}

		if ((intp->srch_algo[demod] == STV0900_BLIND_SEARCH) ||
				(intp->symbol_rate[demod] < 10000000))
			msleep(50);
		else
			msleep(5);

		stv0900_get_lock_timeout(&timed, &timef, srate,
						STV0900_WARM_START);

		if (stv0900_get_demod_lock(intp, demod, timed / 2) == FALSE) {
			/* lock lost after re-centering: retry the carrier
			 * init up to three more times */
			stv0900_write_reg(intp, DMDISTATE, 0x1f);
			stv0900_write_reg(intp, CFRINIT1, freq1);
			stv0900_write_reg(intp, CFRINIT0, freq0);
			stv0900_write_reg(intp, DMDISTATE, 0x18);
			i = 0;
			while ((stv0900_get_demod_lock(intp,
							demod,
							timed / 2) == FALSE) &&
						(i <= 2)) {
				stv0900_write_reg(intp, DMDISTATE, 0x1f);
				stv0900_write_reg(intp, CFRINIT1, freq1);
				stv0900_write_reg(intp, CFRINIT0, freq0);
				stv0900_write_reg(intp, DMDISTATE, 0x18);
				i++;
			}
		}

	}

	if (intp->chip_id >= 0x20)
		stv0900_write_reg(intp, CARFREQ, 0x49);

	if ((intp->result[demod].standard == STV0900_DVBS1_STANDARD) ||
			(intp->result[demod].standard == STV0900_DSS_STANDARD))
		stv0900_set_viterbi_tracq(intp, demod);

}
/*
 * Poll for FEC lock for up to @time_out ms (10 ms granularity).  The
 * demod state is sampled once up front, as in the original: DVB-S2 is
 * checked via PKTDELIN_LOCK, DVB-S1 via LOCKEDVIT; any other state never
 * locks within this call.  Returns the final lock flag.
 */
static int stv0900_get_fec_lock(struct stv0900_internal *intp,
				enum fe_stv0900_demod_num demod, s32 time_out)
{
	enum fe_stv0900_search_state dmd_state;
	s32 elapsed = 0, lock = 0;

	dprintk("%s\n", __func__);

	dmd_state = stv0900_get_bits(intp, HEADER_MODE);

	while ((elapsed < time_out) && (lock == 0)) {
		if (dmd_state == STV0900_DVBS2_FOUND)
			lock = stv0900_get_bits(intp, PKTDELIN_LOCK);
		else if (dmd_state == STV0900_DVBS_FOUND)
			lock = stv0900_get_bits(intp, LOCKEDVIT);
		else
			lock = 0;	/* STV0900_SEARCH, PLH_DETECTED, ... */

		if (lock == 0) {
			msleep(10);
			elapsed += 10;
		}
	}

	if (lock)
		dprintk("%s: DEMOD FEC LOCK OK\n", __func__);
	else
		dprintk("%s: DEMOD FEC LOCK FAIL\n", __func__);

	return lock;
}
/*
 * Wait for full lock: demod lock, then FEC lock, then the transport
 * stream FIFO reporting valid lines (polled at 1 ms).  Returns TRUE on
 * complete lock, FALSE otherwise.
 */
static int stv0900_wait_for_lock(struct stv0900_internal *intp,
				enum fe_stv0900_demod_num demod,
				s32 dmd_timeout, s32 fec_timeout)
{
	s32 elapsed = 0, lock = 0;

	dprintk("%s\n", __func__);

	lock = stv0900_get_demod_lock(intp, demod, dmd_timeout);

	if (lock)
		lock = lock && stv0900_get_fec_lock(intp, demod, fec_timeout);

	if (lock) {
		lock = 0;
		dprintk("%s: Timer = %d, time_out = %d\n",
				__func__, elapsed, fec_timeout);
		while ((elapsed < fec_timeout) && (lock == 0)) {
			lock = stv0900_get_bits(intp, TSFIFO_LINEOK);
			msleep(1);
			elapsed++;
		}
	}

	if (lock)
		dprintk("%s: DEMOD LOCK OK\n", __func__);
	else
		dprintk("%s: DEMOD LOCK FAIL\n", __func__);

	return lock ? TRUE : FALSE;
}
enum fe_stv0900_tracking_standard stv0900_get_standard(struct dvb_frontend *fe,
enum fe_stv0900_demod_num demod)
{
struct stv0900_state *state = fe->demodulator_priv;
struct stv0900_internal *intp = state->internal;
enum fe_stv0900_tracking_standard fnd_standard;
int hdr_mode = stv0900_get_bits(intp, HEADER_MODE);
switch (hdr_mode) {
case 2:
fnd_standard = STV0900_DVBS2_STANDARD;
break;
case 3:
if (stv0900_get_bits(intp, DSS_DVB) == 1)
fnd_standard = STV0900_DSS_STANDARD;
else
fnd_standard = STV0900_DVBS1_STANDARD;
break;
default:
fnd_standard = STV0900_UNKNOWN_STANDARD;
}
dprintk("%s: standard %d\n", __func__, fnd_standard);
return fnd_standard;
}
/*
 * Read the 24-bit signed derotator value (CAR_FREQ2..0) and convert it
 * to a carrier frequency offset: (mclk * derot) / 2^24, computed with a
 * 12x12 split multiply to stay within 32-bit arithmetic.
 */
static s32 stv0900_get_carr_freq(struct stv0900_internal *intp, u32 mclk,
					enum fe_stv0900_demod_num demod)
{
	s32 derot, mclk_hi, mclk_lo, der_hi, der_lo;

	derot = (stv0900_get_bits(intp, CAR_FREQ2) << 16) +
		(stv0900_get_bits(intp, CAR_FREQ1) << 8) +
		(stv0900_get_bits(intp, CAR_FREQ0));
	derot = ge2comp(derot, 24);

	mclk_hi = mclk >> 12;
	der_hi = derot >> 12;
	mclk_lo = mclk % 0x1000;
	der_lo = derot % 0x1000;

	return (mclk_hi * der_hi) +
	       ((mclk_hi * der_lo) >> 12) +
	       ((der_hi * mclk_lo) >> 12);
}
/*
 * Ask the attached tuner for its current frequency via the DVB tuner ops;
 * returns 0 if the tuner has no get_frequency callback or it fails.
 *
 * Fix: the original guarded the pointer chain with `if (&fe->ops)` and
 * `if (&frontend_ops->tuner_ops)` — taking the address of a member is
 * always non-NULL, so both tests were always true and protected nothing
 * (and the NULL initializations they pretended to guard would have
 * crashed on the subsequent dereference if they could ever be false).
 * Access the embedded ops structs directly instead.
 */
static u32 stv0900_get_tuner_freq(struct dvb_frontend *fe)
{
	struct dvb_tuner_ops *tuner_ops = &fe->ops.tuner_ops;
	u32 freq = 0;

	if (tuner_ops->get_frequency) {
		if ((tuner_ops->get_frequency(fe, &freq)) < 0)
			dprintk("%s: Invalid parameter\n", __func__);
		else
			dprintk("%s: Frequency=%d\n", __func__, freq);
	}

	return freq;
}
/*
 * Read back the parameters of the locked signal (standard, frequency
 * including the demod carrier offset, symbol rate with timing correction,
 * FEC, modcode, pilots, frame length, roll-off, spectrum inversion,
 * modulation) into intp->result[demod], and decide whether the result
 * falls inside the requested search range.  Returns STV0900_RANGEOK or
 * STV0900_OUTOFRANGE.
 */
static enum
fe_stv0900_signal_type stv0900_get_signal_params(struct dvb_frontend *fe)
{
	struct stv0900_state *state = fe->demodulator_priv;
	struct stv0900_internal *intp = state->internal;
	enum fe_stv0900_demod_num demod = state->demod;
	enum fe_stv0900_signal_type range = STV0900_OUTOFRANGE;
	struct stv0900_signal_info *result = &intp->result[demod];
	s32 offsetFreq,
	srate_offset;
	int i = 0,
	d = demod;

	u8 timing;

	msleep(5);
	if (intp->srch_algo[d] == STV0900_BLIND_SEARCH) {
		/* blind search: wait (up to 50 ms) for the timing loop to
		 * settle before reading the measured parameters */
		timing = stv0900_read_reg(intp, TMGREG2);
		i = 0;
		stv0900_write_reg(intp, SFRSTEP, 0x5c);

		while ((i <= 50) && (timing != 0) && (timing != 0xff)) {
			timing = stv0900_read_reg(intp, TMGREG2);
			msleep(5);
			i += 5;
		}
	}

	result->standard = stv0900_get_standard(fe, d);
	if (intp->tuner_type[demod] == 3)
		result->frequency = stv0900_get_freq_auto(intp, d);
	else
		result->frequency = stv0900_get_tuner_freq(fe);

	/* add the residual carrier offset measured by the demod (kHz) */
	offsetFreq = stv0900_get_carr_freq(intp, intp->mclk, d) / 1000;
	result->frequency += offsetFreq;
	result->symbol_rate = stv0900_get_symbol_rate(intp, intp->mclk, d);
	srate_offset = stv0900_get_timing_offst(intp, result->symbol_rate, d);
	result->symbol_rate += srate_offset;
	result->fec = stv0900_get_vit_fec(intp, d);
	result->modcode = stv0900_get_bits(intp, DEMOD_MODCOD);
	result->pilot = stv0900_get_bits(intp, DEMOD_TYPE) & 0x01;
	result->frame_len = ((u32)stv0900_get_bits(intp, DEMOD_TYPE)) >> 1;
	result->rolloff = stv0900_get_bits(intp, ROLLOFF_STATUS);

	dprintk("%s: modcode=0x%x \n", __func__, result->modcode);

	switch (result->standard) {
	case STV0900_DVBS2_STANDARD:
		result->spectrum = stv0900_get_bits(intp, SPECINV_DEMOD);
		/* derive the modulation family from the modcode ranges */
		if (result->modcode <= STV0900_QPSK_910)
			result->modulation = STV0900_QPSK;
		else if (result->modcode <= STV0900_8PSK_910)
			result->modulation = STV0900_8PSK;
		else if (result->modcode <= STV0900_16APSK_910)
			result->modulation = STV0900_16APSK;
		else if (result->modcode <= STV0900_32APSK_910)
			result->modulation = STV0900_32APSK;
		else
			result->modulation = STV0900_UNKNOWN;
		break;
	case STV0900_DVBS1_STANDARD:
	case STV0900_DSS_STANDARD:
		result->spectrum = stv0900_get_bits(intp, IQINV);
		result->modulation = STV0900_QPSK;
		break;
	default:
		break;
	}

	if ((intp->srch_algo[d] == STV0900_BLIND_SEARCH) ||
				(intp->symbol_rate[d] < 10000000)) {
		/* re-read the real tuner frequency before the range check;
		 * blind/low-rate searches accept hits within the carrier
		 * width as well as within the search range */
		offsetFreq = result->frequency - intp->freq[d];
		if (intp->tuner_type[demod] == 3)
			intp->freq[d] = stv0900_get_freq_auto(intp, d);
		else
			intp->freq[d] = stv0900_get_tuner_freq(fe);

		if (ABS(offsetFreq) <= ((intp->srch_range[d] / 2000) + 500))
			range = STV0900_RANGEOK;
		else if (ABS(offsetFreq) <=
				(stv0900_carrier_width(result->symbol_rate,
						result->rolloff) / 2000))
			range = STV0900_RANGEOK;

	} else if (ABS(offsetFreq) <= ((intp->srch_range[d] / 2000) + 500))
		range = STV0900_RANGEOK;

	dprintk("%s: range %d\n", __func__, range);

	return range;
}
/*
 * DVB-S1 acquisition workaround: when the demod reports DVBS_FOUND but a
 * full lock was not achieved, retry at the measured frequency/rate with
 * the spectrum inversion forced swapped, then — if that still fails —
 * forced normal.  On success the result is stored and tracking is
 * re-optimized.  Returns STV0900_NODATA unless a retry locked.
 */
static enum
fe_stv0900_signal_type stv0900_dvbs1_acq_workaround(struct dvb_frontend *fe)
{
	struct stv0900_state *state = fe->demodulator_priv;
	struct stv0900_internal *intp = state->internal;
	enum fe_stv0900_demod_num demod = state->demod;
	enum fe_stv0900_signal_type signal_type = STV0900_NODATA;

	s32 srate,
	demod_timeout,
	fec_timeout,
	freq1,
	freq0;

	intp->result[demod].locked = FALSE;

	if (stv0900_get_bits(intp, HEADER_MODE) == STV0900_DVBS_FOUND) {
		/* pin the measured symbol rate and carrier offset */
		srate = stv0900_get_symbol_rate(intp, intp->mclk, demod);
		srate += stv0900_get_timing_offst(intp, srate, demod);

		if (intp->srch_algo[demod] == STV0900_BLIND_SEARCH)
			stv0900_set_symbol_rate(intp, intp->mclk, srate, demod);

		stv0900_get_lock_timeout(&demod_timeout, &fec_timeout,
						srate, STV0900_WARM_START);
		freq1 = stv0900_read_reg(intp, CFR2);
		freq0 = stv0900_read_reg(intp, CFR1);
		stv0900_write_bits(intp, CFR_AUTOSCAN, 0);

		/* first retry: force IQ spectrum swapped */
		stv0900_write_bits(intp, SPECINV_CONTROL,
					STV0900_IQ_FORCE_SWAPPED);
		stv0900_write_reg(intp, DMDISTATE, 0x1c);
		stv0900_write_reg(intp, CFRINIT1, freq1);
		stv0900_write_reg(intp, CFRINIT0, freq0);
		stv0900_write_reg(intp, DMDISTATE, 0x18);

		if (stv0900_wait_for_lock(intp, demod,
				demod_timeout, fec_timeout) == TRUE) {
			intp->result[demod].locked = TRUE;
			signal_type = stv0900_get_signal_params(fe);
			stv0900_track_optimization(fe);
		} else {
			/* second retry: force IQ spectrum normal */
			stv0900_write_bits(intp, SPECINV_CONTROL,
					STV0900_IQ_FORCE_NORMAL);
			stv0900_write_reg(intp, DMDISTATE, 0x1c);
			stv0900_write_reg(intp, CFRINIT1, freq1);
			stv0900_write_reg(intp, CFRINIT0, freq0);
			stv0900_write_reg(intp, DMDISTATE, 0x18);

			if (stv0900_wait_for_lock(intp, demod,
					demod_timeout, fec_timeout) == TRUE) {
				intp->result[demod].locked = TRUE;
				signal_type = stv0900_get_signal_params(fe);
				stv0900_track_optimization(fe);
			}

		}

	} else
		intp->result[demod].locked = FALSE;

	return signal_type;
}
/*
 * Blind-search helper: sweep the carrier init frequency across the search
 * range in zig-zag steps (at a fixed 1 Msym/s setting) and return the
 * minimum AGC2 integrator level seen — each sample averaged over 10
 * reads.  A low minimum indicates energy somewhere in the range.
 */
static u16 stv0900_blind_check_agc2_min_level(struct stv0900_internal *intp,
					enum fe_stv0900_demod_num demod)
{
	u32 minagc2level = 0xffff,
	agc2level,
	init_freq, freq_step;

	s32 i, j, nb_steps, direction;

	dprintk("%s\n", __func__);

	/* disable scanning, force AGC gates, fixed 1 Msym/s symbol rate */
	stv0900_write_reg(intp, AGC2REF, 0x38);
	stv0900_write_bits(intp, SCAN_ENABLE, 0);
	stv0900_write_bits(intp, CFR_AUTOSCAN, 0);

	stv0900_write_bits(intp, AUTO_GUP, 1);
	stv0900_write_bits(intp, AUTO_GLOW, 1);

	stv0900_write_reg(intp, DMDT0M, 0x0);

	stv0900_set_symbol_rate(intp, intp->mclk, 1000000, demod);
	/* odd number of 1 MHz steps covering the search range */
	nb_steps = -1 + (intp->srch_range[demod] / 1000000);
	nb_steps /= 2;
	nb_steps = (2 * nb_steps) + 1;

	if (nb_steps < 0)
		nb_steps = 1;

	direction = 1;

	/* 1 MHz expressed in CFR register units */
	freq_step = (1000000 << 8) / (intp->mclk >> 8);

	init_freq = 0;

	for (i = 0; i < nb_steps; i++) {
		/* alternate above/below center, widening each pass */
		if (direction > 0)
			init_freq = init_freq + (freq_step * i);
		else
			init_freq = init_freq - (freq_step * i);

		direction *= -1;
		stv0900_write_reg(intp, DMDISTATE, 0x5C);
		stv0900_write_reg(intp, CFRINIT1, (init_freq >> 8) & 0xff);
		stv0900_write_reg(intp, CFRINIT0, init_freq & 0xff);
		stv0900_write_reg(intp, DMDISTATE, 0x58);
		msleep(10);
		agc2level = 0;

		for (j = 0; j < 10; j++)
			agc2level += (stv0900_read_reg(intp, AGC2I1) << 8)
					| stv0900_read_reg(intp, AGC2I0);

		agc2level /= 10;

		if (agc2level < minagc2level)
			minagc2level = agc2level;

	}

	return (u16)minagc2level;
}
/*
 * Coarse symbol-rate search for blind acquisition: enable the hardware
 * frequency/timing scan, then step the tuner across the search range
 * until the timing loop reports a plausible lock.
 *
 * Returns the coarse symbol rate found, or 0 when no timing lock was
 * achieved anywhere in the range.
 */
static u32 stv0900_search_srate_coarse(struct dvb_frontend *fe)
{
    struct stv0900_state *state = fe->demodulator_priv;
    struct stv0900_internal *intp = state->internal;
    enum fe_stv0900_demod_num demod = state->demod;
    int timing_lck = FALSE;
    s32 i, timingcpt = 0,
        direction = 1,
        nb_steps,
        current_step = 0,
        tuner_freq;
    u32 agc2_th,
        coarse_srate = 0,
        agc2_integr = 0,
        currier_step = 1200;

    /* AGC2 acceptance threshold depends on the chip cut. */
    if (intp->chip_id >= 0x30)
        agc2_th = 0x2e00;
    else
        agc2_th = 0x1f00;

    /* Arm the wideband scan: timing thresholds, SFR limits, AGC2 ref. */
    stv0900_write_bits(intp, DEMOD_MODE, 0x1f);
    stv0900_write_reg(intp, TMGCFG, 0x12);
    stv0900_write_reg(intp, TMGTHRISE, 0xf0);
    stv0900_write_reg(intp, TMGTHFALL, 0xe0);
    stv0900_write_bits(intp, SCAN_ENABLE, 1);
    stv0900_write_bits(intp, CFR_AUTOSCAN, 1);
    stv0900_write_reg(intp, SFRUP1, 0x83);
    stv0900_write_reg(intp, SFRUP0, 0xc0);
    stv0900_write_reg(intp, SFRLOW1, 0x82);
    stv0900_write_reg(intp, SFRLOW0, 0xa0);
    stv0900_write_reg(intp, DMDT0M, 0x0);
    stv0900_write_reg(intp, AGC2REF, 0x50);

    /* Carrier loop / SFR step tuning per chip cut. */
    if (intp->chip_id >= 0x30) {
        stv0900_write_reg(intp, CARFREQ, 0x99);
        stv0900_write_reg(intp, SFRSTEP, 0x98);
    } else if (intp->chip_id >= 0x20) {
        stv0900_write_reg(intp, CARFREQ, 0x6a);
        stv0900_write_reg(intp, SFRSTEP, 0x95);
    } else {
        stv0900_write_reg(intp, CARFREQ, 0xed);
        stv0900_write_reg(intp, SFRSTEP, 0x73);
    }

    /* Tuner step size (kHz) grows with the expected symbol rate. */
    if (intp->symbol_rate[demod] <= 2000000)
        currier_step = 1000;
    else if (intp->symbol_rate[demod] <= 5000000)
        currier_step = 2000;
    else if (intp->symbol_rate[demod] <= 12000000)
        currier_step = 3000;
    else
        currier_step = 5000;

    /* Odd number of steps covering the range, capped at 11. */
    nb_steps = -1 + ((intp->srch_range[demod] / 1000) / currier_step);
    nb_steps /= 2;
    nb_steps = (2 * nb_steps) + 1;

    if (nb_steps < 0)
        nb_steps = 1;
    else if (nb_steps > 10) {
        nb_steps = 11;
        currier_step = (intp->srch_range[demod] / 1000) / 10;
    }

    current_step = 0;
    direction = 1;

    tuner_freq = intp->freq[demod];

    while ((timing_lck == FALSE) && (current_step < nb_steps)) {
        /* Reset and restart the demod scan at this tuner position. */
        stv0900_write_reg(intp, DMDISTATE, 0x5f);
        stv0900_write_bits(intp, DEMOD_MODE, 0);

        msleep(50);

        /*
         * Sample timing-lock quality and AGC2 ten times. Note that
         * timingcpt is accumulated across tuner steps (never reset)
         * and agc2_integr carries a decayed remainder from previous
         * steps because of the /= 10 below.
         */
        for (i = 0; i < 10; i++) {
            if (stv0900_get_bits(intp, TMGLOCK_QUALITY) >= 2)
                timingcpt++;

            agc2_integr += (stv0900_read_reg(intp, AGC2I1) << 8) |
                    stv0900_read_reg(intp, AGC2I0);
        }

        agc2_integr /= 10;
        coarse_srate = stv0900_get_symbol_rate(intp, intp->mclk, demod);
        current_step++;
        direction *= -1;

        dprintk("lock: I2C_DEMOD_MODE_FIELD =0. Search started."
            " tuner freq=%d agc2=0x%x srate_coarse=%d tmg_cpt=%d\n",
            tuner_freq, agc2_integr, coarse_srate, timingcpt);

        /* Accept only a plausible rate with real signal energy. */
        if ((timingcpt >= 5) &&
                (agc2_integr < agc2_th) &&
                (coarse_srate < 55000000) &&
                (coarse_srate > 850000))
            timing_lck = TRUE;
        else if (current_step < nb_steps) {
            /* Zig-zag the tuner away from the centre frequency. */
            if (direction > 0)
                tuner_freq += (current_step * currier_step);
            else
                tuner_freq -= (current_step * currier_step);

            if (intp->tuner_type[demod] == 3)
                stv0900_set_tuner_auto(intp, tuner_freq,
                        intp->bw[demod], demod);
            else
                stv0900_set_tuner(fe, tuner_freq,
                        intp->bw[demod]);
        }
    }

    if (timing_lck == FALSE)
        coarse_srate = 0;
    else
        coarse_srate = stv0900_get_symbol_rate(intp, intp->mclk, demod);

    return coarse_srate;
}
/*
 * Scale a symbol rate (Symbol/s) into the 16-bit SFR* register format:
 * (rate / div) * 2^16 / (mclk / div). The divider keeps the 32-bit
 * intermediates from overflowing; callers use 1000 for high symbol
 * rates and 100 for low ones to retain precision.
 */
static u32 stv0900_sfr_field(u32 rate, u32 mclk, u32 div)
{
    u32 field;

    field = (rate / div) * 65536;
    field /= (mclk / div);

    return field;
}

/*
 * Fine symbol-rate search step of the blind acquisition: bound the
 * symbol-rate loop around the coarse estimate (upper limit 1.3x,
 * lower limit ~0.77x) and restart the demodulator at the frequency
 * found by the coarse pass.
 *
 * Returns the coarse symbol rate, or 0 when the coarse estimate is
 * clearly below the rate being searched for.
 */
static u32 stv0900_search_srate_fine(struct dvb_frontend *fe)
{
    struct stv0900_state *state = fe->demodulator_priv;
    struct stv0900_internal *intp = state->internal;
    enum fe_stv0900_demod_num demod = state->demod;
    u32 coarse_srate, coarse_freq, symb, symbmax, symbmin, symbcomp;

    coarse_srate = stv0900_get_symbol_rate(intp, intp->mclk, demod);

    if (coarse_srate > 3000000) {
        symbmax = stv0900_sfr_field(13 * (coarse_srate / 10),
                        intp->mclk, 1000);
        symbmin = stv0900_sfr_field(10 * (coarse_srate / 13),
                        intp->mclk, 1000);
        symb = stv0900_sfr_field(coarse_srate, intp->mclk, 1000);
    } else {
        symbmax = stv0900_sfr_field(13 * (coarse_srate / 10),
                        intp->mclk, 100);
        symbmin = stv0900_sfr_field(10 * (coarse_srate / 14),
                        intp->mclk, 100);
        symb = stv0900_sfr_field(coarse_srate, intp->mclk, 100);
    }

    symbcomp = 13 * (coarse_srate / 10);
    coarse_freq = (stv0900_read_reg(intp, CFR2) << 8)
            | stv0900_read_reg(intp, CFR1);

    if (symbcomp < intp->symbol_rate[demod]) {
        /* Coarse estimate too low for the requested rate: give up. */
        coarse_srate = 0;
    } else {
        /* Stop the demod and load the fine-search timing setup. */
        stv0900_write_reg(intp, DMDISTATE, 0x1f);
        stv0900_write_reg(intp, TMGCFG2, 0xc1);
        stv0900_write_reg(intp, TMGTHRISE, 0x20);
        stv0900_write_reg(intp, TMGTHFALL, 0x00);
        stv0900_write_reg(intp, TMGCFG, 0xd2);
        stv0900_write_bits(intp, CFR_AUTOSCAN, 0);
        stv0900_write_reg(intp, AGC2REF, 0x38);

        if (intp->chip_id >= 0x30)
            stv0900_write_reg(intp, CARFREQ, 0x79);
        else if (intp->chip_id >= 0x20)
            stv0900_write_reg(intp, CARFREQ, 0x49);
        else
            stv0900_write_reg(intp, CARFREQ, 0xed);

        /* Symbol-rate window and initial value. */
        stv0900_write_reg(intp, SFRUP1, (symbmax >> 8) & 0x7f);
        stv0900_write_reg(intp, SFRUP0, (symbmax & 0xff));
        stv0900_write_reg(intp, SFRLOW1, (symbmin >> 8) & 0x7f);
        stv0900_write_reg(intp, SFRLOW0, (symbmin & 0xff));
        stv0900_write_reg(intp, SFRINIT1, (symb >> 8) & 0xff);
        stv0900_write_reg(intp, SFRINIT0, (symb & 0xff));

        stv0900_write_reg(intp, DMDT0M, 0x20);

        /* Restart at the carrier frequency the coarse pass found. */
        stv0900_write_reg(intp, CFRINIT1, (coarse_freq >> 8) & 0xff);
        stv0900_write_reg(intp, CFRINIT0, coarse_freq & 0xff);
        stv0900_write_reg(intp, DMDISTATE, 0x15);
    }

    return coarse_srate;
}
/*
 * Blind-search acquisition: verify the minimum AGC2 level first, preset
 * the carrier/equalizer loops for the chip cut, then sweep the timing
 * loop gain (KREFTMG) downwards until the demodulator locks or the
 * coarse symbol-rate search reports a definite failure.
 *
 * Returns TRUE on demod lock, FALSE otherwise.
 */
static int stv0900_blind_search_algo(struct dvb_frontend *fe)
{
    struct stv0900_state *state = fe->demodulator_priv;
    struct stv0900_internal *intp = state->internal;
    enum fe_stv0900_demod_num demod = state->demod;
    /*
     * The KREFTMG sweep variables must be signed: with the previous u8
     * type the final "k_ref_tmg -= 30" step underflowed (for the
     * cut >= 2.0 bounds 110/10 the sequence went 110, 80, 50, 20 and
     * then wrapped to 246), so the loop kept running and programmed
     * bogus gain values instead of terminating.
     */
    s32 k_ref_tmg,
        k_ref_tmg_max,
        k_ref_tmg_min;
    u32 coarse_srate,
        agc2_th;
    int lock = FALSE,
        coarse_fail = FALSE;
    s32 demod_timeout = 500,
        fec_timeout = 50,
        fail_cpt,
        i,
        agc2_overflow;
    u16 agc2_int;
    u8 dstatus2;

    dprintk("%s\n", __func__);

    /* KREFTMG sweep bounds depend on the chip cut. */
    if (intp->chip_id < 0x20) {
        k_ref_tmg_max = 233;
        k_ref_tmg_min = 143;
    } else {
        k_ref_tmg_max = 110;
        k_ref_tmg_min = 10;
    }

    if (intp->chip_id <= 0x20)
        agc2_th = STV0900_BLIND_SEARCH_AGC2_TH;
    else
        agc2_th = STV0900_BLIND_SEARCH_AGC2_TH_CUT30;

    /* Bail out early when no signal energy is seen across the range. */
    agc2_int = stv0900_blind_check_agc2_min_level(intp, demod);

    dprintk("%s agc2_int=%d agc2_th=%d \n", __func__, agc2_int, agc2_th);

    if (agc2_int > agc2_th)
        return FALSE;

    /* Carrier/correlator presets per chip cut. */
    if (intp->chip_id == 0x10)
        stv0900_write_reg(intp, CORRELEXP, 0xaa);

    if (intp->chip_id < 0x20)
        stv0900_write_reg(intp, CARHDR, 0x55);
    else
        stv0900_write_reg(intp, CARHDR, 0x20);

    if (intp->chip_id <= 0x20)
        stv0900_write_reg(intp, CARCFG, 0xc4);
    else
        stv0900_write_reg(intp, CARCFG, 0x6);

    stv0900_write_reg(intp, RTCS2, 0x44);

    if (intp->chip_id >= 0x20) {
        stv0900_write_reg(intp, EQUALCFG, 0x41);
        stv0900_write_reg(intp, FFECFG, 0x41);
        stv0900_write_reg(intp, VITSCALE, 0x82);
        stv0900_write_reg(intp, VAVSRVIT, 0x0);
    }

    k_ref_tmg = k_ref_tmg_max;

    do {
        stv0900_write_reg(intp, KREFTMG, k_ref_tmg);

        if (stv0900_search_srate_coarse(fe) != 0) {
            coarse_srate = stv0900_search_srate_fine(fe);

            if (coarse_srate != 0) {
                stv0900_get_lock_timeout(&demod_timeout,
                            &fec_timeout,
                            coarse_srate,
                            STV0900_BLIND_SEARCH);
                lock = stv0900_get_demod_lock(intp,
                            demod,
                            demod_timeout);
            } else
                lock = FALSE;
        } else {
            /*
             * Coarse search failed: sample AGC2 and the carrier
             * status to decide whether any signal is present at
             * all, and stop retrying when it clearly is not.
             */
            fail_cpt = 0;
            agc2_overflow = 0;

            for (i = 0; i < 10; i++) {
                agc2_int = (stv0900_read_reg(intp, AGC2I1) << 8)
                        | stv0900_read_reg(intp, AGC2I0);

                if (agc2_int >= 0xff00)
                    agc2_overflow++;

                dstatus2 = stv0900_read_reg(intp, DSTATUS2);

                if (((dstatus2 & 0x1) == 0x1) &&
                        ((dstatus2 >> 7) == 1))
                    fail_cpt++;
            }

            if ((fail_cpt > 7) || (agc2_overflow > 7))
                coarse_fail = TRUE;

            lock = FALSE;
        }
        k_ref_tmg -= 30;
    } while ((k_ref_tmg >= k_ref_tmg_min) &&
            (lock == FALSE) &&
            (coarse_fail == FALSE));

    return lock;
}
static void stv0900_set_viterbi_acq(struct stv0900_internal *intp,
enum fe_stv0900_demod_num demod)
{
s32 vth_reg = VTH12;
dprintk("%s\n", __func__);
stv0900_write_reg(intp, vth_reg++, 0x96);
stv0900_write_reg(intp, vth_reg++, 0x64);
stv0900_write_reg(intp, vth_reg++, 0x36);
stv0900_write_reg(intp, vth_reg++, 0x23);
stv0900_write_reg(intp, vth_reg++, 0x1e);
stv0900_write_reg(intp, vth_reg++, 0x19);
}
/*
 * Configure the demodulator for the requested search standard
 * (DVB-S1/DSS, DVB-S2 or automatic), enabling the relevant decoder
 * paths and loading the matching carrier-loop and Viterbi settings.
 */
static void stv0900_set_search_standard(struct stv0900_internal *intp,
                enum fe_stv0900_demod_num demod)
{

    dprintk("%s\n", __func__);

    /*
     * Debug trace of the selected standard. A misplaced "break"
     * previously made the DVBS2 message unreachable and let the DSS
     * case fall through into DVBS2's bare break.
     */
    switch (intp->srch_standard[demod]) {
    case STV0900_SEARCH_DVBS1:
        dprintk("Search Standard = DVBS1\n");
        break;
    case STV0900_SEARCH_DSS:
        dprintk("Search Standard = DSS\n");
        break;
    case STV0900_SEARCH_DVBS2:
        dprintk("Search Standard = DVBS2\n");
        break;
    case STV0900_AUTO_SEARCH:
    default:
        dprintk("Search Standard = AUTO\n");
        break;
    }

    switch (intp->srch_standard[demod]) {
    case STV0900_SEARCH_DVBS1:
    case STV0900_SEARCH_DSS:
        /* DVB-S1/DSS only: Viterbi path on, DVB-S2 path off. */
        stv0900_write_bits(intp, DVBS1_ENABLE, 1);
        stv0900_write_bits(intp, DVBS2_ENABLE, 0);
        stv0900_write_bits(intp, STOP_CLKVIT, 0);
        stv0900_set_dvbs1_track_car_loop(intp,
                        demod,
                        intp->symbol_rate[demod]);
        stv0900_write_reg(intp, CAR2CFG, 0x22);

        stv0900_set_viterbi_acq(intp, demod);
        stv0900_set_viterbi_standard(intp,
                    intp->srch_standard[demod],
                    intp->fec[demod], demod);
        break;
    case STV0900_SEARCH_DVBS2:
        /* DVB-S2 only: gate the Viterbi clock off. */
        stv0900_write_bits(intp, DVBS1_ENABLE, 0);
        stv0900_write_bits(intp, DVBS2_ENABLE, 1);
        stv0900_write_bits(intp, STOP_CLKVIT, 1);
        stv0900_write_reg(intp, ACLC, 0x1a);
        stv0900_write_reg(intp, BCLC, 0x09);
        if (intp->chip_id <= 0x20) /* cut 1.x and 2.0 */
            stv0900_write_reg(intp, CAR2CFG, 0x26);
        else
            stv0900_write_reg(intp, CAR2CFG, 0x66);

        if (intp->demod_mode != STV0900_SINGLE) {
            if (intp->chip_id <= 0x11)
                stv0900_stop_all_s2_modcod(intp, demod);
            else
                stv0900_activate_s2_modcod(intp, demod);
        } else
            stv0900_activate_s2_modcod_single(intp, demod);

        stv0900_set_viterbi_tracq(intp, demod);
        break;
    case STV0900_AUTO_SEARCH:
    default:
        /* Auto: keep both decoder paths enabled. */
        stv0900_write_bits(intp, DVBS1_ENABLE, 1);
        stv0900_write_bits(intp, DVBS2_ENABLE, 1);
        stv0900_write_bits(intp, STOP_CLKVIT, 0);
        stv0900_write_reg(intp, ACLC, 0x1a);
        stv0900_write_reg(intp, BCLC, 0x09);
        stv0900_set_dvbs1_track_car_loop(intp,
                        demod,
                        intp->symbol_rate[demod]);
        if (intp->chip_id <= 0x20) /* cut 1.x and 2.0 */
            stv0900_write_reg(intp, CAR2CFG, 0x26);
        else
            stv0900_write_reg(intp, CAR2CFG, 0x66);

        if (intp->demod_mode != STV0900_SINGLE) {
            if (intp->chip_id <= 0x11)
                stv0900_stop_all_s2_modcod(intp, demod);
            else
                stv0900_activate_s2_modcod(intp, demod);
        } else
            stv0900_activate_s2_modcod_single(intp, demod);

        stv0900_set_viterbi_tracq(intp, demod);
        stv0900_set_viterbi_standard(intp,
                    intp->srch_standard[demod],
                    intp->fec[demod], demod);
        break;
    }
}
/*
 * Top-level acquisition routine: program the demodulator for the
 * requested search algorithm (blind / cold / warm), run it, and
 * qualify the result (signal parameters, FEC lock, rolloff, error
 * counters). Falls back to the DVB-S1 IQ-swap workaround on old cuts.
 */
enum fe_stv0900_signal_type stv0900_algo(struct dvb_frontend *fe)
{
    struct stv0900_state *state = fe->demodulator_priv;
    struct stv0900_internal *intp = state->internal;
    enum fe_stv0900_demod_num demod = state->demod;
    s32 demod_timeout = 500, fec_timeout = 50;
    s32 aq_power, agc1_power, i;

    int lock = FALSE, low_sr = FALSE;

    enum fe_stv0900_signal_type signal_type = STV0900_NOCARRIER;
    enum fe_stv0900_search_algo algo;
    int no_signal = FALSE;

    dprintk("%s\n", __func__);

    algo = intp->srch_algo[demod];

    /* Hold the packet delineator in reset and stop the demod. */
    stv0900_write_bits(intp, RST_HWARE, 1);
    stv0900_write_reg(intp, DMDISTATE, 0x5c);

    if (intp->chip_id >= 0x20) {
        if (intp->symbol_rate[demod] > 5000000)
            stv0900_write_reg(intp, CORRELABS, 0x9e);
        else
            stv0900_write_reg(intp, CORRELABS, 0x82);
    } else
        stv0900_write_reg(intp, CORRELABS, 0x88);

    stv0900_get_lock_timeout(&demod_timeout, &fec_timeout,
                intp->symbol_rate[demod],
                intp->srch_algo[demod]);

    if (intp->srch_algo[demod] == STV0900_BLIND_SEARCH) {
        /* Blind: open the bandwidth fully, start from 1 MS/s. */
        intp->bw[demod] = 2 * 36000000;
        stv0900_write_reg(intp, TMGCFG2, 0xc0);
        stv0900_write_reg(intp, CORRELMANT, 0x70);
        stv0900_set_symbol_rate(intp, intp->mclk, 1000000, demod);
    } else {
        /* Cold/warm: known symbol rate; derive the tuner bandwidth. */
        stv0900_write_reg(intp, DMDT0M, 0x20);
        stv0900_write_reg(intp, TMGCFG, 0xd2);

        if (intp->symbol_rate[demod] < 2000000)
            stv0900_write_reg(intp, CORRELMANT, 0x63);
        else
            stv0900_write_reg(intp, CORRELMANT, 0x70);

        stv0900_write_reg(intp, AGC2REF, 0x38);

        intp->bw[demod] =
            stv0900_carrier_width(intp->symbol_rate[demod],
                        intp->rolloff);
        if (intp->chip_id >= 0x20) {
            stv0900_write_reg(intp, KREFTMG, 0x5a);

            /* Widen the bandwidth by the carrier search range. */
            if (intp->srch_algo[demod] == STV0900_COLD_START) {
                intp->bw[demod] += 10000000;
                intp->bw[demod] *= 15;
                intp->bw[demod] /= 10;
            } else if (intp->srch_algo[demod] == STV0900_WARM_START)
                intp->bw[demod] += 10000000;
        } else {
            stv0900_write_reg(intp, KREFTMG, 0xc1);
            intp->bw[demod] += 10000000;
            intp->bw[demod] *= 15;
            intp->bw[demod] /= 10;
        }

        stv0900_write_reg(intp, TMGCFG2, 0xc1);

        stv0900_set_symbol_rate(intp, intp->mclk,
                    intp->symbol_rate[demod], demod);
        stv0900_set_max_symbol_rate(intp, intp->mclk,
                    intp->symbol_rate[demod], demod);
        stv0900_set_min_symbol_rate(intp, intp->mclk,
                    intp->symbol_rate[demod], demod);
        if (intp->symbol_rate[demod] >= 10000000)
            low_sr = FALSE;
        else
            low_sr = TRUE;
    }

    if (intp->tuner_type[demod] == 3)
        stv0900_set_tuner_auto(intp, intp->freq[demod],
                intp->bw[demod], demod);
    else
        stv0900_set_tuner(fe, intp->freq[demod], intp->bw[demod]);

    /*
     * Sanity-check that there is input power at all: AGC1, and if AGC1
     * reads zero, the averaged I/Q power.
     */
    agc1_power = MAKEWORD(stv0900_get_bits(intp, AGCIQ_VALUE1),
                stv0900_get_bits(intp, AGCIQ_VALUE0));

    aq_power = 0;

    if (agc1_power == 0) {
        for (i = 0; i < 5; i++)
            aq_power += (stv0900_get_bits(intp, POWER_I) +
                    stv0900_get_bits(intp, POWER_Q)) / 2;

        aq_power /= 5;
    }

    if ((agc1_power == 0) && (aq_power < IQPOWER_THRESHOLD)) {
        intp->result[demod].locked = FALSE;
        signal_type = STV0900_NOAGC1;
        dprintk("%s: NO AGC1, POWERI, POWERQ\n", __func__);
    } else {
        stv0900_write_bits(intp, SPECINV_CONTROL,
                intp->srch_iq_inv[demod]);
        if (intp->chip_id <= 0x20) /*cut 2.0*/
            stv0900_write_bits(intp, MANUALSX_ROLLOFF, 1);
        else /*cut 3.0*/
            stv0900_write_bits(intp, MANUALS2_ROLLOFF, 1);

        stv0900_set_search_standard(intp, demod);

        /* Blind search starts itself from stv0900_blind_search_algo. */
        if (intp->srch_algo[demod] != STV0900_BLIND_SEARCH)
            stv0900_start_search(intp, demod);
    }

    if (signal_type == STV0900_NOAGC1)
        return signal_type;

    /* Cut 1.2 needs an extra hardware-reset pulse here. */
    if (intp->chip_id == 0x12) {
        stv0900_write_bits(intp, RST_HWARE, 0);
        msleep(3);
        stv0900_write_bits(intp, RST_HWARE, 1);
        stv0900_write_bits(intp, RST_HWARE, 0);
    }

    /* Run the selected acquisition algorithm. */
    if (algo == STV0900_BLIND_SEARCH)
        lock = stv0900_blind_search_algo(fe);
    else if (algo == STV0900_COLD_START)
        lock = stv0900_get_demod_cold_lock(fe, demod_timeout);
    else if (algo == STV0900_WARM_START)
        lock = stv0900_get_demod_lock(intp, demod, demod_timeout);

    /* Cold-start fallback: software zig-zag when timing has locked. */
    if ((lock == FALSE) && (algo == STV0900_COLD_START)) {
        if (low_sr == FALSE) {
            if (stv0900_check_timing_lock(intp, demod) == TRUE)
                lock = stv0900_sw_algo(intp, demod);
        }
    }

    if (lock == TRUE)
        signal_type = stv0900_get_signal_params(fe);

    if ((lock == TRUE) && (signal_type == STV0900_RANGEOK)) {
        stv0900_track_optimization(fe);
        /* Release the FEC/packet path per chip cut. */
        if (intp->chip_id <= 0x11) {
            if ((stv0900_get_standard(fe, 0) ==
                        STV0900_DVBS1_STANDARD) &&
               (stv0900_get_standard(fe, 1) ==
                        STV0900_DVBS1_STANDARD)) {
                msleep(20);
                stv0900_write_bits(intp, RST_HWARE, 0);
            } else {
                stv0900_write_bits(intp, RST_HWARE, 0);
                msleep(3);
                stv0900_write_bits(intp, RST_HWARE, 1);
                stv0900_write_bits(intp, RST_HWARE, 0);
            }
        } else if (intp->chip_id >= 0x20) {
            stv0900_write_bits(intp, RST_HWARE, 0);
            msleep(3);
            stv0900_write_bits(intp, RST_HWARE, 1);
            stv0900_write_bits(intp, RST_HWARE, 0);
        }

        if (stv0900_wait_for_lock(intp, demod,
                    fec_timeout, fec_timeout) == TRUE) {
            lock = TRUE;
            intp->result[demod].locked = TRUE;
            /* Arm the appropriate error counters per standard. */
            if (intp->result[demod].standard ==
                        STV0900_DVBS2_STANDARD) {
                stv0900_set_dvbs2_rolloff(intp, demod);
                stv0900_write_bits(intp, RESET_UPKO_COUNT, 1);
                stv0900_write_bits(intp, RESET_UPKO_COUNT, 0);
                stv0900_write_reg(intp, ERRCTRL1, 0x67);
            } else {
                stv0900_write_reg(intp, ERRCTRL1, 0x75);
            }

            stv0900_write_reg(intp, FBERCPT4, 0);
            stv0900_write_reg(intp, ERRCTRL2, 0xc1);
        } else {
            lock = FALSE;
            signal_type = STV0900_NODATA;
            no_signal = stv0900_check_signal_presence(intp, demod);

            intp->result[demod].locked = FALSE;
        }
    }

    if ((signal_type != STV0900_NODATA) || (no_signal != FALSE))
        return signal_type;

    if (intp->chip_id > 0x11) {
        intp->result[demod].locked = FALSE;
        return signal_type;
    }

    /* Old cuts: retry via the DVB-S1 IQ-inversion workaround. */
    if ((stv0900_get_bits(intp, HEADER_MODE) == STV0900_DVBS_FOUND) &&
       (intp->srch_iq_inv[demod] <= STV0900_IQ_AUTO_NORMAL_FIRST))
        signal_type = stv0900_dvbs1_acq_workaround(fe);

    return signal_type;
}
| gpl-2.0 |
SkrilaxCZ/android_kernel_motorola_omap3-common | arch/mips/gt64120/wrppmc/serial.c | 9351 | 2120 | /*
* Registration of WRPPMC UART platform device.
*
* Copyright (C) 2007 Yoichi Yuasa <yuasa@linux-mips.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/platform_device.h>
#include <linux/serial_8250.h>
#include <asm/gt64120.h>
/*
 * MMIO window (8 bytes of 16550 registers) and IRQ line of the
 * on-board UART, registered with the serial8250 platform device by
 * wrppmc_uart_add() below. __initdata: only referenced during init.
 */
static struct resource wrppmc_uart_resource[] __initdata = {
    {
        .start = WRPPMC_UART16550_BASE,
        .end = WRPPMC_UART16550_BASE + 7,
        .flags = IORESOURCE_MEM,
    },
    {
        .start = WRPPMC_UART16550_IRQ,
        .end = WRPPMC_UART16550_IRQ,
        .flags = IORESOURCE_IRQ,
    },
};
/*
 * 8250-core configuration for the port: memory-mapped (UPIO_MEM) with
 * the core doing the ioremap (UPF_IOREMAP) and skipping the autoconfig
 * test (UPF_SKIP_TEST). The empty entry terminates the array.
 */
static struct plat_serial8250_port wrppmc_serial8250_port[] = {
    {
        .irq = WRPPMC_UART16550_IRQ,
        .uartclk = WRPPMC_UART16550_CLOCK,
        .iotype = UPIO_MEM,
        .flags = UPF_IOREMAP | UPF_SKIP_TEST,
        .mapbase = WRPPMC_UART16550_BASE,
    },
    {},
};
/*
 * Register the WRPPMC on-board 16550 as a "serial8250" platform device.
 *
 * Returns 0 on success or a negative errno; on failure the allocated
 * platform device is released again via platform_device_put().
 */
static __init int wrppmc_uart_add(void)
{
    struct platform_device *pdev;
    int err;

    pdev = platform_device_alloc("serial8250", -1);
    if (!pdev)
        return -ENOMEM;

    pdev->id = PLAT8250_DEV_PLATFORM;
    pdev->dev.platform_data = wrppmc_serial8250_port;

    err = platform_device_add_resources(pdev, wrppmc_uart_resource,
                    ARRAY_SIZE(wrppmc_uart_resource));
    if (!err)
        err = platform_device_add(pdev);

    if (err)
        platform_device_put(pdev);

    return err;
}
device_initcall(wrppmc_uart_add);
| gpl-2.0 |
github-easyway/linux-1 | arch/cris/arch-v32/mm/tlb.c | 12935 | 5009 | /*
* Low level TLB handling.
*
* Copyright (C) 2000-2003, Axis Communications AB.
*
* Authors: Bjorn Wesen <bjornw@axis.com>
* Tobias Anderberg <tobiasa@axis.com>, CRISv32 port.
*/
#include <asm/tlb.h>
#include <asm/mmu_context.h>
#include <arch/hwregs/asm/mmu_defs_asm.h>
#include <arch/hwregs/supp_reg.h>
/*
 * Select which TLB entry (by index) the subsequent RW_MM_TLB_HI/LO
 * accesses operate on.
 */
#define UPDATE_TLB_SEL_IDX(val) \
do { \
unsigned long tlb_sel; \
\
tlb_sel = REG_FIELD(mmu, rw_mm_tlb_sel, idx, val); \
SUPP_REG_WR(RW_MM_TLB_SEL, tlb_sel); \
} while(0)

/*
 * Write both halves of the currently selected TLB entry.
 */
#define UPDATE_TLB_HILO(tlb_hi, tlb_lo) \
do { \
SUPP_REG_WR(RW_MM_TLB_HI, tlb_hi); \
SUPP_REG_WR(RW_MM_TLB_LO, tlb_lo); \
} while(0)
/*
* The TLB can host up to 256 different mm contexts at the same time. The running
* context is found in the PID register. Each TLB entry contains a page_id that
* has to match the PID register to give a hit. page_id_map keeps track of which
* mm is assigned to which page_id, making sure it's known when to
* invalidate TLB entries.
*
* The last page_id is never running, it is used as an invalid page_id so that
* it's possible to make TLB entries that will never match.
*
* Note: the flushes need to be atomic, otherwise an interrupt handler that uses
* vmalloc'ed memory might cause a TLB load in the middle of a flush.
*/
/* Flush all TLB entries. */
void
__flush_tlb_all(void)
{
int i;
int mmu;
unsigned long flags;
unsigned long mmu_tlb_hi;
unsigned long mmu_tlb_sel;
/*
* Mask with 0xf so similar TLB entries aren't written in the same 4-way
* entry group.
*/
local_irq_save(flags);
for (mmu = 1; mmu <= 2; mmu++) {
SUPP_BANK_SEL(mmu); /* Select the MMU */
for (i = 0; i < NUM_TLB_ENTRIES; i++) {
/* Store invalid entry */
mmu_tlb_sel = REG_FIELD(mmu, rw_mm_tlb_sel, idx, i);
mmu_tlb_hi = (REG_FIELD(mmu, rw_mm_tlb_hi, pid, INVALID_PAGEID)
| REG_FIELD(mmu, rw_mm_tlb_hi, vpn, i & 0xf));
SUPP_REG_WR(RW_MM_TLB_SEL, mmu_tlb_sel);
SUPP_REG_WR(RW_MM_TLB_HI, mmu_tlb_hi);
SUPP_REG_WR(RW_MM_TLB_LO, 0);
}
}
local_irq_restore(flags);
}
/* Flush an entire user address space. */
void
__flush_tlb_mm(struct mm_struct *mm)
{
    int i;
    int mmu;
    unsigned long flags;
    unsigned long page_id;
    unsigned long tlb_hi;
    unsigned long mmu_tlb_hi;

    page_id = mm->context.page_id;

    /* No context assigned yet, so nothing can be cached for this mm. */
    if (page_id == NO_CONTEXT)
        return;

    /* Mark the TLB entries that match the page_id as invalid. */
    local_irq_save(flags);

    for (mmu = 1; mmu <= 2; mmu++) {
        SUPP_BANK_SEL(mmu);
        for (i = 0; i < NUM_TLB_ENTRIES; i++) {
            UPDATE_TLB_SEL_IDX(i);

            /* Get the page_id */
            SUPP_REG_RD(RW_MM_TLB_HI, tlb_hi);

            /* Check if the page_id match. */
            if ((tlb_hi & 0xff) == page_id) {
                /*
                 * Overwrite with an entry that can never hit:
                 * INVALID_PAGEID, VPN spread over the 4-way
                 * groups via (i & 0xf), and a zero low word.
                 */
                mmu_tlb_hi = (REG_FIELD(mmu, rw_mm_tlb_hi, pid,
                            INVALID_PAGEID)
                    | REG_FIELD(mmu, rw_mm_tlb_hi, vpn,
                            i & 0xf));

                UPDATE_TLB_HILO(mmu_tlb_hi, 0);
            }
        }
    }

    local_irq_restore(flags);
}
/* Invalidate a single page. */
void
__flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
{
    int i;
    int mmu;
    unsigned long page_id;
    unsigned long flags;
    unsigned long tlb_hi;
    unsigned long mmu_tlb_hi;

    page_id = vma->vm_mm->context.page_id;

    /* Nothing cached for a mm without a context. */
    if (page_id == NO_CONTEXT)
        return;

    addr &= PAGE_MASK;

    /*
     * Invalidate those TLB entries that match both the mm context and the
     * requested virtual address.
     */
    local_irq_save(flags);

    for (mmu = 1; mmu <= 2; mmu++) {
        SUPP_BANK_SEL(mmu);
        for (i = 0; i < NUM_TLB_ENTRIES; i++) {
            UPDATE_TLB_SEL_IDX(i);
            SUPP_REG_RD(RW_MM_TLB_HI, tlb_hi);

            /* Check if page_id and address matches */
            if (((tlb_hi & 0xff) == page_id) &&
                ((tlb_hi & PAGE_MASK) == addr)) {
                /* Keep the VPN but switch to the invalid PID. */
                mmu_tlb_hi = REG_FIELD(mmu, rw_mm_tlb_hi, pid,
                        INVALID_PAGEID) | addr;

                UPDATE_TLB_HILO(mmu_tlb_hi, 0);
            }
        }
    }

    local_irq_restore(flags);
}
/*
 * Initialize the context related info for a new mm_struct
 * instance.
 */
int
init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
    /*
     * No page_id is assigned yet; one is handed out lazily by
     * get_mmu_context() on the first switch_mm() to this mm.
     */
    mm->context.page_id = NO_CONTEXT;
    return 0;
}
/* Serializes page_id allocation (get_mmu_context) across CPUs. */
static DEFINE_SPINLOCK(mmu_context_lock);

/* Called in schedule() just before actually doing the switch_to. */
void
switch_mm(struct mm_struct *prev, struct mm_struct *next,
    struct task_struct *tsk)
{
    if (prev != next) {
        int cpu = smp_processor_id();

        /* Make sure there is a MMU context. */
        spin_lock(&mmu_context_lock);
        get_mmu_context(next);
        cpumask_set_cpu(cpu, mm_cpumask(next));
        spin_unlock(&mmu_context_lock);

        /*
         * Remember the pgd for the fault handlers. Keep a separate
         * copy of it because current and active_mm might be invalid
         * at points where there's still a need to dereference the pgd.
         */
        per_cpu(current_pgd, cpu) = next->pgd;

        /* Switch context in the MMU. */
        if (tsk && task_thread_info(tsk)) {
            /* Merge the thread's TLS bits into the PID register. */
            SPEC_REG_WR(SPEC_REG_PID, next->context.page_id |
                task_thread_info(tsk)->tls);
        } else {
            SPEC_REG_WR(SPEC_REG_PID, next->context.page_id);
        }
    }
}
| gpl-2.0 |
crimeofheart/Solid_Kernel-GEE | net/ipv6/xfrm6_mode_transport.c | 14215 | 2307 | /*
* xfrm6_mode_transport.c - Transport mode encapsulation for IPv6.
*
* Copyright (C) 2002 USAGI/WIDE Project
* Copyright (c) 2004-2006 Herbert Xu <herbert@gondor.apana.org.au>
*/
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/stringify.h>
#include <net/dst.h>
#include <net/ipv6.h>
#include <net/xfrm.h>
/* Add encapsulation header.
 *
 * The IP header and mutable extension headers will be moved forward to make
 * space for the encapsulation header.
 */
static int xfrm6_transport_output(struct xfrm_state *x, struct sk_buff *skb)
{
    struct ipv6hdr *iph;
    u8 *prevhdr;
    int hdr_len;

    iph = ipv6_hdr(skb);
    /* Find the insertion point; prevhdr points at the nexthdr byte. */
    hdr_len = x->type->hdr_offset(x, skb, &prevhdr);
    /* Reserve props.header_len bytes in front of the network header. */
    skb_set_mac_header(skb, (prevhdr - x->props.header_len) - skb->data);
    skb_set_network_header(skb, -x->props.header_len);
    skb->transport_header = skb->network_header + hdr_len;
    /* Shift the IPv6 header (+ ext headers) up into the new space. */
    __skb_pull(skb, hdr_len);
    memmove(ipv6_hdr(skb), iph, hdr_len);
    return 0;
}
/* Remove encapsulation header.
 *
 * The IP header will be moved over the top of the encapsulation header.
 *
 * On entry, skb->h shall point to where the IP header should be and skb->nh
 * shall be set to where the IP header currently is. skb->data shall point
 * to the start of the payload.
 */
static int xfrm6_transport_input(struct xfrm_state *x, struct sk_buff *skb)
{
    int ihl = skb->data - skb_transport_header(skb);

    if (skb->transport_header != skb->network_header) {
        /* Slide the IP header down over the encapsulation header. */
        memmove(skb_transport_header(skb),
            skb_network_header(skb), ihl);
        skb->network_header = skb->transport_header;
    }
    /* payload_len excludes the fixed IPv6 header itself. */
    ipv6_hdr(skb)->payload_len = htons(skb->len + ihl -
                sizeof(struct ipv6hdr));
    skb_reset_transport_header(skb);
    return 0;
}
/* IPv6 transport-mode handler registered with the xfrm core below. */
static struct xfrm_mode xfrm6_transport_mode = {
    .input = xfrm6_transport_input,
    .output = xfrm6_transport_output,
    .owner = THIS_MODULE,
    .encap = XFRM_MODE_TRANSPORT,
};
/* Register the transport mode for AF_INET6 at module load. */
static int __init xfrm6_transport_init(void)
{
    return xfrm_register_mode(&xfrm6_transport_mode, AF_INET6);
}
/*
 * Unregister on module unload. Unregistering a mode we registered at
 * init must not fail, so a failure here is treated as a kernel bug.
 */
static void __exit xfrm6_transport_exit(void)
{
    int err;

    err = xfrm_unregister_mode(&xfrm6_transport_mode, AF_INET6);
    BUG_ON(err);
}

module_init(xfrm6_transport_init);
module_exit(xfrm6_transport_exit);
MODULE_LICENSE("GPL");
MODULE_ALIAS_XFRM_MODE(AF_INET6, XFRM_MODE_TRANSPORT);
| gpl-2.0 |
stephenR/gcc-fpp | gcc/testsuite/gfortran.dg/elemental_subroutine_2.f90 | 136 | 1782 | ! { dg-do run }
! Test the fix for pr22146, where and elemental subroutine with
! array actual arguments would cause an ICE in gfc_conv_function_call.
! This test checks that the main uses for elemental subroutines work
! correctly; namely, as module procedures and as procedures called
! from elemental functions. The compiler would ICE on the former with
! the first version of the patch.
!
! Contributed by Paul Thomas <pault@gcc.gnu.org>
! Derived type with an integer payload and a character tag, used below
! to exercise elemental defined assignment between itype and integer.
module type
  type itype
    integer :: i
    character(1) :: ch
  end type itype
end module type
! Provides the defined assignment integer = itype (elemental) and an
! elemental function that exercises that assignment internally.
module assign
  interface assignment (=)
    module procedure itype_to_int
  end interface
contains
  ! Elemental defined assignment: copy the integer component of 'it'
  ! into 'i'; being elemental it applies element-wise to arrays.
  elemental subroutine itype_to_int (i, it)
    use type
    type(itype), intent(in) :: it
    integer, intent(out) :: i
    i = it%i
  end subroutine itype_to_int
  ! Elemental function whose body invokes the elemental defined
  ! assignment above - the combination that used to ICE (PR22146).
  elemental function i_from_itype (it) result (i)
    use type
    type(itype), intent(in) :: it
    integer :: i
    i = it
  end function i_from_itype
end module assign
! Driver: each check calls abort() on mismatch, failing the dg-do run.
program test_assign
  use type
  use assign
  type(itype) :: x(2, 2)
  integer :: i(2, 2)
  ! Test an elemental subroutine call from an elementary function.
  x = reshape ((/(itype (j, "a"), j = 1,4)/), (/2,2/))
  forall (j = 1:2, k = 1:2)
    i(j, k) = i_from_itype (x (j, k))
  end forall
  if (any(reshape (i, (/4/)).ne.(/1,2,3,4/))) call abort ()
  ! Check the interface assignment (not part of the patch).
  x = reshape ((/(itype (j**2, "b"), j = 1,4)/), (/2,2/))
  i = x
  if (any(reshape (i, (/4/)).ne.(/1,4,9,16/))) call abort ()
  ! Use the interface assignment within a forall block.
  x = reshape ((/(itype (j**3, "c"), j = 1,4)/), (/2,2/))
  forall (j = 1:2, k = 1:2)
    i(j, k) = x (j, k)
  end forall
  if (any(reshape (i, (/4/)).ne.(/1,8,27,64/))) call abort ()
end program test_assign
| gpl-2.0 |
xiaowei942/kernel_2.6.36_t21 | mm/ksm.c | 136 | 53723 | /*
* Memory merging support.
*
* This code enables dynamic sharing of identical pages found in different
* memory areas, even if they are not shared by fork()
*
* Copyright (C) 2008-2009 Red Hat, Inc.
* Authors:
* Izik Eidus
* Andrea Arcangeli
* Chris Wright
* Hugh Dickins
*
* This work is licensed under the terms of the GNU GPL, version 2.
*/
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/mman.h>
#include <linux/sched.h>
#include <linux/rwsem.h>
#include <linux/pagemap.h>
#include <linux/rmap.h>
#include <linux/spinlock.h>
#include <linux/jhash.h>
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/wait.h>
#include <linux/slab.h>
#include <linux/rbtree.h>
#include <linux/memory.h>
#include <linux/mmu_notifier.h>
#include <linux/swap.h>
#include <linux/ksm.h>
#include <linux/hash.h>
#include <asm/tlbflush.h>
#include "internal.h"
/*
* A few notes about the KSM scanning process,
* to make it easier to understand the data structures below:
*
* In order to reduce excessive scanning, KSM sorts the memory pages by their
* contents into a data structure that holds pointers to the pages' locations.
*
* Since the contents of the pages may change at any moment, KSM cannot just
* insert the pages into a normal sorted tree and expect it to find anything.
* Therefore KSM uses two data structures - the stable and the unstable tree.
*
* The stable tree holds pointers to all the merged pages (ksm pages), sorted
* by their contents. Because each such page is write-protected, searching on
* this tree is fully assured to be working (except when pages are unmapped),
* and therefore this tree is called the stable tree.
*
* In addition to the stable tree, KSM uses a second data structure called the
* unstable tree: this tree holds pointers to pages which have been found to
* be "unchanged for a period of time". The unstable tree sorts these pages
* by their contents, but since they are not write-protected, KSM cannot rely
* upon the unstable tree to work correctly - the unstable tree is liable to
* be corrupted as its contents are modified, and so it is called unstable.
*
* KSM solves this problem by several techniques:
*
* 1) The unstable tree is flushed every time KSM completes scanning all
* memory areas, and then the tree is rebuilt again from the beginning.
* 2) KSM will only insert into the unstable tree, pages whose hash value
* has not changed since the previous scan of all memory areas.
* 3) The unstable tree is a RedBlack Tree - so its balancing is based on the
* colors of the nodes and not on their contents, assuring that even when
* the tree gets "corrupted" it won't get out of balance, so scanning time
* remains the same (also, searching and inserting nodes in an rbtree uses
* the same algorithm, so we have no overhead when we flush and rebuild).
* 4) KSM never flushes the stable tree, which means that even if it were to
* take 10 attempts to find a page in the unstable tree, once it is found,
* it is secured in the stable tree. (When we scan a new page, we first
* compare it against the stable tree, and then against the unstable tree.)
*/
/**
 * struct mm_slot - ksm information per mm that is being scanned
 * @link: link into a bucket of the mm_slots_hash hash table
 * @mm_list: link into the mm_slots list, rooted in ksm_mm_head
 * @rmap_list: head for this mm_slot's singly-linked list of rmap_items
 *             (chained through rmap_item->rmap_list)
 * @mm: the mm that this information is valid for
 */
struct mm_slot {
    struct hlist_node link;
    struct list_head mm_list;
    struct rmap_item *rmap_list;
    struct mm_struct *mm;
};
/**
 * struct ksm_scan - cursor for scanning
 * @mm_slot: the current mm_slot we are scanning
 * @address: the next address inside that mm to be scanned
 * @rmap_list: link to the next rmap to be scanned in the rmap_list
 * @seqnr: count of completed full scans (needed when removing unstable node)
 *
 * There is only the one ksm_scan instance of this cursor structure
 * (defined below as the file-scope variable ksm_scan).
 */
struct ksm_scan {
    struct mm_slot *mm_slot;
    unsigned long address;
    struct rmap_item **rmap_list;
    unsigned long seqnr;
};
/**
* struct stable_node - node of the stable rbtree
* @node: rb node of this ksm page in the stable tree
* @hlist: hlist head of rmap_items using this ksm page
* @kpfn: page frame number of this ksm page
*/
struct stable_node {
	struct rb_node node;		/* position in root_stable_tree */
	struct hlist_head hlist;	/* rmap_items mapping this ksm page */
	unsigned long kpfn;		/* pfn of the ksm page (no page ref held) */
};
/**
* struct rmap_item - reverse mapping item for virtual addresses
* @rmap_list: next rmap_item in mm_slot's singly-linked rmap_list
* @anon_vma: pointer to anon_vma for this mm,address, when in stable tree
* @mm: the memory structure this rmap_item is pointing into
* @address: the virtual address this rmap_item tracks (+ flags in low bits)
* @oldchecksum: previous checksum of the page at that virtual address
* @node: rb node of this rmap_item in the unstable tree
* @head: pointer to stable_node heading this list in the stable tree
* @hlist: link into hlist of rmap_items hanging off that stable_node
*/
struct rmap_item {
	struct rmap_item *rmap_list;	/* next in mm_slot's address-ordered list */
	struct anon_vma *anon_vma;	/* when stable */
	struct mm_struct *mm;
	unsigned long address;		/* + low bits used for flags below */
	unsigned int oldchecksum;	/* when unstable */
	/* The two tree memberships are mutually exclusive, hence a union */
	union {
		struct rb_node node;	/* when node of unstable tree */
		struct {		/* when listed from stable tree */
			struct stable_node *head;
			struct hlist_node hlist;
		};
	};
};
/* Flag bits carried in rmap_item->address below PAGE_MASK */
#define SEQNR_MASK	0x0ff	/* low bits of unstable tree seqnr */
#define UNSTABLE_FLAG	0x100	/* is a node of the unstable tree */
#define STABLE_FLAG	0x200	/* is listed from the stable tree */

/* The stable and unstable tree heads */
static struct rb_root root_stable_tree = RB_ROOT;
static struct rb_root root_unstable_tree = RB_ROOT;

/* Hash from mm_struct pointer to its mm_slot */
#define MM_SLOTS_HASH_SHIFT 10
#define MM_SLOTS_HASH_HEADS (1 << MM_SLOTS_HASH_SHIFT)
static struct hlist_head mm_slots_hash[MM_SLOTS_HASH_HEADS];

/* Dummy head of the circular mm_slot list; never holds a real mm */
static struct mm_slot ksm_mm_head = {
	.mm_list = LIST_HEAD_INIT(ksm_mm_head.mm_list),
};
/* The single scan cursor, initially parked at the list head */
static struct ksm_scan ksm_scan = {
	.mm_slot = &ksm_mm_head,
};

/* Slab caches created by ksm_slab_init() */
static struct kmem_cache *rmap_item_cache;
static struct kmem_cache *stable_node_cache;
static struct kmem_cache *mm_slot_cache;

/* The number of nodes in the stable tree */
static unsigned long ksm_pages_shared;

/* The number of page slots additionally sharing those nodes */
static unsigned long ksm_pages_sharing;

/* The number of nodes in the unstable tree */
static unsigned long ksm_pages_unshared;

/* The number of rmap_items in use: to calculate pages_volatile */
static unsigned long ksm_rmap_items;

/* Number of pages ksmd should scan in one batch */
static unsigned int ksm_thread_pages_to_scan = 100;

/* Milliseconds ksmd should sleep between batches */
static unsigned int ksm_thread_sleep_millisecs = 20;

/* Run states selected through sysfs */
#define KSM_RUN_STOP	0
#define KSM_RUN_MERGE	1
#define KSM_RUN_UNMERGE	2
static unsigned int ksm_run = KSM_RUN_STOP;

static DECLARE_WAIT_QUEUE_HEAD(ksm_thread_wait);
static DEFINE_MUTEX(ksm_thread_mutex);
/* Protects ksm_scan.mm_slot, the mm_slot list and mm_slots_hash */
static DEFINE_SPINLOCK(ksm_mmlist_lock);

#define KSM_KMEM_CACHE(__struct, __flags) kmem_cache_create("ksm_"#__struct,\
		sizeof(struct __struct), __alignof__(struct __struct),\
		(__flags), NULL)
/*
 * Create the three slab caches KSM needs.  On any failure, undo the
 * caches already created and report -ENOMEM.
 */
static int __init ksm_slab_init(void)
{
	rmap_item_cache = KSM_KMEM_CACHE(rmap_item, 0);
	if (!rmap_item_cache)
		goto fail;

	stable_node_cache = KSM_KMEM_CACHE(stable_node, 0);
	if (!stable_node_cache)
		goto undo_rmap_item;

	mm_slot_cache = KSM_KMEM_CACHE(mm_slot, 0);
	if (!mm_slot_cache)
		goto undo_stable_node;

	return 0;

undo_stable_node:
	kmem_cache_destroy(stable_node_cache);
undo_rmap_item:
	kmem_cache_destroy(rmap_item_cache);
fail:
	return -ENOMEM;
}
/*
 * Destroy all three KSM slab caches (the destroys are independent of
 * each other).  mm_slot_cache is the one left NULL afterwards, because
 * alloc_mm_slot() tests it to detect a failed initialization.
 */
static void __init ksm_slab_free(void)
{
	kmem_cache_destroy(rmap_item_cache);
	kmem_cache_destroy(stable_node_cache);
	kmem_cache_destroy(mm_slot_cache);
	mm_slot_cache = NULL;
}
static inline struct rmap_item *alloc_rmap_item(void)
{
struct rmap_item *rmap_item;
rmap_item = kmem_cache_zalloc(rmap_item_cache, GFP_KERNEL);
if (rmap_item)
ksm_rmap_items++;
return rmap_item;
}
/*
 * Return an rmap_item to its cache and drop the in-use count.  The mm
 * pointer is poisoned first so that any stale user oopses predictably.
 */
static inline void free_rmap_item(struct rmap_item *rmap_item)
{
	rmap_item->mm = NULL;
	ksm_rmap_items--;
	kmem_cache_free(rmap_item_cache, rmap_item);
}
/* Allocate an uninitialized stable_node (GFP_KERNEL: may sleep). */
static inline struct stable_node *alloc_stable_node(void)
{
	return kmem_cache_alloc(stable_node_cache, GFP_KERNEL);
}
/* Release a stable_node back to its slab cache. */
static inline void free_stable_node(struct stable_node *stable_node)
{
	kmem_cache_free(stable_node_cache, stable_node);
}
/*
 * Allocate a zeroed mm_slot, or NULL if ksm_slab_init() failed
 * (mm_slot_cache stays NULL in that case) or on allocation failure.
 */
static inline struct mm_slot *alloc_mm_slot(void)
{
	if (!mm_slot_cache)	/* initialization failed */
		return NULL;
	return kmem_cache_zalloc(mm_slot_cache, GFP_KERNEL);
}
/* Release an mm_slot back to its slab cache. */
static inline void free_mm_slot(struct mm_slot *mm_slot)
{
	kmem_cache_free(mm_slot_cache, mm_slot);
}
/*
 * Look up the mm_slot for an mm in mm_slots_hash (keyed by the
 * mm_struct pointer itself).  Returns NULL if the mm is not enrolled.
 */
static struct mm_slot *get_mm_slot(struct mm_struct *mm)
{
	struct mm_slot *mm_slot;
	struct hlist_head *bucket;
	struct hlist_node *node;

	bucket = &mm_slots_hash[hash_ptr(mm, MM_SLOTS_HASH_SHIFT)];
	hlist_for_each_entry(mm_slot, node, bucket, link) {
		if (mm == mm_slot->mm)
			return mm_slot;
	}
	return NULL;
}
/*
 * Bind mm_slot to mm and hang it on the hash bucket selected by the
 * mm pointer, so get_mm_slot() can find it later.
 */
static void insert_to_mm_slots_hash(struct mm_struct *mm,
				    struct mm_slot *mm_slot)
{
	struct hlist_head *bucket =
		&mm_slots_hash[hash_ptr(mm, MM_SLOTS_HASH_SHIFT)];

	mm_slot->mm = mm;
	hlist_add_head(&mm_slot->link, bucket);
}
/* Nonzero iff this rmap_item is currently listed from a stable node. */
static inline int in_stable_tree(struct rmap_item *rmap_item)
{
	return rmap_item->address & STABLE_FLAG;
}
static void hold_anon_vma(struct rmap_item *rmap_item,
struct anon_vma *anon_vma)
{
rmap_item->anon_vma = anon_vma;
get_anon_vma(anon_vma);
}
/* Drop the anon_vma reference taken by hold_anon_vma(). */
static void ksm_drop_anon_vma(struct rmap_item *rmap_item)
{
	drop_anon_vma(rmap_item->anon_vma);
}
/*
* ksmd, and unmerge_and_remove_all_rmap_items(), must not touch an mm's
* page tables after it has passed through ksm_exit() - which, if necessary,
* takes mmap_sem briefly to serialize against them. ksm_exit() does not set
* a special flag: they can just back out as soon as mm_users goes to zero.
* ksm_test_exit() is used throughout to make this test for exit: in some
* places for correctness, in some places just to avoid unnecessary work.
*/
/* True once the last user reference on this mm is gone (it is exiting). */
static inline bool ksm_test_exit(struct mm_struct *mm)
{
	return !atomic_read(&mm->mm_users);
}
/*
* We use break_ksm to break COW on a ksm page: it's a stripped down
*
* if (get_user_pages(current, mm, addr, 1, 1, 1, &page, NULL) == 1)
* put_page(page);
*
* but taking great care only to touch a ksm page, in a VM_MERGEABLE vma,
* in case the application has unmapped and remapped mm,addr meanwhile.
* Could a ksm page appear anywhere else? Actually yes, in a VM_PFNMAP
* mmap of /dev/mem or /dev/kmem, where we would not want to touch it.
*/
/*
 * Break COW on the ksm page at addr by simulating a write fault.
 * Returns 0 on success (or when there is nothing to do), -ENOMEM only
 * if the fault reported VM_FAULT_OOM.
 */
static int break_ksm(struct vm_area_struct *vma, unsigned long addr)
{
	struct page *page;
	int ret = 0;

	do {
		cond_resched();
		page = follow_page(vma, addr, FOLL_GET);
		if (IS_ERR_OR_NULL(page))
			break;	/* nothing mapped there any more: done */
		if (PageKsm(page))
			/* Force a write fault so COW replaces the ksm page */
			ret = handle_mm_fault(vma->vm_mm, vma, addr,
							FAULT_FLAG_WRITE);
		else
			/* No longer a ksm page: treat as already broken */
			ret = VM_FAULT_WRITE;
		put_page(page);
	} while (!(ret & (VM_FAULT_WRITE | VM_FAULT_SIGBUS | VM_FAULT_OOM)));
	/*
	 * We must loop because handle_mm_fault() may back out if there's
	 * any difficulty e.g. if pte accessed bit gets updated concurrently.
	 *
	 * VM_FAULT_WRITE is what we have been hoping for: it indicates that
	 * COW has been broken, even if the vma does not permit VM_WRITE;
	 * but note that a concurrent fault might break PageKsm for us.
	 *
	 * VM_FAULT_SIGBUS could occur if we race with truncation of the
	 * backing file, which also invalidates anonymous pages: that's
	 * okay, that truncation will have unmapped the PageKsm for us.
	 *
	 * VM_FAULT_OOM: at the time of writing (late July 2009), setting
	 * aside mem_cgroup limits, VM_FAULT_OOM would only be set if the
	 * current task has TIF_MEMDIE set, and will be OOM killed on return
	 * to user; and ksmd, having no mm, would never be chosen for that.
	 *
	 * But if the mm is in a limited mem_cgroup, then the fault may fail
	 * with VM_FAULT_OOM even if the current task is not TIF_MEMDIE; and
	 * even ksmd can fail in this way - though it's usually breaking ksm
	 * just to undo a merge it made a moment before, so unlikely to oom.
	 *
	 * That's a pity: we might therefore have more kernel pages allocated
	 * than we're counting as nodes in the stable tree; but ksm_do_scan
	 * will retry to break_cow on each pass, so should recover the page
	 * in due course. The important thing is to not let VM_MERGEABLE
	 * be cleared while any such pages might remain in the area.
	 */
	return (ret & VM_FAULT_OOM) ? -ENOMEM : 0;
}
/*
 * Undo a merge at rmap_item's mm,address: drop its anon_vma reference,
 * then (if the vma is still there and still mergeable) break COW on
 * the ksm page.  Best effort: simply gives up if the area has gone.
 */
static void break_cow(struct rmap_item *rmap_item)
{
	struct mm_struct *mm = rmap_item->mm;
	unsigned long addr = rmap_item->address;
	struct vm_area_struct *vma;

	/*
	 * It is not an accident that whenever we want to break COW
	 * to undo, we also need to drop a reference to the anon_vma.
	 */
	ksm_drop_anon_vma(rmap_item);

	down_read(&mm->mmap_sem);
	if (ksm_test_exit(mm))
		goto out;
	/* Revalidate the vma: the task may have unmapped/remapped meanwhile */
	vma = find_vma(mm, addr);
	if (!vma || vma->vm_start > addr)
		goto out;
	if (!(vma->vm_flags & VM_MERGEABLE) || !vma->anon_vma)
		goto out;
	break_ksm(vma, addr);
out:
	up_read(&mm->mmap_sem);
}
/*
 * Look up the anonymous page at rmap_item's mm,address, taking a page
 * reference (FOLL_GET).  Returns the held page, or NULL if the area
 * has gone, is no longer mergeable, or the page is not anonymous.
 */
static struct page *get_mergeable_page(struct rmap_item *rmap_item)
{
	struct mm_struct *mm = rmap_item->mm;
	unsigned long addr = rmap_item->address;
	struct vm_area_struct *vma;
	struct page *page;

	down_read(&mm->mmap_sem);
	if (ksm_test_exit(mm))
		goto out;
	vma = find_vma(mm, addr);
	if (!vma || vma->vm_start > addr)
		goto out;
	if (!(vma->vm_flags & VM_MERGEABLE) || !vma->anon_vma)
		goto out;

	page = follow_page(vma, addr, FOLL_GET);
	if (IS_ERR_OR_NULL(page))
		goto out;
	if (PageAnon(page)) {
		flush_anon_page(vma, page, addr);
		flush_dcache_page(page);
	} else {
		put_page(page);
		/*
		 * NOTE: the out label sits inside this else, so every goto
		 * above lands on "page = NULL" before the common unlock.
		 */
out:		page = NULL;
	}
	up_read(&mm->mmap_sem);
	return page;
}
/*
 * Detach every rmap_item hanging off a stale stable_node, fixing up
 * the shared/sharing counters, then erase and free the node itself.
 * Called from get_ksm_page() when the ksm page has been zapped.
 */
static void remove_node_from_stable_tree(struct stable_node *stable_node)
{
	struct rmap_item *rmap_item;
	struct hlist_node *hlist;

	hlist_for_each_entry(rmap_item, hlist, &stable_node->hlist, hlist) {
		/* First item on the hlist is "shared", the rest "sharing" */
		if (rmap_item->hlist.next)
			ksm_pages_sharing--;
		else
			ksm_pages_shared--;
		ksm_drop_anon_vma(rmap_item);
		rmap_item->address &= PAGE_MASK;	/* clears STABLE_FLAG */
		cond_resched();
	}

	rb_erase(&stable_node->node, &root_stable_tree);
	free_stable_node(stable_node);
}
/*
* get_ksm_page: checks if the page indicated by the stable node
* is still its ksm page, despite having held no reference to it.
* In which case we can trust the content of the page, and it
* returns the gotten page; but if the page has now been zapped,
* remove the stale node from the stable tree and return NULL.
*
* You would expect the stable_node to hold a reference to the ksm page.
* But if it increments the page's count, swapping out has to wait for
* ksmd to come around again before it can free the page, which may take
* seconds or even minutes: much too unresponsive. So instead we use a
* "keyhole reference": access to the ksm page from the stable node peeps
* out through its keyhole to see if that page still holds the right key,
* pointing back to this stable node. This relies on freeing a PageAnon
* page to reset its page->mapping to NULL, and relies on no other use of
* a page to put something that might look like our key in page->mapping.
*
* include/linux/pagemap.h page_cache_get_speculative() is a good reference,
* but this is different - made simpler by ksm_thread_mutex being held, but
* interesting for assuming that no other use of the struct page could ever
* put our expected_mapping into page->mapping (or a field of the union which
* coincides with page->mapping). The RCU calls are not for KSM at all, but
* to keep the page_count protocol described with page_cache_get_speculative.
*
* Note: it is possible that get_ksm_page() will return NULL one moment,
* then page the next, if the page is in between page_freeze_refs() and
* page_unfreeze_refs(): this shouldn't be a problem anywhere, the page
* is on its way to being freed; but it is an anomaly to bear in mind.
*/
static struct page *get_ksm_page(struct stable_node *stable_node)
{
	struct page *page;
	void *expected_mapping;

	page = pfn_to_page(stable_node->kpfn);
	/* A live ksm page's ->mapping points back at its stable_node, tagged */
	expected_mapping = (void *)stable_node +
				(PAGE_MAPPING_ANON | PAGE_MAPPING_KSM);
	rcu_read_lock();
	if (page->mapping != expected_mapping)
		goto stale;
	if (!get_page_unless_zero(page))
		goto stale;
	/* Re-check after taking the reference: the page may just have died */
	if (page->mapping != expected_mapping) {
		put_page(page);
		goto stale;
	}
	rcu_read_unlock();
	return page;		/* reference held */

stale:
	rcu_read_unlock();
	remove_node_from_stable_tree(stable_node);
	return NULL;
}
/*
* Removing rmap_item from stable or unstable tree.
* This function will clean the information from the stable/unstable tree.
*/
static void remove_rmap_item_from_tree(struct rmap_item *rmap_item)
{
	if (rmap_item->address & STABLE_FLAG) {
		struct stable_node *stable_node;
		struct page *page;

		stable_node = rmap_item->head;
		/* NULL means the node was stale and has just been removed */
		page = get_ksm_page(stable_node);
		if (!page)
			goto out;

		/* Page lock serializes against stable_tree_append() */
		lock_page(page);
		hlist_del(&rmap_item->hlist);
		unlock_page(page);
		put_page(page);

		/* Non-empty hlist remaining => we were a "sharing" item */
		if (stable_node->hlist.first)
			ksm_pages_sharing--;
		else
			ksm_pages_shared--;

		ksm_drop_anon_vma(rmap_item);
		rmap_item->address &= PAGE_MASK;

	} else if (rmap_item->address & UNSTABLE_FLAG) {
		unsigned char age;
		/*
		 * Usually ksmd can and must skip the rb_erase, because
		 * root_unstable_tree was already reset to RB_ROOT.
		 * But be careful when an mm is exiting: do the rb_erase
		 * if this rmap_item was inserted by this scan, rather
		 * than left over from before.
		 */
		age = (unsigned char)(ksm_scan.seqnr - rmap_item->address);
		BUG_ON(age > 1);
		if (!age)
			rb_erase(&rmap_item->node, &root_unstable_tree);

		ksm_pages_unshared--;
		rmap_item->address &= PAGE_MASK;
	}
out:
	cond_resched();		/* we're called from many long loops */
}
static void remove_trailing_rmap_items(struct mm_slot *mm_slot,
struct rmap_item **rmap_list)
{
while (*rmap_list) {
struct rmap_item *rmap_item = *rmap_list;
*rmap_list = rmap_item->rmap_list;
remove_rmap_item_from_tree(rmap_item);
free_rmap_item(rmap_item);
}
}
/*
* Though it's very tempting to unmerge in_stable_tree(rmap_item)s rather
* than check every pte of a given vma, the locking doesn't quite work for
* that - an rmap_item is assigned to the stable tree after inserting ksm
* page and upping mmap_sem. Nor does it fit with the way we skip dup'ing
* rmap_items from parent to child at fork time (so as not to waste time
* if exit comes before the next scan reaches it).
*
* Similarly, although we'd like to remove rmap_items (so updating counts
* and freeing memory) when unmerging an area, it's easier to leave that
* to the next pass of ksmd - consider, for example, how ksmd might be
* in cmp_and_merge_page on one of the rmap_items we would be removing.
*/
/*
 * Break COW on each page of [start, end) within vma.  Stops early and
 * returns an error on a pending signal (-ERESTARTSYS) or a break_ksm()
 * failure; silently stops if the mm is exiting.  Returns 0 otherwise.
 */
static int unmerge_ksm_pages(struct vm_area_struct *vma,
			     unsigned long start, unsigned long end)
{
	unsigned long addr = start;
	int err = 0;

	while (addr < end && !err) {
		if (ksm_test_exit(vma->vm_mm))
			break;
		if (signal_pending(current))
			err = -ERESTARTSYS;
		else
			err = break_ksm(vma, addr);
		addr += PAGE_SIZE;
	}
	return err;
}
#ifdef CONFIG_SYSFS
/*
* Only called through the sysfs control interface:
*/
/*
 * Walk every enrolled mm, break COW on all its mergeable areas, and
 * free its rmap_items; mms that are exiting are unhooked entirely.
 * On success the scan cursor ends back at ksm_mm_head with seqnr 0.
 */
static int unmerge_and_remove_all_rmap_items(void)
{
	struct mm_slot *mm_slot;
	struct mm_struct *mm;
	struct vm_area_struct *vma;
	int err = 0;

	/* Park the cursor on the first real mm_slot */
	spin_lock(&ksm_mmlist_lock);
	ksm_scan.mm_slot = list_entry(ksm_mm_head.mm_list.next,
						struct mm_slot, mm_list);
	spin_unlock(&ksm_mmlist_lock);

	for (mm_slot = ksm_scan.mm_slot;
			mm_slot != &ksm_mm_head; mm_slot = ksm_scan.mm_slot) {
		mm = mm_slot->mm;
		down_read(&mm->mmap_sem);
		for (vma = mm->mmap; vma; vma = vma->vm_next) {
			if (ksm_test_exit(mm))
				break;
			if (!(vma->vm_flags & VM_MERGEABLE) || !vma->anon_vma)
				continue;
			err = unmerge_ksm_pages(vma,
						vma->vm_start, vma->vm_end);
			if (err)
				goto error;
		}

		remove_trailing_rmap_items(mm_slot, &mm_slot->rmap_list);

		/* Advance the cursor before possibly freeing this slot */
		spin_lock(&ksm_mmlist_lock);
		ksm_scan.mm_slot = list_entry(mm_slot->mm_list.next,
						struct mm_slot, mm_list);
		if (ksm_test_exit(mm)) {
			/* The mm is exiting: unhook it from KSM entirely */
			hlist_del(&mm_slot->link);
			list_del(&mm_slot->mm_list);
			spin_unlock(&ksm_mmlist_lock);

			free_mm_slot(mm_slot);
			clear_bit(MMF_VM_MERGEABLE, &mm->flags);
			up_read(&mm->mmap_sem);
			mmdrop(mm);
		} else {
			spin_unlock(&ksm_mmlist_lock);
			up_read(&mm->mmap_sem);
		}
	}

	ksm_scan.seqnr = 0;
	return 0;

error:
	up_read(&mm->mmap_sem);
	/* Park the cursor back at the head so a rerun starts cleanly */
	spin_lock(&ksm_mmlist_lock);
	ksm_scan.mm_slot = &ksm_mm_head;
	spin_unlock(&ksm_mmlist_lock);
	return err;
}
#endif /* CONFIG_SYSFS */
/* Hash the page's contents as 32-bit words (jhash2, seed 17). */
static u32 calc_checksum(struct page *page)
{
	void *addr = kmap_atomic(page, KM_USER0);
	u32 sum = jhash2(addr, PAGE_SIZE / 4, 17);

	kunmap_atomic(addr, KM_USER0);
	return sum;
}
/*
 * memcmp() the full contents of two pages through short-lived atomic
 * kmaps.  Returns <0, 0 or >0 like memcmp.
 */
static int memcmp_pages(struct page *page1, struct page *page2)
{
	char *p1, *p2;
	int diff;

	p1 = kmap_atomic(page1, KM_USER0);
	p2 = kmap_atomic(page2, KM_USER1);
	diff = memcmp(p1, p2, PAGE_SIZE);
	kunmap_atomic(p2, KM_USER1);
	kunmap_atomic(p1, KM_USER0);
	return diff;
}
/* True when the two pages' contents are byte-for-byte equal. */
static inline int pages_identical(struct page *page1, struct page *page2)
{
	return memcmp_pages(page1, page2) == 0;
}
/*
 * Make the pte mapping page in vma clean and write-protected, so the
 * page's contents cannot change under us while we merge it.  On
 * success stores the resulting pte in *orig_pte and returns 0;
 * returns -EFAULT if the mapping has gone or I/O may be in flight.
 */
static int write_protect_page(struct vm_area_struct *vma, struct page *page,
			      pte_t *orig_pte)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long addr;
	pte_t *ptep;
	spinlock_t *ptl;
	int swapped;
	int err = -EFAULT;

	addr = page_address_in_vma(page, vma);
	if (addr == -EFAULT)
		goto out;

	/* Take the pte lock; fails if page is no longer mapped there */
	ptep = page_check_address(page, mm, addr, &ptl, 0);
	if (!ptep)
		goto out;

	if (pte_write(*ptep) || pte_dirty(*ptep)) {
		pte_t entry;

		swapped = PageSwapCache(page);
		flush_cache_page(vma, addr, page_to_pfn(page));
		/*
		 * Ok this is tricky, when get_user_pages_fast() run it doesnt
		 * take any lock, therefore the check that we are going to make
		 * with the pagecount against the mapcount is racey and
		 * O_DIRECT can happen right after the check.
		 * So we clear the pte and flush the tlb before the check
		 * this assure us that no O_DIRECT can happen after the check
		 * or in the middle of the check.
		 */
		entry = ptep_clear_flush(vma, addr, ptep);
		/*
		 * Check that no O_DIRECT or similar I/O is in progress on the
		 * page
		 */
		if (page_mapcount(page) + 1 + swapped != page_count(page)) {
			/* Extra references exist: restore the pte and bail */
			set_pte_at(mm, addr, ptep, entry);
			goto out_unlock;
		}
		if (pte_dirty(entry))
			set_page_dirty(page);
		entry = pte_mkclean(pte_wrprotect(entry));
		set_pte_at_notify(mm, addr, ptep, entry);
	}
	*orig_pte = *ptep;
	err = 0;

out_unlock:
	pte_unmap_unlock(ptep, ptl);
out:
	return err;
}
/**
* replace_page - replace page in vma by new ksm page
* @vma: vma that holds the pte pointing to page
* @page: the page we are replacing by kpage
* @kpage: the ksm page we replace page by
* @orig_pte: the original value of the pte
*
* Returns 0 on success, -EFAULT on failure.
*/
static int replace_page(struct vm_area_struct *vma, struct page *page,
			struct page *kpage, pte_t orig_pte)
{
	struct mm_struct *mm = vma->vm_mm;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *ptep;
	spinlock_t *ptl;
	unsigned long addr;
	int err = -EFAULT;

	addr = page_address_in_vma(page, vma);
	if (addr == -EFAULT)
		goto out;

	/* Walk down to the pte; any missing level means the mapping is gone */
	pgd = pgd_offset(mm, addr);
	if (!pgd_present(*pgd))
		goto out;

	pud = pud_offset(pgd, addr);
	if (!pud_present(*pud))
		goto out;

	pmd = pmd_offset(pud, addr);
	if (!pmd_present(*pmd))
		goto out;

	ptep = pte_offset_map_lock(mm, pmd, addr, &ptl);
	/* The pte must still be exactly as write_protect_page() left it */
	if (!pte_same(*ptep, orig_pte)) {
		pte_unmap_unlock(ptep, ptl);
		goto out;
	}

	/* Point the pte at kpage instead: take ref and rmap before, drop after */
	get_page(kpage);
	page_add_anon_rmap(kpage, vma, addr);

	flush_cache_page(vma, addr, pte_pfn(*ptep));
	ptep_clear_flush(vma, addr, ptep);
	set_pte_at_notify(mm, addr, ptep, mk_pte(kpage, vma->vm_page_prot));

	page_remove_rmap(page);
	put_page(page);

	pte_unmap_unlock(ptep, ptl);
	err = 0;
out:
	return err;
}
/*
* try_to_merge_one_page - take two pages and merge them into one
* @vma: the vma that holds the pte pointing to page
* @page: the PageAnon page that we want to replace with kpage
* @kpage: the PageKsm page that we want to map instead of page,
* or NULL the first time when we want to use page as kpage.
*
* This function returns 0 if the pages were merged, -EFAULT otherwise.
*/
static int try_to_merge_one_page(struct vm_area_struct *vma,
				 struct page *page, struct page *kpage)
{
	pte_t orig_pte = __pte(0);
	int err = -EFAULT;

	if (page == kpage)			/* ksm page forked */
		return 0;

	if (!(vma->vm_flags & VM_MERGEABLE))
		goto out;
	if (!PageAnon(page))
		goto out;

	/*
	 * We need the page lock to read a stable PageSwapCache in
	 * write_protect_page().  We use trylock_page() instead of
	 * lock_page() because we don't want to wait here - we
	 * prefer to continue scanning and merging different pages,
	 * then come back to this page when it is unlocked.
	 */
	if (!trylock_page(page))
		goto out;
	/*
	 * If this anonymous page is mapped only here, its pte may need
	 * to be write-protected.  If it's mapped elsewhere, all of its
	 * ptes are necessarily already write-protected.  But in either
	 * case, we need to lock and check page_count is not raised.
	 */
	if (write_protect_page(vma, page, &orig_pte) == 0) {
		if (!kpage) {
			/*
			 * While we hold page lock, upgrade page from
			 * PageAnon+anon_vma to PageKsm+NULL stable_node:
			 * stable_tree_insert() will update stable_node.
			 */
			set_page_stable_node(page, NULL);
			mark_page_accessed(page);
			err = 0;
		} else if (pages_identical(page, kpage))
			err = replace_page(vma, page, kpage, orig_pte);
	}

	/* Keep mlock accounting straight when merging into a locked vma */
	if ((vma->vm_flags & VM_LOCKED) && kpage && !err) {
		munlock_vma_page(page);
		if (!PageMlocked(kpage)) {
			unlock_page(page);
			lock_page(kpage);
			mlock_vma_page(kpage);
			page = kpage;		/* for final unlock */
		}
	}

	unlock_page(page);
out:
	return err;
}
/*
* try_to_merge_with_ksm_page - like try_to_merge_two_pages,
* but no new kernel page is allocated: kpage must already be a ksm page.
*
* This function returns 0 if the pages were merged, -EFAULT otherwise.
*/
static int try_to_merge_with_ksm_page(struct rmap_item *rmap_item,
				      struct page *page, struct page *kpage)
{
	struct mm_struct *mm = rmap_item->mm;
	struct vm_area_struct *vma;
	int err = -EFAULT;

	down_read(&mm->mmap_sem);
	if (ksm_test_exit(mm))
		goto out;
	/* Revalidate that the vma still covers rmap_item's address */
	vma = find_vma(mm, rmap_item->address);
	if (!vma || vma->vm_start > rmap_item->address)
		goto out;

	err = try_to_merge_one_page(vma, page, kpage);
	if (err)
		goto out;

	/* Must get reference to anon_vma while still holding mmap_sem */
	hold_anon_vma(rmap_item, vma->anon_vma);
out:
	up_read(&mm->mmap_sem);
	return err;
}
/*
* try_to_merge_two_pages - take two identical pages and prepare them
* to be merged into one page.
*
* This function returns the kpage if we successfully merged two identical
* pages into one ksm page, NULL otherwise.
*
* Note that this function upgrades page to ksm page: if one of the pages
* is already a ksm page, try_to_merge_with_ksm_page should be used.
*/
/*
 * First upgrade @page itself to a ksm page (kpage == NULL case of
 * try_to_merge_with_ksm_page), then map @tree_page onto it.  Returns
 * @page, now the shared ksm page, on success; NULL on any failure.
 */
static struct page *try_to_merge_two_pages(struct rmap_item *rmap_item,
					   struct page *page,
					   struct rmap_item *tree_rmap_item,
					   struct page *tree_page)
{
	int err;

	err = try_to_merge_with_ksm_page(rmap_item, page, NULL);
	if (err)
		return NULL;

	err = try_to_merge_with_ksm_page(tree_rmap_item, tree_page, page);
	if (err) {
		/*
		 * We made a ksm page with only one pte pointing to it:
		 * undo that by breaking COW on it again.
		 */
		break_cow(rmap_item);
		return NULL;
	}
	return page;
}
/*
* stable_tree_search - search for page inside the stable tree
*
* This function checks if there is a page inside the stable tree
* with identical content to the page that we are scanning right now.
*
* This function returns the stable tree node of identical content if found,
* NULL otherwise.
*/
/*
 * Binary-search the stable tree (ordered by memcmp of page contents)
 * for a ksm page identical to @page.  Returns that page with a
 * reference held, or NULL if none matches.
 */
static struct page *stable_tree_search(struct page *page)
{
	struct rb_node *node = root_stable_tree.rb_node;
	struct stable_node *stable_node;

	stable_node = page_stable_node(page);
	if (stable_node) {			/* ksm page forked */
		get_page(page);
		return page;
	}

	while (node) {
		struct page *tree_page;
		int ret;

		cond_resched();
		stable_node = rb_entry(node, struct stable_node, node);
		/* NULL: that node was stale and has just been removed */
		tree_page = get_ksm_page(stable_node);
		if (!tree_page)
			return NULL;

		ret = memcmp_pages(page, tree_page);

		if (ret < 0) {
			put_page(tree_page);
			node = node->rb_left;
		} else if (ret > 0) {
			put_page(tree_page);
			node = node->rb_right;
		} else
			return tree_page;	/* reference still held */
	}

	return NULL;
}
/*
* stable_tree_insert - insert rmap_item pointing to new ksm page
* into the stable tree.
*
* This function returns the stable tree node just allocated on success,
* NULL otherwise.
*/
static struct stable_node *stable_tree_insert(struct page *kpage)
{
	struct rb_node **new = &root_stable_tree.rb_node;
	struct rb_node *parent = NULL;
	struct stable_node *stable_node;

	/* Find the insertion point, ordered by memcmp of page contents */
	while (*new) {
		struct page *tree_page;
		int ret;

		cond_resched();
		stable_node = rb_entry(*new, struct stable_node, node);
		/* NULL: that node was stale and has just been removed */
		tree_page = get_ksm_page(stable_node);
		if (!tree_page)
			return NULL;

		ret = memcmp_pages(kpage, tree_page);
		put_page(tree_page);

		parent = *new;
		if (ret < 0)
			new = &parent->rb_left;
		else if (ret > 0)
			new = &parent->rb_right;
		else {
			/*
			 * It is not a bug that stable_tree_search() didn't
			 * find this node: because at that time our page was
			 * not yet write-protected, so may have changed since.
			 */
			return NULL;
		}
	}

	stable_node = alloc_stable_node();
	if (!stable_node)
		return NULL;

	rb_link_node(&stable_node->node, parent, new);
	rb_insert_color(&stable_node->node, &root_stable_tree);

	INIT_HLIST_HEAD(&stable_node->hlist);

	/* Link page and node both ways: pfn here, page->mapping back to us */
	stable_node->kpfn = page_to_pfn(kpage);
	set_page_stable_node(kpage, stable_node);

	return stable_node;
}
/*
* unstable_tree_search_insert - search for identical page,
* else insert rmap_item into the unstable tree.
*
* This function searches for a page in the unstable tree identical to the
* page currently being scanned; and if no identical page is found in the
* tree, we insert rmap_item as a new object into the unstable tree.
*
* This function returns pointer to rmap_item found to be identical
* to the currently scanned page, NULL otherwise.
*
* This function does both searching and inserting, because they share
* the same walking algorithm in an rbtree.
*/
static
struct rmap_item *unstable_tree_search_insert(struct rmap_item *rmap_item,
					      struct page *page,
					      struct page **tree_pagep)

{
	struct rb_node **new = &root_unstable_tree.rb_node;
	struct rb_node *parent = NULL;

	while (*new) {
		struct rmap_item *tree_rmap_item;
		struct page *tree_page;
		int ret;

		cond_resched();
		tree_rmap_item = rb_entry(*new, struct rmap_item, node);
		/* Revalidates mm,address and takes a reference on the page */
		tree_page = get_mergeable_page(tree_rmap_item);
		if (IS_ERR_OR_NULL(tree_page))
			return NULL;

		/*
		 * Don't substitute a ksm page for a forked page.
		 */
		if (page == tree_page) {
			put_page(tree_page);
			return NULL;
		}

		ret = memcmp_pages(page, tree_page);

		parent = *new;
		if (ret < 0) {
			put_page(tree_page);
			new = &parent->rb_left;
		} else if (ret > 0) {
			put_page(tree_page);
			new = &parent->rb_right;
		} else {
			/* Identical content found: hand back the held page */
			*tree_pagep = tree_page;
			return tree_rmap_item;
		}
	}

	/* No match: insert rmap_item, stamping it with this scan's seqnr */
	rmap_item->address |= UNSTABLE_FLAG;
	rmap_item->address |= (ksm_scan.seqnr & SEQNR_MASK);
	rb_link_node(&rmap_item->node, parent, new);
	rb_insert_color(&rmap_item->node, &root_unstable_tree);

	ksm_pages_unshared++;
	return NULL;
}
/*
* stable_tree_append - add another rmap_item to the linked list of
* rmap_items hanging off a given node of the stable tree, all sharing
* the same ksm page.
*/
static void stable_tree_append(struct rmap_item *rmap_item,
			       struct stable_node *stable_node)
{
	rmap_item->head = stable_node;
	rmap_item->address |= STABLE_FLAG;
	hlist_add_head(&rmap_item->hlist, &stable_node->hlist);

	/*
	 * After hlist_add_head, a non-NULL ->next means the node already
	 * had another rmap_item: we are "sharing" rather than the first
	 * "shared" mapping of this ksm page.
	 */
	if (rmap_item->hlist.next)
		ksm_pages_sharing++;
	else
		ksm_pages_shared++;
}
/*
* cmp_and_merge_page - first see if page can be merged into the stable tree;
* if not, compare checksum to previous and if it's the same, see if page can
* be inserted into the unstable tree, or merged with a page already there and
* both transferred to the stable tree.
*
* @page: the page that we are searching identical page to.
* @rmap_item: the reverse mapping into the virtual address of this page
*/
static void cmp_and_merge_page(struct page *page, struct rmap_item *rmap_item)
{
	struct rmap_item *tree_rmap_item;
	struct page *tree_page = NULL;
	struct stable_node *stable_node;
	struct page *kpage;
	unsigned int checksum;
	int err;

	/* Start fresh: forget any previous tree membership of this item */
	remove_rmap_item_from_tree(rmap_item);

	/* We first start with searching the page inside the stable tree */
	kpage = stable_tree_search(page);
	if (kpage) {
		err = try_to_merge_with_ksm_page(rmap_item, page, kpage);
		if (!err) {
			/*
			 * The page was successfully merged:
			 * add its rmap_item to the stable tree.
			 */
			lock_page(kpage);
			stable_tree_append(rmap_item, page_stable_node(kpage));
			unlock_page(kpage);
		}
		put_page(kpage);
		return;
	}

	/*
	 * If the hash value of the page has changed from the last time
	 * we calculated it, this page is changing frequently: therefore we
	 * don't want to insert it in the unstable tree, and we don't want
	 * to waste our time searching for something identical to it there.
	 */
	checksum = calc_checksum(page);
	if (rmap_item->oldchecksum != checksum) {
		rmap_item->oldchecksum = checksum;
		return;
	}

	tree_rmap_item =
		unstable_tree_search_insert(rmap_item, page, &tree_page);
	if (tree_rmap_item) {
		kpage = try_to_merge_two_pages(rmap_item, page,
						tree_rmap_item, tree_page);
		put_page(tree_page);
		/*
		 * As soon as we merge this page, we want to remove the
		 * rmap_item of the page we have merged with from the unstable
		 * tree, and insert it instead as new node in the stable tree.
		 */
		if (kpage) {
			remove_rmap_item_from_tree(tree_rmap_item);

			lock_page(kpage);
			stable_node = stable_tree_insert(kpage);
			if (stable_node) {
				stable_tree_append(tree_rmap_item, stable_node);
				stable_tree_append(rmap_item, stable_node);
			}
			unlock_page(kpage);

			/*
			 * If we fail to insert the page into the stable tree,
			 * we will have 2 virtual addresses that are pointing
			 * to a ksm page left outside the stable tree,
			 * in which case we need to break_cow on both.
			 */
			if (!stable_node) {
				break_cow(tree_rmap_item);
				break_cow(rmap_item);
			}
		}
	}
}
/*
 * Find or create the rmap_item for addr in mm_slot's address-ordered
 * list, advancing *rmap_list past (and freeing) items for addresses
 * below addr that no longer exist.  Returns NULL only on allocation
 * failure.
 */
static struct rmap_item *get_next_rmap_item(struct mm_slot *mm_slot,
					    struct rmap_item **rmap_list,
					    unsigned long addr)
{
	struct rmap_item *rmap_item;

	while (*rmap_list) {
		rmap_item = *rmap_list;
		if ((rmap_item->address & PAGE_MASK) == addr)
			return rmap_item;	/* already tracked */
		if (rmap_item->address > addr)
			break;			/* addr not tracked yet */
		/* Item for a lower address: that page has gone, drop it */
		*rmap_list = rmap_item->rmap_list;
		remove_rmap_item_from_tree(rmap_item);
		free_rmap_item(rmap_item);
	}

	rmap_item = alloc_rmap_item();
	if (rmap_item) {
		/* It has already been zeroed */
		rmap_item->mm = mm_slot->mm;
		rmap_item->address = addr;
		rmap_item->rmap_list = *rmap_list;
		*rmap_list = rmap_item;
	}
	return rmap_item;
}
/*
 * Advance the global ksm_scan cursor to the next anonymous page in a
 * VM_MERGEABLE area, returning its rmap_item and storing the page
 * (with a reference held) in *page.  Returns NULL when a full pass
 * over all enrolled mms has completed (and bumps ksm_scan.seqnr).
 */
static struct rmap_item *scan_get_next_rmap_item(struct page **page)
{
	struct mm_struct *mm;
	struct mm_slot *slot;
	struct vm_area_struct *vma;
	struct rmap_item *rmap_item;

	if (list_empty(&ksm_mm_head.mm_list))
		return NULL;

	slot = ksm_scan.mm_slot;
	if (slot == &ksm_mm_head) {
		/* Starting a new pass: the unstable tree is rebuilt from scratch */
		root_unstable_tree = RB_ROOT;

		spin_lock(&ksm_mmlist_lock);
		slot = list_entry(slot->mm_list.next, struct mm_slot, mm_list);
		ksm_scan.mm_slot = slot;
		spin_unlock(&ksm_mmlist_lock);
next_mm:
		ksm_scan.address = 0;
		ksm_scan.rmap_list = &slot->rmap_list;
	}

	mm = slot->mm;
	down_read(&mm->mmap_sem);
	if (ksm_test_exit(mm))
		vma = NULL;
	else
		vma = find_vma(mm, ksm_scan.address);

	for (; vma; vma = vma->vm_next) {
		if (!(vma->vm_flags & VM_MERGEABLE))
			continue;
		if (ksm_scan.address < vma->vm_start)
			ksm_scan.address = vma->vm_start;
		if (!vma->anon_vma)
			ksm_scan.address = vma->vm_end;	/* skip: no anon pages */

		while (ksm_scan.address < vma->vm_end) {
			if (ksm_test_exit(mm))
				break;
			*page = follow_page(vma, ksm_scan.address, FOLL_GET);
			if (!IS_ERR_OR_NULL(*page) && PageAnon(*page)) {
				flush_anon_page(vma, *page, ksm_scan.address);
				flush_dcache_page(*page);
				rmap_item = get_next_rmap_item(slot,
					ksm_scan.rmap_list, ksm_scan.address);
				if (rmap_item) {
					ksm_scan.rmap_list =
							&rmap_item->rmap_list;
					ksm_scan.address += PAGE_SIZE;
				} else
					put_page(*page);
				up_read(&mm->mmap_sem);
				return rmap_item;
			}
			if (!IS_ERR_OR_NULL(*page))
				put_page(*page);
			ksm_scan.address += PAGE_SIZE;
			cond_resched();
		}
	}

	if (ksm_test_exit(mm)) {
		/* mm is exiting: pretend we reached the end of its vmas */
		ksm_scan.address = 0;
		ksm_scan.rmap_list = &slot->rmap_list;
	}
	/*
	 * Nuke all the rmap_items that are above this current rmap:
	 * because there were no VM_MERGEABLE vmas with such addresses.
	 */
	remove_trailing_rmap_items(slot, ksm_scan.rmap_list);

	spin_lock(&ksm_mmlist_lock);
	ksm_scan.mm_slot = list_entry(slot->mm_list.next,
						struct mm_slot, mm_list);
	if (ksm_scan.address == 0) {
		/*
		 * We've completed a full scan of all vmas, holding mmap_sem
		 * throughout, and found no VM_MERGEABLE: so do the same as
		 * __ksm_exit does to remove this mm from all our lists now.
		 * This applies either when cleaning up after __ksm_exit
		 * (but beware: we can reach here even before __ksm_exit),
		 * or when all VM_MERGEABLE areas have been unmapped (and
		 * mmap_sem then protects against race with MADV_MERGEABLE).
		 */
		hlist_del(&slot->link);
		list_del(&slot->mm_list);
		spin_unlock(&ksm_mmlist_lock);

		free_mm_slot(slot);
		clear_bit(MMF_VM_MERGEABLE, &mm->flags);
		up_read(&mm->mmap_sem);
		mmdrop(mm);
	} else {
		spin_unlock(&ksm_mmlist_lock);
		up_read(&mm->mmap_sem);
	}

	/* Repeat until we've completed scanning the whole list */
	slot = ksm_scan.mm_slot;
	if (slot != &ksm_mm_head)
		goto next_mm;

	ksm_scan.seqnr++;
	return NULL;
}
/**
 * ksm_do_scan - the ksm scanner main worker function.
 * @scan_npages - number of pages we want to scan before we return.
 *
 * Walks up to @scan_npages candidate pages via scan_get_next_rmap_item()
 * and tries to merge each one unless it is already a stable KSM page.
 * Returns early when the scan cursor runs out of mms to examine.
 */
static void ksm_do_scan(unsigned int scan_npages)
{
	struct rmap_item *rmap_item;
	/* page is always set before use when rmap_item != NULL */
	struct page *uninitialized_var(page);

	while (scan_npages--) {
		cond_resched();
		rmap_item = scan_get_next_rmap_item(&page);
		if (!rmap_item)
			return;
		/* skip pages already merged and sitting in the stable tree */
		if (!PageKsm(page) || !in_stable_tree(rmap_item))
			cmp_and_merge_page(page, rmap_item);
		/* drop the reference taken by follow_page(FOLL_GET) */
		put_page(page);
	}
}
/*
 * ksmd only has work to do when merging is enabled and at least one
 * mm has been registered for scanning.
 */
static int ksmd_should_run(void)
{
	if (!(ksm_run & KSM_RUN_MERGE))
		return 0;
	return !list_empty(&ksm_mm_head.mm_list);
}
/*
 * Main loop of the ksmd kernel thread: scan a batch of pages under
 * ksm_thread_mutex, then either sleep for the configured interval
 * (while running) or block until woken by __ksm_enter()/run_store().
 */
static int ksm_scan_thread(void *nothing)
{
	set_user_nice(current, 5);

	while (!kthread_should_stop()) {
		mutex_lock(&ksm_thread_mutex);
		if (ksmd_should_run())
			ksm_do_scan(ksm_thread_pages_to_scan);
		mutex_unlock(&ksm_thread_mutex);

		if (ksmd_should_run()) {
			schedule_timeout_interruptible(
				msecs_to_jiffies(ksm_thread_sleep_millisecs));
		} else {
			wait_event_interruptible(ksm_thread_wait,
				ksmd_should_run() || kthread_should_stop());
		}
	}
	return 0;
}
/*
 * madvise(MADV_MERGEABLE / MADV_UNMERGEABLE) entry point for a vma.
 * Returns 0 on success (including silently ignored advice) or a
 * negative errno from __ksm_enter()/unmerge_ksm_pages().
 */
int ksm_madvise(struct vm_area_struct *vma, unsigned long start,
		unsigned long end, int advice, unsigned long *vm_flags)
{
	struct mm_struct *mm = vma->vm_mm;
	int err;

	switch (advice) {
	case MADV_MERGEABLE:
		/*
		 * Be somewhat over-protective for now!
		 */
		/* refuse special mappings (shared, IO, pfn, hugetlb, ...) */
		if (*vm_flags & (VM_MERGEABLE | VM_SHARED | VM_MAYSHARE |
				 VM_PFNMAP | VM_IO | VM_DONTEXPAND |
				 VM_RESERVED | VM_HUGETLB | VM_INSERTPAGE |
				 VM_NONLINEAR | VM_MIXEDMAP | VM_SAO))
			return 0; /* just ignore the advice */

		/* first mergeable vma in this mm: register with ksmd */
		if (!test_bit(MMF_VM_MERGEABLE, &mm->flags)) {
			err = __ksm_enter(mm);
			if (err)
				return err;
		}

		*vm_flags |= VM_MERGEABLE;
		break;

	case MADV_UNMERGEABLE:
		if (!(*vm_flags & VM_MERGEABLE))
			return 0;		/* just ignore the advice */

		/* break COW on any already-merged pages in the range */
		if (vma->anon_vma) {
			err = unmerge_ksm_pages(vma, start, end);
			if (err)
				return err;
		}

		*vm_flags &= ~VM_MERGEABLE;
		break;
	}

	return 0;
}
/*
 * Register @mm with ksmd: allocate an mm_slot, hash it, and queue it
 * just behind the scan cursor. Takes an mm_count reference so the mm
 * structure outlives the slot. Returns 0 or -ENOMEM.
 */
int __ksm_enter(struct mm_struct *mm)
{
	struct mm_slot *mm_slot;
	int needs_wakeup;

	mm_slot = alloc_mm_slot();
	if (!mm_slot)
		return -ENOMEM;

	/* Check ksm_run too?  Would need tighter locking */
	/* sampled before insertion: first mm queued means ksmd is idle */
	needs_wakeup = list_empty(&ksm_mm_head.mm_list);

	spin_lock(&ksm_mmlist_lock);
	insert_to_mm_slots_hash(mm, mm_slot);
	/*
	 * Insert just behind the scanning cursor, to let the area settle
	 * down a little; when fork is followed by immediate exec, we don't
	 * want ksmd to waste time setting up and tearing down an rmap_list.
	 */
	list_add_tail(&mm_slot->mm_list, &ksm_scan.mm_slot->mm_list);
	spin_unlock(&ksm_mmlist_lock);

	set_bit(MMF_VM_MERGEABLE, &mm->flags);
	/* pin the mm_struct until the slot is torn down */
	atomic_inc(&mm->mm_count);

	if (needs_wakeup)
		wake_up_interruptible(&ksm_thread_wait);

	return 0;
}
/*
 * Unregister an exiting @mm from ksm. The easy case frees the slot
 * here; otherwise the slot is handed to ksmd for deferred cleanup and
 * we briefly take mmap_sem for write to serialize with break_cow users.
 */
void __ksm_exit(struct mm_struct *mm)
{
	struct mm_slot *mm_slot;
	int easy_to_free = 0;

	/*
	 * This process is exiting: if it's straightforward (as is the
	 * case when ksmd was never running), free mm_slot immediately.
	 * But if it's at the cursor or has rmap_items linked to it, use
	 * mmap_sem to synchronize with any break_cows before pagetables
	 * are freed, and leave the mm_slot on the list for ksmd to free.
	 * Beware: ksm may already have noticed it exiting and freed the slot.
	 */

	spin_lock(&ksm_mmlist_lock);
	mm_slot = get_mm_slot(mm);
	if (mm_slot && ksm_scan.mm_slot != mm_slot) {
		if (!mm_slot->rmap_list) {
			/* no rmap_items and not at cursor: free it now */
			hlist_del(&mm_slot->link);
			list_del(&mm_slot->mm_list);
			easy_to_free = 1;
		} else {
			/* move next to cursor so ksmd notices it soon */
			list_move(&mm_slot->mm_list,
				  &ksm_scan.mm_slot->mm_list);
		}
	}
	spin_unlock(&ksm_mmlist_lock);

	if (easy_to_free) {
		free_mm_slot(mm_slot);
		clear_bit(MMF_VM_MERGEABLE, &mm->flags);
		mmdrop(mm);
	} else if (mm_slot) {
		/* wait out any thread still inside a break_cow on this mm */
		down_write(&mm->mmap_sem);
		up_write(&mm->mmap_sem);
	}
}
/*
 * Copy a KSM page into a fresh anonymous page for @vma at @address,
 * because the shared copy cannot be mapped writable. Returns the new
 * locked, uptodate, swap-backed page on the appropriate LRU, or NULL
 * on allocation failure.
 */
struct page *ksm_does_need_to_copy(struct page *page,
			struct vm_area_struct *vma, unsigned long address)
{
	struct page *new_page;

	new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address);
	if (new_page) {
		copy_user_highpage(new_page, page, address, vma);

		SetPageDirty(new_page);
		__SetPageUptodate(new_page);
		SetPageSwapBacked(new_page);
		__set_page_locked(new_page);

		if (page_evictable(new_page, vma))
			lru_cache_add_lru(new_page, LRU_ACTIVE_ANON);
		else
			add_page_to_unevictable_list(new_page);
	}

	return new_page;
}
/*
 * rmap helper: accumulate "referenced" state for a KSM @page across all
 * vmas that map it, via the rmap_items hanging off its stable node.
 * Two passes: first the vmas ksmd recorded, then (if mappings remain)
 * forked copies in other mms. Returns the referenced count.
 */
int page_referenced_ksm(struct page *page, struct mem_cgroup *memcg,
			unsigned long *vm_flags)
{
	struct stable_node *stable_node;
	struct rmap_item *rmap_item;
	struct hlist_node *hlist;
	unsigned int mapcount = page_mapcount(page);
	int referenced = 0;
	int search_new_forks = 0;

	VM_BUG_ON(!PageKsm(page));
	VM_BUG_ON(!PageLocked(page));

	stable_node = page_stable_node(page);
	if (!stable_node)
		return 0;
again:
	hlist_for_each_entry(rmap_item, hlist, &stable_node->hlist, hlist) {
		struct anon_vma *anon_vma = rmap_item->anon_vma;
		struct anon_vma_chain *vmac;
		struct vm_area_struct *vma;

		anon_vma_lock(anon_vma);
		list_for_each_entry(vmac, &anon_vma->head, same_anon_vma) {
			vma = vmac->vma;
			if (rmap_item->address < vma->vm_start ||
			    rmap_item->address >= vma->vm_end)
				continue;
			/*
			 * Initially we examine only the vma which covers this
			 * rmap_item; but later, if there is still work to do,
			 * we examine covering vmas in other mms: in case they
			 * were forked from the original since ksmd passed.
			 */
			if ((rmap_item->mm == vma->vm_mm) == search_new_forks)
				continue;

			/* memcg-targeted reclaim: only count its own mms */
			if (memcg && !mm_match_cgroup(vma->vm_mm, memcg))
				continue;

			referenced += page_referenced_one(page, vma,
				rmap_item->address, &mapcount, vm_flags);
			if (!search_new_forks || !mapcount)
				break;
		}
		anon_vma_unlock(anon_vma);
		if (!mapcount)
			goto out;
	}
	if (!search_new_forks++)
		goto again;
out:
	return referenced;
}
/*
 * rmap helper: unmap a KSM @page from every vma that maps it, using the
 * rmap_items on its stable node (same two-pass scheme as
 * page_referenced_ksm). Returns SWAP_AGAIN on success, SWAP_FAIL if
 * the page is not in the stable tree, or the first non-SWAP_AGAIN
 * status from try_to_unmap_one().
 */
int try_to_unmap_ksm(struct page *page, enum ttu_flags flags)
{
	struct stable_node *stable_node;
	struct hlist_node *hlist;
	struct rmap_item *rmap_item;
	int ret = SWAP_AGAIN;
	int search_new_forks = 0;

	VM_BUG_ON(!PageKsm(page));
	VM_BUG_ON(!PageLocked(page));

	stable_node = page_stable_node(page);
	if (!stable_node)
		return SWAP_FAIL;
again:
	hlist_for_each_entry(rmap_item, hlist, &stable_node->hlist, hlist) {
		struct anon_vma *anon_vma = rmap_item->anon_vma;
		struct anon_vma_chain *vmac;
		struct vm_area_struct *vma;

		anon_vma_lock(anon_vma);
		list_for_each_entry(vmac, &anon_vma->head, same_anon_vma) {
			vma = vmac->vma;
			if (rmap_item->address < vma->vm_start ||
			    rmap_item->address >= vma->vm_end)
				continue;
			/*
			 * Initially we examine only the vma which covers this
			 * rmap_item; but later, if there is still work to do,
			 * we examine covering vmas in other mms: in case they
			 * were forked from the original since ksmd passed.
			 */
			if ((rmap_item->mm == vma->vm_mm) == search_new_forks)
				continue;

			ret = try_to_unmap_one(page, vma,
					rmap_item->address, flags);
			/* stop early on error or once fully unmapped */
			if (ret != SWAP_AGAIN || !page_mapped(page)) {
				anon_vma_unlock(anon_vma);
				goto out;
			}
		}
		anon_vma_unlock(anon_vma);
	}
	if (!search_new_forks++)
		goto again;
out:
	return ret;
}
#ifdef CONFIG_MIGRATION
/*
 * Generic rmap walk over a KSM page for page migration: invoke
 * @rmap_one on every (vma, address) mapping the page, stopping early
 * if a callback returns other than SWAP_AGAIN. Same two-pass
 * original-then-forks scheme as the other stable-node walkers above.
 */
int rmap_walk_ksm(struct page *page, int (*rmap_one)(struct page *,
		  struct vm_area_struct *, unsigned long, void *), void *arg)
{
	struct stable_node *stable_node;
	struct hlist_node *hlist;
	struct rmap_item *rmap_item;
	int ret = SWAP_AGAIN;
	int search_new_forks = 0;

	VM_BUG_ON(!PageKsm(page));
	VM_BUG_ON(!PageLocked(page));

	stable_node = page_stable_node(page);
	if (!stable_node)
		return ret;
again:
	hlist_for_each_entry(rmap_item, hlist, &stable_node->hlist, hlist) {
		struct anon_vma *anon_vma = rmap_item->anon_vma;
		struct anon_vma_chain *vmac;
		struct vm_area_struct *vma;

		anon_vma_lock(anon_vma);
		list_for_each_entry(vmac, &anon_vma->head, same_anon_vma) {
			vma = vmac->vma;
			if (rmap_item->address < vma->vm_start ||
			    rmap_item->address >= vma->vm_end)
				continue;
			/*
			 * Initially we examine only the vma which covers this
			 * rmap_item; but later, if there is still work to do,
			 * we examine covering vmas in other mms: in case they
			 * were forked from the original since ksmd passed.
			 */
			if ((rmap_item->mm == vma->vm_mm) == search_new_forks)
				continue;

			ret = rmap_one(page, vma, rmap_item->address, arg);
			if (ret != SWAP_AGAIN) {
				anon_vma_unlock(anon_vma);
				goto out;
			}
		}
		anon_vma_unlock(anon_vma);
	}
	if (!search_new_forks++)
		goto again;
out:
	return ret;
}
/*
 * Page-migration hook: if @oldpage was a stable KSM page, repoint its
 * stable node at @newpage's pfn so the stable tree stays consistent.
 * Both pages must be locked by the migration code.
 */
void ksm_migrate_page(struct page *newpage, struct page *oldpage)
{
	struct stable_node *stable_node;

	VM_BUG_ON(!PageLocked(oldpage));
	VM_BUG_ON(!PageLocked(newpage));
	VM_BUG_ON(newpage->mapping != oldpage->mapping);

	stable_node = page_stable_node(newpage);
	if (stable_node) {
		VM_BUG_ON(stable_node->kpfn != page_to_pfn(oldpage));
		stable_node->kpfn = page_to_pfn(newpage);
	}
}
#endif /* CONFIG_MIGRATION */
#ifdef CONFIG_MEMORY_HOTREMOVE
/*
 * Find any stable_node whose kernel pfn falls inside the pfn range
 * being offlined, or NULL if none remains.
 */
static struct stable_node *ksm_check_stable_tree(unsigned long start_pfn,
						 unsigned long end_pfn)
{
	struct rb_node *node = rb_first(&root_stable_tree);

	while (node) {
		struct stable_node *stable_node =
			rb_entry(node, struct stable_node, node);

		if (stable_node->kpfn >= start_pfn &&
		    stable_node->kpfn < end_pfn)
			return stable_node;
		node = rb_next(node);
	}
	return NULL;
}
/*
 * Memory-hotremove notifier: hold ksm_thread_mutex across the whole
 * offline operation, then prune any stable_nodes still pointing into
 * the offlined pfn range.
 */
static int ksm_memory_callback(struct notifier_block *self,
			       unsigned long action, void *arg)
{
	struct memory_notify *mn = arg;
	struct stable_node *stable_node;

	switch (action) {
	case MEM_GOING_OFFLINE:
		/*
		 * Keep it very simple for now: just lock out ksmd and
		 * MADV_UNMERGEABLE while any memory is going offline.
		 */
		/* NOTE: mutex held until MEM_OFFLINE/MEM_CANCEL_OFFLINE */
		mutex_lock(&ksm_thread_mutex);
		break;

	case MEM_OFFLINE:
		/*
		 * Most of the work is done by page migration; but there might
		 * be a few stable_nodes left over, still pointing to struct
		 * pages which have been offlined: prune those from the tree.
		 */
		while ((stable_node = ksm_check_stable_tree(mn->start_pfn,
					mn->start_pfn + mn->nr_pages)) != NULL)
			remove_node_from_stable_tree(stable_node);
		/* fallthrough */

	case MEM_CANCEL_OFFLINE:
		mutex_unlock(&ksm_thread_mutex);
		break;
	}
	return NOTIFY_OK;
}
#endif /* CONFIG_MEMORY_HOTREMOVE */
#ifdef CONFIG_SYSFS
/*
* This all compiles without CONFIG_SYSFS, but is a waste of space.
*/
#define KSM_ATTR_RO(_name) \
static struct kobj_attribute _name##_attr = __ATTR_RO(_name)
#define KSM_ATTR(_name) \
static struct kobj_attribute _name##_attr = \
__ATTR(_name, 0644, _name##_show, _name##_store)
/* sysfs: /sys/kernel/mm/ksm/sleep_millisecs - ksmd inter-batch delay */
static ssize_t sleep_millisecs_show(struct kobject *kobj,
				    struct kobj_attribute *attr, char *buf)
{
	return sprintf(buf, "%u\n", ksm_thread_sleep_millisecs);
}

static ssize_t sleep_millisecs_store(struct kobject *kobj,
				     struct kobj_attribute *attr,
				     const char *buf, size_t count)
{
	unsigned long msecs;
	int err;

	err = strict_strtoul(buf, 10, &msecs);
	/* reject non-numeric input and values that overflow unsigned int */
	if (err || msecs > UINT_MAX)
		return -EINVAL;

	ksm_thread_sleep_millisecs = msecs;

	return count;
}
KSM_ATTR(sleep_millisecs);
/* sysfs: /sys/kernel/mm/ksm/pages_to_scan - batch size per ksmd wakeup */
static ssize_t pages_to_scan_show(struct kobject *kobj,
				  struct kobj_attribute *attr, char *buf)
{
	return sprintf(buf, "%u\n", ksm_thread_pages_to_scan);
}

static ssize_t pages_to_scan_store(struct kobject *kobj,
				   struct kobj_attribute *attr,
				   const char *buf, size_t count)
{
	int err;
	unsigned long nr_pages;

	err = strict_strtoul(buf, 10, &nr_pages);
	/* reject non-numeric input and values that overflow unsigned int */
	if (err || nr_pages > UINT_MAX)
		return -EINVAL;

	ksm_thread_pages_to_scan = nr_pages;

	return count;
}
KSM_ATTR(pages_to_scan);
/* sysfs: /sys/kernel/mm/ksm/run - 0 stop, 1 run, 2 stop and unmerge all */
static ssize_t run_show(struct kobject *kobj, struct kobj_attribute *attr,
			char *buf)
{
	return sprintf(buf, "%u\n", ksm_run);
}

static ssize_t run_store(struct kobject *kobj, struct kobj_attribute *attr,
			 const char *buf, size_t count)
{
	int err;
	unsigned long flags;

	err = strict_strtoul(buf, 10, &flags);
	if (err || flags > UINT_MAX)
		return -EINVAL;
	if (flags > KSM_RUN_UNMERGE)
		return -EINVAL;

	/*
	 * KSM_RUN_MERGE sets ksmd running, and 0 stops it running.
	 * KSM_RUN_UNMERGE stops it running and unmerges all rmap_items,
	 * breaking COW to free the pages_shared (but leaves mm_slots
	 * on the list for when ksmd may be set running again).
	 */

	mutex_lock(&ksm_thread_mutex);
	if (ksm_run != flags) {
		ksm_run = flags;
		if (flags & KSM_RUN_UNMERGE) {
			/* mark as OOM-origin while we break COW everywhere */
			current->flags |= PF_OOM_ORIGIN;
			err = unmerge_and_remove_all_rmap_items();
			current->flags &= ~PF_OOM_ORIGIN;
			if (err) {
				ksm_run = KSM_RUN_STOP;
				/* propagate negative errno to the writer */
				count = err;
			}
		}
	}
	mutex_unlock(&ksm_thread_mutex);

	if (flags & KSM_RUN_MERGE)
		wake_up_interruptible(&ksm_thread_wait);

	return count;
}
KSM_ATTR(run);
/* Read-only statistics exposed under /sys/kernel/mm/ksm/ */

/* number of stable-tree nodes, i.e. shared pages */
static ssize_t pages_shared_show(struct kobject *kobj,
				 struct kobj_attribute *attr, char *buf)
{
	return sprintf(buf, "%lu\n", ksm_pages_shared);
}
KSM_ATTR_RO(pages_shared);

/* number of extra mappings saved by sharing */
static ssize_t pages_sharing_show(struct kobject *kobj,
				  struct kobj_attribute *attr, char *buf)
{
	return sprintf(buf, "%lu\n", ksm_pages_sharing);
}
KSM_ATTR_RO(pages_sharing);

/* unique pages checksummed into the unstable tree */
static ssize_t pages_unshared_show(struct kobject *kobj,
				   struct kobj_attribute *attr, char *buf)
{
	return sprintf(buf, "%lu\n", ksm_pages_unshared);
}
KSM_ATTR_RO(pages_unshared);

/* rmap_items not in either tree: changing too fast to merge */
static ssize_t pages_volatile_show(struct kobject *kobj,
				   struct kobj_attribute *attr, char *buf)
{
	long ksm_pages_volatile;

	ksm_pages_volatile = ksm_rmap_items - ksm_pages_shared
				- ksm_pages_sharing - ksm_pages_unshared;
	/*
	 * It was not worth any locking to calculate that statistic,
	 * but it might therefore sometimes be negative: conceal that.
	 */
	if (ksm_pages_volatile < 0)
		ksm_pages_volatile = 0;
	return sprintf(buf, "%ld\n", ksm_pages_volatile);
}
KSM_ATTR_RO(pages_volatile);

/* how many complete passes over all mms ksmd has made */
static ssize_t full_scans_show(struct kobject *kobj,
			       struct kobj_attribute *attr, char *buf)
{
	return sprintf(buf, "%lu\n", ksm_scan.seqnr);
}
KSM_ATTR_RO(full_scans);
/* All ksm sysfs attributes, grouped under /sys/kernel/mm/ksm/ */
static struct attribute *ksm_attrs[] = {
	&sleep_millisecs_attr.attr,
	&pages_to_scan_attr.attr,
	&run_attr.attr,
	&pages_shared_attr.attr,
	&pages_sharing_attr.attr,
	&pages_unshared_attr.attr,
	&pages_volatile_attr.attr,
	&full_scans_attr.attr,
	NULL,
};

static struct attribute_group ksm_attr_group = {
	.attrs = ksm_attrs,
	.name = "ksm",
};
#endif /* CONFIG_SYSFS */
/*
 * Module init: set up slab caches, start the ksmd thread, register the
 * sysfs group (or default to running when sysfs is absent), and hook
 * memory hot-remove notification.
 */
static int __init ksm_init(void)
{
	struct task_struct *ksm_thread;
	int err;

	err = ksm_slab_init();
	if (err)
		goto out;

	ksm_thread = kthread_run(ksm_scan_thread, NULL, "ksmd");
	if (IS_ERR(ksm_thread)) {
		printk(KERN_ERR "ksm: creating kthread failed\n");
		err = PTR_ERR(ksm_thread);
		goto out_free;
	}

#ifdef CONFIG_SYSFS
	err = sysfs_create_group(mm_kobj, &ksm_attr_group);
	if (err) {
		printk(KERN_ERR "ksm: register sysfs failed\n");
		kthread_stop(ksm_thread);
		goto out_free;
	}
#else
	ksm_run = KSM_RUN_MERGE;	/* no way for user to start it */

#endif /* CONFIG_SYSFS */

#ifdef CONFIG_MEMORY_HOTREMOVE
	/*
	 * Choose a high priority since the callback takes ksm_thread_mutex:
	 * later callbacks could only be taking locks which nest within that.
	 */
	hotplug_memory_notifier(ksm_memory_callback, 100);
#endif
	return 0;

out_free:
	ksm_slab_free();
out:
	return err;
}
module_init(ksm_init)
| gpl-2.0 |
GuardianRG/linux | drivers/tty/serial/of_serial.c | 392 | 8946 | /*
* Serial Port driver for Open Firmware platform devices
*
* Copyright (C) 2006 Arnd Bergmann <arnd@arndb.de>, IBM Corp.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*
*/
#include <linux/console.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/serial_core.h>
#include <linux/serial_reg.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/nwpserial.h>
#include <linux/clk.h>
#include "8250/8250.h"
/* Per-device state kept in drvdata between probe and remove. */
struct of_serial_info {
	struct clk *clk;	/* optional clock enabled at probe time */
	int type;		/* PORT_* value from the match table */
	int line;		/* line number returned by registration */
};
#ifdef CONFIG_ARCH_TEGRA
/*
 * Tegra UARTs need the RX FIFO drained after a break condition;
 * read RX while the LSR reports FIFO/break errors, bounded by a
 * retry budget so a stuck LSR cannot hang us.
 */
void tegra_serial_handle_break(struct uart_port *p)
{
	unsigned int status, tmout = 10000;

	do {
		status = p->serial_in(p, UART_LSR);
		if (status & (UART_LSR_FIFOE | UART_LSR_BRK_ERROR_BITS))
			status = p->serial_in(p, UART_RX);
		else
			break;
		if (--tmout == 0)
			break;
		udelay(1);
	} while (1);
}
#else
/* no-op on other platforms */
static inline void tegra_serial_handle_break(struct uart_port *port)
{
}
#endif
/*
 * Fill a struct uart_port for a given device node
 */
/*
 * Parses clock-frequency (or acquires and enables a clk), current-speed,
 * reg/reg-offset/reg-shift/reg-io-width, fifo-size, serial alias and irq
 * from the device tree into @port. Returns 0 or a negative errno; on
 * failure after the clock was enabled, the clock is disabled again.
 */
static int of_platform_serial_setup(struct platform_device *ofdev,
			int type, struct uart_port *port,
			struct of_serial_info *info)
{
	struct resource resource;
	struct device_node *np = ofdev->dev.of_node;
	u32 clk, spd, prop;
	int ret;

	memset(port, 0, sizeof *port);
	if (of_property_read_u32(np, "clock-frequency", &clk)) {

		/* Get clk rate through clk driver if present */
		info->clk = devm_clk_get(&ofdev->dev, NULL);
		if (IS_ERR(info->clk)) {
			dev_warn(&ofdev->dev,
				"clk or clock-frequency not defined\n");
			return PTR_ERR(info->clk);
		}

		ret = clk_prepare_enable(info->clk);
		if (ret < 0)
			return ret;

		clk = clk_get_rate(info->clk);
	}
	/* If current-speed was set, then try not to change it. */
	if (of_property_read_u32(np, "current-speed", &spd) == 0)
		port->custom_divisor = clk / (16 * spd);

	ret = of_address_to_resource(np, 0, &resource);
	if (ret) {
		dev_warn(&ofdev->dev, "invalid address\n");
		goto out;
	}

	spin_lock_init(&port->lock);
	port->mapbase = resource.start;
	port->mapsize = resource_size(&resource);

	/* Check for shifted address mapping */
	if (of_property_read_u32(np, "reg-offset", &prop) == 0)
		port->mapbase += prop;

	/* Check for registers offset within the devices address range */
	if (of_property_read_u32(np, "reg-shift", &prop) == 0)
		port->regshift = prop;

	/* Check for fifo size */
	if (of_property_read_u32(np, "fifo-size", &prop) == 0)
		port->fifosize = prop;

	/* Check for a fixed line number */
	ret = of_alias_get_id(np, "serial");
	if (ret >= 0)
		port->line = ret;

	port->irq = irq_of_parse_and_map(np, 0);
	port->iotype = UPIO_MEM;
	if (of_property_read_u32(np, "reg-io-width", &prop) == 0) {
		switch (prop) {
		case 1:
			port->iotype = UPIO_MEM;
			break;
		case 4:
			/* endianness of 32-bit access follows the DT node */
			port->iotype = of_device_is_big_endian(np) ?
				       UPIO_MEM32BE : UPIO_MEM32;
			break;
		default:
			dev_warn(&ofdev->dev, "unsupported reg-io-width (%d)\n",
				 prop);
			ret = -EINVAL;
			goto out;
		}
	}

	port->type = type;
	port->uartclk = clk;
	port->flags = UPF_SHARE_IRQ | UPF_BOOT_AUTOCONF | UPF_IOREMAP
		| UPF_FIXED_PORT | UPF_FIXED_TYPE;

	if (of_find_property(np, "no-loopback-test", NULL))
		port->flags |= UPF_SKIP_TEST;

	port->dev = &ofdev->dev;

	switch (type) {
	case PORT_TEGRA:
		port->handle_break = tegra_serial_handle_break;
		break;

	case PORT_RT2880:
		port->iotype = UPIO_AU;
		break;
	}

	return 0;
out:
	/* undo clk_prepare_enable done above, if any */
	if (info->clk)
		clk_disable_unprepare(info->clk);
	return ret;
}
/*
* Try to register a serial port
*/
static const struct of_device_id of_platform_serial_table[];
/*
 * Probe: match the node, parse it into a uart_port, and register the
 * port with the appropriate backend (8250 or NWP serial).
 *
 * Fix: of_platform_serial_setup() may have prepared and enabled a
 * clock; the original code leaked that enable when port registration
 * failed. Disable the clock on that error path before freeing @info.
 */
static int of_platform_serial_probe(struct platform_device *ofdev)
{
	const struct of_device_id *match;
	struct of_serial_info *info;
	struct uart_port port;
	int port_type;
	int ret;

	match = of_match_device(of_platform_serial_table, &ofdev->dev);
	if (!match)
		return -EINVAL;

	/* port owned by firmware (RTAS console): leave it alone */
	if (of_find_property(ofdev->dev.of_node, "used-by-rtas", NULL))
		return -EBUSY;

	info = kzalloc(sizeof(*info), GFP_KERNEL);
	if (info == NULL)
		return -ENOMEM;

	port_type = (unsigned long)match->data;
	ret = of_platform_serial_setup(ofdev, port_type, &port, info);
	if (ret)
		goto out;

	switch (port_type) {
#ifdef CONFIG_SERIAL_8250
	case PORT_8250 ... PORT_MAX_8250:
	{
		struct uart_8250_port port8250;
		memset(&port8250, 0, sizeof(port8250));
		port8250.port = port;

		if (port.fifosize)
			port8250.capabilities = UART_CAP_FIFO;

		if (of_property_read_bool(ofdev->dev.of_node,
					  "auto-flow-control"))
			port8250.capabilities |= UART_CAP_AFE;

		ret = serial8250_register_8250_port(&port8250);
		break;
	}
#endif
#ifdef CONFIG_SERIAL_OF_PLATFORM_NWPSERIAL
	case PORT_NWPSERIAL:
		ret = nwpserial_register_port(&port);
		break;
#endif
	default:
		/* need to add code for these */
	case PORT_UNKNOWN:
		dev_info(&ofdev->dev, "Unknown serial port found, ignored\n");
		ret = -ENODEV;
		break;
	}
	if (ret < 0)
		goto out_disable_clk;

	info->type = port_type;
	info->line = ret;
	platform_set_drvdata(ofdev, info);
	return 0;
out_disable_clk:
	/* registration failed after setup enabled the clock: undo it */
	if (info->clk)
		clk_disable_unprepare(info->clk);
out:
	kfree(info);
	irq_dispose_mapping(port.irq);
	return ret;
}
/*
* Release a line
*/
/*
 * Release a line
 */
static int of_platform_serial_remove(struct platform_device *ofdev)
{
	struct of_serial_info *info = platform_get_drvdata(ofdev);
	/* unregister with the backend that claimed the line at probe */
	switch (info->type) {
#ifdef CONFIG_SERIAL_8250
	case PORT_8250 ... PORT_MAX_8250:
		serial8250_unregister_port(info->line);
		break;
#endif
#ifdef CONFIG_SERIAL_OF_PLATFORM_NWPSERIAL
	case PORT_NWPSERIAL:
		nwpserial_unregister_port(info->line);
		break;
#endif
	default:
		/* need to add code for these */
		break;
	}

	/* balance the clk_prepare_enable done in setup, if any */
	if (info->clk)
		clk_disable_unprepare(info->clk);
	kfree(info);
	return 0;
}
#ifdef CONFIG_PM_SLEEP
#ifdef CONFIG_SERIAL_8250
/*
 * Suspend the 8250 port, then gate its clock — unless it is an active
 * console with console suspend disabled, which must keep ticking.
 */
static void of_serial_suspend_8250(struct of_serial_info *info)
{
	struct uart_8250_port *port8250 = serial8250_get_port(info->line);
	struct uart_port *port = &port8250->port;

	serial8250_suspend_port(info->line);
	if (info->clk && (!uart_console(port) || console_suspend_enabled))
		clk_disable_unprepare(info->clk);
}

/* Reverse of of_serial_suspend_8250: clock first, then port resume. */
static void of_serial_resume_8250(struct of_serial_info *info)
{
	struct uart_8250_port *port8250 = serial8250_get_port(info->line);
	struct uart_port *port = &port8250->port;

	if (info->clk && (!uart_console(port) || console_suspend_enabled))
		clk_prepare_enable(info->clk);

	serial8250_resume_port(info->line);
}
#else
static inline void of_serial_suspend_8250(struct of_serial_info *info)
{
}

static inline void of_serial_resume_8250(struct of_serial_info *info)
{
}
#endif
/* PM suspend hook: only 8250-class ports need any work here. */
static int of_serial_suspend(struct device *dev)
{
	struct of_serial_info *info = dev_get_drvdata(dev);

	switch (info->type) {
	case PORT_8250 ... PORT_MAX_8250:
		of_serial_suspend_8250(info);
		break;
	default:
		break;
	}

	return 0;
}
/* PM resume hook: mirror image of of_serial_suspend(). */
static int of_serial_resume(struct device *dev)
{
	struct of_serial_info *info = dev_get_drvdata(dev);

	switch (info->type) {
	case PORT_8250 ... PORT_MAX_8250:
		of_serial_resume_8250(info);
		break;
	default:
		break;
	}

	return 0;
}
#endif
static SIMPLE_DEV_PM_OPS(of_serial_pm_ops, of_serial_suspend, of_serial_resume);
/*
* A few common types, add more as needed.
*/
/*
 * Compatible strings handled by this driver; .data carries the PORT_*
 * type passed to of_platform_serial_setup().
 */
static const struct of_device_id of_platform_serial_table[] = {
	{ .compatible = "ns8250",   .data = (void *)PORT_8250, },
	{ .compatible = "ns16450",  .data = (void *)PORT_16450, },
	{ .compatible = "ns16550a", .data = (void *)PORT_16550A, },
	{ .compatible = "ns16550",  .data = (void *)PORT_16550, },
	{ .compatible = "ns16750",  .data = (void *)PORT_16750, },
	{ .compatible = "ns16850",  .data = (void *)PORT_16850, },
	{ .compatible = "nvidia,tegra20-uart", .data = (void *)PORT_TEGRA, },
	{ .compatible = "nxp,lpc3220-uart", .data = (void *)PORT_LPC3220, },
	{ .compatible = "ralink,rt2880-uart", .data = (void *)PORT_RT2880, },
	{ .compatible = "altr,16550-FIFO32",
		.data = (void *)PORT_ALTR_16550_F32, },
	{ .compatible = "altr,16550-FIFO64",
		.data = (void *)PORT_ALTR_16550_F64, },
	{ .compatible = "altr,16550-FIFO128",
		.data = (void *)PORT_ALTR_16550_F128, },
	{ .compatible = "mrvl,mmp-uart",
		.data = (void *)PORT_XSCALE, },
	{ .compatible = "mrvl,pxa-uart",
		.data = (void *)PORT_XSCALE, },
#ifdef CONFIG_SERIAL_OF_PLATFORM_NWPSERIAL
	{ .compatible = "ibm,qpace-nwp-serial",
		.data = (void *)PORT_NWPSERIAL, },
#endif
	{ /* end of list */ },
};
/* Platform driver glue; PM ops cover 8250 suspend/resume only. */
static struct platform_driver of_platform_serial_driver = {
	.driver = {
		.name = "of_serial",
		.of_match_table = of_platform_serial_table,
	},
	.probe = of_platform_serial_probe,
	.remove = of_platform_serial_remove,
};
module_platform_driver(of_platform_serial_driver);
MODULE_AUTHOR("Arnd Bergmann <arnd@arndb.de>");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Serial Port driver for Open Firmware platform devices");
| gpl-2.0 |
rcrobles/linux-stable-4.3 | drivers/staging/rtl8723au/core/rtw_wlan_util.c | 392 | 40429 | /******************************************************************************
*
* Copyright(c) 2007 - 2012 Realtek Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
******************************************************************************/
#define _RTW_WLAN_UTIL_C_
#include <osdep_service.h>
#include <drv_types.h>
#include <linux/ieee80211.h>
#include <wifi.h>
#include <rtl8723a_spec.h>
/* Vendor OUIs used to fingerprint peer APs from their IEs. */
static unsigned char ARTHEROS_OUI1[] = {0x00, 0x03, 0x7f};
static unsigned char ARTHEROS_OUI2[] = {0x00, 0x13, 0x74};
static unsigned char BROADCOM_OUI1[] = {0x00, 0x10, 0x18};
static unsigned char BROADCOM_OUI2[] = {0x00, 0x0a, 0xf7};
static unsigned char CISCO_OUI[] = {0x00, 0x40, 0x96};
static unsigned char MARVELL_OUI[] = {0x00, 0x50, 0x43};
static unsigned char RALINK_OUI[] = {0x00, 0x0c, 0x43};
static unsigned char REALTEK_OUI[] = {0x00, 0xe0, 0x4c};
static unsigned char AIRGOCAP_OUI[] = {0x00, 0x0a, 0xf5};
static unsigned char EPIGRAM_OUI[] = {0x00, 0x90, 0x4c};

/* Cipher-suite selectors: WPA OUI+2 (TKIP) and RSN OUI+2 (TKIP). */
static unsigned char WPA_TKIP_CIPHER[4] = {0x00, 0x50, 0xf2, 0x02};
static unsigned char RSN_TKIP_CIPHER[4] = {0x00, 0x0f, 0xac, 0x02};

#define R2T_PHY_DELAY 0

/* Bounds (in TUs<<2, i.e. ~ms) for the wait-for-beacon timeout. */
/* define WAIT_FOR_BCN_TO_MIN 3000 */
#define WAIT_FOR_BCN_TO_MIN 6000
#define WAIT_FOR_BCN_TO_MAX 20000

/* Basic-rate sets (0x80 flag marks a rate as basic/mandatory). */
static u8 rtw_basic_rate_cck[4] = {
	IEEE80211_CCK_RATE_1MB | IEEE80211_BASIC_RATE_MASK,
	IEEE80211_CCK_RATE_2MB | IEEE80211_BASIC_RATE_MASK,
	IEEE80211_CCK_RATE_5MB | IEEE80211_BASIC_RATE_MASK,
	IEEE80211_CCK_RATE_11MB | IEEE80211_BASIC_RATE_MASK
};

static u8 rtw_basic_rate_ofdm[3] = {
	IEEE80211_OFDM_RATE_6MB | IEEE80211_BASIC_RATE_MASK,
	IEEE80211_OFDM_RATE_12MB | IEEE80211_BASIC_RATE_MASK,
	IEEE80211_OFDM_RATE_24MB | IEEE80211_BASIC_RATE_MASK
};

static u8 rtw_basic_rate_mix[7] = {
	IEEE80211_CCK_RATE_1MB | IEEE80211_BASIC_RATE_MASK,
	IEEE80211_CCK_RATE_2MB | IEEE80211_BASIC_RATE_MASK,
	IEEE80211_CCK_RATE_5MB | IEEE80211_BASIC_RATE_MASK,
	IEEE80211_CCK_RATE_11MB | IEEE80211_BASIC_RATE_MASK,
	IEEE80211_OFDM_RATE_6MB | IEEE80211_BASIC_RATE_MASK,
	IEEE80211_OFDM_RATE_12MB | IEEE80211_BASIC_RATE_MASK,
	IEEE80211_OFDM_RATE_24MB | IEEE80211_BASIC_RATE_MASK
};
/*
 * Return true if the rate set contains at least one CCK rate
 * (1/2/5.5/11 Mbps, encoded as 2/4/11/22 in 500kbps units; the
 * basic-rate flag 0x80 is masked off before comparing).
 */
int cckrates_included23a(unsigned char *rate, int ratelen)
{
	int i;

	for (i = 0; i < ratelen; i++) {
		switch (rate[i] & 0x7f) {
		case 2:
		case 4:
		case 11:
		case 22:
			return true;
		}
	}

	return false;
}
/*
 * Return true only if the rate set is non-empty and every rate in it
 * is a CCK rate (1/2/5.5/11 Mbps; basic-rate flag 0x80 masked off).
 *
 * Fix: the original returned true for an empty rate list, which made
 * judge_network_type23a() classify a peer advertising no rates as
 * pure 802.11b; an empty set now returns false.
 */
int cckratesonly_included23a(unsigned char *rate, int ratelen)
{
	int i;

	/* an empty rate set is not "CCK only" */
	if (ratelen <= 0)
		return false;

	for (i = 0; i < ratelen; i++) {
		if (((rate[i]) & 0x7f) != 2 && ((rate[i]) & 0x7f) != 4 &&
		    ((rate[i]) & 0x7f) != 11 && ((rate[i]) & 0x7f) != 22)
			return false;
	}

	return true;
}
/*
 * Map a WIRELESS_* network type to the firmware rate-adaptive index
 * (RATR_INX_*); unknown types fall back to the G/B mixed table.
 */
unsigned char networktype_to_raid23a(unsigned char network_type)
{
	unsigned char raid;

	switch (network_type) {
	case WIRELESS_11B:
		raid = RATR_INX_WIRELESS_B;
		break;
	case WIRELESS_11A:
	case WIRELESS_11G:
		raid = RATR_INX_WIRELESS_G;
		break;
	case WIRELESS_11BG:
		raid = RATR_INX_WIRELESS_GB;
		break;
	case WIRELESS_11_24N:
	case WIRELESS_11_5N:
		raid = RATR_INX_WIRELESS_N;
		break;
	case WIRELESS_11A_5N:
	case WIRELESS_11G_24N:
		raid = RATR_INX_WIRELESS_NG;
		break;
	case WIRELESS_11BG_24N:
		raid = RATR_INX_WIRELESS_NGB;
		break;
	default:
		raid = RATR_INX_WIRELESS_GB;
		break;
	}
	return raid;
}
/*
 * Derive the WIRELESS_* network type from the current channel (band),
 * HT capability, and the peer's supported-rates list.
 */
u8 judge_network_type23a(struct rtw_adapter *padapter,
			 unsigned char *rate, int ratelen)
{
	u8 network_type = 0;
	struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
	struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;

	if (pmlmeext->cur_channel > 14) {
		/* 5 GHz band: 11a, optionally with HT */
		if (pmlmeinfo->HT_enable)
			network_type = WIRELESS_11_5N;
		network_type |= WIRELESS_11A;
	} else {
		/* 2.4 GHz band: classify by CCK content of the rate set */
		if (pmlmeinfo->HT_enable)
			network_type = WIRELESS_11_24N;
		if ((cckratesonly_included23a(rate, ratelen)) == true)
			network_type |= WIRELESS_11B;
		else if ((cckrates_included23a(rate, ratelen)) == true)
			network_type |= WIRELESS_11BG;
		else
			network_type |= WIRELESS_11G;
	}
	return network_type;
}
/*
 * Translate a driver rate-table index (0..11, flag bit masked off)
 * into the IEEE 802.11 rate value in 500kbps units; out-of-range
 * indices yield 0.
 */
static unsigned char ratetbl_val_2wifirate(unsigned char rate)
{
	static const unsigned char wifirate_tbl[] = {
		IEEE80211_CCK_RATE_1MB,
		IEEE80211_CCK_RATE_2MB,
		IEEE80211_CCK_RATE_5MB,
		IEEE80211_CCK_RATE_11MB,
		IEEE80211_OFDM_RATE_6MB,
		IEEE80211_OFDM_RATE_9MB,
		IEEE80211_OFDM_RATE_12MB,
		IEEE80211_OFDM_RATE_18MB,
		IEEE80211_OFDM_RATE_24MB,
		IEEE80211_OFDM_RATE_36MB,
		IEEE80211_OFDM_RATE_48MB,
		IEEE80211_OFDM_RATE_54MB,
	};
	unsigned char idx = rate & 0x7f;

	if (idx >= ARRAY_SIZE(wifirate_tbl))
		return 0;
	return wifirate_tbl[idx];
}
/*
 * Return true if @rate (an IEEE 802.11 rate value) appears in the
 * adapter's basic-rate table; slots 0xff/0xfe are unused/skip markers.
 */
static int is_basicrate(struct rtw_adapter *padapter, unsigned char rate)
{
	int i;
	unsigned char val;
	struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;

	for (i = 0; i < NumRates; i++) {
		val = pmlmeext->basicrate[i];

		if (val != 0xff && val != 0xfe) {
			if (rate == ratetbl_val_2wifirate(val))
				return true;
		}
	}

	return false;
}
/*
 * Convert the adapter's data-rate table into an IEEE rate set in
 * @rateset, ORing in the basic-rate flag where applicable.
 * 0xff terminates the table, 0xfe marks an unused slot.
 * Returns the number of rates written.
 */
static unsigned int ratetbl2rateset(struct rtw_adapter *padapter,
				    unsigned char *rateset)
{
	int i;
	unsigned char rate;
	unsigned int len = 0;
	struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;

	for (i = 0; i < NumRates; i++) {
		rate = pmlmeext->datarate[i];

		switch (rate) {
		case 0xff:
			return len;

		case 0xfe:
			continue;

		default:
			rate = ratetbl_val_2wifirate(rate);

			if (is_basicrate(padapter, rate) == true)
				rate |= IEEE80211_BASIC_RATE_MASK;

			rateset[len] = rate;
			len++;
			break;
		}
	}
	return len;
}
/*
 * Copy the adapter's supported-rate set into @pbssrate and report its
 * length via @bssrate_len. @pbssrate must hold at least NumRates bytes.
 */
void get_rate_set23a(struct rtw_adapter *padapter,
		     unsigned char *pbssrate, int *bssrate_len)
{
	unsigned char supportedrates[NumRates];

	memset(supportedrates, 0, NumRates);
	*bssrate_len = ratetbl2rateset(padapter, supportedrates);
	memcpy(pbssrate, supportedrates, *bssrate_len);
}
/*
 * Mark the 802.11 mandatory rates as basic (set bit 0x80) in the
 * OS-format rate array @mBratesOS, in place.
 */
void UpdateBrateTbl23a(struct rtw_adapter *Adapter, u8 *mBratesOS)
{
	u8 i;
	u8 rate;

	/* 1M, 2M, 5.5M, 11M, 6M, 12M, 24M are mandatory. */
	for (i = 0; i < NDIS_802_11_LENGTH_RATES_EX; i++) {
		rate = mBratesOS[i] & 0x7f;
		switch (rate) {
		case IEEE80211_CCK_RATE_1MB:
		case IEEE80211_CCK_RATE_2MB:
		case IEEE80211_CCK_RATE_5MB:
		case IEEE80211_CCK_RATE_11MB:
		case IEEE80211_OFDM_RATE_6MB:
		case IEEE80211_OFDM_RATE_12MB:
		case IEEE80211_OFDM_RATE_24MB:
			mBratesOS[i] |= IEEE80211_BASIC_RATE_MASK;
			break;
		default:
			break;
		}
	}
}
/*
 * For SoftAP mode: mark the four CCK rates as basic in @bssrateset,
 * in place.
 */
void Update23aTblForSoftAP(u8 *bssrateset, u32 bssratelen)
{
	u8 i;
	u8 rate;

	for (i = 0; i < bssratelen; i++) {
		rate = bssrateset[i] & 0x7f;
		switch (rate) {
		case IEEE80211_CCK_RATE_1MB:
		case IEEE80211_CCK_RATE_2MB:
		case IEEE80211_CCK_RATE_5MB:
		case IEEE80211_CCK_RATE_11MB:
			bssrateset[i] |= IEEE80211_BASIC_RATE_MASK;
			break;
		}
	}
}
/*
 * Accessors for the device-wide "operating" channel/bandwidth/offset
 * cached in the dvobj shared by all interfaces on the device.
 */
inline u8 rtw_get_oper_ch23a(struct rtw_adapter *adapter)
{
	return adapter_to_dvobj(adapter)->oper_channel;
}

inline void rtw_set_oper_ch23a(struct rtw_adapter *adapter, u8 ch)
{
	adapter_to_dvobj(adapter)->oper_channel = ch;
}

inline u8 rtw_get_oper_bw23a(struct rtw_adapter *adapter)
{
	return adapter_to_dvobj(adapter)->oper_bwmode;
}

inline void rtw_set_oper_bw23a(struct rtw_adapter *adapter, u8 bw)
{
	adapter_to_dvobj(adapter)->oper_bwmode = bw;
}

inline u8 rtw_get_oper_ch23aoffset(struct rtw_adapter *adapter)
{
	return adapter_to_dvobj(adapter)->oper_ch_offset;
}

inline void rtw_set_oper_ch23aoffset23a(struct rtw_adapter *adapter, u8 offset)
{
	adapter_to_dvobj(adapter)->oper_ch_offset = offset;
}
/*
 * Switch the radio to @channel under the device's channel-set mutex,
 * caching the new operating channel first.
 */
void SelectChannel23a(struct rtw_adapter *padapter, unsigned char channel)
{
	mutex_lock(&adapter_to_dvobj(padapter)->setch_mutex);

	/* saved channel info */
	rtw_set_oper_ch23a(padapter, channel);

	PHY_SwChnl8723A(padapter, channel);

	mutex_unlock(&adapter_to_dvobj(padapter)->setch_mutex);
}
/*
 * Program bandwidth mode and primary-channel offset into the PHY under
 * the device's bandwidth-set mutex, caching the values first.
 */
static void set_bwmode(struct rtw_adapter *padapter, unsigned short bwmode,
		       unsigned char channel_offset)
{
	mutex_lock(&adapter_to_dvobj(padapter)->setbw_mutex);

	/* saved bw info */
	rtw_set_oper_bw23a(padapter, bwmode);
	rtw_set_oper_ch23aoffset23a(padapter, channel_offset);

	PHY_SetBWMode23a8723A(padapter, (enum ht_channel_width)bwmode,
			      channel_offset);

	mutex_unlock(&adapter_to_dvobj(padapter)->setbw_mutex);
}
/*
 * Combined channel + bandwidth switch. For 40 MHz operation the PHY is
 * tuned to the center channel (primary +/- 2, i.e. +/- 10 MHz) derived
 * from @channel_offset, then the bandwidth mode is applied.
 */
void set_channel_bwmode23a(struct rtw_adapter *padapter, unsigned char channel,
			   unsigned char channel_offset, unsigned short bwmode)
{
	u8 center_ch;

	if (bwmode == HT_CHANNEL_WIDTH_20 ||
	    channel_offset == HAL_PRIME_CHNL_OFFSET_DONT_CARE) {
		/* SelectChannel23a(padapter, channel); */
		center_ch = channel;
	} else {
		/* switch to the proper channel */
		if (channel_offset == HAL_PRIME_CHNL_OFFSET_LOWER) {
			/* SelectChannel23a(padapter, channel + 2); */
			center_ch = channel + 2;
		} else {
			/* SelectChannel23a(padapter, channel - 2); */
			center_ch = channel - 2;
		}
	}

	/* set Channel */
	mutex_lock(&adapter_to_dvobj(padapter)->setch_mutex);

	/* saved channel/bw info */
	rtw_set_oper_ch23a(padapter, channel);
	rtw_set_oper_bw23a(padapter, bwmode);
	rtw_set_oper_ch23aoffset23a(padapter, channel_offset);

	PHY_SwChnl8723A(padapter, center_ch);	/* set center channel */

	mutex_unlock(&adapter_to_dvobj(padapter)->setch_mutex);

	set_bwmode(padapter, bwmode, channel_offset);
}
/* Return a pointer to the BSSID (MAC address) of @pnetwork. */
inline u8 *get_my_bssid23a(struct wlan_bssid_ex *pnetwork)
{
	return pnetwork->MacAddress;
}
/*
 * is_client_associated_to_ap23a - true when this interface has completed
 * association and is operating in infrastructure (station) mode.
 */
bool is_client_associated_to_ap23a(struct rtw_adapter *padapter)
{
	struct mlme_ext_info *info;

	if (!padapter)
		return false;

	info = &padapter->mlmeextpriv.mlmext_info;

	/* Low two state bits encode the media status role (MSR_*) */
	return (info->state & WIFI_FW_ASSOC_SUCCESS) &&
	       (info->state & 0x03) == MSR_INFRA;
}
/*
 * is_client_associated_to_ibss23a - true when this interface has joined
 * an IBSS (ad-hoc network) successfully.
 */
bool is_client_associated_to_ibss23a(struct rtw_adapter *padapter)
{
	struct mlme_ext_info *info = &padapter->mlmeextpriv.mlmext_info;

	/* Low two state bits encode the media status role (MSR_*) */
	return (info->state & WIFI_FW_ASSOC_SUCCESS) &&
	       (info->state & 0x03) == MSR_ADHOC;
}
/*
 * is_IBSS_empty23a - true when no firmware station slot in the IBSS
 * range is marked in use (status == 1).
 */
bool is_IBSS_empty23a(struct rtw_adapter *padapter)
{
	struct mlme_ext_info *info = &padapter->mlmeextpriv.mlmext_info;
	unsigned int idx = IBSS_START_MAC_ID;

	while (idx < NUM_STA) {
		if (info->FW_sta_info[idx].status == 1)
			return false;
		idx++;
	}

	return true;
}
/*
 * decide_wait_for_beacon_timeout23a - derive the beacon-wait timeout as
 * four beacon intervals, clamped to [WAIT_FOR_BCN_TO_MIN,
 * WAIT_FOR_BCN_TO_MAX].
 */
unsigned int decide_wait_for_beacon_timeout23a(unsigned int bcn_interval)
{
	unsigned int timeout = bcn_interval << 2;	/* 4 beacon periods */

	if (timeout < WAIT_FOR_BCN_TO_MIN)
		return WAIT_FOR_BCN_TO_MIN;
	if (timeout > WAIT_FOR_BCN_TO_MAX)
		return WAIT_FOR_BCN_TO_MAX;

	return timeout;
}
/*
 * clear_cam_entry23a - wipe hardware CAM (key cache) entry @entry by
 * writing an all-zero MAC address and key.
 */
void clear_cam_entry23a(struct rtw_adapter *padapter, u8 entry)
{
	unsigned char null_sta[6] = { 0 };	/* zeroed MAC address */
	unsigned char null_key[16] = { 0 };	/* zeroed 128-bit key */

	rtl8723a_cam_write(padapter, entry, 0, null_sta, null_key);
}
/*
 * allocate_fw_sta_entry23a - claim the first free firmware station slot.
 *
 * Scans the IBSS mac_id range for a slot with status == 0, marks it in
 * use and resets its retry counter.
 *
 * Returns the claimed mac_id, or NUM_STA when every slot is taken --
 * callers must check for the NUM_STA "no slot available" value.
 */
int allocate_fw_sta_entry23a(struct rtw_adapter *padapter)
{
	unsigned int mac_id;
	struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
	struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
	for (mac_id = IBSS_START_MAC_ID; mac_id < NUM_STA; mac_id++) {
		if (pmlmeinfo->FW_sta_info[mac_id].status == 0) {
			pmlmeinfo->FW_sta_info[mac_id].status = 1;
			pmlmeinfo->FW_sta_info[mac_id].retry = 0;
			break;
		}
	}
	return mac_id;
}
/* Invalidate every hardware CAM (key cache) entry and forget all
 * firmware station bookkeeping. */
void flush_all_cam_entry23a(struct rtw_adapter *padapter)
{
	struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
	struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
	rtl8723a_cam_invalidate_all(padapter);
	memset(pmlmeinfo->FW_sta_info, 0, sizeof(pmlmeinfo->FW_sta_info));
}
int WMM_param_handler23a(struct rtw_adapter *padapter, const u8 *p)
{
/* struct registry_priv *pregpriv = &padapter->registrypriv; */
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
if (pmlmepriv->qos_option == 0) {
pmlmeinfo->WMM_enable = 0;
return _FAIL;
}
pmlmeinfo->WMM_enable = 1;
memcpy(&pmlmeinfo->WMM_param, p + 2 + 6,
sizeof(struct WMM_para_element));
return true;
}
/*
 * WMMOnAssocRsp23a - program hardware EDCA (AC) parameters after a
 * successful association, using the WMM parameter set previously stored
 * by WMM_param_handler23a().
 *
 * Also builds the ACM (admission control) mask, and when wifi_spec is
 * enabled sorts the four AC queues by their EDCA parameters into
 * pxmitpriv->wmm_para_seq.
 */
void WMMOnAssocRsp23a(struct rtw_adapter *padapter)
{
	u8 ACI, ACM, AIFS, ECWMin, ECWMax, aSifsTime;
	u8 acm_mask;
	u16 TXOP;
	u32 acParm, i;
	u32 edca[4], inx[4];
	struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
	struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
	struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
	struct registry_priv *pregpriv = &padapter->registrypriv;
	if (pmlmeinfo->WMM_enable == 0) {
		/* AP is not WMM capable: no admission control applies */
		padapter->mlmepriv.acm_mask = 0;
		return;
	}
	acm_mask = 0;
	/* SIFS is 10us in pure 11b, 16us for OFDM-capable modes */
	if (pmlmeext->cur_wireless_mode == WIRELESS_11B)
		aSifsTime = 10;
	else
		aSifsTime = 16;
	for (i = 0; i < 4; i++) {
		ACI = (pmlmeinfo->WMM_param.ac_param[i].ACI_AIFSN >> 5) & 0x03;
		ACM = (pmlmeinfo->WMM_param.ac_param[i].ACI_AIFSN >> 4) & 0x01;
		/* AIFS = AIFSN * slot time + SIFS - r2t phy delay */
		AIFS = (pmlmeinfo->WMM_param.ac_param[i].ACI_AIFSN & 0x0f) *
			pmlmeinfo->slotTime + aSifsTime;
		ECWMin = pmlmeinfo->WMM_param.ac_param[i].CW & 0x0f;
		ECWMax = (pmlmeinfo->WMM_param.ac_param[i].CW & 0xf0) >> 4;
		TXOP = le16_to_cpu(pmlmeinfo->WMM_param.ac_param[i].TXOP_limit);
		/* pack AIFS/ECWmin/ECWmax/TXOP into one hw AC parameter word */
		acParm = AIFS | (ECWMin << 8) | (ECWMax << 12) | (TXOP << 16);
		switch (ACI) {
		case 0x0:
			/* AC_BE */
			rtl8723a_set_ac_param_be(padapter, acParm);
			acm_mask |= (ACM? BIT(1):0);
			edca[XMIT_BE_QUEUE] = acParm;
			break;
		case 0x1:
			/* AC_BK - admission control never applied here */
			rtl8723a_set_ac_param_bk(padapter, acParm);
			/* acm_mask |= (ACM? BIT(0):0); */
			edca[XMIT_BK_QUEUE] = acParm;
			break;
		case 0x2:
			/* AC_VI */
			rtl8723a_set_ac_param_vi(padapter, acParm);
			acm_mask |= (ACM? BIT(2):0);
			edca[XMIT_VI_QUEUE] = acParm;
			break;
		case 0x3:
			/* AC_VO */
			rtl8723a_set_ac_param_vo(padapter, acParm);
			acm_mask |= (ACM? BIT(3):0);
			edca[XMIT_VO_QUEUE] = acParm;
			break;
		}
		DBG_8723A("WMM(%x): %x, %x\n", ACI, ACM, acParm);
	}
	if (padapter->registrypriv.acm_method == 1)
		rtl8723a_set_acm_ctrl(padapter, acm_mask);
	else
		padapter->mlmepriv.acm_mask = acm_mask;
	inx[0] = 0; inx[1] = 1; inx[2] = 2; inx[3] = 3;
	if (pregpriv->wifi_spec == 1) {
		u32 j, change_inx = false;
		/* entry indx: 0->vo, 1->vi, 2->be, 3->bk. */
		/* bubble-sort the queues by ascending CW/AIFS, breaking
		 * ties by descending TXOP */
		for (i = 0; i < 4; i++) {
			for (j = i+1; j < 4; j++) {
				/* compare CW and AIFS */
				if ((edca[j] & 0xFFFF) < (edca[i] & 0xFFFF)) {
					change_inx = true;
				} else if ((edca[j] & 0xFFFF) ==
					   (edca[i] & 0xFFFF)) {
					/* compare TXOP */
					if ((edca[j] >> 16) > (edca[i] >> 16))
						change_inx = true;
				}
				if (change_inx) {
					swap(edca[i], edca[j]);
					swap(inx[i], inx[j]);
					change_inx = false;
				}
			}
		}
	}
	for (i = 0; i<4; i++) {
		pxmitpriv->wmm_para_seq[i] = inx[i];
		DBG_8723A("wmm_para_seq(%d): %d\n", i,
			  pxmitpriv->wmm_para_seq[i]);
	}
}
/*
 * bwmode_update_check - re-evaluate 20/40 MHz operation from an HT
 * Operation IE found in a beacon.
 *
 * @p points at the IE header (id, len, body). If the advertised
 * bandwidth or secondary-channel offset differs from our current
 * settings, the new values are latched, bwmode_updated is raised and
 * the AP's sta_info HT state is refreshed.
 */
static void bwmode_update_check(struct rtw_adapter *padapter, const u8 *p)
{
	struct ieee80211_ht_operation *pHT_info;
	struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
	struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
	struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
	struct registry_priv *pregistrypriv = &padapter->registrypriv;
	struct ht_priv *phtpriv = &pmlmepriv->htpriv;
	unsigned char new_bwmode;
	unsigned char new_ch_offset;
	if (!p)
		return;
	if (!phtpriv->ht_option)
		return;
	/* only accept an HT Operation element of the expected size */
	if (p[1] != sizeof(struct ieee80211_ht_operation))
		return;
	pHT_info = (struct ieee80211_ht_operation *)(p + 2);
	if ((pHT_info->ht_param & IEEE80211_HT_PARAM_CHAN_WIDTH_ANY) &&
	    pregistrypriv->cbw40_enable) {
		new_bwmode = HT_CHANNEL_WIDTH_40;
		/* note: "secondary above" maps to HAL "primary lower"
		 * and vice versa -- the HAL names the primary's side */
		switch (pHT_info->ht_param & IEEE80211_HT_PARAM_CHA_SEC_OFFSET){
		case IEEE80211_HT_PARAM_CHA_SEC_ABOVE:
			new_ch_offset = HAL_PRIME_CHNL_OFFSET_LOWER;
			break;
		case IEEE80211_HT_PARAM_CHA_SEC_BELOW:
			new_ch_offset = HAL_PRIME_CHNL_OFFSET_UPPER;
			break;
		default:
			new_ch_offset = HAL_PRIME_CHNL_OFFSET_DONT_CARE;
			break;
		}
	} else {
		new_bwmode = HT_CHANNEL_WIDTH_20;
		new_ch_offset = HAL_PRIME_CHNL_OFFSET_DONT_CARE;
	}
	if (new_bwmode != pmlmeext->cur_bwmode ||
	    new_ch_offset != pmlmeext->cur_ch_offset) {
		pmlmeinfo->bwmode_updated = true;
		pmlmeext->cur_bwmode = new_bwmode;
		pmlmeext->cur_ch_offset = new_ch_offset;
		/* update HT info also */
		HT_info_handler23a(padapter, p);
	} else
		pmlmeinfo->bwmode_updated = false;
	if (pmlmeinfo->bwmode_updated) {
		struct sta_info *psta;
		struct wlan_bssid_ex *cur_network = &pmlmeinfo->network;
		struct sta_priv *pstapriv = &padapter->stapriv;
		/* set_channel_bwmode23a(padapter, pmlmeext->cur_channel,
		   pmlmeext->cur_ch_offset, pmlmeext->cur_bwmode); */
		/* update ap's stainfo */
		psta = rtw_get_stainfo23a(pstapriv, cur_network->MacAddress);
		if (psta) {
			struct ht_priv *phtpriv_sta = &psta->htpriv;
			if (phtpriv_sta->ht_option) {
				/* bwmode */
				phtpriv_sta->bwmode = pmlmeext->cur_bwmode;
				phtpriv_sta->ch_offset =
					pmlmeext->cur_ch_offset;
			} else {
				/* non-HT peer: force 20 MHz */
				phtpriv_sta->bwmode = HT_CHANNEL_WIDTH_20;
				phtpriv_sta->ch_offset =
					HAL_PRIME_CHNL_OFFSET_DONT_CARE;
			}
		}
	}
}
/*
 * HT_caps_handler23a - intersect the peer's HT Capabilities IE with our
 * own stored capabilities.
 *
 * @p points at the IE header (id, len, body). Each capability byte is
 * AND-merged, except byte 2 (A-MPDU parameters) where the smaller
 * max-AMPDU factor and larger min-MPDU density are selected. Finally
 * the RX MCS mask is limited to what our RF configuration supports.
 *
 * NOTE(review): the merge loop trusts p[1] and writes p[1] bytes into
 * pmlmeinfo->ht_cap; assumes the caller passed a well-formed HT caps IE
 * no longer than sizeof(struct ieee80211_ht_cap) -- confirm at call
 * sites.
 */
void HT_caps_handler23a(struct rtw_adapter *padapter, const u8 *p)
{
	unsigned int i;
	u8 rf_type;
	u8 max_AMPDU_len, min_MPDU_spacing;
	struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
	struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
	struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
	struct ht_priv *phtpriv = &pmlmepriv->htpriv;
	struct ieee80211_ht_cap *cap;
	u8 *dstcap;
	if (!p)
		return;
	if (!phtpriv->ht_option)
		return;
	pmlmeinfo->HT_caps_enable = 1;
	cap = &pmlmeinfo->ht_cap;
	dstcap = (u8 *)cap;
	for (i = 0; i < p[1]; i++) {
		if (i != 2) {
			/* plain capability bytes: keep common bits only */
			dstcap[i] &= p[i + 2];
		} else {
			/* modify from fw by Thomas 2010/11/17 */
			/* A-MPDU params: pick the smaller max length... */
			if ((cap->ampdu_params_info &
			     IEEE80211_HT_AMPDU_PARM_FACTOR) >
			    (p[i + 2] & IEEE80211_HT_AMPDU_PARM_FACTOR))
				max_AMPDU_len = p[i + 2] &
					IEEE80211_HT_AMPDU_PARM_FACTOR;
			else
				max_AMPDU_len = cap->ampdu_params_info &
					IEEE80211_HT_AMPDU_PARM_FACTOR;
			/* ...and the larger minimum MPDU start spacing */
			if ((cap->ampdu_params_info &
			     IEEE80211_HT_AMPDU_PARM_DENSITY) >
			    (p[i + 2] & IEEE80211_HT_AMPDU_PARM_DENSITY))
				min_MPDU_spacing = cap->ampdu_params_info &
					IEEE80211_HT_AMPDU_PARM_DENSITY;
			else
				min_MPDU_spacing = p[i + 2] &
					IEEE80211_HT_AMPDU_PARM_DENSITY;
			cap->ampdu_params_info =
				max_AMPDU_len | min_MPDU_spacing;
		}
	}
	rf_type = rtl8723a_get_rf_type(padapter);
	/* update the MCS rates */
	for (i = 0; i < IEEE80211_HT_MCS_MASK_LEN; i++) {
		/* single RX chain supports only the 1R MCS set */
		if (rf_type == RF_1T1R || rf_type == RF_1T2R)
			cap->mcs.rx_mask[i] &= MCS_rate_1R23A[i];
		else
			cap->mcs.rx_mask[i] &= MCS_rate_2R23A[i];
	}
}
/*
 * HT_info_handler23a - store a received HT Operation IE.
 *
 * @p points at the IE header (id, len, body). The element is accepted
 * only when HT is enabled locally and the body has exactly the expected
 * size; it is then copied into pmlmeinfo->HT_info.
 */
void HT_info_handler23a(struct rtw_adapter *padapter, const u8 *p)
{
	struct mlme_ext_info *pmlmeinfo =
		&padapter->mlmeextpriv.mlmext_info;
	struct ht_priv *phtpriv = &padapter->mlmepriv.htpriv;

	/* nothing to do without an IE or when HT is disabled locally */
	if (!p || !phtpriv->ht_option)
		return;

	/* accept only an HT Operation element of exactly the right size */
	if (p[1] != sizeof(struct ieee80211_ht_operation))
		return;

	pmlmeinfo->HT_info_enable = 1;
	memcpy(&pmlmeinfo->HT_info, p + 2, p[1]);
}
/*
 * HTOnAssocRsp23a - finalize HT state after association.
 *
 * HT is enabled only when both an HT Capabilities and an HT Operation
 * IE were accepted; the negotiated A-MPDU factor and density are then
 * programmed into the hardware.
 */
void HTOnAssocRsp23a(struct rtw_adapter *padapter)
{
	unsigned char max_AMPDU_len;
	unsigned char min_MPDU_spacing;
	/* struct registry_priv *pregpriv = &padapter->registrypriv; */
	struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
	struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
	DBG_8723A("%s\n", __func__);
	if (pmlmeinfo->HT_info_enable && pmlmeinfo->HT_caps_enable)
		pmlmeinfo->HT_enable = 1;
	else {
		pmlmeinfo->HT_enable = 0;
		/* set_channel_bwmode23a(padapter, pmlmeext->cur_channel,
		   pmlmeext->cur_ch_offset, pmlmeext->cur_bwmode); */
		return;
	}
	/* handle A-MPDU parameter field */
	/*
	   AMPDU_para [1:0]:Max AMPDU Len => 0:8k , 1:16k, 2:32k, 3:64k
	   AMPDU_para [4:2]:Min MPDU Start Spacing
	 */
	max_AMPDU_len = pmlmeinfo->ht_cap.ampdu_params_info &
		IEEE80211_HT_AMPDU_PARM_FACTOR;
	min_MPDU_spacing =
		(pmlmeinfo->ht_cap.ampdu_params_info &
		 IEEE80211_HT_AMPDU_PARM_DENSITY) >> 2;
	rtl8723a_set_ampdu_min_space(padapter, min_MPDU_spacing);
	rtl8723a_set_ampdu_factor(padapter, max_AMPDU_len);
}
/*
 * ERP_IE_handler23a - store a received ERP Information IE.
 *
 * @p points at the IE header (id, len, body). The ERP element carries
 * at most one octet of flags; larger elements are ignored.
 */
void ERP_IE_handler23a(struct rtw_adapter *padapter, const u8 *p)
{
	struct mlme_ext_info *pmlmeinfo =
		&padapter->mlmeextpriv.mlmext_info;

	if (p[1] > 1)
		return;

	pmlmeinfo->ERP_enable = 1;
	memcpy(&pmlmeinfo->ERP_IE, p + 2, p[1]);
}
/*
 * VCS_update23a - choose the virtual-carrier-sense protection scheme
 * (RTS/CTS vs CTS-to-self vs none) for @psta.
 *
 * Policy comes from the registry: 0 = always off, 1 = always on,
 * 2 = automatic, following the AP's ERP "Use Protection" bit.
 */
void VCS_update23a(struct rtw_adapter *padapter, struct sta_info *psta)
{
	struct registry_priv *pregpriv = &padapter->registrypriv;
	struct mlme_ext_info *pmlmeinfo =
		&padapter->mlmeextpriv.mlmext_info;
	int use_protection;

	switch (pregpriv->vrtl_carrier_sense) { /* 0:off 1:on 2:auto */
	case 0:		/* protection disabled */
		use_protection = 0;
		break;
	case 1:		/* protection forced on */
		use_protection = 1;
		break;
	case 2:		/* follow the ERP "Use Protection" bit */
	default:
		use_protection = pmlmeinfo->ERP_enable &&
				 (pmlmeinfo->ERP_IE & BIT(1));
		break;
	}

	if (!use_protection) {
		psta->rtsen = 0;
		psta->cts2self = 0;
	} else if (pregpriv->vcs_type == RTS_CTS) {
		psta->rtsen = 1;
		psta->cts2self = 0;
	} else {
		psta->rtsen = 0;
		psta->cts2self = 1;
	}
}
/*
 * rtw_check_bcn_info23a - validate a received beacon against the
 * network we are associated to.
 *
 * Checks, in order: BSSID, channel, SSID, the privacy capability bit
 * and, for WPA/WPA2, the group/pairwise ciphers and 802.1x setting.
 *
 * Returns _SUCCESS when the beacon is consistent with the current
 * association (or we are not associated at all), _FAIL on any mismatch;
 * the caller then typically tears the connection down.
 *
 * Fix: the RSN branch repeated the "p && p[1]" test that its enclosing
 * if had just established; the redundant inner check is removed.
 */
int rtw_check_bcn_info23a(struct rtw_adapter *Adapter,
			  struct ieee80211_mgmt *mgmt, u32 pkt_len)
{
	struct wlan_network *cur_network = &Adapter->mlmepriv.cur_network;
	struct ieee80211_ht_operation *pht_info;
	unsigned short val16;
	u8 crypto, bcn_channel;
	int group_cipher = 0, pairwise_cipher = 0, is_8021x = 0, r;
	int pie_len, ssid_len, privacy;
	const u8 *p, *ssid;

	if (!is_client_associated_to_ap23a(Adapter))
		return _SUCCESS;

	if (unlikely(!ieee80211_is_beacon(mgmt->frame_control))) {
		printk(KERN_WARNING "%s: received a non beacon frame!\n",
		       __func__);
		return _FAIL;
	}

	if (!ether_addr_equal(cur_network->network.MacAddress, mgmt->bssid)) {
		DBG_8723A("%s: linked but recv other bssid bcn %pM %pM\n",
			  __func__, mgmt->bssid,
			  cur_network->network.MacAddress);
		return _FAIL;
	}

	/* length of the variable IEs behind the fixed beacon fields */
	pie_len = pkt_len - offsetof(struct ieee80211_mgmt, u.beacon.variable);

	/* Checking for channel */
	p = cfg80211_find_ie(WLAN_EID_DS_PARAMS, mgmt->u.beacon.variable,
			     pie_len);
	if (p) {
		bcn_channel = p[2];
	} else {
		/* In 5G, some APs do not carry a DS Params IE; fall back
		 * to the primary channel in the HT Operation IE. */
		p = cfg80211_find_ie(WLAN_EID_HT_OPERATION,
				     mgmt->u.beacon.variable, pie_len);
		if (p && p[1] > 0) {
			pht_info = (struct ieee80211_ht_operation *)(p + 2);
			bcn_channel = pht_info->primary_chan;
		} else { /* we don't find channel IE, so don't check it */
			DBG_8723A("Oops: %s we don't find channel IE, so don't "
				  "check it\n", __func__);
			bcn_channel = Adapter->mlmeextpriv.cur_channel;
		}
	}
	if (bcn_channel != Adapter->mlmeextpriv.cur_channel) {
		DBG_8723A("%s beacon channel:%d cur channel:%d disconnect\n",
			  __func__, bcn_channel,
			  Adapter->mlmeextpriv.cur_channel);
		goto _mismatch;
	}

	/* checking SSID */
	p = cfg80211_find_ie(WLAN_EID_SSID, mgmt->u.beacon.variable, pie_len);
	if (p && p[1]) {
		ssid = p + 2;
		ssid_len = p[1];
	} else {
		DBG_8723A("%s marc: cannot find SSID for survey event\n",
			  __func__);
		ssid = NULL;
		ssid_len = 0;
	}

	RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_,
		 "%s bssid.Ssid.Ssid:%s bssid.Ssid.SsidLength:%d cur_network->network.Ssid.Ssid:%s len:%d\n",
		 __func__, ssid, ssid_len, cur_network->network.Ssid.ssid,
		 cur_network->network.Ssid.ssid_len);

	if (ssid_len != cur_network->network.Ssid.ssid_len || ssid_len > 32 ||
	    (ssid_len &&
	     memcmp(ssid, cur_network->network.Ssid.ssid, ssid_len))) {
		DBG_8723A("%s(), SSID is not match return FAIL\n", __func__);
		goto _mismatch;
	}

	/* check encryption info */
	val16 = le16_to_cpu(mgmt->u.beacon.capab_info);
	if (val16 & WLAN_CAPABILITY_PRIVACY)
		privacy = 1;
	else
		privacy = 0;

	RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_,
		 "%s(): cur_network->network.Privacy is %d, bssid.Privacy is %d\n",
		 __func__, cur_network->network.Privacy, privacy);
	if (cur_network->network.Privacy != privacy) {
		DBG_8723A("%s(), privacy is not match return FAIL\n", __func__);
		goto _mismatch;
	}

	/* classify the advertised security: RSN, WPA, WEP or open */
	p = cfg80211_find_ie(WLAN_EID_RSN, mgmt->u.beacon.variable, pie_len);
	if (p && p[1]) {
		crypto = ENCRYP_PROTOCOL_WPA2;
		r = rtw_parse_wpa2_ie23a(p, p[1] + 2, &group_cipher,
					 &pairwise_cipher, &is_8021x);
		if (r == _SUCCESS)
			RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_,
				 "%s pnetwork->pairwise_cipher: %d, pnetwork->group_cipher: %d, is_802x : %d\n",
				 __func__, pairwise_cipher,
				 group_cipher, is_8021x);
	} else {
		p = cfg80211_find_vendor_ie(WLAN_OUI_MICROSOFT,
					    WLAN_OUI_TYPE_MICROSOFT_WPA,
					    mgmt->u.beacon.variable, pie_len);
		if (p && p[1]) {
			crypto = ENCRYP_PROTOCOL_WPA;
			r = rtw_parse_wpa_ie23a(p, p[1] + 2, &group_cipher,
						&pairwise_cipher, &is_8021x);
			if (r == _SUCCESS)
				RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_,
					 "%s pnetwork->pairwise_cipher: %d, group_cipher is %d, is_8021x is %d\n",
					 __func__, pairwise_cipher,
					 group_cipher, is_8021x);
		} else {
			if (privacy)
				crypto = ENCRYP_PROTOCOL_WEP;
			else
				crypto = ENCRYP_PROTOCOL_OPENSYS;
		}
	}

	if (cur_network->BcnInfo.encryp_protocol != crypto) {
		DBG_8723A("%s(): encryption mismatch, return FAIL\n", __func__);
		goto _mismatch;
	}

	if (crypto == ENCRYP_PROTOCOL_WPA || crypto == ENCRYP_PROTOCOL_WPA2) {
		RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_,
			 "%s cur_network->group_cipher is %d: %d\n", __func__,
			 cur_network->BcnInfo.group_cipher, group_cipher);
		if (pairwise_cipher != cur_network->BcnInfo.pairwise_cipher ||
		    group_cipher != cur_network->BcnInfo.group_cipher) {
			DBG_8723A("%s pairwise_cipher(%x:%x) or group_cipher "
				  "(%x:%x) is not match, return FAIL\n",
				  __func__, pairwise_cipher,
				  cur_network->BcnInfo.pairwise_cipher,
				  group_cipher,
				  cur_network->BcnInfo.group_cipher);
			goto _mismatch;
		}
		if (is_8021x != cur_network->BcnInfo.is_8021x) {
			DBG_8723A("%s authentication is not match, return "
				  "FAIL\n", __func__);
			goto _mismatch;
		}
	}

	return _SUCCESS;

_mismatch:
	return _FAIL;
}
/*
 * update_beacon23a_info - apply state carried in a beacon from our AP.
 *
 * Re-checks 20/40 MHz operation from the HT Operation IE and, when an
 * ERP IE is present, refreshes the ERP flags and the RTS/CTS protection
 * choice for @psta.
 */
void update_beacon23a_info(struct rtw_adapter *padapter,
			   struct ieee80211_mgmt *mgmt,
			   uint pkt_len, struct sta_info *psta)
{
	const u8 *ie;
	unsigned int ies_len;

	/* length of the variable IEs behind the fixed beacon fields */
	ies_len = pkt_len - offsetof(struct ieee80211_mgmt, u.beacon.variable);

	ie = cfg80211_find_ie(WLAN_EID_HT_OPERATION, mgmt->u.beacon.variable,
			      ies_len);
	if (ie)
		bwmode_update_check(padapter, ie);

	ie = cfg80211_find_ie(WLAN_EID_ERP_INFO, mgmt->u.beacon.variable,
			      ies_len);
	if (ie) {
		ERP_IE_handler23a(padapter, ie);
		VCS_update23a(padapter, psta);
	}
}
/*
 * is_ap_in_tkip23a - true when the associated AP advertises TKIP in its
 * WPA or RSN IE.
 *
 * Walks the stored IE buffer of the current network; only meaningful
 * when the privacy capability bit is set.
 */
bool is_ap_in_tkip23a(struct rtw_adapter *padapter)
{
	struct mlme_ext_info *pmlmeinfo =
		&padapter->mlmeextpriv.mlmext_info;
	struct wlan_bssid_ex *cur_network = &pmlmeinfo->network;
	const u8 *ie;
	u32 pos = 0;

	if (!(cur_network->capability & WLAN_CAPABILITY_PRIVACY))
		return false;

	while (pos < pmlmeinfo->network.IELength) {
		ie = pmlmeinfo->network.IEs + pos;

		switch (ie[0]) {
		case WLAN_EID_VENDOR_SPECIFIC:
			/* WPA vendor IE advertising the TKIP cipher */
			if (!memcmp(ie + 2, RTW_WPA_OUI23A_TYPE, 4) &&
			    !memcmp(ie + 2 + 12, WPA_TKIP_CIPHER, 4))
				return true;
			break;
		case WLAN_EID_RSN:
			/* RSN IE advertising the TKIP cipher */
			if (!memcmp(ie + 2 + 8, RSN_TKIP_CIPHER, 4))
				return true;
			break;
		default:
			break;
		}
		pos += ie[1] + 2;	/* next IE (header is 2 bytes) */
	}

	return false;
}
/*
 * should_forbid_n_rate23a - decide whether HT (11n) rates must be
 * disallowed for the current network.
 *
 * Per the 11n spec, HT rates may not be used with legacy (TKIP/WEP)
 * ciphers. Returns true when privacy is on but neither the WPA nor the
 * RSN IE advertises CCMP; false for open networks or CCMP-capable ones.
 *
 * Fix: the WLAN_EID_RSN case was missing its "break" and silently fell
 * through to "default" (behaviour was unchanged, but the implicit
 * fallthrough is a compiler-warning defect).
 */
bool should_forbid_n_rate23a(struct rtw_adapter *padapter)
{
	u32 i;
	struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
	struct wlan_bssid_ex *cur_network = &pmlmepriv->cur_network.network;
	const u8 *p;

	if (!(cur_network->capability & WLAN_CAPABILITY_PRIVACY))
		return false;

	for (i = 0; i < cur_network->IELength;) {
		p = cur_network->IEs + i;

		switch (p[0]) {
		case WLAN_EID_VENDOR_SPECIFIC:
			/* WPA IE listing CCMP as group or pairwise cipher */
			if (!memcmp(p + 2, RTW_WPA_OUI23A_TYPE, 4) &&
			    (!memcmp(p + 2 + 12,
				     WPA_CIPHER_SUITE_CCMP23A, 4) ||
			     !memcmp(p + 2 + 16,
				     WPA_CIPHER_SUITE_CCMP23A, 4)))
				return false;
			break;
		case WLAN_EID_RSN:
			/* RSN IE listing CCMP as group or pairwise cipher */
			if (!memcmp(p + 2 + 8,
				    RSN_CIPHER_SUITE_CCMP23A, 4) ||
			    !memcmp(p + 2 + 12,
				    RSN_CIPHER_SUITE_CCMP23A, 4))
				return false;
			break;
		default:
			break;
		}
		i += p[1] + 2;	/* next IE (header is 2 bytes) */
	}

	return true;
}
/*
 * is_ap_in_wep23a - true when the associated AP uses WEP-only security:
 * privacy bit set but neither a WPA vendor IE nor an RSN IE present.
 */
bool is_ap_in_wep23a(struct rtw_adapter *padapter)
{
	struct mlme_ext_info *pmlmeinfo =
		&padapter->mlmeextpriv.mlmext_info;
	struct wlan_bssid_ex *cur_network = &pmlmeinfo->network;
	const u8 *ie;
	u32 pos = 0;

	if (!(cur_network->capability & WLAN_CAPABILITY_PRIVACY))
		return false;

	while (pos < pmlmeinfo->network.IELength) {
		ie = pmlmeinfo->network.IEs + pos;

		/* any WPA or RSN IE means it is not plain WEP */
		if (ie[0] == WLAN_EID_VENDOR_SPECIFIC &&
		    !memcmp(ie + 2, RTW_WPA_OUI23A_TYPE, 4))
			return false;
		if (ie[0] == WLAN_EID_RSN)
			return false;

		pos += ie[1] + 2;	/* next IE (header is 2 bytes) */
	}

	return true;
}
/*
 * wifirate2_ratetbl_inx23a - map an 802.11 rate value (in 0.5 Mbps
 * units, "basic rate" flag bit 7 ignored) to the hardware rate-table
 * index.
 *
 * Index:  0   1    2    3   4   5    6    7    8    9   10   11
 * Rate : 1M  2M  5.5M  11M  6M  9M  12M  18M  24M  36M  48M  54M
 *
 * Unknown rates map to index 0 (1 Mbps).
 */
static int wifirate2_ratetbl_inx23a(unsigned char rate)
{
	static const unsigned char tbl[][2] = {
		{ 1 * 2, 0 }, { 2 * 2, 1 }, { 11, 2 }, { 11 * 2, 3 },
		{ 6 * 2, 4 }, { 9 * 2, 5 }, { 12 * 2, 6 }, { 18 * 2, 7 },
		{ 24 * 2, 8 }, { 36 * 2, 9 }, { 48 * 2, 10 }, { 54 * 2, 11 },
	};
	unsigned int i;

	rate &= 0x7f;	/* strip the "basic rate" flag bit */

	for (i = 0; i < sizeof(tbl) / sizeof(tbl[0]); i++) {
		if (tbl[i][0] == rate)
			return tbl[i][1];
	}

	return 0;
}
/*
 * update_basic_rate23a - build a hardware rate bitmap from the rates in
 * @ptn that are flagged as "basic" (bit 7 set).
 *
 * At most NumRates entries of @ptn are examined.
 */
unsigned int update_basic_rate23a(unsigned char *ptn, unsigned int ptn_sz)
{
	unsigned int count = (ptn_sz > NumRates) ? NumRates : ptn_sz;
	unsigned int mask = 0;
	unsigned int i;

	for (i = 0; i < count; i++) {
		if (ptn[i] & 0x80)
			mask |= 1u << wifirate2_ratetbl_inx23a(ptn[i]);
	}

	return mask;
}
/*
 * update_supported_rate23a - build a hardware rate bitmap covering every
 * rate listed in @ptn (basic flag irrelevant).
 *
 * At most NumRates entries of @ptn are examined.
 */
unsigned int update_supported_rate23a(unsigned char *ptn, unsigned int ptn_sz)
{
	unsigned int count = (ptn_sz > NumRates) ? NumRates : ptn_sz;
	unsigned int mask = 0;
	unsigned int i;

	for (i = 0; i < count; i++)
		mask |= 1u << wifirate2_ratetbl_inx23a(ptn[i]);

	return mask;
}
/*
 * update_MSC_rate23a - fold the first two RX MCS bitmap octets into the
 * driver's combined rate bitmap: MCS0-7 occupy bits 12-19, MCS8-15
 * occupy bits 20-27.
 */
unsigned int update_MSC_rate23a(struct ieee80211_ht_cap *pHT_caps)
{
	unsigned int mcs0_7 = pHT_caps->mcs.rx_mask[0];
	unsigned int mcs8_15 = pHT_caps->mcs.rx_mask[1];

	return (mcs0_7 << 12) | (mcs8_15 << 20);
}
/*
 * support_short_GI23a - _SUCCESS when short guard interval may be used
 * with the peer described by @pHT_caps at the current bandwidth,
 * _FAIL otherwise.
 */
int support_short_GI23a(struct rtw_adapter *padapter,
			struct ieee80211_ht_cap *pHT_caps)
{
	struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
	struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
	unsigned char bit_offset;

	if (!pmlmeinfo->HT_enable)
		return _FAIL;
	/* Ralink APs are excluded from short-GI operation */
	if (pmlmeinfo->assoc_AP_vendor == HT_IOT_PEER_RALINK)
		return _FAIL;

	/* HT cap bit 6 = short GI for 40 MHz, bit 5 = short GI for 20 MHz */
	if (pmlmeext->cur_bwmode & HT_CHANNEL_WIDTH_40)
		bit_offset = 6;
	else
		bit_offset = 5;

	return (pHT_caps->cap_info & cpu_to_le16(0x1 << bit_offset)) ?
		_SUCCESS : _FAIL;
}
/*
 * get_highest_rate_idx23a - position of the highest set bit in @mask,
 * scanning bits 27..0; returns 0 when no bit in that range is set.
 */
unsigned char get_highest_rate_idx23a(u32 mask)
{
	int bit;

	for (bit = 27; bit >= 0; bit--) {
		if (mask & (1U << bit))
			return bit;
	}

	return 0;
}
/* Refresh the hardware rate-adaptive mask for @psta (level 0). */
void Update_RA_Entry23a(struct rtw_adapter *padapter, struct sta_info *psta)
{
	rtw_hal_update_ra_mask23a(psta, 0);
}
/* Enable rate adaptation for @psta by (re)building its RA entry. */
static void enable_rate_adaptive(struct rtw_adapter *padapter,
				 struct sta_info *psta)
{
	Update_RA_Entry23a(padapter, psta);
}
/* Initialize the TX rate handling for @psta. */
void set_sta_rate23a(struct rtw_adapter *padapter, struct sta_info *psta)
{
	/* rate adaptive */
	enable_rate_adaptive(padapter, psta);
}
/* Update RRSR and Rate for USERATE */
/*
 * update_tx_basic_rate23a - select the basic-rate set and management
 * frame TX rate matching @wirelessmode (pure 11b, mixed b/g, or
 * OFDM-only).
 */
void update_tx_basic_rate23a(struct rtw_adapter *padapter, u8 wirelessmode)
{
	unsigned char supported_rates[NDIS_802_11_LENGTH_RATES_EX] = { 0 };

	if (wirelessmode == WIRELESS_11B)
		memcpy(supported_rates, rtw_basic_rate_cck, 4);	  /* pure 11b */
	else if (wirelessmode & WIRELESS_11B)
		memcpy(supported_rates, rtw_basic_rate_mix, 7);	  /* mixed b/g */
	else
		memcpy(supported_rates, rtw_basic_rate_ofdm, 3);  /* OFDM only */

	/* management frames go at 1M when CCK is available, else 6M */
	if (wirelessmode & WIRELESS_11B)
		update_mgnt_tx_rate23a(padapter, IEEE80211_CCK_RATE_1MB);
	else
		update_mgnt_tx_rate23a(padapter, IEEE80211_OFDM_RATE_6MB);

	HalSetBrateCfg23a(padapter, supported_rates);
}
/*
 * check_assoc_AP23a - identify the AP vendor (for IOT workarounds) from
 * the vendor-specific IEs in @pframe (length @len).
 *
 * Most vendors are recognized from a single OUI. Ralink needs two
 * Ralink IEs before it is reported, and a Ralink IE followed by an
 * Epigram IE identifies a Tenda W311R.
 *
 * NOTE(review): the loop compares signed "i" against unsigned "len";
 * works for sane IE buffers but relies on len fitting in an int.
 */
unsigned char check_assoc_AP23a(u8 *pframe, uint len)
{
	int i;
	u8 epigram_vendor_flag;
	u8 ralink_vendor_flag;
	const u8 *p;
	epigram_vendor_flag = 0;
	ralink_vendor_flag = 0;
	for (i = 0; i < len;) {
		p = pframe + i;
		switch (p[0]) {
		case WLAN_EID_VENDOR_SPECIFIC:
			if (!memcmp(p + 2, ARTHEROS_OUI1, 3) ||
			    !memcmp(p + 2, ARTHEROS_OUI2, 3)) {
				DBG_8723A("link to Artheros AP\n");
				return HT_IOT_PEER_ATHEROS;
			} else if (!memcmp(p + 2, BROADCOM_OUI1, 3) ||
				   !memcmp(p + 2, BROADCOM_OUI2, 3)) {
				DBG_8723A("link to Broadcom AP\n");
				return HT_IOT_PEER_BROADCOM;
			} else if (!memcmp(p + 2, MARVELL_OUI, 3)) {
				DBG_8723A("link to Marvell AP\n");
				return HT_IOT_PEER_MARVELL;
			} else if (!memcmp(p + 2, RALINK_OUI, 3)) {
				/* report Ralink only on the second hit */
				if (!ralink_vendor_flag)
					ralink_vendor_flag = 1;
				else {
					DBG_8723A("link to Ralink AP\n");
					return HT_IOT_PEER_RALINK;
				}
			} else if (!memcmp(p + 2, CISCO_OUI, 3)) {
				DBG_8723A("link to Cisco AP\n");
				return HT_IOT_PEER_CISCO;
			} else if (!memcmp(p + 2, REALTEK_OUI, 3)) {
				DBG_8723A("link to Realtek 96B\n");
				return HT_IOT_PEER_REALTEK;
			} else if (!memcmp(p + 2, AIRGOCAP_OUI, 3)) {
				DBG_8723A("link to Airgo Cap\n");
				return HT_IOT_PEER_AIRGO;
			} else if (!memcmp(p + 2, EPIGRAM_OUI, 3)) {
				/* Ralink + Epigram identifies Tenda W311R */
				epigram_vendor_flag = 1;
				if (ralink_vendor_flag) {
					DBG_8723A("link to Tenda W311R AP\n");
					return HT_IOT_PEER_TENDA;
				} else
					DBG_8723A("Capture EPIGRAM_OUI\n");
			} else
				break;
		default:
			break;
		}
		i += (p[1] + 2);
	}
	/* resolve the deferred Ralink/Tenda cases */
	if (ralink_vendor_flag && !epigram_vendor_flag) {
		DBG_8723A("link to Ralink AP\n");
		return HT_IOT_PEER_RALINK;
	} else if (ralink_vendor_flag && epigram_vendor_flag) {
		DBG_8723A("link to Tenda W311R AP\n");
		return HT_IOT_PEER_TENDA;
	} else {
		DBG_8723A("link to new AP\n");
		return HT_IOT_PEER_UNKNOWN;
	}
}
/*
 * update_IOT_info23a - apply per-vendor interoperability workarounds
 * based on the AP vendor detected by check_assoc_AP23a().
 */
void update_IOT_info23a(struct rtw_adapter *padapter)
{
	struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
	struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
	switch (pmlmeinfo->assoc_AP_vendor) {
	case HT_IOT_PEER_MARVELL:
		/* Marvell: use CTS-to-self instead of RTS */
		pmlmeinfo->turboMode_cts2self = 1;
		pmlmeinfo->turboMode_rtsen = 0;
		break;
	case HT_IOT_PEER_RALINK:
		pmlmeinfo->turboMode_cts2self = 0;
		pmlmeinfo->turboMode_rtsen = 1;
		/* disable high power */
		rtl8723a_odm_support_ability_clr(padapter, (u32)
						 ~DYNAMIC_BB_DYNAMIC_TXPWR);
		break;
	case HT_IOT_PEER_REALTEK:
		/* rtw_write16(padapter, 0x4cc, 0xffff); */
		/* rtw_write16(padapter, 0x546, 0x01c0); */
		/* disable high power */
		rtl8723a_odm_support_ability_clr(padapter, (u32)
						 ~DYNAMIC_BB_DYNAMIC_TXPWR);
		break;
	default:
		/* everything else: plain RTS protection */
		pmlmeinfo->turboMode_cts2self = 0;
		pmlmeinfo->turboMode_rtsen = 1;
		break;
	}
}
/*
 * update_capinfo23a - apply the capability field (@updateCap) from the
 * AP: preamble mode and slot time, then program the slot time into hw.
 */
void update_capinfo23a(struct rtw_adapter *Adapter, u16 updateCap)
{
	struct mlme_ext_priv *pmlmeext = &Adapter->mlmeextpriv;
	struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
	if (updateCap & cShortPreamble) {
		/* Short Preamble */
		if (pmlmeinfo->preamble_mode != PREAMBLE_SHORT) {
			/* PREAMBLE_LONG or PREAMBLE_AUTO */
			pmlmeinfo->preamble_mode = PREAMBLE_SHORT;
			rtl8723a_ack_preamble(Adapter, true);
		}
	} else { /* Long Preamble */
		if (pmlmeinfo->preamble_mode != PREAMBLE_LONG) {
			/* PREAMBLE_SHORT or PREAMBLE_AUTO */
			pmlmeinfo->preamble_mode = PREAMBLE_LONG;
			rtl8723a_ack_preamble(Adapter, false);
		}
	}
	if (updateCap & cIBSS) {
		/* Filen: See 802.11-2007 p.91 */
		/* IBSS always uses the long slot time */
		pmlmeinfo->slotTime = NON_SHORT_SLOT_TIME;
	} else {
		/* Filen: See 802.11-2007 p.90 */
		if (pmlmeext->cur_wireless_mode &
		    (WIRELESS_11G | WIRELESS_11_24N)) {
			/* 2.4 GHz ERP: follow the AP's short-slot bit */
			if (updateCap & cShortSlotTime) { /* Short Slot Time */
				if (pmlmeinfo->slotTime != SHORT_SLOT_TIME)
					pmlmeinfo->slotTime = SHORT_SLOT_TIME;
			} else { /* Long Slot Time */
				if (pmlmeinfo->slotTime != NON_SHORT_SLOT_TIME)
					pmlmeinfo->slotTime =
						NON_SHORT_SLOT_TIME;
			}
		} else if (pmlmeext->cur_wireless_mode &
			   (WIRELESS_11A | WIRELESS_11_5N)) {
			/* 5 GHz always uses the short slot time */
			pmlmeinfo->slotTime = SHORT_SLOT_TIME;
		} else {
			/* B Mode */
			pmlmeinfo->slotTime = NON_SHORT_SLOT_TIME;
		}
	}
	rtl8723a_set_slot_time(Adapter, pmlmeinfo->slotTime);
}
/*
 * update_wireless_mode23a - derive the current wireless mode (A/B/G/N
 * combinations) from the network's band, rate set and HT state, masked
 * by the mode configured in the registry, then program SIFS and the
 * management TX rate accordingly.
 */
void update_wireless_mode23a(struct rtw_adapter *padapter)
{
	int ratelen, network_type = 0;
	struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
	struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
	struct wlan_bssid_ex *cur_network = &pmlmeinfo->network;
	unsigned char *rate = cur_network->SupportedRates;
	ratelen = rtw_get_rateset_len23a(cur_network->SupportedRates);
	if ((pmlmeinfo->HT_info_enable) && (pmlmeinfo->HT_caps_enable))
		pmlmeinfo->HT_enable = 1;
	if (pmlmeext->cur_channel > 14) {
		/* 5 GHz band: 11a, optionally with 11n */
		if (pmlmeinfo->HT_enable)
			network_type = WIRELESS_11_5N;
		network_type |= WIRELESS_11A;
	} else {
		/* 2.4 GHz band: classify by the CCK/OFDM rate mix */
		if (pmlmeinfo->HT_enable)
			network_type = WIRELESS_11_24N;
		if (cckratesonly_included23a(rate, ratelen) == true)
			network_type |= WIRELESS_11B;
		else if (cckrates_included23a(rate, ratelen) == true)
			network_type |= WIRELESS_11BG;
		else
			network_type |= WIRELESS_11G;
	}
	pmlmeext->cur_wireless_mode =
		network_type & padapter->registrypriv.wireless_mode;
	/* 0x0808 -> for CCK, 0x0a0a -> for OFDM */
	/* change this value if having IOT issues. */
	rtl8723a_set_resp_sifs(padapter, 0x08, 0x08, 0x0a, 0x0a);
	if (pmlmeext->cur_wireless_mode & WIRELESS_11B)
		update_mgnt_tx_rate23a(padapter, IEEE80211_CCK_RATE_1MB);
	else
		update_mgnt_tx_rate23a(padapter, IEEE80211_OFDM_RATE_6MB);
}
/*
 * update_bmc_sta_support_rate23a - set the rate set for the
 * broadcast/multicast station entry @mac_id: CCK basic rates when the
 * network is B-capable, OFDM basic rates otherwise.
 */
void update_bmc_sta_support_rate23a(struct rtw_adapter *padapter, u32 mac_id)
{
	struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
	struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;

	if (pmlmeext->cur_wireless_mode & WIRELESS_11B)
		/* Only B, B/G, and B/G/N AP could use CCK rate */
		memcpy(pmlmeinfo->FW_sta_info[mac_id].SupportedRates,
		       rtw_basic_rate_cck, 4);
	else
		memcpy(pmlmeinfo->FW_sta_info[mac_id].SupportedRates,
		       rtw_basic_rate_ofdm, 3);
}
/*
 * update_sta_support_rate23a - copy a peer's Supported Rates (and
 * Extended Supported Rates) IEs into its FW_sta_info slot @cam_idx.
 *
 * Returns _SUCCESS, or _FAIL when no Supported Rates IE is present or
 * its length is implausible.
 *
 * Fix: the IE lengths come straight off the air and were memcpy'd into
 * the fixed-size SupportedRates array unchecked -- a remote buffer
 * overflow. Both copies are now bounded by the array size
 * (NDIS_802_11_LENGTH_RATES_EX).
 */
int update_sta_support_rate23a(struct rtw_adapter *padapter, u8 *pvar_ie,
			       uint var_ie_len, int cam_idx)
{
	int supportRateNum = 0;
	struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
	struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
	const u8 *p;

	p = cfg80211_find_ie(WLAN_EID_SUPP_RATES, pvar_ie, var_ie_len);
	if (!p)
		return _FAIL;

	/* reject an oversized Supported Rates IE instead of overflowing
	 * the fixed-size rate array */
	if (p[1] > NDIS_802_11_LENGTH_RATES_EX)
		return _FAIL;

	memcpy(pmlmeinfo->FW_sta_info[cam_idx].SupportedRates, p + 2, p[1]);
	supportRateNum = p[1];

	p = cfg80211_find_ie(WLAN_EID_EXT_SUPP_RATES, pvar_ie, var_ie_len);
	/* append extended rates only when they fit in the remaining space */
	if (p && p[1] + supportRateNum <= NDIS_802_11_LENGTH_RATES_EX)
		memcpy(pmlmeinfo->FW_sta_info[cam_idx].SupportedRates +
		       supportRateNum, p + 2, p[1]);

	return _SUCCESS;
}
/*
 * process_addba_req23a - handle a received ADDBA request frame body.
 *
 * Resets the RX reorder state for the requested TID on the sending
 * station and enables the reorder queue iff we are configured to
 * accept ADDBA requests (bAcceptAddbaReq).
 */
void process_addba_req23a(struct rtw_adapter *padapter,
			  u8 *paddba_req, u8 *addr)
{
	struct sta_info *psta;
	u16 tid, start_seq, param;
	struct recv_reorder_ctrl *preorder_ctrl;
	struct sta_priv *pstapriv = &padapter->stapriv;
	struct ADDBA_request *preq = (struct ADDBA_request *)paddba_req;
	struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
	struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
	psta = rtw_get_stainfo23a(pstapriv, addr);
	if (psta) {
		start_seq = le16_to_cpu(preq->BA_starting_seqctrl) >> 4;
		param = le16_to_cpu(preq->BA_para_set);
		/* TID lives in bits 2..5 of the BA parameter set */
		tid = (param >> 2) & 0x0f;
		preorder_ctrl = &psta->recvreorder_ctrl[tid];
		/* 0xffff == "no frame indicated yet" sentinel */
		preorder_ctrl->indicate_seq = 0xffff;
		/* NOTE(review): "(x == true) ? true : false" is redundant
		 * if bAcceptAddbaReq is a plain bool -- confirm its type
		 * before simplifying */
		preorder_ctrl->enable = (pmlmeinfo->bAcceptAddbaReq == true) ?
			true : false;
	}
}
| gpl-2.0 |
qiubing/linux-3.2.4 | arch/arm/mach-mxs/devices.c | 648 | 2600 | /*
* Copyright 2008 Sascha Hauer, kernel@pengutronix.de
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor,
* Boston, MA 02110-1301, USA.
*/
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/amba/bus.h>
/*
 * mxs_add_platform_device_dmamask - allocate and register a platform
 * device with optional resources, platform data and DMA mask.
 *
 * Returns the registered platform_device, or an ERR_PTR() on failure.
 *
 * Note the unusual control flow: the "err:" label sits inside the final
 * if-block so that all earlier failure paths jump forward into the
 * shared cleanup (free dma_mask, drop the device reference).
 */
struct platform_device *__init mxs_add_platform_device_dmamask(
		const char *name, int id,
		const struct resource *res, unsigned int num_resources,
		const void *data, size_t size_data, u64 dmamask)
{
	int ret = -ENOMEM;
	struct platform_device *pdev;
	pdev = platform_device_alloc(name, id);
	if (!pdev)
		goto err;
	if (dmamask) {
		/*
		 * This memory isn't freed when the device is put,
		 * I don't have a nice idea for that though. Conceptually
		 * dma_mask in struct device should not be a pointer.
		 * See http://thread.gmane.org/gmane.linux.kernel.pci/9081
		 */
		pdev->dev.dma_mask =
			kmalloc(sizeof(*pdev->dev.dma_mask), GFP_KERNEL);
		if (!pdev->dev.dma_mask)
			/* ret is still -ENOMEM; */
			goto err;
		*pdev->dev.dma_mask = dmamask;
		pdev->dev.coherent_dma_mask = dmamask;
	}
	if (res) {
		ret = platform_device_add_resources(pdev, res, num_resources);
		if (ret)
			goto err;
	}
	if (data) {
		ret = platform_device_add_data(pdev, data, size_data);
		if (ret)
			goto err;
	}
	ret = platform_device_add(pdev);
	if (ret) {
err:
		if (dmamask)
			kfree(pdev->dev.dma_mask);
		platform_device_put(pdev);
		return ERR_PTR(ret);
	}
	return pdev;
}
/*
 * mxs_add_amba_device - register a copy of the AMBA device template
 * @dev on the system's iomem resource tree.
 *
 * Returns 0 on success or a negative errno (the copy is heap-allocated
 * because amba_device_register keeps the structure alive).
 *
 * Fix: the allocation-failure message was missing its trailing newline.
 */
int __init mxs_add_amba_device(const struct amba_device *dev)
{
	struct amba_device *adev = kmalloc(sizeof(*adev), GFP_KERNEL);

	if (!adev) {
		pr_err("%s: failed to allocate memory\n", __func__);
		return -ENOMEM;
	}

	*adev = *dev;

	return amba_device_register(adev, &iomem_resource);
}
/* Pseudo parent device for APBH peripherals, hung off the platform bus;
 * registered early by mxs_device_init(). */
struct device mxs_apbh_bus = {
	.init_name = "mxs_apbh",
	.parent = &platform_bus,
};
/* Register the mxs_apbh pseudo-bus device so peripherals can be
 * parented to it. */
static int __init mxs_device_init(void)
{
	return device_register(&mxs_apbh_bus);
}
/* core_initcall: runs before ordinary device/driver initcalls */
core_initcall(mxs_device_init);
| gpl-2.0 |
mwalt2/shooter-2.6.35_mr-kernel | net/netfilter/nf_conntrack_h323_main.c | 648 | 53129 | /*
* H.323 connection tracking helper
*
* Copyright (c) 2006 Jing Min Zhao <zhaojingmin@users.sourceforge.net>
*
* This source code is licensed under General Public License version 2.
*
* Based on the 'brute force' H.323 connection tracking module by
* Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
*
* For more information, please see http://nath323.sourceforge.net/
*/
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/ctype.h>
#include <linux/inet.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/slab.h>
#include <linux/udp.h>
#include <linux/tcp.h>
#include <linux/skbuff.h>
#include <net/route.h>
#include <net/ip6_route.h>
#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_core.h>
#include <net/netfilter/nf_conntrack_tuple.h>
#include <net/netfilter/nf_conntrack_expect.h>
#include <net/netfilter/nf_conntrack_ecache.h>
#include <net/netfilter/nf_conntrack_helper.h>
#include <net/netfilter/nf_conntrack_zones.h>
#include <linux/netfilter/nf_conntrack_h323.h>
/* Parameters */
static unsigned int default_rrq_ttl __read_mostly = 300;
module_param(default_rrq_ttl, uint, 0600);
MODULE_PARM_DESC(default_rrq_ttl, "use this TTL if it's missing in RRQ");
static int gkrouted_only __read_mostly = 1;
module_param(gkrouted_only, int, 0600);
MODULE_PARM_DESC(gkrouted_only, "only accept calls from gatekeeper");
static int callforward_filter __read_mostly = 1;
module_param(callforward_filter, bool, 0600);
MODULE_PARM_DESC(callforward_filter, "only create call forwarding expectations "
"if both endpoints are on different sides "
"(determined by routing information)");
/* Hooks for NAT */
/*
 * The function pointers below are populated by the H.323 NAT helper
 * module (when loaded) so this conntrack helper can ask NAT to rewrite
 * addresses/ports embedded in the signalling payload.  They are NULL
 * when no NAT module is present; all callers in this file read them via
 * rcu_dereference() and additionally check IPS_NAT_MASK.
 */

/* Rewrite an H.245 transport address in place. */
int (*set_h245_addr_hook) (struct sk_buff *skb,
			   unsigned char **data, int dataoff,
			   H245_TransportAddress *taddr,
			   union nf_inet_addr *addr, __be16 port)
			   __read_mostly;
/* Rewrite an H.225 transport address in place. */
int (*set_h225_addr_hook) (struct sk_buff *skb,
			   unsigned char **data, int dataoff,
			   TransportAddress *taddr,
			   union nf_inet_addr *addr, __be16 port)
			   __read_mostly;
/* Rewrite an array of H.225 signalling addresses. */
int (*set_sig_addr_hook) (struct sk_buff *skb,
			  struct nf_conn *ct,
			  enum ip_conntrack_info ctinfo,
			  unsigned char **data,
			  TransportAddress *taddr, int count) __read_mostly;
/* Rewrite an array of RAS addresses. */
int (*set_ras_addr_hook) (struct sk_buff *skb,
			  struct nf_conn *ct,
			  enum ip_conntrack_info ctinfo,
			  unsigned char **data,
			  TransportAddress *taddr, int count) __read_mostly;
/* NAT the paired RTP/RTCP expectations from an H.245 media channel. */
int (*nat_rtp_rtcp_hook) (struct sk_buff *skb,
			  struct nf_conn *ct,
			  enum ip_conntrack_info ctinfo,
			  unsigned char **data, int dataoff,
			  H245_TransportAddress *taddr,
			  __be16 port, __be16 rtp_port,
			  struct nf_conntrack_expect *rtp_exp,
			  struct nf_conntrack_expect *rtcp_exp) __read_mostly;
/* NAT a T.120 data channel expectation. */
int (*nat_t120_hook) (struct sk_buff *skb,
		      struct nf_conn *ct,
		      enum ip_conntrack_info ctinfo,
		      unsigned char **data, int dataoff,
		      H245_TransportAddress *taddr, __be16 port,
		      struct nf_conntrack_expect *exp) __read_mostly;
/* NAT an H.245 channel expectation set up from Q.931 signalling. */
int (*nat_h245_hook) (struct sk_buff *skb,
		      struct nf_conn *ct,
		      enum ip_conntrack_info ctinfo,
		      unsigned char **data, int dataoff,
		      TransportAddress *taddr, __be16 port,
		      struct nf_conntrack_expect *exp) __read_mostly;
/* NAT a call-forwarding (second call leg) expectation. */
int (*nat_callforwarding_hook) (struct sk_buff *skb,
				struct nf_conn *ct,
				enum ip_conntrack_info ctinfo,
				unsigned char **data, int dataoff,
				TransportAddress *taddr, __be16 port,
				struct nf_conntrack_expect *exp) __read_mostly;
/* NAT a Q.931 call signalling expectation set up from RAS. */
int (*nat_q931_hook) (struct sk_buff *skb,
		      struct nf_conn *ct,
		      enum ip_conntrack_info ctinfo,
		      unsigned char **data, TransportAddress *taddr, int idx,
		      __be16 port, struct nf_conntrack_expect *exp)
		      __read_mostly;
/* Serializes all packet processing in this helper: protects h323_buffer
 * and the static decode targets in the *_help() functions. */
static DEFINE_SPINLOCK(nf_h323_lock);
/* Scratch buffer used to linearize TCP/UDP payload via skb_header_pointer. */
static char *h323_buffer;

static struct nf_conntrack_helper nf_conntrack_helper_h245;
static struct nf_conntrack_helper nf_conntrack_helper_q931[];
static struct nf_conntrack_helper nf_conntrack_helper_ras[];
/****************************************************************************/
/*
 * Extract the next TPKT-encapsulated message from a TCP segment.
 *
 * On success returns 1 and sets *data/*datalen/*dataoff to the payload
 * inside the TPKT (i.e. past the 4-byte TPKT header).  Called in a loop:
 * *data == NULL requests the first TPKT of the segment, otherwise the
 * next one after the previous *dataoff/*datalen.  Returns 0 when there
 * is nothing (more) to process.
 *
 * Per-direction state (info->tpkt_len) handles the Netmeeting quirk of
 * sending the TPKT header and its data in separate segments.
 */
static int get_tpkt_data(struct sk_buff *skb, unsigned int protoff,
			 struct nf_conn *ct, enum ip_conntrack_info ctinfo,
			 unsigned char **data, int *datalen, int *dataoff)
{
	struct nf_ct_h323_master *info = &nfct_help(ct)->help.ct_h323_info;
	int dir = CTINFO2DIR(ctinfo);
	const struct tcphdr *th;
	struct tcphdr _tcph;
	int tcpdatalen;
	int tcpdataoff;
	unsigned char *tpkt;
	int tpktlen;
	int tpktoff;

	/* Get TCP header */
	th = skb_header_pointer(skb, protoff, sizeof(_tcph), &_tcph);
	if (th == NULL)
		return 0;

	/* Get TCP data offset */
	tcpdataoff = protoff + th->doff * 4;

	/* Get TCP data length */
	tcpdatalen = skb->len - tcpdataoff;
	if (tcpdatalen <= 0)	/* No TCP data */
		goto clear_out;

	if (*data == NULL) {	/* first TPKT */
		/* Get first TPKT pointer */
		tpkt = skb_header_pointer(skb, tcpdataoff, tcpdatalen,
					  h323_buffer);
		BUG_ON(tpkt == NULL);

		/* Validate TPKT identifier (version 3, reserved 0) */
		if (tcpdatalen < 4 || tpkt[0] != 0x03 || tpkt[1] != 0) {
			/* Netmeeting sends TPKT header and data separately */
			if (info->tpkt_len[dir] > 0) {
				pr_debug("nf_ct_h323: previous packet "
					 "indicated separate TPKT data of %hu "
					 "bytes\n", info->tpkt_len[dir]);
				if (info->tpkt_len[dir] <= tcpdatalen) {
					/* Yes, there was a TPKT header
					 * received */
					*data = tpkt;
					*datalen = info->tpkt_len[dir];
					*dataoff = 0;
					goto out;
				}

				/* Fragmented TPKT */
				pr_debug("nf_ct_h323: fragmented TPKT\n");
				goto clear_out;
			}

			/* It is not even a TPKT */
			return 0;
		}
		tpktoff = 0;
	} else {		/* Next TPKT */
		/* Advance past the previously returned payload. */
		tpktoff = *dataoff + *datalen;
		tcpdatalen -= tpktoff;
		if (tcpdatalen <= 4)	/* No more TPKT */
			goto clear_out;
		tpkt = *data + *datalen;

		/* Validate TPKT identifier */
		if (tpkt[0] != 0x03 || tpkt[1] != 0)
			goto clear_out;
	}

	/* Validate TPKT length (big-endian 16-bit, includes the header) */
	tpktlen = tpkt[2] * 256 + tpkt[3];
	if (tpktlen < 4)
		goto clear_out;
	if (tpktlen > tcpdatalen) {
		if (tcpdatalen == 4) {	/* Separate TPKT header */
			/* Netmeeting sends TPKT header and data separately */
			pr_debug("nf_ct_h323: separate TPKT header indicates "
				 "there will be TPKT data of %hu bytes\n",
				 tpktlen - 4);
			/* Remember expected data length for the next segment. */
			info->tpkt_len[dir] = tpktlen - 4;
			return 0;
		}

		pr_debug("nf_ct_h323: incomplete TPKT (fragmented?)\n");
		goto clear_out;
	}

	/* This is the encapsulated data */
	*data = tpkt + 4;
	*datalen = tpktlen - 4;
	*dataoff = tpktoff + 4;

      out:
	/* Clear TPKT length */
	info->tpkt_len[dir] = 0;
	return 1;

      clear_out:
	info->tpkt_len[dir] = 0;
	return 0;
}
/****************************************************************************/
/*
 * Decode an H.245 unicast transport address from the signalling payload.
 *
 * Copies the IPv4/IPv6 address (zero-padding the remainder of *addr)
 * and the port that follows it.  Returns 1 on success, 0 when the
 * address is absent, of an unsupported kind, or does not match the
 * connection's L3 protocol family.
 */
static int get_h245_addr(struct nf_conn *ct, const unsigned char *data,
			 H245_TransportAddress *taddr,
			 union nf_inet_addr *addr, __be16 *port)
{
	const unsigned char *field;
	int addrlen;

	if (taddr->choice != eH245_TransportAddress_unicastAddress)
		return 0;

	if (taddr->unicastAddress.choice == eUnicastAddress_iPAddress) {
		if (nf_ct_l3num(ct) != AF_INET)
			return 0;
		field = data + taddr->unicastAddress.iPAddress.network;
		addrlen = 4;
	} else if (taddr->unicastAddress.choice == eUnicastAddress_iP6Address) {
		if (nf_ct_l3num(ct) != AF_INET6)
			return 0;
		field = data + taddr->unicastAddress.iP6Address.network;
		addrlen = 16;
	} else {
		return 0;
	}

	memcpy(addr, field, addrlen);
	memset((void *)addr + addrlen, 0, sizeof(*addr) - addrlen);
	memcpy(port, field + addrlen, sizeof(__be16));

	return 1;
}
/****************************************************************************/
/*
 * Create paired expectations for the RTP/RTCP flows advertised in an
 * H.245 media channel address: RTP on the even port, RTCP on the next
 * odd port.  When NAT is in play, defers expectation setup to the
 * nat_rtp_rtcp hook.  Returns 0 if there is nothing to do, -1 on error.
 */
static int expect_rtp_rtcp(struct sk_buff *skb, struct nf_conn *ct,
			   enum ip_conntrack_info ctinfo,
			   unsigned char **data, int dataoff,
			   H245_TransportAddress *taddr)
{
	int dir = CTINFO2DIR(ctinfo);
	int ret = 0;
	__be16 port;
	__be16 rtp_port, rtcp_port;
	union nf_inet_addr addr;
	struct nf_conntrack_expect *rtp_exp;
	struct nf_conntrack_expect *rtcp_exp;
	typeof(nat_rtp_rtcp_hook) nat_rtp_rtcp;

	/* Read RTP or RTCP address; ignore it unless it belongs to the
	 * sender of this packet. */
	if (!get_h245_addr(ct, *data, taddr, &addr, &port) ||
	    memcmp(&addr, &ct->tuplehash[dir].tuple.src.u3, sizeof(addr)) ||
	    port == 0)
		return 0;

	/* RTP port is even (clear the low bit); RTCP is RTP + 1. */
	port &= htons(~1);
	rtp_port = port;
	rtcp_port = htons(ntohs(port) + 1);

	/* Create expect for RTP */
	if ((rtp_exp = nf_ct_expect_alloc(ct)) == NULL)
		return -1;
	nf_ct_expect_init(rtp_exp, NF_CT_EXPECT_CLASS_DEFAULT, nf_ct_l3num(ct),
			  &ct->tuplehash[!dir].tuple.src.u3,
			  &ct->tuplehash[!dir].tuple.dst.u3,
			  IPPROTO_UDP, NULL, &rtp_port);

	/* Create expect for RTCP */
	if ((rtcp_exp = nf_ct_expect_alloc(ct)) == NULL) {
		nf_ct_expect_put(rtp_exp);
		return -1;
	}
	nf_ct_expect_init(rtcp_exp, NF_CT_EXPECT_CLASS_DEFAULT, nf_ct_l3num(ct),
			  &ct->tuplehash[!dir].tuple.src.u3,
			  &ct->tuplehash[!dir].tuple.dst.u3,
			  IPPROTO_UDP, NULL, &rtcp_port);

	/* NAT path only when the connection's addresses differ (i.e. it is
	 * actually translated) and the NAT module registered a hook. */
	if (memcmp(&ct->tuplehash[dir].tuple.src.u3,
		   &ct->tuplehash[!dir].tuple.dst.u3,
		   sizeof(ct->tuplehash[dir].tuple.src.u3)) &&
	    (nat_rtp_rtcp = rcu_dereference(nat_rtp_rtcp_hook)) &&
	    ct->status & IPS_NAT_MASK) {
		/* NAT needed */
		ret = nat_rtp_rtcp(skb, ct, ctinfo, data, dataoff,
				   taddr, port, rtp_port, rtp_exp, rtcp_exp);
	} else {		/* Conntrack only */
		if (nf_ct_expect_related(rtp_exp) == 0) {
			if (nf_ct_expect_related(rtcp_exp) == 0) {
				pr_debug("nf_ct_h323: expect RTP ");
				nf_ct_dump_tuple(&rtp_exp->tuple);
				pr_debug("nf_ct_h323: expect RTCP ");
				nf_ct_dump_tuple(&rtcp_exp->tuple);
			} else {
				/* Keep none rather than RTP without RTCP. */
				nf_ct_unexpect_related(rtp_exp);
				ret = -1;
			}
		} else
			ret = -1;
	}

	nf_ct_expect_put(rtp_exp);
	nf_ct_expect_put(rtcp_exp);

	return ret;
}
/****************************************************************************/
/*
 * Create a permanent expectation for T.120 (data conferencing) channels
 * advertised in the H.245 payload.  NF_CT_EXPECT_PERMANENT allows the
 * peer to open multiple T.120 connections against one expectation.
 * Returns 0 if there is nothing to do, -1 on error.
 */
static int expect_t120(struct sk_buff *skb,
		       struct nf_conn *ct,
		       enum ip_conntrack_info ctinfo,
		       unsigned char **data, int dataoff,
		       H245_TransportAddress *taddr)
{
	int dir = CTINFO2DIR(ctinfo);
	int ret = 0;
	__be16 port;
	union nf_inet_addr addr;
	struct nf_conntrack_expect *exp;
	typeof(nat_t120_hook) nat_t120;

	/* Read T.120 address; only act when it belongs to the sender. */
	if (!get_h245_addr(ct, *data, taddr, &addr, &port) ||
	    memcmp(&addr, &ct->tuplehash[dir].tuple.src.u3, sizeof(addr)) ||
	    port == 0)
		return 0;

	/* Create expect for T.120 connections */
	if ((exp = nf_ct_expect_alloc(ct)) == NULL)
		return -1;
	nf_ct_expect_init(exp, NF_CT_EXPECT_CLASS_DEFAULT, nf_ct_l3num(ct),
			  &ct->tuplehash[!dir].tuple.src.u3,
			  &ct->tuplehash[!dir].tuple.dst.u3,
			  IPPROTO_TCP, NULL, &port);
	exp->flags = NF_CT_EXPECT_PERMANENT;	/* Accept multiple channels */

	/* Hand over to NAT when the connection is translated. */
	if (memcmp(&ct->tuplehash[dir].tuple.src.u3,
		   &ct->tuplehash[!dir].tuple.dst.u3,
		   sizeof(ct->tuplehash[dir].tuple.src.u3)) &&
	    (nat_t120 = rcu_dereference(nat_t120_hook)) &&
	    ct->status & IPS_NAT_MASK) {
		/* NAT needed */
		ret = nat_t120(skb, ct, ctinfo, data, dataoff, taddr,
			       port, exp);
	} else {		/* Conntrack only */
		if (nf_ct_expect_related(exp) == 0) {
			pr_debug("nf_ct_h323: expect T.120 ");
			nf_ct_dump_tuple(&exp->tuple);
		} else
			ret = -1;
	}

	nf_ct_expect_put(exp);

	return ret;
}
/****************************************************************************/
/*
 * Handle one H.225.0 logical channel parameter block: open expectations
 * for its RTP media channel and/or RTCP media control channel, when the
 * respective optional fields are present.  Returns 0 on success, -1 if
 * either expectation could not be set up.
 */
static int process_h245_channel(struct sk_buff *skb,
				struct nf_conn *ct,
				enum ip_conntrack_info ctinfo,
				unsigned char **data, int dataoff,
				H2250LogicalChannelParameters *channel)
{
	/* RTP media channel, if advertised. */
	if (channel->options & eH2250LogicalChannelParameters_mediaChannel) {
		if (expect_rtp_rtcp(skb, ct, ctinfo, data, dataoff,
				    &channel->mediaChannel) < 0)
			return -1;
	}

	/* RTCP media control channel, if advertised. */
	if (channel->options &
	    eH2250LogicalChannelParameters_mediaControlChannel) {
		if (expect_rtp_rtcp(skb, ct, ctinfo, data, dataoff,
				    &channel->mediaControlChannel) < 0)
			return -1;
	}

	return 0;
}
/****************************************************************************/
/*
 * Handle an H.245 OpenLogicalChannel message: set up expectations for
 * the forward and (optional) reverse H.225.0 channel parameters, and
 * for a T.120 channel carried in a separateStack element.
 * Returns 0 on success, -1 on failure.
 */
static int process_olc(struct sk_buff *skb, struct nf_conn *ct,
		       enum ip_conntrack_info ctinfo,
		       unsigned char **data, int dataoff,
		       OpenLogicalChannel *olc)
{
	int ret;

	pr_debug("nf_ct_h323: OpenLogicalChannel\n");

	/* Forward channel carrying H.225.0 logical channel parameters. */
	if (olc->forwardLogicalChannelParameters.multiplexParameters.choice ==
	    eOpenLogicalChannel_forwardLogicalChannelParameters_multiplexParameters_h2250LogicalChannelParameters)
	{
		ret = process_h245_channel(skb, ct, ctinfo, data, dataoff,
					   &olc->
					   forwardLogicalChannelParameters.
					   multiplexParameters.
					   h2250LogicalChannelParameters);
		if (ret < 0)
			return -1;
	}

	/* Optional reverse channel with H.225.0 parameters. */
	if ((olc->options &
	     eOpenLogicalChannel_reverseLogicalChannelParameters) &&
	    (olc->reverseLogicalChannelParameters.options &
	     eOpenLogicalChannel_reverseLogicalChannelParameters_multiplexParameters)
	    && (olc->reverseLogicalChannelParameters.multiplexParameters.
		choice ==
		eOpenLogicalChannel_reverseLogicalChannelParameters_multiplexParameters_h2250LogicalChannelParameters))
	{
		ret =
		    process_h245_channel(skb, ct, ctinfo, data, dataoff,
					 &olc->
					 reverseLogicalChannelParameters.
					 multiplexParameters.
					 h2250LogicalChannelParameters);
		if (ret < 0)
			return -1;
	}

	/* T.120 over a separate LAN stack announced in separateStack. */
	if ((olc->options & eOpenLogicalChannel_separateStack) &&
	    olc->forwardLogicalChannelParameters.dataType.choice ==
	    eDataType_data &&
	    olc->forwardLogicalChannelParameters.dataType.data.application.
	    choice == eDataApplicationCapability_application_t120 &&
	    olc->forwardLogicalChannelParameters.dataType.data.application.
	    t120.choice == eDataProtocolCapability_separateLANStack &&
	    olc->separateStack.networkAddress.choice ==
	    eNetworkAccessParameters_networkAddress_localAreaAddress) {
		ret = expect_t120(skb, ct, ctinfo, data, dataoff,
				  &olc->separateStack.networkAddress.
				  localAreaAddress);
		if (ret < 0)
			return -1;
	}

	return 0;
}
/****************************************************************************/
/*
 * Handle an H.245 OpenLogicalChannelAck message: set up expectations
 * for the acknowledged reverse channel, the forward multiplex ack
 * parameters (RTP/RTCP), and any T.120 separateStack address.
 * Returns 0 on success, -1 on failure.
 */
static int process_olca(struct sk_buff *skb, struct nf_conn *ct,
			enum ip_conntrack_info ctinfo,
			unsigned char **data, int dataoff,
			OpenLogicalChannelAck *olca)
{
	H2250LogicalChannelAckParameters *ack;
	int ret;

	pr_debug("nf_ct_h323: OpenLogicalChannelAck\n");

	/* Reverse channel parameters, if acknowledged with H.225.0 data. */
	if ((olca->options &
	     eOpenLogicalChannelAck_reverseLogicalChannelParameters) &&
	    (olca->reverseLogicalChannelParameters.options &
	     eOpenLogicalChannelAck_reverseLogicalChannelParameters_multiplexParameters)
	    && (olca->reverseLogicalChannelParameters.multiplexParameters.
		choice ==
		eOpenLogicalChannelAck_reverseLogicalChannelParameters_multiplexParameters_h2250LogicalChannelParameters))
	{
		ret = process_h245_channel(skb, ct, ctinfo, data, dataoff,
					   &olca->
					   reverseLogicalChannelParameters.
					   multiplexParameters.
					   h2250LogicalChannelParameters);
		if (ret < 0)
			return -1;
	}

	/* Forward multiplex ack: may carry RTP and/or RTCP addresses. */
	if ((olca->options &
	     eOpenLogicalChannelAck_forwardMultiplexAckParameters) &&
	    (olca->forwardMultiplexAckParameters.choice ==
	     eOpenLogicalChannelAck_forwardMultiplexAckParameters_h2250LogicalChannelAckParameters))
	{
		ack = &olca->forwardMultiplexAckParameters.
		    h2250LogicalChannelAckParameters;
		if (ack->options &
		    eH2250LogicalChannelAckParameters_mediaChannel) {
			/* RTP */
			ret = expect_rtp_rtcp(skb, ct, ctinfo, data, dataoff,
					      &ack->mediaChannel);
			if (ret < 0)
				return -1;
		}

		if (ack->options &
		    eH2250LogicalChannelAckParameters_mediaControlChannel) {
			/* RTCP */
			ret = expect_rtp_rtcp(skb, ct, ctinfo, data, dataoff,
					      &ack->mediaControlChannel);
			if (ret < 0)
				return -1;
		}
	}

	/* T.120 separate stack address in the ack. */
	if ((olca->options & eOpenLogicalChannelAck_separateStack) &&
	    olca->separateStack.networkAddress.choice ==
	    eNetworkAccessParameters_networkAddress_localAreaAddress) {
		ret = expect_t120(skb, ct, ctinfo, data, dataoff,
				  &olca->separateStack.networkAddress.
				  localAreaAddress);
		if (ret < 0)
			return -1;
	}

	return 0;
}
/****************************************************************************/
/*
 * Dispatch one decoded H.245 MultimediaSystemControlMessage.  Only
 * OpenLogicalChannel requests and OpenLogicalChannelAck responses carry
 * media addresses of interest; all other signals are merely logged.
 * Returns 0 on success or when ignored, -1 on processing failure.
 */
static int process_h245(struct sk_buff *skb, struct nf_conn *ct,
			enum ip_conntrack_info ctinfo,
			unsigned char **data, int dataoff,
			MultimediaSystemControlMessage *mscm)
{
	if (mscm->choice == eMultimediaSystemControlMessage_request) {
		if (mscm->request.choice ==
		    eRequestMessage_openLogicalChannel)
			return process_olc(skb, ct, ctinfo, data, dataoff,
					   &mscm->request.openLogicalChannel);
		pr_debug("nf_ct_h323: H.245 Request %d\n",
			 mscm->request.choice);
	} else if (mscm->choice == eMultimediaSystemControlMessage_response) {
		if (mscm->response.choice ==
		    eResponseMessage_openLogicalChannelAck)
			return process_olca(skb, ct, ctinfo, data, dataoff,
					    &mscm->response.
					    openLogicalChannelAck);
		pr_debug("nf_ct_h323: H.245 Response %d\n",
			 mscm->response.choice);
	} else {
		pr_debug("nf_ct_h323: H.245 signal %d\n", mscm->choice);
	}

	return 0;
}
/****************************************************************************/
/*
 * Conntrack helper entry point for H.245 connections: walks every TPKT
 * in the segment, decodes it, and processes the contained message.
 * The static decode buffer `mscm` is safe because nf_h323_lock
 * serializes all callers.  Decode errors are tolerated (NF_ACCEPT);
 * only expectation-setup failures drop the packet.
 */
static int h245_help(struct sk_buff *skb, unsigned int protoff,
		     struct nf_conn *ct, enum ip_conntrack_info ctinfo)
{
	static MultimediaSystemControlMessage mscm;
	unsigned char *data = NULL;
	int datalen;
	int dataoff;
	int ret;

	/* Until there's been traffic both ways, don't look in packets. */
	if (ctinfo != IP_CT_ESTABLISHED &&
	    ctinfo != IP_CT_ESTABLISHED + IP_CT_IS_REPLY) {
		return NF_ACCEPT;
	}
	pr_debug("nf_ct_h245: skblen = %u\n", skb->len);

	spin_lock_bh(&nf_h323_lock);

	/* Process each TPKT */
	while (get_tpkt_data(skb, protoff, ct, ctinfo,
			     &data, &datalen, &dataoff)) {
		pr_debug("nf_ct_h245: TPKT len=%d ", datalen);
		nf_ct_dump_tuple(&ct->tuplehash[CTINFO2DIR(ctinfo)].tuple);

		/* Decode H.245 signal */
		ret = DecodeMultimediaSystemControlMessage(data, datalen,
							   &mscm);
		if (ret < 0) {
			pr_debug("nf_ct_h245: decoding error: %s\n",
				 ret == H323_ERROR_BOUND ?
				 "out of bound" : "out of range");
			/* We don't drop when decoding error */
			break;
		}

		/* Process H.245 signal */
		if (process_h245(skb, ct, ctinfo, &data, dataoff, &mscm) < 0)
			goto drop;
	}

	spin_unlock_bh(&nf_h323_lock);
	return NF_ACCEPT;

      drop:
	spin_unlock_bh(&nf_h323_lock);
	if (net_ratelimit())
		pr_info("nf_ct_h245: packet dropped\n");
	return NF_DROP;
}
/****************************************************************************/
/* Expectation budget per H.245 connection: 2 (RTP+RTCP) per direction
 * per channel, plus 2 for T.120. */
static const struct nf_conntrack_expect_policy h245_exp_policy = {
	.max_expected		= H323_RTP_CHANNEL_MAX * 4 + 2 /* T.120 */,
	.timeout		= 240,
};

/* Helper attached to connections matching H.245 expectations created by
 * expect_h245().  NOTE(review): dst.protonum is IPPROTO_UDP although
 * H.245 signalling runs over TCP — presumably intentional so the tuple
 * never matches by port lookup (the helper is only assigned via
 * exp->helper); confirm against upstream before changing. */
static struct nf_conntrack_helper nf_conntrack_helper_h245 __read_mostly = {
	.name			= "H.245",
	.me			= THIS_MODULE,
	.tuple.src.l3num	= AF_UNSPEC,
	.tuple.dst.protonum	= IPPROTO_UDP,
	.help			= h245_help,
	.expect_policy		= &h245_exp_policy,
};
/****************************************************************************/
/*
 * Decode an H.225 transport address from the signalling payload.
 *
 * Copies the IPv4/IPv6 address (zero-padding the remainder of *addr)
 * and the trailing port.  Returns 1 on success, 0 when the address kind
 * is unsupported or does not match the connection's L3 family.
 * Non-static: also used by the H.323 NAT module.
 */
int get_h225_addr(struct nf_conn *ct, unsigned char *data,
		  TransportAddress *taddr,
		  union nf_inet_addr *addr, __be16 *port)
{
	const unsigned char *field;
	int addrlen;

	if (taddr->choice == eTransportAddress_ipAddress) {
		if (nf_ct_l3num(ct) != AF_INET)
			return 0;
		field = data + taddr->ipAddress.ip;
		addrlen = 4;
	} else if (taddr->choice == eTransportAddress_ip6Address) {
		if (nf_ct_l3num(ct) != AF_INET6)
			return 0;
		field = data + taddr->ip6Address.ip;
		addrlen = 16;
	} else {
		return 0;
	}

	memcpy(addr, field, addrlen);
	memset((void *)addr + addrlen, 0, sizeof(*addr) - addrlen);
	memcpy(port, field + addrlen, sizeof(__be16));

	return 1;
}
/****************************************************************************/
/*
 * Create an expectation for the H.245 control channel announced in
 * Q.931 signalling, attaching the H.245 helper to the expected
 * connection.  Returns 0 if there is nothing to do, -1 on error.
 */
static int expect_h245(struct sk_buff *skb, struct nf_conn *ct,
		       enum ip_conntrack_info ctinfo,
		       unsigned char **data, int dataoff,
		       TransportAddress *taddr)
{
	int dir = CTINFO2DIR(ctinfo);
	int ret = 0;
	__be16 port;
	union nf_inet_addr addr;
	struct nf_conntrack_expect *exp;
	typeof(nat_h245_hook) nat_h245;

	/* Read h245Address; only act when it belongs to the sender. */
	if (!get_h225_addr(ct, *data, taddr, &addr, &port) ||
	    memcmp(&addr, &ct->tuplehash[dir].tuple.src.u3, sizeof(addr)) ||
	    port == 0)
		return 0;

	/* Create expect for h245 connection */
	if ((exp = nf_ct_expect_alloc(ct)) == NULL)
		return -1;
	nf_ct_expect_init(exp, NF_CT_EXPECT_CLASS_DEFAULT, nf_ct_l3num(ct),
			  &ct->tuplehash[!dir].tuple.src.u3,
			  &ct->tuplehash[!dir].tuple.dst.u3,
			  IPPROTO_TCP, NULL, &port);
	exp->helper = &nf_conntrack_helper_h245;

	/* Hand over to NAT when the connection is translated. */
	if (memcmp(&ct->tuplehash[dir].tuple.src.u3,
		   &ct->tuplehash[!dir].tuple.dst.u3,
		   sizeof(ct->tuplehash[dir].tuple.src.u3)) &&
	    (nat_h245 = rcu_dereference(nat_h245_hook)) &&
	    ct->status & IPS_NAT_MASK) {
		/* NAT needed */
		ret = nat_h245(skb, ct, ctinfo, data, dataoff, taddr,
			       port, exp);
	} else {		/* Conntrack only */
		if (nf_ct_expect_related(exp) == 0) {
			pr_debug("nf_ct_q931: expect H.245 ");
			nf_ct_dump_tuple(&exp->tuple);
		} else
			ret = -1;
	}

	nf_ct_expect_put(exp);

	return ret;
}
/* If the calling party is on the same side of the forward-to party,
 * we don't need to track the second call */
/*
 * Returns 1 when routes to @src and @dst use the same gateway and
 * output device — i.e. both endpoints are reached the same way, so the
 * forwarded leg would not traverse this box.  Returns 0 otherwise, or
 * when no route/afinfo is available.
 */
static int callforward_do_filter(const union nf_inet_addr *src,
				 const union nf_inet_addr *dst,
				 u_int8_t family)
{
	const struct nf_afinfo *afinfo;
	struct flowi fl1, fl2;
	int ret = 0;

	/* rcu_read_lock()ed by nf_hook_slow() */
	afinfo = nf_get_afinfo(family);
	if (!afinfo)
		return 0;

	memset(&fl1, 0, sizeof(fl1));
	memset(&fl2, 0, sizeof(fl2));

	switch (family) {
	case AF_INET: {
		struct rtable *rt1, *rt2;

		fl1.fl4_dst = src->ip;
		fl2.fl4_dst = dst->ip;
		if (!afinfo->route((struct dst_entry **)&rt1, &fl1)) {
			if (!afinfo->route((struct dst_entry **)&rt2, &fl2)) {
				/* Same next hop and same interface? */
				if (rt1->rt_gateway == rt2->rt_gateway &&
				    rt1->u.dst.dev  == rt2->u.dst.dev)
					ret = 1;
				dst_release(&rt2->u.dst);
			}
			dst_release(&rt1->u.dst);
		}
		break;
	}
#if defined(CONFIG_NF_CONNTRACK_IPV6) || \
    defined(CONFIG_NF_CONNTRACK_IPV6_MODULE)
	case AF_INET6: {
		struct rt6_info *rt1, *rt2;

		memcpy(&fl1.fl6_dst, src, sizeof(fl1.fl6_dst));
		memcpy(&fl2.fl6_dst, dst, sizeof(fl2.fl6_dst));
		if (!afinfo->route((struct dst_entry **)&rt1, &fl1)) {
			if (!afinfo->route((struct dst_entry **)&rt2, &fl2)) {
				/* Same next hop and same interface? */
				if (!memcmp(&rt1->rt6i_gateway, &rt2->rt6i_gateway,
					    sizeof(rt1->rt6i_gateway)) &&
				    rt1->u.dst.dev == rt2->u.dst.dev)
					ret = 1;
				dst_release(&rt2->u.dst);
			}
			dst_release(&rt1->u.dst);
		}
		break;
	}
#endif
	}
	return ret;
}
/****************************************************************************/
/*
 * Create an expectation for the second leg of a forwarded call
 * (Facility with reason callForwarded).  Optionally filtered out when
 * both endpoints route the same way (callforward_filter).  The expected
 * connection gets the Q.931 helper.  Returns 0 or -1.
 */
static int expect_callforwarding(struct sk_buff *skb,
				 struct nf_conn *ct,
				 enum ip_conntrack_info ctinfo,
				 unsigned char **data, int dataoff,
				 TransportAddress *taddr)
{
	int dir = CTINFO2DIR(ctinfo);
	int ret = 0;
	__be16 port;
	union nf_inet_addr addr;
	struct nf_conntrack_expect *exp;
	typeof(nat_callforwarding_hook) nat_callforwarding;

	/* Read alternativeAddress */
	if (!get_h225_addr(ct, *data, taddr, &addr, &port) || port == 0)
		return 0;

	/* If the calling party is on the same side of the forward-to party,
	 * we don't need to track the second call */
	if (callforward_filter &&
	    callforward_do_filter(&addr, &ct->tuplehash[!dir].tuple.src.u3,
				  nf_ct_l3num(ct))) {
		pr_debug("nf_ct_q931: Call Forwarding not tracked\n");
		return 0;
	}

	/* Create expect for the second call leg */
	if ((exp = nf_ct_expect_alloc(ct)) == NULL)
		return -1;
	nf_ct_expect_init(exp, NF_CT_EXPECT_CLASS_DEFAULT, nf_ct_l3num(ct),
			  &ct->tuplehash[!dir].tuple.src.u3, &addr,
			  IPPROTO_TCP, NULL, &port);
	exp->helper = nf_conntrack_helper_q931;

	/* Hand over to NAT when the connection is translated. */
	if (memcmp(&ct->tuplehash[dir].tuple.src.u3,
		   &ct->tuplehash[!dir].tuple.dst.u3,
		   sizeof(ct->tuplehash[dir].tuple.src.u3)) &&
	    (nat_callforwarding = rcu_dereference(nat_callforwarding_hook)) &&
	    ct->status & IPS_NAT_MASK) {
		/* Need NAT */
		ret = nat_callforwarding(skb, ct, ctinfo, data, dataoff,
					 taddr, port, exp);
	} else {		/* Conntrack only */
		if (nf_ct_expect_related(exp) == 0) {
			pr_debug("nf_ct_q931: expect Call Forwarding ");
			nf_ct_dump_tuple(&exp->tuple);
		} else
			ret = -1;
	}

	nf_ct_expect_put(exp);

	return ret;
}
/****************************************************************************/
/*
 * Handle a Q.931 Setup: expect the announced H.245 channel, let NAT
 * rewrite the embedded destination/source call signalling addresses
 * when they differ from the translated tuple, and process any fastStart
 * OpenLogicalChannel elements.  Returns 0 on success, -1 on failure.
 */
static int process_setup(struct sk_buff *skb, struct nf_conn *ct,
			 enum ip_conntrack_info ctinfo,
			 unsigned char **data, int dataoff,
			 Setup_UUIE *setup)
{
	int dir = CTINFO2DIR(ctinfo);
	int ret;
	int i;
	__be16 port;
	union nf_inet_addr addr;
	typeof(set_h225_addr_hook) set_h225_addr;

	pr_debug("nf_ct_q931: Setup\n");

	if (setup->options & eSetup_UUIE_h245Address) {
		ret = expect_h245(skb, ct, ctinfo, data, dataoff,
				  &setup->h245Address);
		if (ret < 0)
			return -1;
	}

	set_h225_addr = rcu_dereference(set_h225_addr_hook);

	/* Rewrite destCallSignalAddress when NATed and it differs from
	 * the reply-direction source. */
	if ((setup->options & eSetup_UUIE_destCallSignalAddress) &&
	    (set_h225_addr) && ct->status & IPS_NAT_MASK &&
	    get_h225_addr(ct, *data, &setup->destCallSignalAddress,
			  &addr, &port) &&
	    memcmp(&addr, &ct->tuplehash[!dir].tuple.src.u3, sizeof(addr))) {
		pr_debug("nf_ct_q931: set destCallSignalAddress %pI6:%hu->%pI6:%hu\n",
			 &addr, ntohs(port), &ct->tuplehash[!dir].tuple.src.u3,
			 ntohs(ct->tuplehash[!dir].tuple.src.u.tcp.port));
		ret = set_h225_addr(skb, data, dataoff,
				    &setup->destCallSignalAddress,
				    &ct->tuplehash[!dir].tuple.src.u3,
				    ct->tuplehash[!dir].tuple.src.u.tcp.port);
		if (ret < 0)
			return -1;
	}

	/* Likewise for sourceCallSignalAddress vs. the reply-direction
	 * destination. */
	if ((setup->options & eSetup_UUIE_sourceCallSignalAddress) &&
	    (set_h225_addr) && ct->status & IPS_NAT_MASK &&
	    get_h225_addr(ct, *data, &setup->sourceCallSignalAddress,
			  &addr, &port) &&
	    memcmp(&addr, &ct->tuplehash[!dir].tuple.dst.u3, sizeof(addr))) {
		pr_debug("nf_ct_q931: set sourceCallSignalAddress %pI6:%hu->%pI6:%hu\n",
			 &addr, ntohs(port), &ct->tuplehash[!dir].tuple.dst.u3,
			 ntohs(ct->tuplehash[!dir].tuple.dst.u.tcp.port));
		ret = set_h225_addr(skb, data, dataoff,
				    &setup->sourceCallSignalAddress,
				    &ct->tuplehash[!dir].tuple.dst.u3,
				    ct->tuplehash[!dir].tuple.dst.u.tcp.port);
		if (ret < 0)
			return -1;
	}

	if (setup->options & eSetup_UUIE_fastStart) {
		for (i = 0; i < setup->fastStart.count; i++) {
			ret = process_olc(skb, ct, ctinfo, data, dataoff,
					  &setup->fastStart.item[i]);
			if (ret < 0)
				return -1;
		}
	}

	return 0;
}
/****************************************************************************/
/*
 * Handle a Q.931 CallProceeding: expect its H.245 address (if present)
 * and process any fastStart elements.  Returns 0 or -1.
 */
static int process_callproceeding(struct sk_buff *skb,
				  struct nf_conn *ct,
				  enum ip_conntrack_info ctinfo,
				  unsigned char **data, int dataoff,
				  CallProceeding_UUIE *callproc)
{
	int i;

	pr_debug("nf_ct_q931: CallProceeding\n");

	if ((callproc->options & eCallProceeding_UUIE_h245Address) &&
	    expect_h245(skb, ct, ctinfo, data, dataoff,
			&callproc->h245Address) < 0)
		return -1;

	if (callproc->options & eCallProceeding_UUIE_fastStart) {
		for (i = 0; i < callproc->fastStart.count; i++)
			if (process_olc(skb, ct, ctinfo, data, dataoff,
					&callproc->fastStart.item[i]) < 0)
				return -1;
	}

	return 0;
}
/****************************************************************************/
/*
 * Handle a Q.931 Connect: expect its H.245 address (if present) and
 * process any fastStart elements.  Returns 0 or -1.
 */
static int process_connect(struct sk_buff *skb, struct nf_conn *ct,
			   enum ip_conntrack_info ctinfo,
			   unsigned char **data, int dataoff,
			   Connect_UUIE *connect)
{
	int i;

	pr_debug("nf_ct_q931: Connect\n");

	if ((connect->options & eConnect_UUIE_h245Address) &&
	    expect_h245(skb, ct, ctinfo, data, dataoff,
			&connect->h245Address) < 0)
		return -1;

	if (connect->options & eConnect_UUIE_fastStart) {
		for (i = 0; i < connect->fastStart.count; i++)
			if (process_olc(skb, ct, ctinfo, data, dataoff,
					&connect->fastStart.item[i]) < 0)
				return -1;
	}

	return 0;
}
/****************************************************************************/
/*
 * Handle a Q.931 Alerting: expect its H.245 address (if present) and
 * process any fastStart elements.  Returns 0 or -1.
 */
static int process_alerting(struct sk_buff *skb, struct nf_conn *ct,
			    enum ip_conntrack_info ctinfo,
			    unsigned char **data, int dataoff,
			    Alerting_UUIE *alert)
{
	int i;

	pr_debug("nf_ct_q931: Alerting\n");

	if ((alert->options & eAlerting_UUIE_h245Address) &&
	    expect_h245(skb, ct, ctinfo, data, dataoff,
			&alert->h245Address) < 0)
		return -1;

	if (alert->options & eAlerting_UUIE_fastStart) {
		for (i = 0; i < alert->fastStart.count; i++)
			if (process_olc(skb, ct, ctinfo, data, dataoff,
					&alert->fastStart.item[i]) < 0)
				return -1;
	}

	return 0;
}
/****************************************************************************/
/*
 * Handle a Q.931 Facility.  A callForwarded facility is special-cased:
 * only its alternativeAddress (if any) is used, to expect the second
 * call leg.  Otherwise expect the H.245 address and process fastStart
 * elements as for the other Q.931 messages.  Returns 0 or -1.
 */
static int process_facility(struct sk_buff *skb, struct nf_conn *ct,
			    enum ip_conntrack_info ctinfo,
			    unsigned char **data, int dataoff,
			    Facility_UUIE *facility)
{
	int i;

	pr_debug("nf_ct_q931: Facility\n");

	if (facility->reason.choice == eFacilityReason_callForwarded) {
		if (!(facility->options & eFacility_UUIE_alternativeAddress))
			return 0;
		return expect_callforwarding(skb, ct, ctinfo, data, dataoff,
					     &facility->alternativeAddress);
	}

	if ((facility->options & eFacility_UUIE_h245Address) &&
	    expect_h245(skb, ct, ctinfo, data, dataoff,
			&facility->h245Address) < 0)
		return -1;

	if (facility->options & eFacility_UUIE_fastStart) {
		for (i = 0; i < facility->fastStart.count; i++)
			if (process_olc(skb, ct, ctinfo, data, dataoff,
					&facility->fastStart.item[i]) < 0)
				return -1;
	}

	return 0;
}
/****************************************************************************/
/*
 * Handle a Q.931 Progress: expect its H.245 address (if present) and
 * process any fastStart elements.  Returns 0 or -1.
 */
static int process_progress(struct sk_buff *skb, struct nf_conn *ct,
			    enum ip_conntrack_info ctinfo,
			    unsigned char **data, int dataoff,
			    Progress_UUIE *progress)
{
	int i;

	pr_debug("nf_ct_q931: Progress\n");

	if ((progress->options & eProgress_UUIE_h245Address) &&
	    expect_h245(skb, ct, ctinfo, data, dataoff,
			&progress->h245Address) < 0)
		return -1;

	if (progress->options & eProgress_UUIE_fastStart) {
		for (i = 0; i < progress->fastStart.count; i++)
			if (process_olc(skb, ct, ctinfo, data, dataoff,
					&progress->fastStart.item[i]) < 0)
				return -1;
	}

	return 0;
}
/****************************************************************************/
/*
 * Dispatch one decoded Q.931 message by its h323_message_body choice,
 * then process any tunnelled H.245 control elements it carries.
 * Returns 0 on success or when the message kind is ignored, -1 on
 * processing failure.
 */
static int process_q931(struct sk_buff *skb, struct nf_conn *ct,
			enum ip_conntrack_info ctinfo,
			unsigned char **data, int dataoff, Q931 *q931)
{
	H323_UU_PDU *pdu = &q931->UUIE.h323_uu_pdu;
	int i;
	int ret = 0;

	switch (pdu->h323_message_body.choice) {
	case eH323_UU_PDU_h323_message_body_setup:
		ret = process_setup(skb, ct, ctinfo, data, dataoff,
				    &pdu->h323_message_body.setup);
		break;
	case eH323_UU_PDU_h323_message_body_callProceeding:
		ret = process_callproceeding(skb, ct, ctinfo, data, dataoff,
					     &pdu->h323_message_body.
					     callProceeding);
		break;
	case eH323_UU_PDU_h323_message_body_connect:
		ret = process_connect(skb, ct, ctinfo, data, dataoff,
				      &pdu->h323_message_body.connect);
		break;
	case eH323_UU_PDU_h323_message_body_alerting:
		ret = process_alerting(skb, ct, ctinfo, data, dataoff,
				       &pdu->h323_message_body.alerting);
		break;
	case eH323_UU_PDU_h323_message_body_facility:
		ret = process_facility(skb, ct, ctinfo, data, dataoff,
				       &pdu->h323_message_body.facility);
		break;
	case eH323_UU_PDU_h323_message_body_progress:
		ret = process_progress(skb, ct, ctinfo, data, dataoff,
				       &pdu->h323_message_body.progress);
		break;
	default:
		pr_debug("nf_ct_q931: Q.931 signal %d\n",
			 pdu->h323_message_body.choice);
		break;
	}

	if (ret < 0)
		return -1;

	/* H.245 messages tunnelled inside the Q.931 PDU. */
	if (pdu->options & eH323_UU_PDU_h245Control) {
		for (i = 0; i < pdu->h245Control.count; i++) {
			ret = process_h245(skb, ct, ctinfo, data, dataoff,
					   &pdu->h245Control.item[i]);
			if (ret < 0)
				return -1;
		}
	}

	return 0;
}
/****************************************************************************/
/*
 * Conntrack helper entry point for Q.931 call signalling: walks every
 * TPKT in the segment, decodes the Q.931 message, and processes it.
 * The static decode buffer `q931` is safe because nf_h323_lock
 * serializes all callers.  Decode errors are tolerated (NF_ACCEPT);
 * only expectation-setup failures drop the packet.
 */
static int q931_help(struct sk_buff *skb, unsigned int protoff,
		     struct nf_conn *ct, enum ip_conntrack_info ctinfo)
{
	static Q931 q931;
	unsigned char *data = NULL;
	int datalen;
	int dataoff;
	int ret;

	/* Until there's been traffic both ways, don't look in packets. */
	if (ctinfo != IP_CT_ESTABLISHED &&
	    ctinfo != IP_CT_ESTABLISHED + IP_CT_IS_REPLY) {
		return NF_ACCEPT;
	}
	pr_debug("nf_ct_q931: skblen = %u\n", skb->len);

	spin_lock_bh(&nf_h323_lock);

	/* Process each TPKT */
	while (get_tpkt_data(skb, protoff, ct, ctinfo,
			     &data, &datalen, &dataoff)) {
		pr_debug("nf_ct_q931: TPKT len=%d ", datalen);
		nf_ct_dump_tuple(&ct->tuplehash[CTINFO2DIR(ctinfo)].tuple);

		/* Decode Q.931 signal */
		ret = DecodeQ931(data, datalen, &q931);
		if (ret < 0) {
			pr_debug("nf_ct_q931: decoding error: %s\n",
				 ret == H323_ERROR_BOUND ?
				 "out of bound" : "out of range");
			/* We don't drop when decoding error */
			break;
		}

		/* Process Q.931 signal */
		if (process_q931(skb, ct, ctinfo, &data, dataoff, &q931) < 0)
			goto drop;
	}

	spin_unlock_bh(&nf_h323_lock);
	return NF_ACCEPT;

      drop:
	spin_unlock_bh(&nf_h323_lock);
	if (net_ratelimit())
		pr_info("nf_ct_q931: packet dropped\n");
	return NF_DROP;
}
/****************************************************************************/
/* Expectation budget per Q.931 connection: RTP/RTCP channels plus
 * T.120 and H.245. */
static const struct nf_conntrack_expect_policy q931_exp_policy = {
	/* T.120 and H.245 */
	.max_expected		= H323_RTP_CHANNEL_MAX * 4 + 4,
	.timeout		= 240,
};

/* Q.931 helpers bound to TCP port Q931_PORT, one entry per L3 family
 * (IPv4 and IPv6). */
static struct nf_conntrack_helper nf_conntrack_helper_q931[] __read_mostly = {
	{
		.name			= "Q.931",
		.me			= THIS_MODULE,
		.tuple.src.l3num	= AF_INET,
		.tuple.src.u.tcp.port	= cpu_to_be16(Q931_PORT),
		.tuple.dst.protonum	= IPPROTO_TCP,
		.help			= q931_help,
		.expect_policy		= &q931_exp_policy,
	},
	{
		.name			= "Q.931",
		.me			= THIS_MODULE,
		.tuple.src.l3num	= AF_INET6,
		.tuple.src.u.tcp.port	= cpu_to_be16(Q931_PORT),
		.tuple.dst.protonum	= IPPROTO_TCP,
		.help			= q931_help,
		.expect_policy		= &q931_exp_policy,
	},
};
/****************************************************************************/
/*
 * Return a pointer to the (linearized) UDP payload of @skb and store
 * its length in *datalen.  Uses the shared h323_buffer as backing store
 * when the payload is non-linear, so the caller must hold nf_h323_lock.
 * Returns NULL when the UDP header is truncated or there is no payload.
 */
static unsigned char *get_udp_data(struct sk_buff *skb, unsigned int protoff,
				   int *datalen)
{
	struct udphdr _uh;
	const struct udphdr *uh =
		skb_header_pointer(skb, protoff, sizeof(_uh), &_uh);
	unsigned int payload_off;

	if (!uh)
		return NULL;

	payload_off = protoff + sizeof(_uh);
	if (payload_off >= skb->len)	/* header only, no payload */
		return NULL;

	*datalen = skb->len - payload_off;
	return skb_header_pointer(skb, payload_off, *datalen, h323_buffer);
}
/****************************************************************************/
/*
 * Look up an unexpired TCP expectation to @addr:@port that was created
 * by @ct (exp->master == ct); returns NULL when none matches.
 *
 * NOTE(review): the lookup tuple is only partially initialized —
 * tuple.src.l3num (and src.u.all beyond the tcp port) are left as stack
 * garbage before __nf_ct_expect_find() compares tuples; verify whether
 * this can cause spurious lookup misses and whether it should be set to
 * nf_ct_l3num(ct) (or the tuple fully memset) as in later kernels.
 */
static struct nf_conntrack_expect *find_expect(struct nf_conn *ct,
					       union nf_inet_addr *addr,
					       __be16 port)
{
	struct net *net = nf_ct_net(ct);
	struct nf_conntrack_expect *exp;
	struct nf_conntrack_tuple tuple;

	memset(&tuple.src.u3, 0, sizeof(tuple.src.u3));
	tuple.src.u.tcp.port = 0;
	memcpy(&tuple.dst.u3, addr, sizeof(tuple.dst.u3));
	tuple.dst.u.tcp.port = port;
	tuple.dst.protonum = IPPROTO_TCP;

	exp = __nf_ct_expect_find(net, nf_ct_zone(ct), &tuple);
	if (exp && exp->master == ct)
		return exp;
	return NULL;
}
/****************************************************************************/
static int set_expect_timeout(struct nf_conntrack_expect *exp,
unsigned timeout)
{
if (!exp || !del_timer(&exp->timeout))
return 0;
exp->timeout.expires = jiffies + timeout * HZ;
add_timer(&exp->timeout);
return 1;
}
/****************************************************************************/
/*
 * From a RAS message's callSignalAddress list, create a permanent
 * expectation for the endpoint's Q.931 call signalling port.  When
 * gkrouted_only is set the expectation's source is pinned to the
 * gatekeeper address.  The chosen port is stored in info->sig_port so
 * RCF processing can find the expectation later.  Returns 0 or -1.
 */
static int expect_q931(struct sk_buff *skb, struct nf_conn *ct,
		       enum ip_conntrack_info ctinfo,
		       unsigned char **data,
		       TransportAddress *taddr, int count)
{
	struct nf_ct_h323_master *info = &nfct_help(ct)->help.ct_h323_info;
	int dir = CTINFO2DIR(ctinfo);
	int ret = 0;
	int i;
	__be16 port;
	union nf_inet_addr addr;
	struct nf_conntrack_expect *exp;
	typeof(nat_q931_hook) nat_q931;

	/* Look for the first related address */
	for (i = 0; i < count; i++) {
		if (get_h225_addr(ct, *data, &taddr[i], &addr, &port) &&
		    memcmp(&addr, &ct->tuplehash[dir].tuple.src.u3,
			   sizeof(addr)) == 0 && port != 0)
			break;
	}

	if (i >= count)		/* Not found */
		return 0;

	/* Create expect for Q.931 */
	if ((exp = nf_ct_expect_alloc(ct)) == NULL)
		return -1;
	nf_ct_expect_init(exp, NF_CT_EXPECT_CLASS_DEFAULT, nf_ct_l3num(ct),
			  gkrouted_only ? /* only accept calls from GK? */
			  &ct->tuplehash[!dir].tuple.src.u3 : NULL,
			  &ct->tuplehash[!dir].tuple.dst.u3,
			  IPPROTO_TCP, NULL, &port);
	exp->helper = nf_conntrack_helper_q931;
	exp->flags = NF_CT_EXPECT_PERMANENT;	/* Accept multiple calls */

	nat_q931 = rcu_dereference(nat_q931_hook);
	if (nat_q931 && ct->status & IPS_NAT_MASK) {	/* Need NAT */
		ret = nat_q931(skb, ct, ctinfo, data, taddr, i, port, exp);
	} else {		/* Conntrack only */
		if (nf_ct_expect_related(exp) == 0) {
			pr_debug("nf_ct_ras: expect Q.931 ");
			nf_ct_dump_tuple(&exp->tuple);

			/* Save port for looking up expect in processing RCF */
			info->sig_port[dir] = port;
		} else
			ret = -1;
	}

	nf_ct_expect_put(exp);

	return ret;
}
/****************************************************************************/
/*
 * Handle a GatekeeperRequest: when NAT is active, let the NAT helper
 * rewrite the RAS address embedded in the message.  Without NAT there
 * is nothing to do.  Returns the hook's result, or 0.
 */
static int process_grq(struct sk_buff *skb, struct nf_conn *ct,
		       enum ip_conntrack_info ctinfo,
		       unsigned char **data, GatekeeperRequest *grq)
{
	typeof(set_ras_addr_hook) ras_rewrite;

	pr_debug("nf_ct_ras: GRQ\n");

	ras_rewrite = rcu_dereference(set_ras_addr_hook);
	if (!ras_rewrite || !(ct->status & IPS_NAT_MASK))
		return 0;	/* not NATed: nothing to rewrite */

	return ras_rewrite(skb, ct, ctinfo, data, &grq->rasAddress, 1);
}
/****************************************************************************/
/*
 * Handle a GatekeeperConfirm: if the gatekeeper answered with a RAS
 * address different from the discovery address, expect the follow-up
 * RAS (UDP) registration traffic on that new address/port.
 * Returns 0 on success/no-op, -1 on expectation failure.
 */
static int process_gcf(struct sk_buff *skb, struct nf_conn *ct,
		       enum ip_conntrack_info ctinfo,
		       unsigned char **data, GatekeeperConfirm *gcf)
{
	int dir = CTINFO2DIR(ctinfo);
	int ret = 0;
	__be16 port;
	union nf_inet_addr addr;
	struct nf_conntrack_expect *exp;

	pr_debug("nf_ct_ras: GCF\n");

	if (!get_h225_addr(ct, *data, &gcf->rasAddress, &addr, &port))
		return 0;

	/* Registration port is the same as discovery port */
	if (!memcmp(&addr, &ct->tuplehash[dir].tuple.src.u3, sizeof(addr)) &&
	    port == ct->tuplehash[dir].tuple.src.u.udp.port)
		return 0;

	/* Avoid RAS expectation loops. A GCF is never expected. */
	if (test_bit(IPS_EXPECTED_BIT, &ct->status))
		return 0;

	/* Need new expect */
	if ((exp = nf_ct_expect_alloc(ct)) == NULL)
		return -1;
	nf_ct_expect_init(exp, NF_CT_EXPECT_CLASS_DEFAULT, nf_ct_l3num(ct),
			  &ct->tuplehash[!dir].tuple.src.u3, &addr,
			  IPPROTO_UDP, NULL, &port);
	exp->helper = nf_conntrack_helper_ras;

	if (nf_ct_expect_related(exp) == 0) {
		pr_debug("nf_ct_ras: expect RAS ");
		nf_ct_dump_tuple(&exp->tuple);
	} else
		ret = -1;

	nf_ct_expect_put(exp);
	return ret;
}
/****************************************************************************/
/*
 * Handle a RegistrationRequest: expect the endpoint's Q.931 call
 * signalling connection, optionally NAT-rewrite the RAS addresses,
 * and record the requested registration TTL (falling back to
 * default_rrq_ttl when the message carries none) for use by RCF.
 * Returns 0 on success, -1 on failure.
 */
static int process_rrq(struct sk_buff *skb, struct nf_conn *ct,
		       enum ip_conntrack_info ctinfo,
		       unsigned char **data, RegistrationRequest *rrq)
{
	struct nf_ct_h323_master *info = &nfct_help(ct)->help.ct_h323_info;
	int ret;
	typeof(set_ras_addr_hook) set_ras_addr;

	pr_debug("nf_ct_ras: RRQ\n");

	ret = expect_q931(skb, ct, ctinfo, data,
			  rrq->callSignalAddress.item,
			  rrq->callSignalAddress.count);
	if (ret < 0)
		return -1;

	set_ras_addr = rcu_dereference(set_ras_addr_hook);
	if (set_ras_addr && ct->status & IPS_NAT_MASK) {
		ret = set_ras_addr(skb, ct, ctinfo, data,
				   rrq->rasAddress.item,
				   rrq->rasAddress.count);
		if (ret < 0)
			return -1;
	}

	/* Remember the TTL so process_rcf() can extend the conntrack
	 * and Q.931 expectation timeouts accordingly. */
	if (rrq->options & eRegistrationRequest_timeToLive) {
		pr_debug("nf_ct_ras: RRQ TTL = %u seconds\n", rrq->timeToLive);
		info->timeout = rrq->timeToLive;
	} else
		info->timeout = default_rrq_ttl;

	return 0;
}
/****************************************************************************/
/*
 * Handle a RegistrationConfirm: optionally NAT-rewrite the signalling
 * addresses, pick up a gatekeeper-assigned TTL, then stretch both the
 * RAS conntrack timeout and the pending Q.931 expectation timeout to
 * cover the registration lifetime.
 * Returns 0 on success, -1 on NAT rewrite failure.
 */
static int process_rcf(struct sk_buff *skb, struct nf_conn *ct,
		       enum ip_conntrack_info ctinfo,
		       unsigned char **data, RegistrationConfirm *rcf)
{
	struct nf_ct_h323_master *info = &nfct_help(ct)->help.ct_h323_info;
	int dir = CTINFO2DIR(ctinfo);
	int ret;
	struct nf_conntrack_expect *exp;
	typeof(set_sig_addr_hook) set_sig_addr;

	pr_debug("nf_ct_ras: RCF\n");

	set_sig_addr = rcu_dereference(set_sig_addr_hook);
	if (set_sig_addr && ct->status & IPS_NAT_MASK) {
		ret = set_sig_addr(skb, ct, ctinfo, data,
					rcf->callSignalAddress.item,
					rcf->callSignalAddress.count);
		if (ret < 0)
			return -1;
	}

	/* A TTL in the RCF overrides whatever the RRQ established. */
	if (rcf->options & eRegistrationConfirm_timeToLive) {
		pr_debug("nf_ct_ras: RCF TTL = %u seconds\n", rcf->timeToLive);
		info->timeout = rcf->timeToLive;
	}

	if (info->timeout > 0) {
		pr_debug("nf_ct_ras: set RAS connection timeout to "
			 "%u seconds\n", info->timeout);
		nf_ct_refresh(ct, skb, info->timeout * HZ);

		/* Set expect timeout */
		/* Lock guards the expectation table while we look up and
		 * re-arm the Q.931 expectation saved by expect_q931(). */
		spin_lock_bh(&nf_conntrack_lock);
		exp = find_expect(ct, &ct->tuplehash[dir].tuple.dst.u3,
				  info->sig_port[!dir]);
		if (exp) {
			pr_debug("nf_ct_ras: set Q.931 expect "
				 "timeout to %u seconds for",
				 info->timeout);
			nf_ct_dump_tuple(&exp->tuple);
			set_expect_timeout(exp, info->timeout);
		}
		spin_unlock_bh(&nf_conntrack_lock);
	}

	return 0;
}
/****************************************************************************/
/*
 * Handle an UnregistrationRequest: optionally NAT-rewrite the
 * signalling addresses, drop every expectation this entry owns,
 * forget the recorded signalling ports and give the conntrack 30
 * seconds to see the UCF/URJ reply before expiring.
 * Returns 0 on success, -1 on NAT rewrite failure.
 */
static int process_urq(struct sk_buff *skb, struct nf_conn *ct,
		       enum ip_conntrack_info ctinfo,
		       unsigned char **data, UnregistrationRequest *urq)
{
	struct nf_ct_h323_master *info = &nfct_help(ct)->help.ct_h323_info;
	int dir = CTINFO2DIR(ctinfo);
	int ret;
	typeof(set_sig_addr_hook) set_sig_addr;

	pr_debug("nf_ct_ras: URQ\n");

	set_sig_addr = rcu_dereference(set_sig_addr_hook);
	if (set_sig_addr && ct->status & IPS_NAT_MASK) {
		ret = set_sig_addr(skb, ct, ctinfo, data,
				   urq->callSignalAddress.item,
				   urq->callSignalAddress.count);
		if (ret < 0)
			return -1;
	}

	/* Clear old expect */
	nf_ct_remove_expectations(ct);
	info->sig_port[dir] = 0;
	info->sig_port[!dir] = 0;

	/* Give it 30 seconds for UCF or URJ */
	nf_ct_refresh(ct, skb, 30 * HZ);
	return 0;
}
/****************************************************************************/
/*
 * Handle an AdmissionRequest under NAT: rewrite the embedded call
 * signalling address so the peer sees routable values.
 *  - Answering ARQ: the destination address matches our recorded
 *    signalling endpoint, so substitute the NATed address/port.
 *  - Calling ARQ: the source address matches the packet source, so
 *    substitute the NATed source address (original port kept).
 * Without NAT (or no match) this is a no-op returning 0.
 */
static int process_arq(struct sk_buff *skb, struct nf_conn *ct,
		       enum ip_conntrack_info ctinfo,
		       unsigned char **data, AdmissionRequest *arq)
{
	const struct nf_ct_h323_master *info = &nfct_help(ct)->help.ct_h323_info;
	int dir = CTINFO2DIR(ctinfo);
	__be16 port;
	union nf_inet_addr addr;
	typeof(set_h225_addr_hook) set_h225_addr;

	pr_debug("nf_ct_ras: ARQ\n");

	set_h225_addr = rcu_dereference(set_h225_addr_hook);
	if ((arq->options & eAdmissionRequest_destCallSignalAddress) &&
	    get_h225_addr(ct, *data, &arq->destCallSignalAddress,
			  &addr, &port) &&
	    !memcmp(&addr, &ct->tuplehash[dir].tuple.src.u3, sizeof(addr)) &&
	    port == info->sig_port[dir] &&
	    set_h225_addr && ct->status & IPS_NAT_MASK) {
		/* Answering ARQ */
		return set_h225_addr(skb, data, 0,
				     &arq->destCallSignalAddress,
				     &ct->tuplehash[!dir].tuple.dst.u3,
				     info->sig_port[!dir]);
	}

	if ((arq->options & eAdmissionRequest_srcCallSignalAddress) &&
	    get_h225_addr(ct, *data, &arq->srcCallSignalAddress,
			  &addr, &port) &&
	    !memcmp(&addr, &ct->tuplehash[dir].tuple.src.u3, sizeof(addr)) &&
	    set_h225_addr && ct->status & IPS_NAT_MASK) {
		/* Calling ARQ */
		return set_h225_addr(skb, data, 0,
				     &arq->srcCallSignalAddress,
				     &ct->tuplehash[!dir].tuple.dst.u3,
				     port);
	}

	return 0;
}
/****************************************************************************/
/*
 * Handle an AdmissionConfirm.  If the confirmed call-signalling
 * address is ourselves (answering side) only a NAT rewrite may be
 * needed; otherwise expect the upcoming Q.931 TCP connection to the
 * granted address/port.
 * Returns 0 on success, -1 on expectation failure.
 */
static int process_acf(struct sk_buff *skb, struct nf_conn *ct,
		       enum ip_conntrack_info ctinfo,
		       unsigned char **data, AdmissionConfirm *acf)
{
	int dir = CTINFO2DIR(ctinfo);
	int ret = 0;
	__be16 port;
	union nf_inet_addr addr;
	struct nf_conntrack_expect *exp;
	typeof(set_sig_addr_hook) set_sig_addr;

	pr_debug("nf_ct_ras: ACF\n");

	if (!get_h225_addr(ct, *data, &acf->destCallSignalAddress,
			   &addr, &port))
		return 0;

	if (!memcmp(&addr, &ct->tuplehash[dir].tuple.dst.u3, sizeof(addr))) {
		/* Answering ACF */
		set_sig_addr = rcu_dereference(set_sig_addr_hook);
		if (set_sig_addr && ct->status & IPS_NAT_MASK)
			return set_sig_addr(skb, ct, ctinfo, data,
					    &acf->destCallSignalAddress, 1);
		return 0;
	}

	/* Need new expect */
	if ((exp = nf_ct_expect_alloc(ct)) == NULL)
		return -1;
	nf_ct_expect_init(exp, NF_CT_EXPECT_CLASS_DEFAULT, nf_ct_l3num(ct),
			  &ct->tuplehash[!dir].tuple.src.u3, &addr,
			  IPPROTO_TCP, NULL, &port);
	exp->flags = NF_CT_EXPECT_PERMANENT;
	exp->helper = nf_conntrack_helper_q931;

	if (nf_ct_expect_related(exp) == 0) {
		pr_debug("nf_ct_ras: expect Q.931 ");
		nf_ct_dump_tuple(&exp->tuple);
	} else
		ret = -1;

	nf_ct_expect_put(exp);
	return ret;
}
/****************************************************************************/
/*
 * Handle a LocationRequest: under NAT, rewrite the replyAddress the
 * peer should answer to.  No NAT means nothing to do.
 */
static int process_lrq(struct sk_buff *skb, struct nf_conn *ct,
		       enum ip_conntrack_info ctinfo,
		       unsigned char **data, LocationRequest *lrq)
{
	typeof(set_ras_addr_hook) rewrite_reply_addr;

	pr_debug("nf_ct_ras: LRQ\n");

	rewrite_reply_addr = rcu_dereference(set_ras_addr_hook);
	if (rewrite_reply_addr && (ct->status & IPS_NAT_MASK))
		return rewrite_reply_addr(skb, ct, ctinfo, data,
					  &lrq->replyAddress, 1);
	return 0;
}
/****************************************************************************/
/*
 * Handle a LocationConfirm: expect the Q.931 call signalling TCP
 * connection to the located address/port.  The rasAddress in the
 * message is deliberately ignored.
 * Returns 0 on success/no address, -1 on expectation failure.
 */
static int process_lcf(struct sk_buff *skb, struct nf_conn *ct,
		       enum ip_conntrack_info ctinfo,
		       unsigned char **data, LocationConfirm *lcf)
{
	int dir = CTINFO2DIR(ctinfo);
	int ret = 0;
	__be16 port;
	union nf_inet_addr addr;
	struct nf_conntrack_expect *exp;

	pr_debug("nf_ct_ras: LCF\n");

	if (!get_h225_addr(ct, *data, &lcf->callSignalAddress,
			   &addr, &port))
		return 0;

	/* Need new expect for call signal */
	if ((exp = nf_ct_expect_alloc(ct)) == NULL)
		return -1;
	nf_ct_expect_init(exp, NF_CT_EXPECT_CLASS_DEFAULT, nf_ct_l3num(ct),
			  &ct->tuplehash[!dir].tuple.src.u3, &addr,
			  IPPROTO_TCP, NULL, &port);
	exp->flags = NF_CT_EXPECT_PERMANENT;
	exp->helper = nf_conntrack_helper_q931;

	if (nf_ct_expect_related(exp) == 0) {
		pr_debug("nf_ct_ras: expect Q.931 ");
		nf_ct_dump_tuple(&exp->tuple);
	} else
		ret = -1;

	nf_ct_expect_put(exp);

	/* Ignore rasAddress */

	return ret;
}
/****************************************************************************/
/*
 * Handle an InfoRequestResponse under NAT: rewrite both the RAS
 * address and the list of call signalling addresses carried in the
 * message.  Returns 0 on success (or no NAT), -1 on rewrite failure.
 */
static int process_irr(struct sk_buff *skb, struct nf_conn *ct,
		       enum ip_conntrack_info ctinfo,
		       unsigned char **data, InfoRequestResponse *irr)
{
	int ret;
	typeof(set_ras_addr_hook) set_ras_addr;
	typeof(set_sig_addr_hook) set_sig_addr;

	pr_debug("nf_ct_ras: IRR\n");

	set_ras_addr = rcu_dereference(set_ras_addr_hook);
	if (set_ras_addr && ct->status & IPS_NAT_MASK) {
		ret = set_ras_addr(skb, ct, ctinfo, data,
				   &irr->rasAddress, 1);
		if (ret < 0)
			return -1;
	}

	set_sig_addr = rcu_dereference(set_sig_addr_hook);
	if (set_sig_addr && ct->status & IPS_NAT_MASK) {
		ret = set_sig_addr(skb, ct, ctinfo, data,
					irr->callSignalAddress.item,
					irr->callSignalAddress.count);
		if (ret < 0)
			return -1;
	}

	return 0;
}
/****************************************************************************/
/*
 * Dispatch a decoded RAS message to its per-type handler.  Message
 * types without a handler are logged and accepted (return 0); a
 * negative return from a handler propagates up and drops the packet
 * in ras_help().
 */
static int process_ras(struct sk_buff *skb, struct nf_conn *ct,
		       enum ip_conntrack_info ctinfo,
		       unsigned char **data, RasMessage *ras)
{
	switch (ras->choice) {
	case eRasMessage_gatekeeperRequest:
		return process_grq(skb, ct, ctinfo, data,
				   &ras->gatekeeperRequest);
	case eRasMessage_gatekeeperConfirm:
		return process_gcf(skb, ct, ctinfo, data,
				   &ras->gatekeeperConfirm);
	case eRasMessage_registrationRequest:
		return process_rrq(skb, ct, ctinfo, data,
				   &ras->registrationRequest);
	case eRasMessage_registrationConfirm:
		return process_rcf(skb, ct, ctinfo, data,
				   &ras->registrationConfirm);
	case eRasMessage_unregistrationRequest:
		return process_urq(skb, ct, ctinfo, data,
				   &ras->unregistrationRequest);
	case eRasMessage_admissionRequest:
		return process_arq(skb, ct, ctinfo, data,
				   &ras->admissionRequest);
	case eRasMessage_admissionConfirm:
		return process_acf(skb, ct, ctinfo, data,
				   &ras->admissionConfirm);
	case eRasMessage_locationRequest:
		return process_lrq(skb, ct, ctinfo, data,
				   &ras->locationRequest);
	case eRasMessage_locationConfirm:
		return process_lcf(skb, ct, ctinfo, data,
				   &ras->locationConfirm);
	case eRasMessage_infoRequestResponse:
		return process_irr(skb, ct, ctinfo, data,
				   &ras->infoRequestResponse);
	default:
		/* Unhandled message type: accept unchanged. */
		pr_debug("nf_ct_ras: RAS message %d\n", ras->choice);
		break;
	}

	return 0;
}
/****************************************************************************/
/*
 * Conntrack helper entry point for RAS (UDP) traffic: extract the UDP
 * payload, ASN.1-decode it into a RasMessage and dispatch it.
 * Undecodable packets are accepted untouched; only a failing message
 * handler causes NF_DROP.
 *
 * The static RasMessage and the shared h323_buffer are both
 * serialized by nf_h323_lock, held across decode and processing.
 */
static int ras_help(struct sk_buff *skb, unsigned int protoff,
		    struct nf_conn *ct, enum ip_conntrack_info ctinfo)
{
	static RasMessage ras;
	unsigned char *data;
	int datalen = 0;
	int ret;

	pr_debug("nf_ct_ras: skblen = %u\n", skb->len);

	spin_lock_bh(&nf_h323_lock);

	/* Get UDP data */
	data = get_udp_data(skb, protoff, &datalen);
	if (data == NULL)
		goto accept;
	pr_debug("nf_ct_ras: RAS message len=%d ", datalen);
	nf_ct_dump_tuple(&ct->tuplehash[CTINFO2DIR(ctinfo)].tuple);

	/* Decode RAS message */
	ret = DecodeRasMessage(data, datalen, &ras);
	if (ret < 0) {
		pr_debug("nf_ct_ras: decoding error: %s\n",
			 ret == H323_ERROR_BOUND ?
			 "out of bound" : "out of range");
		goto accept;
	}

	/* Process RAS message */
	if (process_ras(skb, ct, ctinfo, &data, &ras) < 0)
		goto drop;

      accept:
	spin_unlock_bh(&nf_h323_lock);
	return NF_ACCEPT;

      drop:
	spin_unlock_bh(&nf_h323_lock);
	if (net_ratelimit())
		pr_info("nf_ct_ras: packet dropped\n");
	return NF_DROP;
}
/****************************************************************************/
/* Expectation policy shared by both RAS helpers: up to 32 pending
 * expectations, each living 240 seconds. */
static const struct nf_conntrack_expect_policy ras_exp_policy = {
	.max_expected		= 32,
	.timeout		= 240,
};

/* RAS conntrack helpers, one per address family, both bound to the
 * well-known RAS UDP port. */
static struct nf_conntrack_helper nf_conntrack_helper_ras[] __read_mostly = {
	{
		.name			= "RAS",
		.me			= THIS_MODULE,
		.tuple.src.l3num	= AF_INET,
		.tuple.src.u.udp.port	= cpu_to_be16(RAS_PORT),
		.tuple.dst.protonum	= IPPROTO_UDP,
		.help			= ras_help,
		.expect_policy		= &ras_exp_policy,
	},
	{
		.name			= "RAS",
		.me			= THIS_MODULE,
		.tuple.src.l3num	= AF_INET6,
		.tuple.src.u.udp.port	= cpu_to_be16(RAS_PORT),
		.tuple.dst.protonum	= IPPROTO_UDP,
		.help			= ras_help,
		.expect_policy		= &ras_exp_policy,
	},
};
/****************************************************************************/
/*
 * Module teardown: unregister every helper in the reverse order of
 * registration, then release the shared decode buffer.
 */
static void __exit nf_conntrack_h323_fini(void)
{
	int i;

	for (i = 1; i >= 0; i--)
		nf_conntrack_helper_unregister(&nf_conntrack_helper_ras[i]);
	for (i = 1; i >= 0; i--)
		nf_conntrack_helper_unregister(&nf_conntrack_helper_q931[i]);
	nf_conntrack_helper_unregister(&nf_conntrack_helper_h245);
	kfree(h323_buffer);
	pr_debug("nf_ct_h323: fini\n");
}
/****************************************************************************/
/*
 * Module init: allocate the 64 KiB shared decode buffer (large enough
 * for a maximum-size UDP payload), then register the H.245, Q.931 and
 * RAS helpers.  The goto chain unwinds the registrations already made
 * in reverse order on failure.
 */
static int __init nf_conntrack_h323_init(void)
{
	int ret;

	h323_buffer = kmalloc(65536, GFP_KERNEL);
	if (!h323_buffer)
		return -ENOMEM;
	ret = nf_conntrack_helper_register(&nf_conntrack_helper_h245);
	if (ret < 0)
		goto err1;
	ret = nf_conntrack_helper_register(&nf_conntrack_helper_q931[0]);
	if (ret < 0)
		goto err2;
	ret = nf_conntrack_helper_register(&nf_conntrack_helper_q931[1]);
	if (ret < 0)
		goto err3;
	ret = nf_conntrack_helper_register(&nf_conntrack_helper_ras[0]);
	if (ret < 0)
		goto err4;
	ret = nf_conntrack_helper_register(&nf_conntrack_helper_ras[1]);
	if (ret < 0)
		goto err5;
	pr_debug("nf_ct_h323: init success\n");
	return 0;

err5:
	nf_conntrack_helper_unregister(&nf_conntrack_helper_ras[0]);
err4:
	nf_conntrack_helper_unregister(&nf_conntrack_helper_q931[1]);
err3:
	nf_conntrack_helper_unregister(&nf_conntrack_helper_q931[0]);
err2:
	nf_conntrack_helper_unregister(&nf_conntrack_helper_h245);
err1:
	kfree(h323_buffer);
	return ret;
}
/****************************************************************************/
module_init(nf_conntrack_h323_init);
module_exit(nf_conntrack_h323_fini);

/* Symbols and hook pointers exported for the companion NAT helper
 * module (nf_nat_h323), which installs the rewrite callbacks. */
EXPORT_SYMBOL_GPL(get_h225_addr);
EXPORT_SYMBOL_GPL(set_h245_addr_hook);
EXPORT_SYMBOL_GPL(set_h225_addr_hook);
EXPORT_SYMBOL_GPL(set_sig_addr_hook);
EXPORT_SYMBOL_GPL(set_ras_addr_hook);
EXPORT_SYMBOL_GPL(nat_rtp_rtcp_hook);
EXPORT_SYMBOL_GPL(nat_t120_hook);
EXPORT_SYMBOL_GPL(nat_h245_hook);
EXPORT_SYMBOL_GPL(nat_callforwarding_hook);
EXPORT_SYMBOL_GPL(nat_q931_hook);

MODULE_AUTHOR("Jing Min Zhao <zhaojingmin@users.sourceforge.net>");
MODULE_DESCRIPTION("H.323 connection tracking helper");
MODULE_LICENSE("GPL");
MODULE_ALIAS("ip_conntrack_h323");
MODULE_ALIAS_NFCT_HELPER("h323");
| gpl-2.0 |
olegsvs/android_kernel_ark_benefit_m7_mm | sound/soc/blackfin/bf5xx-i2s.c | 904 | 9964 | /*
* File: sound/soc/blackfin/bf5xx-i2s.c
* Author: Cliff Cai <Cliff.Cai@analog.com>
*
* Created: Tue June 06 2008
* Description: Blackfin I2S CPU DAI driver
*
* Modified:
* Copyright 2008 Analog Devices Inc.
*
* Bugs: Enter bugs at http://blackfin.uclinux.org/
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, see the file COPYING, or write
* to the Free Software Foundation, Inc.,
* 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include <linux/init.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/delay.h>
#include <sound/core.h>
#include <sound/pcm.h>
#include <sound/pcm_params.h>
#include <sound/initval.h>
#include <sound/soc.h>
#include <asm/irq.h>
#include <asm/portmux.h>
#include <linux/mutex.h>
#include <linux/gpio.h>
#include "bf5xx-sport.h"
#include "bf5xx-i2s-pcm.h"
/* Per-SPORT I2S state, stored in sport_device->private_data. */
struct bf5xx_i2s_port {
	u16 tcr1;		/* cached SPORT TX config register 1 */
	u16 rcr1;		/* cached SPORT RX config register 1 */
	u16 tcr2;		/* cached SPORT TX config register 2 (low 5 bits = word length - 1) */
	u16 rcr2;		/* cached SPORT RX config register 2 (low 5 bits = word length - 1) */
	int configured;		/* non-zero once the SPORT has been programmed */

	unsigned int slots;	/* TDM slot count (saved for resume) */
	unsigned int tx_mask;	/* TDM TX slot mask (saved for resume) */
	unsigned int rx_mask;	/* TDM RX slot mask (saved for resume) */

	struct bf5xx_i2s_pcm_data tx_dma_data;	/* playback DMA channel map */
	struct bf5xx_i2s_pcm_data rx_dma_data;	/* capture DMA channel map */
};
/*
 * Configure the DAI format.  Only I2S and DSP_A formats are accepted,
 * and only codec-is-master (CBM_CFM) clocking — this CPU DAI is always
 * a clock/frame slave.  The register settings are only cached here;
 * they are written to the SPORT in hw_params().
 * Returns 0 on success, -EINVAL for unsupported format/master modes.
 */
static int bf5xx_i2s_set_dai_fmt(struct snd_soc_dai *cpu_dai,
		unsigned int fmt)
{
	struct sport_device *sport_handle = snd_soc_dai_get_drvdata(cpu_dai);
	struct bf5xx_i2s_port *bf5xx_i2s = sport_handle->private_data;
	int ret = 0;

	/* interface format:support I2S,slave mode */
	switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
	case SND_SOC_DAIFMT_I2S:
		bf5xx_i2s->tcr1 |= TFSR | TCKFE;
		bf5xx_i2s->rcr1 |= RFSR | RCKFE;
		bf5xx_i2s->tcr2 |= TSFSE;
		bf5xx_i2s->rcr2 |= RSFSE;
		break;
	case SND_SOC_DAIFMT_DSP_A:
		bf5xx_i2s->tcr1 |= TFSR;
		bf5xx_i2s->rcr1 |= RFSR;
		break;
	case SND_SOC_DAIFMT_LEFT_J:
		ret = -EINVAL;
		break;
	default:
		dev_err(cpu_dai->dev, "%s: Unknown DAI format type\n",
			__func__);
		ret = -EINVAL;
		break;
	}

	/* Note: both switches run even after a format error, so a bad
	 * master mode is also reported before returning. */
	switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
	case SND_SOC_DAIFMT_CBM_CFM:
		break;
	case SND_SOC_DAIFMT_CBS_CFS:
	case SND_SOC_DAIFMT_CBM_CFS:
	case SND_SOC_DAIFMT_CBS_CFM:
		ret = -EINVAL;
		break;
	default:
		dev_err(cpu_dai->dev, "%s: Unknown DAI master type\n",
			__func__);
		ret = -EINVAL;
		break;
	}

	return ret;
}
static int bf5xx_i2s_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params,
struct snd_soc_dai *dai)
{
struct sport_device *sport_handle = snd_soc_dai_get_drvdata(dai);
struct bf5xx_i2s_port *bf5xx_i2s = sport_handle->private_data;
int ret = 0;
bf5xx_i2s->tcr2 &= ~0x1f;
bf5xx_i2s->rcr2 &= ~0x1f;
switch (params_format(params)) {
case SNDRV_PCM_FORMAT_S8:
bf5xx_i2s->tcr2 |= 7;
bf5xx_i2s->rcr2 |= 7;
sport_handle->wdsize = 1;
break;
case SNDRV_PCM_FORMAT_S16_LE:
bf5xx_i2s->tcr2 |= 15;
bf5xx_i2s->rcr2 |= 15;
sport_handle->wdsize = 2;
break;
case SNDRV_PCM_FORMAT_S24_LE:
bf5xx_i2s->tcr2 |= 23;
bf5xx_i2s->rcr2 |= 23;
sport_handle->wdsize = 3;
break;
case SNDRV_PCM_FORMAT_S32_LE:
bf5xx_i2s->tcr2 |= 31;
bf5xx_i2s->rcr2 |= 31;
sport_handle->wdsize = 4;
break;
}
if (!bf5xx_i2s->configured) {
/*
* TX and RX are not independent,they are enabled at the
* same time, even if only one side is running. So, we
* need to configure both of them at the time when the first
* stream is opened.
*
* CPU DAI:slave mode.
*/
bf5xx_i2s->configured = 1;
ret = sport_config_rx(sport_handle, bf5xx_i2s->rcr1,
bf5xx_i2s->rcr2, 0, 0);
if (ret) {
dev_err(dai->dev, "SPORT is busy!\n");
return -EBUSY;
}
ret = sport_config_tx(sport_handle, bf5xx_i2s->tcr1,
bf5xx_i2s->tcr2, 0, 0);
if (ret) {
dev_err(dai->dev, "SPORT is busy!\n");
return -EBUSY;
}
}
return 0;
}
/*
 * Stream shutdown callback: once the DAI has no active streams left,
 * allow hw_params() to reprogram the SPORT on the next open.
 */
static void bf5xx_i2s_shutdown(struct snd_pcm_substream *substream,
			       struct snd_soc_dai *dai)
{
	struct sport_device *sport = snd_soc_dai_get_drvdata(dai);
	struct bf5xx_i2s_port *port = sport->private_data;

	dev_dbg(dai->dev, "%s enter\n", __func__);

	/* No active stream, SPORT is allowed to be configured again. */
	if (dai->active == 0)
		port->configured = 0;
}
/*
 * Set the channel-to-TDM-slot mapping for playback (@tx_slot) and
 * capture (@rx_slot).  Each of the @tx_num/@rx_num channels must map
 * to a distinct slot below BFIN_TDM_DAI_MAX_SLOTS.
 * Returns 0 on success, -EINVAL for out-of-range counts, invalid
 * slot numbers or duplicate slot assignments.
 */
static int bf5xx_i2s_set_channel_map(struct snd_soc_dai *dai,
		unsigned int tx_num, unsigned int *tx_slot,
		unsigned int rx_num, unsigned int *rx_slot)
{
	struct sport_device *sport_handle = snd_soc_dai_get_drvdata(dai);
	struct bf5xx_i2s_port *bf5xx_i2s = sport_handle->private_data;
	unsigned int tx_mapped = 0, rx_mapped = 0;	/* bitmaps of slots already taken */
	unsigned int slot;
	unsigned int i;	/* fix: was a signed int compared against the unsigned counts */

	if ((tx_num > BFIN_TDM_DAI_MAX_SLOTS) ||
			(rx_num > BFIN_TDM_DAI_MAX_SLOTS))
		return -EINVAL;

	for (i = 0; i < tx_num; i++) {
		slot = tx_slot[i];
		if ((slot < BFIN_TDM_DAI_MAX_SLOTS) &&
				(!(tx_mapped & (1 << slot)))) {
			bf5xx_i2s->tx_dma_data.map[i] = slot;
			tx_mapped |= 1 << slot;
		} else
			return -EINVAL;
	}
	for (i = 0; i < rx_num; i++) {
		slot = rx_slot[i];
		if ((slot < BFIN_TDM_DAI_MAX_SLOTS) &&
				(!(rx_mapped & (1 << slot)))) {
			bf5xx_i2s->rx_dma_data.map[i] = slot;
			rx_mapped |= 1 << slot;
		} else
			return -EINVAL;
	}

	return 0;
}
/*
 * Configure TDM operation: only 0 (disabled) or 8 slots of 32 bits
 * are supported by the SPORT multichannel mode.  The settings are
 * cached for restore on resume, and tdm_mode is flagged for the PCM
 * DMA layer.  Returns sport_set_multichannel()'s result, or -EINVAL
 * for unsupported slot counts/widths.
 */
static int bf5xx_i2s_set_tdm_slot(struct snd_soc_dai *dai, unsigned int tx_mask,
	unsigned int rx_mask, int slots, int width)
{
	struct sport_device *sport_handle = snd_soc_dai_get_drvdata(dai);
	struct bf5xx_i2s_port *bf5xx_i2s = sport_handle->private_data;

	/* Accepts only slots == 0 or slots == 8. */
	if (slots % 8 != 0 || slots > 8)
		return -EINVAL;

	if (width != 32)
		return -EINVAL;

	bf5xx_i2s->slots = slots;
	bf5xx_i2s->tx_mask = tx_mask;
	bf5xx_i2s->rx_mask = rx_mask;

	bf5xx_i2s->tx_dma_data.tdm_mode = slots != 0;
	bf5xx_i2s->rx_dma_data.tdm_mode = slots != 0;

	return sport_set_multichannel(sport_handle, slots, tx_mask, rx_mask, 0);
}
#ifdef CONFIG_PM
/* Suspend: stop whichever SPORT directions are streaming. */
static int bf5xx_i2s_suspend(struct snd_soc_dai *dai)
{
	struct sport_device *sport_handle = snd_soc_dai_get_drvdata(dai);

	dev_dbg(dai->dev, "%s : sport %d\n", __func__, dai->id);

	if (dai->capture_active)
		sport_rx_stop(sport_handle);
	if (dai->playback_active)
		sport_tx_stop(sport_handle);
	return 0;
}

/* Resume: reprogram the SPORT from the cached register values and
 * restore the TDM multichannel setup saved by set_tdm_slot(). */
static int bf5xx_i2s_resume(struct snd_soc_dai *dai)
{
	struct sport_device *sport_handle = snd_soc_dai_get_drvdata(dai);
	struct bf5xx_i2s_port *bf5xx_i2s = sport_handle->private_data;
	int ret;

	dev_dbg(dai->dev, "%s : sport %d\n", __func__, dai->id);

	ret = sport_config_rx(sport_handle, bf5xx_i2s->rcr1,
			      bf5xx_i2s->rcr2, 0, 0);
	if (ret) {
		dev_err(dai->dev, "SPORT is busy!\n");
		return -EBUSY;
	}

	ret = sport_config_tx(sport_handle, bf5xx_i2s->tcr1,
			      bf5xx_i2s->tcr2, 0, 0);
	if (ret) {
		dev_err(dai->dev, "SPORT is busy!\n");
		return -EBUSY;
	}

	return sport_set_multichannel(sport_handle, bf5xx_i2s->slots,
			bf5xx_i2s->tx_mask, bf5xx_i2s->rx_mask, 0);
}

#else
#define bf5xx_i2s_suspend	NULL
#define bf5xx_i2s_resume	NULL
#endif
/*
 * DAI probe: start with an identity channel→slot mapping and hand the
 * DMA descriptors to the ASoC core.
 */
static int bf5xx_i2s_dai_probe(struct snd_soc_dai *dai)
{
	struct sport_device *sport = snd_soc_dai_get_drvdata(dai);
	struct bf5xx_i2s_port *port = sport->private_data;
	unsigned int slot;

	/* Default: channel N maps to TDM slot N. */
	for (slot = 0; slot < BFIN_TDM_DAI_MAX_SLOTS; slot++) {
		port->tx_dma_data.map[slot] = slot;
		port->rx_dma_data.map[slot] = slot;
	}

	dai->playback_dma_data = &port->tx_dma_data;
	dai->capture_dma_data = &port->rx_dma_data;

	return 0;
}
/* Sample rates and formats advertised by this DAI. */
#define BF5XX_I2S_RATES (SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_11025 |\
		SNDRV_PCM_RATE_16000 | SNDRV_PCM_RATE_22050 | \
		SNDRV_PCM_RATE_44100 | SNDRV_PCM_RATE_48000 | \
		SNDRV_PCM_RATE_96000)

#define BF5XX_I2S_FORMATS \
	(SNDRV_PCM_FMTBIT_S8 | \
	 SNDRV_PCM_FMTBIT_S16_LE | \
	 SNDRV_PCM_FMTBIT_S24_LE | \
	 SNDRV_PCM_FMTBIT_S32_LE)

/* DAI operation callbacks wired into the ASoC core. */
static const struct snd_soc_dai_ops bf5xx_i2s_dai_ops = {
	.shutdown	= bf5xx_i2s_shutdown,
	.hw_params	= bf5xx_i2s_hw_params,
	.set_fmt	= bf5xx_i2s_set_dai_fmt,
	.set_tdm_slot	= bf5xx_i2s_set_tdm_slot,
	.set_channel_map	= bf5xx_i2s_set_channel_map,
};
/* DAI driver description: stereo up to 8 channels each way (8-channel
 * operation requires TDM mode via set_tdm_slot()). */
static struct snd_soc_dai_driver bf5xx_i2s_dai = {
	.probe = bf5xx_i2s_dai_probe,
	.suspend = bf5xx_i2s_suspend,
	.resume = bf5xx_i2s_resume,
	.playback = {
		.channels_min = 2,
		.channels_max = 8,
		.rates = BF5XX_I2S_RATES,
		.formats = BF5XX_I2S_FORMATS,},
	.capture = {
		.channels_min = 2,
		.channels_max = 8,
		.rates = BF5XX_I2S_RATES,
		.formats = BF5XX_I2S_FORMATS,},
	.ops = &bf5xx_i2s_dai_ops,
};

static const struct snd_soc_component_driver bf5xx_i2s_component = {
	.name		= "bf5xx-i2s",
};
/*
 * Platform probe: initialize the SPORT for I2S operation (4 pins,
 * 8 x 32-bit DMA frame, room for our private state) and register the
 * DAI with the ASoC core; unwind the SPORT on registration failure.
 */
static int bf5xx_i2s_probe(struct platform_device *pdev)
{
	struct sport_device *sport_handle;
	int ret;

	/* configure SPORT for I2S */
	sport_handle = sport_init(pdev, 4, 8 * sizeof(u32),
		sizeof(struct bf5xx_i2s_port));
	if (!sport_handle)
		return -ENODEV;

	/* register with the ASoC layers */
	ret = snd_soc_register_component(&pdev->dev, &bf5xx_i2s_component,
					 &bf5xx_i2s_dai, 1);
	if (ret) {
		dev_err(&pdev->dev, "Failed to register DAI: %d\n", ret);
		sport_done(sport_handle);
		return ret;
	}

	return 0;
}
/*
 * Platform remove: drop the ASoC registration, then tear down the
 * SPORT that probe() initialized.
 */
static int bf5xx_i2s_remove(struct platform_device *pdev)
{
	struct sport_device *sport = platform_get_drvdata(pdev);

	dev_dbg(&pdev->dev, "%s enter\n", __func__);

	snd_soc_unregister_component(&pdev->dev);
	sport_done(sport);

	return 0;
}
/* Platform driver glue; bound by name to the "bfin-i2s" device. */
static struct platform_driver bfin_i2s_driver = {
	.probe  = bf5xx_i2s_probe,
	.remove = bf5xx_i2s_remove,
	.driver = {
		.name = "bfin-i2s",
		.owner = THIS_MODULE,
	},
};

module_platform_driver(bfin_i2s_driver);

/* Module information */
MODULE_AUTHOR("Cliff Cai");
MODULE_DESCRIPTION("I2S driver for ADI Blackfin");
MODULE_LICENSE("GPL");
| gpl-2.0 |
AdrianHuang/rt-thread-for-vmm | components/external/jpeg/example.c | 904 | 17091 | /*
* example.c
*
* This file illustrates how to use the IJG code as a subroutine library
* to read or write JPEG image files. You should look at this code in
* conjunction with the documentation file libjpeg.txt.
*
* This code will not do anything useful as-is, but it may be helpful as a
* skeleton for constructing routines that call the JPEG library.
*
* We present these routines in the same coding style used in the JPEG code
* (ANSI function definitions, etc); but you are of course free to code your
* routines in a different style if you prefer.
*/
#include <stdio.h>
/*
* Include file for users of JPEG library.
* You will need to have included system headers that define at least
* the typedefs FILE and size_t before you can include jpeglib.h.
* (stdio.h is sufficient on ANSI-conforming systems.)
* You may also wish to include "jerror.h".
*/
#include "jpeglib.h"
/*
* <setjmp.h> is used for the optional error recovery mechanism shown in
* the second part of the example.
*/
#include <setjmp.h>
/******************** JPEG COMPRESSION SAMPLE INTERFACE *******************/
/* This half of the example shows how to feed data into the JPEG compressor.
* We present a minimal version that does not worry about refinements such
* as error recovery (the JPEG code will just exit() if it gets an error).
*/
/*
* IMAGE DATA FORMATS:
*
* The standard input image format is a rectangular array of pixels, with
* each pixel having the same number of "component" values (color channels).
* Each pixel row is an array of JSAMPLEs (which typically are unsigned chars).
* If you are working with color data, then the color values for each pixel
* must be adjacent in the row; for example, R,G,B,R,G,B,R,G,B,... for 24-bit
* RGB color.
*
* For this example, we'll assume that this data structure matches the way
* our application has stored the image in memory, so we can just pass a
* pointer to our image buffer. In particular, let's say that the image is
* RGB color and is described by:
*/
extern JSAMPLE * image_buffer; /* Points to large array of R,G,B-order data */
extern int image_height; /* Number of rows in image */
extern int image_width; /* Number of columns in image */
/*
* Sample routine for JPEG compression. We assume that the target file name
* and a compression quality factor are passed in.
*/
GLOBAL(void)
write_JPEG_file (char * filename, int quality)
{
  /* Compress the global image_buffer (RGB, image_width x image_height
   * pixels) into a baseline JPEG file named `filename` at the given
   * quality (0..100). On failure the standard libjpeg error handler
   * prints a message on stderr and calls exit().
   */
  struct jpeg_compress_struct cinfo;   /* master compression state; must
                                        * not outlive jerr below */
  struct jpeg_error_mgr jerr;          /* standard error handler */
  FILE * fp;                           /* destination stream */
  JSAMPROW rowptr[1];                  /* one-scanline pointer array */
  int stride;                          /* JSAMPLEs per row of the source */

  /* Step 1: install the error handler first (initialization itself can
   * fail, e.g. out of memory), then create the compression object. */
  cinfo.err = jpeg_std_error(&jerr);
  jpeg_create_compress(&cinfo);

  /* Step 2: send compressed data to a stdio stream. The "b" flag
   * matters on platforms that distinguish text and binary files. */
  fp = fopen(filename, "wb");
  if (fp == NULL) {
    fprintf(stderr, "can't open %s\n", filename);
    exit(1);
  }
  jpeg_stdio_dest(&cinfo, fp);

  /* Step 3: describe the input image (all four fields are required),
   * let the library fill in defaults -- which depend on
   * in_color_space, so that must be set first -- then override the
   * quantization-table scaling with the requested quality. */
  cinfo.image_width = image_width;
  cinfo.image_height = image_height;
  cinfo.input_components = 3;          /* R, G, B */
  cinfo.in_color_space = JCS_RGB;
  jpeg_set_defaults(&cinfo);
  jpeg_set_quality(&cinfo, quality, TRUE /* limit to baseline-JPEG values */);

  /* Step 4: start the compressor. TRUE = write a complete
   * interchange-JPEG datastream. */
  jpeg_start_compress(&cinfo, TRUE);

  /* Step 5: feed scanlines top-to-bottom, one per call. The library
   * tracks progress in cinfo.next_scanline, so no extra counter is
   * needed; the return value can be ignored for a stdio destination. */
  stride = image_width * 3;
  while (cinfo.next_scanline < cinfo.image_height) {
    rowptr[0] = & image_buffer[cinfo.next_scanline * stride];
    (void) jpeg_write_scanlines(&cinfo, rowptr, 1);
  }

  /* Step 6: finish the datastream, then close the output file. */
  jpeg_finish_compress(&cinfo);
  fclose(fp);

  /* Step 7: release all memory held by the compression object. */
  jpeg_destroy_compress(&cinfo);
}
/*
* SOME FINE POINTS:
*
* In the above loop, we ignored the return value of jpeg_write_scanlines,
* which is the number of scanlines actually written. We could get away
* with this because we were only relying on the value of cinfo.next_scanline,
* which will be incremented correctly. If you maintain additional loop
* variables then you should be careful to increment them properly.
* Actually, for output to a stdio stream you needn't worry, because
* then jpeg_write_scanlines will write all the lines passed (or else exit
* with a fatal error). Partial writes can only occur if you use a data
* destination module that can demand suspension of the compressor.
* (If you don't know what that's for, you don't need it.)
*
* If the compressor requires full-image buffers (for entropy-coding
* optimization or a multi-scan JPEG file), it will create temporary
* files for anything that doesn't fit within the maximum-memory setting.
* (Note that temp files are NOT needed if you use the default parameters.)
* On some systems you may need to set up a signal handler to ensure that
* temporary files are deleted if the program is interrupted. See libjpeg.txt.
*
* Scanlines MUST be supplied in top-to-bottom order if you want your JPEG
* files to be compatible with everyone else's. If you cannot readily read
* your data in that order, you'll need an intermediate array to hold the
* image. See rdtarga.c or rdbmp.c for examples of handling bottom-to-top
* source data using the JPEG code's internal virtual-array mechanisms.
*/
/******************** JPEG DECOMPRESSION SAMPLE INTERFACE *******************/
/* This half of the example shows how to read data from the JPEG decompressor.
* It's a bit more refined than the above, in that we show:
* (a) how to modify the JPEG library's standard error-reporting behavior;
* (b) how to allocate workspace using the library's memory manager.
*
* Just to make this example a little different from the first one, we'll
* assume that we do not intend to put the whole image into an in-memory
* buffer, but to send it line-by-line someplace else. We need a one-
* scanline-high JSAMPLE array as a work buffer, and we will let the JPEG
* memory manager allocate it for us. This approach is actually quite useful
* because we don't need to remember to deallocate the buffer separately: it
* will go away automatically when the JPEG object is cleaned up.
*/
/*
* ERROR HANDLING:
*
* The JPEG library's standard error handler (jerror.c) is divided into
* several "methods" which you can override individually. This lets you
* adjust the behavior without duplicating a lot of code, which you might
* have to update with each future release.
*
* Our example here shows how to override the "error_exit" method so that
* control is returned to the library's caller when a fatal error occurs,
* rather than calling exit() as the standard error_exit method does.
*
* We use C's setjmp/longjmp facility to return control. This means that the
* routine which calls the JPEG library must first execute a setjmp() call to
* establish the return point. We want the replacement error_exit to do a
* longjmp(). But we need to make the setjmp buffer accessible to the
* error_exit routine. To do this, we make a private extension of the
* standard JPEG error handler object. (If we were using C++, we'd say we
* were making a subclass of the regular error handler.)
*
* Here's the extended error handler struct:
*/
/* Extended libjpeg error manager. The standard jpeg_error_mgr is
 * embedded as the FIRST member, so the j_common_ptr's err pointer can
 * be cast back to my_error_ptr (see my_error_exit). The jmp_buf lets
 * the replacement error_exit longjmp() back to the caller instead of
 * calling exit(). Do not reorder the members.
 */
struct my_error_mgr {
struct jpeg_error_mgr pub; /* "public" fields; MUST stay first */
jmp_buf setjmp_buffer; /* for return to caller */
};
typedef struct my_error_mgr * my_error_ptr;
/*
* Here's the routine that will replace the standard error_exit method:
*/
METHODDEF(void)
my_error_exit (j_common_ptr cinfo)
{
  /* Replacement for the standard error_exit method: emit the error
   * message through the normal output_message hook, then longjmp back
   * to the setjmp point the caller established, instead of exiting.
   * The cast is valid because cinfo->err points at the jpeg_error_mgr
   * placed first inside struct my_error_mgr. */
  my_error_ptr err = (my_error_ptr) cinfo->err;

  (*cinfo->err->output_message) (cinfo);
  longjmp(err->setjmp_buffer, 1);
}
/*
* Sample routine for JPEG decompression. We assume that the source file name
* is passed in. We want to return 1 on success, 0 on error.
*/
GLOBAL(int)
read_JPEG_file (char * filename)
{
/* Decompress the JPEG file `filename`, handing each output scanline to
* put_scanline_someplace(). Returns 1 on success, 0 on any error
* (open failure or a libjpeg fatal error caught via setjmp below).
*/
/* This struct contains the JPEG decompression parameters and pointers to
* working space (which is allocated as needed by the JPEG library).
*/
struct jpeg_decompress_struct cinfo;
/* We use our private extension JPEG error handler.
* Note that this struct must live as long as the main JPEG parameter
* struct, to avoid dangling-pointer problems.
*/
struct my_error_mgr jerr;
/* More stuff */
FILE * infile; /* source file */
JSAMPARRAY buffer; /* Output row buffer */
int row_stride; /* physical row width in output buffer */
/* In this example we want to open the input file before doing anything else,
* so that the setjmp() error recovery below can assume the file is open.
* VERY IMPORTANT: use "b" option to fopen() if you are on a machine that
* requires it in order to read binary files.
*/
if ((infile = fopen(filename, "rb")) == NULL) {
fprintf(stderr, "can't open %s\n", filename);
return 0;
}
/* Step 1: allocate and initialize JPEG decompression object */
/* We set up the normal JPEG error routines, then override error_exit. */
cinfo.err = jpeg_std_error(&jerr.pub);
jerr.pub.error_exit = my_error_exit;
/* Establish the setjmp return context for my_error_exit to use. */
/* NOTE(review): locals modified after setjmp() and read after a
* longjmp() return would need `volatile`; here infile is assigned
* before setjmp and the error path touches nothing else, so this is
* safe as written.
*/
if (setjmp(jerr.setjmp_buffer)) {
/* If we get here, the JPEG code has signaled an error.
* We need to clean up the JPEG object, close the input file, and return.
*/
jpeg_destroy_decompress(&cinfo);
fclose(infile);
return 0;
}
/* Now we can initialize the JPEG decompression object. */
jpeg_create_decompress(&cinfo);
/* Step 2: specify data source (eg, a file) */
jpeg_stdio_src(&cinfo, infile);
/* Step 3: read file parameters with jpeg_read_header() */
(void) jpeg_read_header(&cinfo, TRUE);
/* We can ignore the return value from jpeg_read_header since
* (a) suspension is not possible with the stdio data source, and
* (b) we passed TRUE to reject a tables-only JPEG file as an error.
* See libjpeg.txt for more info.
*/
/* Step 4: set parameters for decompression */
/* In this example, we don't need to change any of the defaults set by
* jpeg_read_header(), so we do nothing here.
*/
/* Step 5: Start decompressor */
(void) jpeg_start_decompress(&cinfo);
/* We can ignore the return value since suspension is not possible
* with the stdio data source.
*/
/* We may need to do some setup of our own at this point before reading
* the data. After jpeg_start_decompress() we have the correct scaled
* output image dimensions available, as well as the output colormap
* if we asked for color quantization.
* In this example, we need to make an output work buffer of the right size.
*/
/* JSAMPLEs per row in output buffer */
row_stride = cinfo.output_width * cinfo.output_components;
/* Make a one-row-high sample array that will go away when done with image.
* JPOOL_IMAGE scope means the JPEG memory manager frees it automatically
* at jpeg_finish_decompress / jpeg_destroy_decompress time.
*/
buffer = (*cinfo.mem->alloc_sarray)
((j_common_ptr) &cinfo, JPOOL_IMAGE, row_stride, 1);
/* Step 6: while (scan lines remain to be read) */
/* jpeg_read_scanlines(...); */
/* Here we use the library's state variable cinfo.output_scanline as the
* loop counter, so that we don't have to keep track ourselves.
*/
while (cinfo.output_scanline < cinfo.output_height) {
/* jpeg_read_scanlines expects an array of pointers to scanlines.
* Here the array is only one element long, but you could ask for
* more than one scanline at a time if that's more convenient.
*/
(void) jpeg_read_scanlines(&cinfo, buffer, 1);
/* Assume put_scanline_someplace wants a pointer and sample count. */
put_scanline_someplace(buffer[0], row_stride);
}
/* Step 7: Finish decompression */
(void) jpeg_finish_decompress(&cinfo);
/* We can ignore the return value since suspension is not possible
* with the stdio data source.
*/
/* Step 8: Release JPEG decompression object */
/* This is an important step since it will release a good deal of memory. */
jpeg_destroy_decompress(&cinfo);
/* After finish_decompress, we can close the input file.
* Here we postpone it until after no more JPEG errors are possible,
* so as to simplify the setjmp error logic above. (Actually, I don't
* think that jpeg_destroy can do an error exit, but why assume anything...)
*/
fclose(infile);
/* At this point you may want to check to see whether any corrupt-data
* warnings occurred (test whether jerr.pub.num_warnings is nonzero).
*/
/* And we're done! */
return 1;
}
/*
* SOME FINE POINTS:
*
* In the above code, we ignored the return value of jpeg_read_scanlines,
* which is the number of scanlines actually read. We could get away with
* this because we asked for only one line at a time and we weren't using
* a suspending data source. See libjpeg.txt for more info.
*
* We cheated a bit by calling alloc_sarray() after jpeg_start_decompress();
* we should have done it beforehand to ensure that the space would be
* counted against the JPEG max_memory setting. In some systems the above
* code would risk an out-of-memory error. However, in general we don't
* know the output image dimensions before jpeg_start_decompress(), unless we
* call jpeg_calc_output_dimensions(). See libjpeg.txt for more about this.
*
* Scanlines are returned in the same order as they appear in the JPEG file,
* which is standardly top-to-bottom. If you must emit data bottom-to-top,
* you can use one of the virtual arrays provided by the JPEG memory manager
* to invert the data. See wrbmp.c for an example.
*
* As with compression, some operating modes may require temporary files.
* On some systems you may need to set up a signal handler to ensure that
* temporary files are deleted if the program is interrupted. See libjpeg.txt.
*/
| gpl-2.0 |
Kuzma30/NT34K | drivers/net/wan/ixp4xx_hss.c | 1672 | 37811 | /*
* Intel IXP4xx HSS (synchronous serial port) driver for Linux
*
* Copyright (C) 2007-2008 Krzysztof Hałasa <khc@pm.waw.pl>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License
* as published by the Free Software Foundation.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/bitops.h>
#include <linux/cdev.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/fs.h>
#include <linux/hdlc.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/poll.h>
#include <linux/slab.h>
#include <mach/npe.h>
#include <mach/qmgr.h>
#define DEBUG_DESC 0
#define DEBUG_RX 0
#define DEBUG_TX 0
#define DEBUG_PKT_BYTES 0
#define DEBUG_CLOSE 0
#define DRV_NAME "ixp4xx_hss"
#define PKT_EXTRA_FLAGS 0 /* orig 1 */
#define PKT_NUM_PIPES 1 /* 1, 2 or 4 */
#define PKT_PIPE_FIFO_SIZEW 4 /* total 4 dwords per HSS */
#define RX_DESCS 16 /* also length of all RX queues */
#define TX_DESCS 16 /* also length of all TX queues */
#define POOL_ALLOC_SIZE (sizeof(struct desc) * (RX_DESCS + TX_DESCS))
#define RX_SIZE (HDLC_MAX_MRU + 4) /* NPE needs more space */
#define MAX_CLOSE_WAIT 1000 /* microseconds */
#define HSS_COUNT 2
#define FRAME_SIZE 256 /* doesn't matter at this point */
#define FRAME_OFFSET 0
#define MAX_CHANNELS (FRAME_SIZE / 8)
#define NAPI_WEIGHT 16
/* Queue IDs */
#define HSS0_CHL_RXTRIG_QUEUE 12 /* orig size = 32 dwords */
#define HSS0_PKT_RX_QUEUE 13 /* orig size = 32 dwords */
#define HSS0_PKT_TX0_QUEUE 14 /* orig size = 16 dwords */
#define HSS0_PKT_TX1_QUEUE 15
#define HSS0_PKT_TX2_QUEUE 16
#define HSS0_PKT_TX3_QUEUE 17
#define HSS0_PKT_RXFREE0_QUEUE 18 /* orig size = 16 dwords */
#define HSS0_PKT_RXFREE1_QUEUE 19
#define HSS0_PKT_RXFREE2_QUEUE 20
#define HSS0_PKT_RXFREE3_QUEUE 21
#define HSS0_PKT_TXDONE_QUEUE 22 /* orig size = 64 dwords */
#define HSS1_CHL_RXTRIG_QUEUE 10
#define HSS1_PKT_RX_QUEUE 0
#define HSS1_PKT_TX0_QUEUE 5
#define HSS1_PKT_TX1_QUEUE 6
#define HSS1_PKT_TX2_QUEUE 7
#define HSS1_PKT_TX3_QUEUE 8
#define HSS1_PKT_RXFREE0_QUEUE 1
#define HSS1_PKT_RXFREE1_QUEUE 2
#define HSS1_PKT_RXFREE2_QUEUE 3
#define HSS1_PKT_RXFREE3_QUEUE 4
#define HSS1_PKT_TXDONE_QUEUE 9
#define NPE_PKT_MODE_HDLC 0
#define NPE_PKT_MODE_RAW 1
#define NPE_PKT_MODE_56KMODE 2
#define NPE_PKT_MODE_56KENDIAN_MSB 4
/* PKT_PIPE_HDLC_CFG_WRITE flags */
#define PKT_HDLC_IDLE_ONES 0x1 /* default = flags */
#define PKT_HDLC_CRC_32 0x2 /* default = CRC-16 */
#define PKT_HDLC_MSB_ENDIAN 0x4 /* default = LE */
/* hss_config, PCRs */
/* Frame sync sampling, default = active low */
#define PCR_FRM_SYNC_ACTIVE_HIGH 0x40000000
#define PCR_FRM_SYNC_FALLINGEDGE 0x80000000
#define PCR_FRM_SYNC_RISINGEDGE 0xC0000000
/* Frame sync pin: input (default) or output generated off a given clk edge */
#define PCR_FRM_SYNC_OUTPUT_FALLING 0x20000000
#define PCR_FRM_SYNC_OUTPUT_RISING 0x30000000
/* Frame and data clock sampling on edge, default = falling */
#define PCR_FCLK_EDGE_RISING 0x08000000
#define PCR_DCLK_EDGE_RISING 0x04000000
/* Clock direction, default = input */
#define PCR_SYNC_CLK_DIR_OUTPUT 0x02000000
/* Generate/Receive frame pulses, default = enabled */
#define PCR_FRM_PULSE_DISABLED 0x01000000
/* Data rate is full (default) or half the configured clk speed */
#define PCR_HALF_CLK_RATE 0x00200000
/* Invert data between NPE and HSS FIFOs? (default = no) */
#define PCR_DATA_POLARITY_INVERT 0x00100000
/* TX/RX endianness, default = LSB */
#define PCR_MSB_ENDIAN 0x00080000
/* Normal (default) / open drain mode (TX only) */
#define PCR_TX_PINS_OPEN_DRAIN 0x00040000
/* No framing bit transmitted and expected on RX? (default = framing bit) */
#define PCR_SOF_NO_FBIT 0x00020000
/* Drive data pins? */
#define PCR_TX_DATA_ENABLE 0x00010000
/* Voice 56k type: drive the data pins low (default), high, high Z */
#define PCR_TX_V56K_HIGH 0x00002000
#define PCR_TX_V56K_HIGH_IMP 0x00004000
/* Unassigned type: drive the data pins low (default), high, high Z */
#define PCR_TX_UNASS_HIGH 0x00000800
#define PCR_TX_UNASS_HIGH_IMP 0x00001000
/* T1 @ 1.544MHz only: Fbit dictated in FIFO (default) or high Z */
#define PCR_TX_FB_HIGH_IMP 0x00000400
/* 56k data endianness - which bit is unused: high (default) or low */
#define PCR_TX_56KE_BIT_0_UNUSED 0x00000200
/* 56k data transmission type: 32/8 bit data (default) or 56K data */
#define PCR_TX_56KS_56K_DATA 0x00000100
/* hss_config, cCR */
/* Number of packetized clients, default = 1 */
#define CCR_NPE_HFIFO_2_HDLC 0x04000000
#define CCR_NPE_HFIFO_3_OR_4HDLC 0x08000000
/* default = no loopback */
#define CCR_LOOPBACK 0x02000000
/* HSS number, default = 0 (first) */
#define CCR_SECOND_HSS 0x01000000
/* hss_config, clkCR: main:10, num:10, denom:12 */
#define CLK42X_SPEED_EXP ((0x3FF << 22) | ( 2 << 12) | 15) /*65 KHz*/
#define CLK42X_SPEED_512KHZ (( 130 << 22) | ( 2 << 12) | 15)
#define CLK42X_SPEED_1536KHZ (( 43 << 22) | ( 18 << 12) | 47)
#define CLK42X_SPEED_1544KHZ (( 43 << 22) | ( 33 << 12) | 192)
#define CLK42X_SPEED_2048KHZ (( 32 << 22) | ( 34 << 12) | 63)
#define CLK42X_SPEED_4096KHZ (( 16 << 22) | ( 34 << 12) | 127)
#define CLK42X_SPEED_8192KHZ (( 8 << 22) | ( 34 << 12) | 255)
#define CLK46X_SPEED_512KHZ (( 130 << 22) | ( 24 << 12) | 127)
#define CLK46X_SPEED_1536KHZ (( 43 << 22) | (152 << 12) | 383)
#define CLK46X_SPEED_1544KHZ (( 43 << 22) | ( 66 << 12) | 385)
#define CLK46X_SPEED_2048KHZ (( 32 << 22) | (280 << 12) | 511)
#define CLK46X_SPEED_4096KHZ (( 16 << 22) | (280 << 12) | 1023)
#define CLK46X_SPEED_8192KHZ (( 8 << 22) | (280 << 12) | 2047)
/*
* HSS_CONFIG_CLOCK_CR register consists of 3 parts:
* A (10 bits), B (10 bits) and C (12 bits).
* IXP42x HSS clock generator operation (verified with an oscilloscope):
* Each clock bit takes 7.5 ns (1 / 133.xx MHz).
* The clock sequence consists of (C - B) states of 0s and 1s, each state is
* A bits wide. It's followed by (B + 1) states of 0s and 1s, each state is
* (A + 1) bits wide.
*
* The resulting average clock frequency (assuming 33.333 MHz oscillator) is:
* freq = 66.666 MHz / (A + (B + 1) / (C + 1))
* minimum freq = 66.666 MHz / (A + 1)
* maximum freq = 66.666 MHz / A
*
* Example: A = 2, B = 2, C = 7, CLOCK_CR register = 2 << 22 | 2 << 12 | 7
* freq = 66.666 MHz / (2 + (2 + 1) / (7 + 1)) = 28.07 MHz (Mb/s).
* The clock sequence is: 1100110011 (5 doubles) 000111000 (3 triples).
* The sequence takes (C - B) * A + (B + 1) * (A + 1) = 5 * 2 + 3 * 3 bits
* = 19 bits (each 7.5 ns long) = 142.5 ns (then the sequence repeats).
* The sequence consists of 4 complete clock periods, thus the average
* frequency (= clock rate) is 4 / 142.5 ns = 28.07 MHz (Mb/s).
* (max specified clock rate for IXP42x HSS is 8.192 Mb/s).
*/
/* hss_config, LUT entries */
#define TDMMAP_UNASSIGNED 0
#define TDMMAP_HDLC 1 /* HDLC - packetized */
#define TDMMAP_VOICE56K 2 /* Voice56K - 7-bit channelized */
#define TDMMAP_VOICE64K 3 /* Voice64K - 8-bit channelized */
/* offsets into HSS config */
#define HSS_CONFIG_TX_PCR 0x00 /* port configuration registers */
#define HSS_CONFIG_RX_PCR 0x04
#define HSS_CONFIG_CORE_CR 0x08 /* loopback control, HSS# */
#define HSS_CONFIG_CLOCK_CR 0x0C /* clock generator control */
#define HSS_CONFIG_TX_FCR 0x10 /* frame configuration registers */
#define HSS_CONFIG_RX_FCR 0x14
#define HSS_CONFIG_TX_LUT 0x18 /* channel look-up tables */
#define HSS_CONFIG_RX_LUT 0x38
/* NPE command codes */
/* writes the ConfigWord value to the location specified by offset */
#define PORT_CONFIG_WRITE 0x40
/* triggers the NPE to load the contents of the configuration table */
#define PORT_CONFIG_LOAD 0x41
/* triggers the NPE to return an HssErrorReadResponse message */
#define PORT_ERROR_READ 0x42
/* triggers the NPE to reset internal status and enable the HssPacketized
operation for the flow specified by pPipe */
#define PKT_PIPE_FLOW_ENABLE 0x50
#define PKT_PIPE_FLOW_DISABLE 0x51
#define PKT_NUM_PIPES_WRITE 0x52
#define PKT_PIPE_FIFO_SIZEW_WRITE 0x53
#define PKT_PIPE_HDLC_CFG_WRITE 0x54
#define PKT_PIPE_IDLE_PATTERN_WRITE 0x55
#define PKT_PIPE_RX_SIZE_WRITE 0x56
#define PKT_PIPE_MODE_WRITE 0x57
/* HDLC packet status values - desc->status */
#define ERR_SHUTDOWN 1 /* stop or shutdown occurrence */
#define ERR_HDLC_ALIGN 2 /* HDLC alignment error */
#define ERR_HDLC_FCS 3 /* HDLC Frame Check Sum error */
#define ERR_RXFREE_Q_EMPTY 4 /* RX-free queue became empty while receiving
this packet (if buf_len < pkt_len) */
#define ERR_HDLC_TOO_LONG 5 /* HDLC frame size too long */
#define ERR_HDLC_ABORT 6 /* abort sequence received */
#define ERR_DISCONNECTING 7 /* disconnect is in progress */
#ifdef __ARMEB__
typedef struct sk_buff buffer_t;
#define free_buffer dev_kfree_skb
#define free_buffer_irq dev_kfree_skb_irq
#else
typedef void buffer_t;
#define free_buffer kfree
#define free_buffer_irq kfree
#endif
/* Per-HSS-port driver state; one instance per physical HSS port. */
struct port {
struct device *dev;
struct npe *npe; /* network processor engine servicing this port */
struct net_device *netdev; /* the HDLC net device */
struct napi_struct napi; /* NAPI context for RX polling */
struct hss_plat_info *plat; /* platform data (board glue) */
buffer_t *rx_buff_tab[RX_DESCS], *tx_buff_tab[TX_DESCS]; /* skbs (BE) or raw buffers (LE), one per descriptor */
struct desc *desc_tab; /* coherent */
u32 desc_tab_phys; /* bus address of desc_tab */
unsigned int id; /* HSS port number: 0 or 1 */
unsigned int clock_type, clock_rate, loopback;
unsigned int initialized, carrier; /* initialized: firmware/pipes set up */
u8 hdlc_cfg; /* PKT_HDLC_* flags for the NPE */
u32 clock_reg; /* value for HSS_CONFIG_CLOCK_CR */
};
/* NPE message structure */
/* NPE mailbox message: two 32-bit words. The field order differs by
 * CPU endianness so that the on-wire byte layout seen by the NPE is
 * identical on both; the anonymous union lets a command fill its
 * payload as bytes, halfwords or one word. Do not reorder fields. */
struct msg {
#ifdef __ARMEB__
u8 cmd, unused, hss_port, index;
union {
struct { u8 data8a, data8b, data8c, data8d; };
struct { u16 data16a, data16b; };
struct { u32 data32; };
};
#else
u8 index, hss_port, unused, cmd;
union {
struct { u8 data8d, data8c, data8b, data8a; };
struct { u16 data16b, data16a; };
struct { u32 data32; };
};
#endif
};
/* HDLC packet descriptor */
/* HDLC packet descriptor, shared with the NPE hardware (32 bytes; the
 * queue manager hands these around by bus address, which queue_get_desc
 * asserts is 32-byte aligned). Field order is endian-dependent to keep
 * the hardware layout fixed -- do not reorder. status holds an ERR_*
 * code (see definitions above) on RX completion. */
struct desc {
u32 next; /* pointer to next buffer, unused */
#ifdef __ARMEB__
u16 buf_len; /* buffer length */
u16 pkt_len; /* packet length */
u32 data; /* pointer to data buffer in RAM */
u8 status;
u8 error_count;
u16 __reserved;
#else
u16 pkt_len; /* packet length */
u16 buf_len; /* buffer length */
u32 data; /* pointer to data buffer in RAM */
u16 __reserved;
u8 error_count;
u8 status;
#endif
u32 __reserved1[4];
};
#define rx_desc_phys(port, n) ((port)->desc_tab_phys + \
(n) * sizeof(struct desc))
#define rx_desc_ptr(port, n) (&(port)->desc_tab[n])
#define tx_desc_phys(port, n) ((port)->desc_tab_phys + \
((n) + RX_DESCS) * sizeof(struct desc))
#define tx_desc_ptr(port, n) (&(port)->desc_tab[(n) + RX_DESCS])
/*****************************************************************************
* global variables
****************************************************************************/
/* Count of open HDLC devices -- presumably used to gate shared
 * setup/teardown; TODO confirm against the open/close paths. */
static int ports_open;
/* DMA pool for the coherent descriptor tables (POOL_ALLOC_SIZE each). */
static struct dma_pool *dma_pool;
/* Protects NPE/port state (see hss_hdlc_set_carrier for typical use). */
static spinlock_t npe_lock;
/* Queue-manager queue IDs per HSS port: index 0 = HSS-0, 1 = HSS-1. */
static const struct {
int tx, txdone, rx, rxfree;
}queue_ids[2] = {{HSS0_PKT_TX0_QUEUE, HSS0_PKT_TXDONE_QUEUE, HSS0_PKT_RX_QUEUE,
HSS0_PKT_RXFREE0_QUEUE},
{HSS1_PKT_TX0_QUEUE, HSS1_PKT_TXDONE_QUEUE, HSS1_PKT_RX_QUEUE,
HSS1_PKT_RXFREE0_QUEUE},
};
/*****************************************************************************
* utility functions
****************************************************************************/
static inline struct port* dev_to_port(struct net_device *dev)
{
return dev_to_hdlc(dev)->priv;
}
#ifndef __ARMEB__
static inline void memcpy_swab32(u32 *dest, u32 *src, int cnt)
{
	/* Copy cnt 32-bit words from src to dest, byte-swapping each
	 * (used on little-endian CPUs talking to the big-endian NPE). */
	while (cnt-- > 0)
		*dest++ = swab32(*src++);
}
#endif
/*****************************************************************************
* HSS access
****************************************************************************/
static void hss_npe_send(struct port *port, struct msg *msg, const char* what)
{
	/* Push a command message into the NPE mailbox. A send failure
	 * means the NPE is wedged -- unrecoverable, so BUG(). */
	const u32 *raw = (const u32 *)msg;

	if (!npe_send_message(port->npe, msg, what))
		return;

	pr_crit("HSS-%i: unable to send command [%08X:%08X] to %s\n",
		port->id, raw[0], raw[1], npe_name(port->npe));
	BUG();
}
static void hss_config_set_lut(struct port *port)
{
	/* Program the TX and RX channel look-up tables so every timeslot
	 * is mapped to HDLC. Each 32-bit LUT word packs sixteen 2-bit
	 * TDMMAP_* entries (filled LSB-first by shifting), and one word
	 * is written to the NPE after every 16th channel. */
	struct msg msg;
	int ch;

	memset(&msg, 0, sizeof(msg));
	msg.cmd = PORT_CONFIG_WRITE;
	msg.hss_port = port->id;

	for (ch = 0; ch < MAX_CHANNELS; ch++) {
		msg.data32 = (msg.data32 >> 2) | (TDMMAP_HDLC << 30);
		if ((ch & 15) != 15)
			continue;
		msg.index = HSS_CONFIG_TX_LUT + ((ch / 4) & ~3);
		hss_npe_send(port, &msg, "HSS_SET_TX_LUT");
		msg.index += HSS_CONFIG_RX_LUT - HSS_CONFIG_TX_LUT;
		hss_npe_send(port, &msg, "HSS_SET_RX_LUT");
	}
}
/* Write the full HSS port configuration (PCRs, core/clock registers,
 * frame registers and LUTs) to the NPE, then trigger a config load and
 * verify the NPE's response. BUGs on a failed load. */
static void hss_config(struct port *port)
{
struct msg msg;
/* TX port configuration register. */
memset(&msg, 0, sizeof(msg));
msg.cmd = PORT_CONFIG_WRITE;
msg.hss_port = port->id;
msg.index = HSS_CONFIG_TX_PCR;
msg.data32 = PCR_FRM_PULSE_DISABLED | PCR_MSB_ENDIAN |
PCR_TX_DATA_ENABLE | PCR_SOF_NO_FBIT;
if (port->clock_type == CLOCK_INT)
msg.data32 |= PCR_SYNC_CLK_DIR_OUTPUT;
hss_npe_send(port, &msg, "HSS_SET_TX_PCR");
/* RX PCR = TX PCR with TX_DATA_ENABLE dropped and the data-clock
* sampling edge flipped (XOR toggles exactly those two bits). */
msg.index = HSS_CONFIG_RX_PCR;
msg.data32 ^= PCR_TX_DATA_ENABLE | PCR_DCLK_EDGE_RISING;
hss_npe_send(port, &msg, "HSS_SET_RX_PCR");
/* Core control: loopback and HSS number selection. */
memset(&msg, 0, sizeof(msg));
msg.cmd = PORT_CONFIG_WRITE;
msg.hss_port = port->id;
msg.index = HSS_CONFIG_CORE_CR;
msg.data32 = (port->loopback ? CCR_LOOPBACK : 0) |
(port->id ? CCR_SECOND_HSS : 0);
hss_npe_send(port, &msg, "HSS_SET_CORE_CR");
/* Clock generator: precomputed CLKxxX_SPEED_* value. */
memset(&msg, 0, sizeof(msg));
msg.cmd = PORT_CONFIG_WRITE;
msg.hss_port = port->id;
msg.index = HSS_CONFIG_CLOCK_CR;
msg.data32 = port->clock_reg;
hss_npe_send(port, &msg, "HSS_SET_CLOCK_CR");
/* TX frame configuration: offset and size (size is count - 1). */
memset(&msg, 0, sizeof(msg));
msg.cmd = PORT_CONFIG_WRITE;
msg.hss_port = port->id;
msg.index = HSS_CONFIG_TX_FCR;
msg.data16a = FRAME_OFFSET;
msg.data16b = FRAME_SIZE - 1;
hss_npe_send(port, &msg, "HSS_SET_TX_FCR");
/* RX frame configuration, same values as TX. */
memset(&msg, 0, sizeof(msg));
msg.cmd = PORT_CONFIG_WRITE;
msg.hss_port = port->id;
msg.index = HSS_CONFIG_RX_FCR;
msg.data16a = FRAME_OFFSET;
msg.data16b = FRAME_SIZE - 1;
hss_npe_send(port, &msg, "HSS_SET_RX_FCR");
hss_config_set_lut(port);
/* Commit: tell the NPE to load the configuration table and check
* its acknowledgement. */
memset(&msg, 0, sizeof(msg));
msg.cmd = PORT_CONFIG_LOAD;
msg.hss_port = port->id;
hss_npe_send(port, &msg, "HSS_LOAD_CONFIG");
if (npe_recv_message(port->npe, &msg, "HSS_LOAD_CONFIG") ||
/* HSS_LOAD_CONFIG for port #1 returns port_id = #4 */
msg.cmd != PORT_CONFIG_LOAD || msg.data32) {
pr_crit("HSS-%i: HSS_LOAD_CONFIG failed\n", port->id);
BUG();
}
/* HDLC may stop working without this - check FIXME */
npe_recv_message(port->npe, &msg, "FLUSH_IT");
}
static void hss_set_hdlc_cfg(struct port *port)
{
	/* Push the port's HDLC configuration to the NPE: data8a is the
	 * RX config, data8b the TX config (same flags plus the
	 * PKT_EXTRA_FLAGS value shifted into bits 3+). */
	struct msg msg;

	memset(&msg, 0, sizeof(msg));
	msg.hss_port = port->id;
	msg.cmd = PKT_PIPE_HDLC_CFG_WRITE;
	msg.data8a = port->hdlc_cfg;				/* rx_cfg */
	msg.data8b = port->hdlc_cfg | (PKT_EXTRA_FLAGS << 3);	/* tx_cfg */
	hss_npe_send(port, &msg, "HSS_SET_HDLC_CFG");
}
static u32 hss_get_status(struct port *port)
{
	/* Ask the NPE for the HSS error/status word and return it. The
	 * mandatory reply also makes this a synchronization point (used
	 * by hss_stop_hdlc to wait for the pipe to halt). BUGs if the
	 * NPE does not answer. */
	struct msg msg;

	memset(&msg, 0, sizeof(msg));
	msg.hss_port = port->id;
	msg.cmd = PORT_ERROR_READ;
	hss_npe_send(port, &msg, "PORT_ERROR_READ");

	if (npe_recv_message(port->npe, &msg, "PORT_ERROR_READ")) {
		pr_crit("HSS-%i: unable to read HSS status\n", port->id);
		BUG();
	}
	return msg.data32;
}
static void hss_start_hdlc(struct port *port)
{
	/* Enable the packetized (HDLC) flow for pipe 0 of this port. */
	struct msg msg;

	memset(&msg, 0, sizeof(msg));
	msg.hss_port = port->id;
	msg.cmd = PKT_PIPE_FLOW_ENABLE;
	msg.data32 = 0;
	hss_npe_send(port, &msg, "HSS_ENABLE_PKT_PIPE");
}
static void hss_stop_hdlc(struct port *port)
{
	/* Disable the packetized flow, then perform a status read whose
	 * round-trip guarantees the NPE has processed the disable. */
	struct msg msg;

	memset(&msg, 0, sizeof(msg));
	msg.hss_port = port->id;
	msg.cmd = PKT_PIPE_FLOW_DISABLE;
	hss_npe_send(port, &msg, "HSS_DISABLE_PKT_PIPE");
	hss_get_status(port); /* make sure it's halted */
}
static int hss_load_firmware(struct port *port)
{
	/* One-time NPE bring-up for this port: load the NPE firmware if
	 * it is not already running, then configure the packetized
	 * (HDLC) pipe. Subsequent calls return immediately. Returns 0
	 * on success or the negative error from npe_load_firmware(). */
	struct msg msg;
	int err;

	if (port->initialized)
		return 0;

	if (!npe_running(port->npe)) {
		err = npe_load_firmware(port->npe, npe_name(port->npe),
					port->dev);
		if (err)
			return err;
	}

	/* HDLC mode configuration. msg is deliberately reused between
	 * commands with only the relevant fields rewritten. */
	memset(&msg, 0, sizeof(msg));
	msg.cmd = PKT_NUM_PIPES_WRITE;
	msg.hss_port = port->id;
	msg.data8a = PKT_NUM_PIPES;
	hss_npe_send(port, &msg, "HSS_SET_PKT_PIPES");

	msg.cmd = PKT_PIPE_FIFO_SIZEW_WRITE;
	msg.data8a = PKT_PIPE_FIFO_SIZEW;
	hss_npe_send(port, &msg, "HSS_SET_PKT_FIFO");

	msg.cmd = PKT_PIPE_MODE_WRITE;
	msg.data8a = NPE_PKT_MODE_HDLC;
	/* msg.data8b = inv_mask */
	/* msg.data8c = or_mask */
	hss_npe_send(port, &msg, "HSS_SET_PKT_MODE");

	msg.cmd = PKT_PIPE_RX_SIZE_WRITE;
	msg.data16a = HDLC_MAX_MRU; /* including CRC */
	hss_npe_send(port, &msg, "HSS_SET_PKT_RX_SIZE");

	msg.cmd = PKT_PIPE_IDLE_PATTERN_WRITE;
	msg.data32 = 0x7F7F7F7F; /* ??? FIXME */
	hss_npe_send(port, &msg, "HSS_SET_PKT_IDLE");

	port->initialized = 1;
	return 0;
}
/*****************************************************************************
* packetized (HDLC) operation
****************************************************************************/
static inline void debug_pkt(struct net_device *dev, const char *func,
			     u8 *data, int len)
{
#if DEBUG_PKT_BYTES
	/* Hex-dump up to DEBUG_PKT_BYTES bytes of a packet, grouped in
	 * 4-byte words, prefixed with the device name, calling function
	 * and total length. Compiled out when DEBUG_PKT_BYTES is 0. */
	int i;

	printk(KERN_DEBUG "%s: %s(%i)", dev->name, func, len);
	for (i = 0; i < len && i < DEBUG_PKT_BYTES; i++)
		printk("%s%02X", !(i % 4) ? " " : "", data[i]);
	printk("\n");
#endif
}
/* Dump one descriptor (bus address followed by its fields); compiled
 * out unless DEBUG_DESC is non-zero. */
static inline void debug_desc(u32 phys, struct desc *desc)
{
#if DEBUG_DESC
printk(KERN_DEBUG "%X: %X %3X %3X %08X %X %X\n",
phys, desc->next, desc->buf_len, desc->pkt_len,
desc->data, desc->status, desc->error_count);
#endif
}
static inline int queue_get_desc(unsigned int queue, struct port *port,
				 int is_tx)
{
	/* Pop a descriptor's bus address from the given queue and
	 * translate it into an index into port->desc_tab (TX entries
	 * follow the RX entries). Returns the index, or -1 if the
	 * queue is empty. BUGs on a misaligned or out-of-range address
	 * or a descriptor with a stale next pointer. */
	u32 phys, base_phys, idx;
	struct desc *base;

	phys = qmgr_get_entry(queue);
	if (!phys)
		return -1;

	BUG_ON(phys & 0x1F); /* descriptors are 32-byte aligned */
	base_phys = is_tx ? tx_desc_phys(port, 0) : rx_desc_phys(port, 0);
	base = is_tx ? tx_desc_ptr(port, 0) : rx_desc_ptr(port, 0);
	idx = (phys - base_phys) / sizeof(struct desc);
	BUG_ON(idx >= (is_tx ? TX_DESCS : RX_DESCS));
	debug_desc(phys, &base[idx]);
	BUG_ON(base[idx].next);
	return idx;
}
static inline void queue_put_desc(unsigned int queue, u32 phys,
				  struct desc *desc)
{
	/* Hand a descriptor (by bus address) to a hardware queue.
	 * Overflow is deliberately unchecked: the queues were allocated
	 * long enough for every descriptor, and queues numbered >= 32
	 * do not support the overflow check anyway. */
	debug_desc(phys, desc);
	BUG_ON(phys & 0x1F); /* must stay 32-byte aligned */
	qmgr_put_entry(queue, phys);
}
/* Unmap a TX buffer previously DMA-mapped for this descriptor. On
 * big-endian ARM the buffer was mapped as-is (desc->data, buf_len).
 * On little-endian the low 2 bits of desc->data carry a byte offset
 * and the mapped region was rounded to 4-byte alignment/length, so
 * the same masking and rounding must be applied here -- this must
 * mirror the mapping done in the TX path (outside this view). */
static inline void dma_unmap_tx(struct port *port, struct desc *desc)
{
#ifdef __ARMEB__
dma_unmap_single(&port->netdev->dev, desc->data,
desc->buf_len, DMA_TO_DEVICE);
#else
dma_unmap_single(&port->netdev->dev, desc->data & ~3,
ALIGN((desc->data & 3) + desc->buf_len, 4),
DMA_TO_DEVICE);
#endif
}
static void hss_hdlc_set_carrier(void *pdev, int carrier)
{
	/* Carrier-state callback: record the new state under npe_lock
	 * and propagate it to the net device, except in loopback mode
	 * where the netdev carrier is left untouched. */
	struct net_device *netdev = pdev;
	struct port *port = dev_to_port(netdev);
	unsigned long flags;

	spin_lock_irqsave(&npe_lock, flags);
	port->carrier = carrier;
	if (!port->loopback)
		carrier ? netif_carrier_on(netdev)
			: netif_carrier_off(netdev);
	spin_unlock_irqrestore(&npe_lock, flags);
}
static void hss_hdlc_rx_irq(void *pdev)
{
	/* RX queue interrupt: mask further RX interrupts and defer the
	 * actual receive work to the NAPI poll handler. */
	struct net_device *dev = pdev;
	struct port *port = dev_to_port(dev);

#if DEBUG_RX
	printk(KERN_DEBUG "%s: hss_hdlc_rx_irq\n", dev->name);
#endif
	qmgr_disable_irq(queue_ids[port->id].rx);
	napi_schedule(&port->napi);
}
/* NAPI poll: pull received frames off the RX queue, hand them up the
 * stack and recycle descriptors/buffers onto the RX-free queue.
 * Returns the number of frames processed; fewer than @budget means all
 * work was done and the RX interrupt has been re-enabled. */
static int hss_hdlc_poll(struct napi_struct *napi, int budget)
{
	struct port *port = container_of(napi, struct port, napi);
	struct net_device *dev = port->netdev;
	unsigned int rxq = queue_ids[port->id].rx;
	unsigned int rxfreeq = queue_ids[port->id].rxfree;
	int received = 0;

#if DEBUG_RX
	printk(KERN_DEBUG "%s: hss_hdlc_poll\n", dev->name);
#endif

	while (received < budget) {
		struct sk_buff *skb;
		struct desc *desc;
		int n;
#ifdef __ARMEB__
		struct sk_buff *temp;
		u32 phys;
#endif

		if ((n = queue_get_desc(rxq, port, 0)) < 0) {
#if DEBUG_RX
			printk(KERN_DEBUG "%s: hss_hdlc_poll"
			       " napi_complete\n", dev->name);
#endif
			napi_complete(napi);
			qmgr_enable_irq(rxq);
			/* a frame may have landed between the empty check
			 * and the IRQ enable; reschedule ourselves if so */
			if (!qmgr_stat_empty(rxq) &&
			    napi_reschedule(napi)) {
#if DEBUG_RX
				printk(KERN_DEBUG "%s: hss_hdlc_poll"
				       " napi_reschedule succeeded\n",
				       dev->name);
#endif
				qmgr_disable_irq(rxq);
				continue;
			}
#if DEBUG_RX
			printk(KERN_DEBUG "%s: hss_hdlc_poll all done\n",
			       dev->name);
#endif
			return received; /* all work done */
		}

		desc = rx_desc_ptr(port, n);
#if 0 /* FIXME - error_count counts modulo 256, perhaps we should use it */
		if (desc->error_count)
			printk(KERN_DEBUG "%s: hss_hdlc_poll status 0x%02X"
			       " errors %u\n", dev->name, desc->status,
			       desc->error_count);
#endif
		skb = NULL;
		switch (desc->status) {
		case 0:
			/* good frame: get a replacement buffer so the
			 * descriptor can go straight back to RX-free */
#ifdef __ARMEB__
			if ((skb = netdev_alloc_skb(dev, RX_SIZE)) != NULL) {
				phys = dma_map_single(&dev->dev, skb->data,
						      RX_SIZE,
						      DMA_FROM_DEVICE);
				if (dma_mapping_error(&dev->dev, phys)) {
					dev_kfree_skb(skb);
					skb = NULL;
				}
			}
#else
			skb = netdev_alloc_skb(dev, desc->pkt_len);
#endif
			if (!skb)
				dev->stats.rx_dropped++;
			break;
		case ERR_HDLC_ALIGN:
		case ERR_HDLC_ABORT:
			dev->stats.rx_frame_errors++;
			dev->stats.rx_errors++;
			break;
		case ERR_HDLC_FCS:
			dev->stats.rx_crc_errors++;
			dev->stats.rx_errors++;
			break;
		case ERR_HDLC_TOO_LONG:
			dev->stats.rx_length_errors++;
			dev->stats.rx_errors++;
			break;
		default:	/* FIXME - remove printk */
			netdev_err(dev, "hss_hdlc_poll: status 0x%02X errors %u\n",
				   desc->status, desc->error_count);
			dev->stats.rx_errors++;
		}

		if (!skb) {
			/* put the desc back on RX-ready queue */
			desc->buf_len = RX_SIZE;
			desc->pkt_len = desc->status = 0;
			queue_put_desc(rxfreeq, rx_desc_phys(port, n), desc);
			continue;
		}

		/* process received frame */
#ifdef __ARMEB__
		/* hand the just-filled buffer up; keep the fresh skb for RX */
		temp = skb;
		skb = port->rx_buff_tab[n];
		dma_unmap_single(&dev->dev, desc->data,
				 RX_SIZE, DMA_FROM_DEVICE);
#else
		/* word-swap the frame into the freshly allocated skb */
		dma_sync_single_for_cpu(&dev->dev, desc->data,
					RX_SIZE, DMA_FROM_DEVICE);
		memcpy_swab32((u32 *)skb->data, (u32 *)port->rx_buff_tab[n],
			      ALIGN(desc->pkt_len, 4) / 4);
#endif
		skb_put(skb, desc->pkt_len);
		debug_pkt(dev, "hss_hdlc_poll", skb->data, skb->len);

		skb->protocol = hdlc_type_trans(skb, dev);
		dev->stats.rx_packets++;
		dev->stats.rx_bytes += skb->len;
		netif_receive_skb(skb);

		/* put the new buffer on RX-free queue */
#ifdef __ARMEB__
		port->rx_buff_tab[n] = temp;
		desc->data = phys;
#endif
		desc->buf_len = RX_SIZE;
		desc->pkt_len = 0;
		queue_put_desc(rxfreeq, rx_desc_phys(port, n), desc);
		received++;
	}
#if DEBUG_RX
	printk(KERN_DEBUG "hss_hdlc_poll: end, not all work done\n");
#endif
	return received;	/* not all work done */
}
/* TX-done queue not-empty interrupt: reclaim transmitted descriptors,
 * account stats, free the buffers, return the descriptors to the
 * TX-ready queue and wake the netdev queue if it had run dry. */
static void hss_hdlc_txdone_irq(void *pdev)
{
	struct net_device *dev = pdev;
	struct port *port = dev_to_port(dev);
	int n_desc;

#if DEBUG_TX
	printk(KERN_DEBUG DRV_NAME ": hss_hdlc_txdone_irq\n");
#endif
	while ((n_desc = queue_get_desc(queue_ids[port->id].txdone,
					port, 1)) >= 0) {
		struct desc *desc;
		int start;

		desc = tx_desc_ptr(port, n_desc);
		dev->stats.tx_packets++;
		dev->stats.tx_bytes += desc->pkt_len;

		dma_unmap_tx(port, desc);
#if DEBUG_TX
		printk(KERN_DEBUG "%s: hss_hdlc_txdone_irq free %p\n",
		       dev->name, port->tx_buff_tab[n_desc]);
#endif
		free_buffer_irq(port->tx_buff_tab[n_desc]);
		port->tx_buff_tab[n_desc] = NULL;

		/* sample "was empty" before returning the descriptor */
		start = qmgr_stat_below_low_watermark(port->plat->txreadyq);
		queue_put_desc(port->plat->txreadyq,
			       tx_desc_phys(port, n_desc), desc);
		if (start) { /* TX-ready queue was empty */
#if DEBUG_TX
			printk(KERN_DEBUG "%s: hss_hdlc_txdone_irq xmit"
			       " ready\n", dev->name);
#endif
			netif_wake_queue(dev);
		}
	}
}
/* ndo_start_xmit path (installed as hdlc->xmit): map the frame for DMA
 * and hand it to the NPE.  On little-endian builds the payload is first
 * copied into a word-aligned bounce buffer with 32-bit byte swapping;
 * on big-endian ARM the skb data is mapped directly and the skb itself
 * is kept until TX completion.  Always consumes the skb and returns
 * NETDEV_TX_OK. */
static int hss_hdlc_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct port *port = dev_to_port(dev);
	unsigned int txreadyq = port->plat->txreadyq;
	int len, offset, bytes, n;
	void *mem;
	u32 phys;
	struct desc *desc;

#if DEBUG_TX
	printk(KERN_DEBUG "%s: hss_hdlc_xmit\n", dev->name);
#endif

	if (unlikely(skb->len > HDLC_MAX_MRU)) {
		dev_kfree_skb(skb);
		dev->stats.tx_errors++;
		return NETDEV_TX_OK;
	}

	debug_pkt(dev, "hss_hdlc_xmit", skb->data, skb->len);

	len = skb->len;
#ifdef __ARMEB__
	offset = 0; /* no need to keep alignment */
	bytes = len;
	mem = skb->data;
#else
	offset = (int)skb->data & 3; /* keep 32-bit alignment */
	bytes = ALIGN(offset + len, 4);
	if (!(mem = kmalloc(bytes, GFP_ATOMIC))) {
		dev_kfree_skb(skb);
		dev->stats.tx_dropped++;
		return NETDEV_TX_OK;
	}
	/* copy from the word-aligned base so whole 32-bit words swap */
	memcpy_swab32(mem, (u32 *)((int)skb->data & ~3), bytes / 4);
	dev_kfree_skb(skb);
#endif

	phys = dma_map_single(&dev->dev, mem, bytes, DMA_TO_DEVICE);
	if (dma_mapping_error(&dev->dev, phys)) {
#ifdef __ARMEB__
		dev_kfree_skb(skb);
#else
		kfree(mem);
#endif
		dev->stats.tx_dropped++;
		return NETDEV_TX_OK;
	}

	/* cannot fail: the queue is stopped while TX-ready is low */
	n = queue_get_desc(txreadyq, port, 1);
	BUG_ON(n < 0);
	desc = tx_desc_ptr(port, n);

#ifdef __ARMEB__
	port->tx_buff_tab[n] = skb;
#else
	port->tx_buff_tab[n] = mem;
#endif
	desc->data = phys + offset;
	desc->buf_len = desc->pkt_len = len;

	wmb();	/* descriptor writes must land before the NPE sees it */
	queue_put_desc(queue_ids[port->id].tx, tx_desc_phys(port, n), desc);

	if (qmgr_stat_below_low_watermark(txreadyq)) { /* empty */
#if DEBUG_TX
		printk(KERN_DEBUG "%s: hss_hdlc_xmit queue full\n", dev->name);
#endif
		netif_stop_queue(dev);
		/* we could miss TX ready interrupt */
		if (!qmgr_stat_below_low_watermark(txreadyq)) {
#if DEBUG_TX
			printk(KERN_DEBUG "%s: hss_hdlc_xmit ready again\n",
			       dev->name);
#endif
			netif_wake_queue(dev);
		}
	}

#if DEBUG_TX
	printk(KERN_DEBUG "%s: hss_hdlc_xmit end\n", dev->name);
#endif
	return NETDEV_TX_OK;
}
/* Reserve the five hardware queues used by one HSS port (RX-free, RX,
 * TX, TX-ready, TX-done).  On failure, everything acquired so far is
 * released via the goto unwind chain. */
static int request_hdlc_queues(struct port *port)
{
	int err;

	err = qmgr_request_queue(queue_ids[port->id].rxfree, RX_DESCS, 0, 0,
				 "%s:RX-free", port->netdev->name);
	if (err)
		return err;

	err = qmgr_request_queue(queue_ids[port->id].rx, RX_DESCS, 0, 0,
				 "%s:RX", port->netdev->name);
	if (err)
		goto rel_rxfree;

	err = qmgr_request_queue(queue_ids[port->id].tx, TX_DESCS, 0, 0,
				 "%s:TX", port->netdev->name);
	if (err)
		goto rel_rx;

	err = qmgr_request_queue(port->plat->txreadyq, TX_DESCS, 0, 0,
				 "%s:TX-ready", port->netdev->name);
	if (err)
		goto rel_tx;

	err = qmgr_request_queue(queue_ids[port->id].txdone, TX_DESCS, 0, 0,
				 "%s:TX-done", port->netdev->name);
	if (err)
		goto rel_txready;
	return 0;

rel_txready:
	qmgr_release_queue(port->plat->txreadyq);
rel_tx:
	qmgr_release_queue(queue_ids[port->id].tx);
rel_rx:
	qmgr_release_queue(queue_ids[port->id].rx);
rel_rxfree:
	qmgr_release_queue(queue_ids[port->id].rxfree);
	printk(KERN_DEBUG "%s: unable to request hardware queues\n",
	       port->netdev->name);
	return err;
}
/* Release all hardware queues acquired by request_hdlc_queues(). */
static void release_hdlc_queues(struct port *port)
{
	qmgr_release_queue(queue_ids[port->id].rxfree);
	qmgr_release_queue(queue_ids[port->id].rx);
	qmgr_release_queue(queue_ids[port->id].txdone);
	qmgr_release_queue(queue_ids[port->id].tx);
	qmgr_release_queue(port->plat->txreadyq);
}
/* Allocate the descriptor table (from a DMA pool shared between ports,
 * created when the first port opens) and pre-allocate plus DMA-map one
 * RX buffer per RX descriptor.  On partial failure the caller cleans up
 * through destroy_hdlc_queues(). */
static int init_hdlc_queues(struct port *port)
{
	int i;

	if (!ports_open)	/* first open anywhere: create shared pool */
		if (!(dma_pool = dma_pool_create(DRV_NAME, NULL,
						 POOL_ALLOC_SIZE, 32, 0)))
			return -ENOMEM;

	if (!(port->desc_tab = dma_pool_alloc(dma_pool, GFP_KERNEL,
					      &port->desc_tab_phys)))
		return -ENOMEM;
	memset(port->desc_tab, 0, POOL_ALLOC_SIZE);
	memset(port->rx_buff_tab, 0, sizeof(port->rx_buff_tab)); /* tables */
	memset(port->tx_buff_tab, 0, sizeof(port->tx_buff_tab));

	/* Setup RX buffers */
	for (i = 0; i < RX_DESCS; i++) {
		struct desc *desc = rx_desc_ptr(port, i);
		buffer_t *buff;
		void *data;
#ifdef __ARMEB__
		/* big-endian: RX directly into skbs */
		if (!(buff = netdev_alloc_skb(port->netdev, RX_SIZE)))
			return -ENOMEM;
		data = buff->data;
#else
		/* little-endian: RX into plain buffers, swab on copy-out */
		if (!(buff = kmalloc(RX_SIZE, GFP_KERNEL)))
			return -ENOMEM;
		data = buff;
#endif
		desc->buf_len = RX_SIZE;
		desc->data = dma_map_single(&port->netdev->dev, data,
					    RX_SIZE, DMA_FROM_DEVICE);
		if (dma_mapping_error(&port->netdev->dev, desc->data)) {
			free_buffer(buff);
			return -EIO;
		}
		port->rx_buff_tab[i] = buff;
	}

	return 0;
}
/* Free and unmap all RX/TX buffers, return the descriptor table to the
 * DMA pool, and destroy the pool when no port remains open.  Safe to
 * call on a partially initialized port (desc_tab may be NULL). */
static void destroy_hdlc_queues(struct port *port)
{
	int i;

	if (port->desc_tab) {
		for (i = 0; i < RX_DESCS; i++) {
			struct desc *desc = rx_desc_ptr(port, i);
			buffer_t *buff = port->rx_buff_tab[i];
			if (buff) {
				dma_unmap_single(&port->netdev->dev,
						 desc->data, RX_SIZE,
						 DMA_FROM_DEVICE);
				free_buffer(buff);
			}
		}
		for (i = 0; i < TX_DESCS; i++) {
			struct desc *desc = tx_desc_ptr(port, i);
			buffer_t *buff = port->tx_buff_tab[i];
			if (buff) {
				dma_unmap_tx(port, desc);
				free_buffer(buff);
			}
		}
		dma_pool_free(dma_pool, port->desc_tab, port->desc_tab_phys);
		port->desc_tab = NULL;
	}

	if (!ports_open && dma_pool) {
		dma_pool_destroy(dma_pool);
		dma_pool = NULL;
	}
}
/* ndo_open: load NPE firmware, set up queues and buffers, prime the
 * TX-ready and RX-free queues, hook up queue IRQs and start the HDLC
 * engine.  Error labels unwind in reverse order of acquisition. */
static int hss_hdlc_open(struct net_device *dev)
{
	struct port *port = dev_to_port(dev);
	unsigned long flags;
	int i, err = 0;

	if ((err = hdlc_open(dev)))
		return err;

	if ((err = hss_load_firmware(port)))
		goto err_hdlc_close;

	if ((err = request_hdlc_queues(port)))
		goto err_hdlc_close;

	if ((err = init_hdlc_queues(port)))
		goto err_destroy_queues;

	spin_lock_irqsave(&npe_lock, flags);
	if (port->plat->open)
		if ((err = port->plat->open(port->id, dev,
					    hss_hdlc_set_carrier)))
			goto err_unlock;
	spin_unlock_irqrestore(&npe_lock, flags);

	/* Populate queues with buffers, no failure after this point */
	for (i = 0; i < TX_DESCS; i++)
		queue_put_desc(port->plat->txreadyq,
			       tx_desc_phys(port, i), tx_desc_ptr(port, i));

	for (i = 0; i < RX_DESCS; i++)
		queue_put_desc(queue_ids[port->id].rxfree,
			       rx_desc_phys(port, i), rx_desc_ptr(port, i));

	napi_enable(&port->napi);
	netif_start_queue(dev);

	qmgr_set_irq(queue_ids[port->id].rx, QUEUE_IRQ_SRC_NOT_EMPTY,
		     hss_hdlc_rx_irq, dev);

	qmgr_set_irq(queue_ids[port->id].txdone, QUEUE_IRQ_SRC_NOT_EMPTY,
		     hss_hdlc_txdone_irq, dev);
	qmgr_enable_irq(queue_ids[port->id].txdone);

	ports_open++;

	hss_set_hdlc_cfg(port);
	hss_config(port);

	hss_start_hdlc(port);

	/* we may already have RX data, enables IRQ */
	napi_schedule(&port->napi);
	return 0;

err_unlock:
	spin_unlock_irqrestore(&npe_lock, flags);
err_destroy_queues:
	destroy_hdlc_queues(port);
	release_hdlc_queues(port);
err_hdlc_close:
	hdlc_close(dev);
	return err;
}
/* ndo_stop: stop the HDLC engine and drain every descriptor back out
 * of the NPE, warning if any buffers remain unaccounted for. */
static int hss_hdlc_close(struct net_device *dev)
{
	struct port *port = dev_to_port(dev);
	unsigned long flags;
	int i, buffs = RX_DESCS; /* allocated RX buffers */

	spin_lock_irqsave(&npe_lock, flags);
	ports_open--;
	qmgr_disable_irq(queue_ids[port->id].rx);
	netif_stop_queue(dev);
	napi_disable(&port->napi);
	hss_stop_hdlc(port);

	/* every RX descriptor should now be on rxfree or rx */
	while (queue_get_desc(queue_ids[port->id].rxfree, port, 0) >= 0)
		buffs--;
	while (queue_get_desc(queue_ids[port->id].rx, port, 0) >= 0)
		buffs--;

	if (buffs)
		netdev_crit(dev, "unable to drain RX queue, %i buffer(s) left in NPE\n",
			    buffs);

	buffs = TX_DESCS;
	while (queue_get_desc(queue_ids[port->id].tx, port, 1) >= 0)
		buffs--; /* cancel TX */

	/* the NPE may still be returning TX descriptors; poll a while */
	i = 0;
	do {
		while (queue_get_desc(port->plat->txreadyq, port, 1) >= 0)
			buffs--;
		if (!buffs)
			break;
	} while (++i < MAX_CLOSE_WAIT);

	if (buffs)
		netdev_crit(dev, "unable to drain TX queue, %i buffer(s) left in NPE\n",
			    buffs);
#if DEBUG_CLOSE
	if (!buffs)
		printk(KERN_DEBUG "Draining TX queues took %i cycles\n", i);
#endif
	qmgr_disable_irq(queue_ids[port->id].txdone);

	if (port->plat->close)
		port->plat->close(port->id, dev);
	spin_unlock_irqrestore(&npe_lock, flags);

	destroy_hdlc_queues(port);
	release_hdlc_queues(port);
	hdlc_close(dev);
	return 0;
}
/* hdlc_device attach callback: validate and record the requested line
 * encoding and parity.  Only NRZ encoding with CCITT CRC16 or CRC32 is
 * supported; anything else yields -EINVAL. */
static int hss_hdlc_attach(struct net_device *dev, unsigned short encoding,
			   unsigned short parity)
{
	struct port *port = dev_to_port(dev);

	if (encoding != ENCODING_NRZ)
		return -EINVAL;

	if (parity == PARITY_CRC16_PR1_CCITT) {
		port->hdlc_cfg = 0;
		return 0;
	}
	if (parity == PARITY_CRC32_PR1_CCITT) {
		port->hdlc_cfg = PKT_HDLC_CRC_32;
		return 0;
	}
	return -EINVAL;
}
/* Evaluate one (a, b, c) divider combination for the internal clock
 * generator; generated rate = ixp4xx_timer_freq * (c+1) / (a*(c+1)+b+1).
 * If it approximates @rate better than *best_diff so far, record the
 * achieved rate in *best and the packed register value in *reg.
 * Returns the absolute rate error of this combination. */
static u32 check_clock(u32 rate, u32 a, u32 b, u32 c,
		       u32 *best, u32 *best_diff, u32 *reg)
{
	/* a is 10-bit, b is 10-bit, c is 12-bit */
	u64 new_rate;
	u32 new_diff;

	new_rate = ixp4xx_timer_freq * (u64)(c + 1);
	do_div(new_rate, a * (c + 1) + b + 1);
	new_diff = abs((u32)new_rate - rate);

	if (new_diff < *best_diff) {
		*best = new_rate;
		*best_diff = new_diff;
		*reg = (a << 22) | (b << 12) | c;	/* pack a/b/c fields */
	}
	return new_diff;
}
/* Search divider values (a, b, c) approximating @rate as closely as
 * possible; store the achieved rate in *best and the packed register
 * value in *reg.  See check_clock() for the rate formula.  A zero
 * return from check_clock() means an exact match, ending the search. */
static void find_best_clock(u32 rate, u32 *best, u32 *reg)
{
	u32 a, b, diff = 0xFFFFFFFF;

	a = ixp4xx_timer_freq / rate;

	if (a > 0x3FF) { /* 10-bit value - we can go as slow as ca. 65 kb/s */
		check_clock(rate, 0x3FF, 1, 1, best, &diff, reg);
		return;
	}
	if (a == 0) { /* > 66.666 MHz */
		a = 1; /* minimum divider is 1 (a = 0, b = 1, c = 1) */
		rate = ixp4xx_timer_freq;
	}

	if (rate * a == ixp4xx_timer_freq) { /* don't divide by 0 later */
		check_clock(rate, a - 1, 1, 1, best, &diff, reg);
		return;
	}

	for (b = 0; b < 0x400; b++) {
		u64 c = (b + 1) * (u64)rate;
		do_div(c, ixp4xx_timer_freq - rate * a);
		c--;
		if (c >= 0xFFF) { /* 12-bit - no need to check more 'b's */
			if (b == 0 && /* also try a bit higher rate */
			    !check_clock(rate, a - 1, 1, 1, best, &diff, reg))
				return;
			check_clock(rate, a, b, 0xFFF, best, &diff, reg);
			return;
		}
		/* try both truncated and rounded-up c */
		if (!check_clock(rate, a, b, c, best, &diff, reg))
			return;
		if (!check_clock(rate, a, b, c + 1, best, &diff, reg))
			return;
	}
}
/* SIOCWANDEV handler: report (IF_GET_IFACE) or configure
 * (IF_IFACE_SYNC_SERIAL / IF_IFACE_V35) the sync-serial line settings;
 * everything else is delegated to the generic hdlc_ioctl(). */
static int hss_hdlc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	const size_t size = sizeof(sync_serial_settings);
	sync_serial_settings new_line;
	sync_serial_settings __user *line = ifr->ifr_settings.ifs_ifsu.sync;
	struct port *port = dev_to_port(dev);
	unsigned long flags;
	int clk;

	if (cmd != SIOCWANDEV)
		return hdlc_ioctl(dev, ifr, cmd);

	switch(ifr->ifr_settings.type) {
	case IF_GET_IFACE:
		/* report current clock type/rate/loopback to user space */
		ifr->ifr_settings.type = IF_IFACE_V35;
		if (ifr->ifr_settings.size < size) {
			ifr->ifr_settings.size = size; /* data size wanted */
			return -ENOBUFS;
		}
		memset(&new_line, 0, sizeof(new_line));
		new_line.clock_type = port->clock_type;
		new_line.clock_rate = port->clock_rate;
		new_line.loopback = port->loopback;
		if (copy_to_user(line, &new_line, size))
			return -EFAULT;
		return 0;

	case IF_IFACE_SYNC_SERIAL:
	case IF_IFACE_V35:
		if(!capable(CAP_NET_ADMIN))
			return -EPERM;
		if (copy_from_user(&new_line, line, size))
			return -EFAULT;

		clk = new_line.clock_type;
		if (port->plat->set_clock)	/* platform may override */
			clk = port->plat->set_clock(port->id, clk);

		if (clk != CLOCK_EXT && clk != CLOCK_INT)
			return -EINVAL;	/* No such clock setting */

		if (new_line.loopback != 0 && new_line.loopback != 1)
			return -EINVAL;

		port->clock_type = clk; /* Update settings */
		if (clk == CLOCK_INT)
			find_best_clock(new_line.clock_rate, &port->clock_rate,
					&port->clock_reg);
		else {
			port->clock_rate = 0;
			port->clock_reg = CLK42X_SPEED_2048KHZ;
		}
		port->loopback = new_line.loopback;

		spin_lock_irqsave(&npe_lock, flags);

		if (dev->flags & IFF_UP)	/* re-apply if running */
			hss_config(port);

		if (port->loopback || port->carrier)
			netif_carrier_on(port->netdev);
		else
			netif_carrier_off(port->netdev);

		spin_unlock_irqrestore(&npe_lock, flags);

		return 0;

	default:
		return hdlc_ioctl(dev, ifr, cmd);
	}
}
/*****************************************************************************
* initialization
****************************************************************************/
/* net_device callbacks; transmit goes through the generic HDLC layer's
 * hdlc_start_xmit(), which dispatches to hss_hdlc_xmit() (installed as
 * hdlc->xmit in hss_init_one()). */
static const struct net_device_ops hss_hdlc_ops = {
	.ndo_open       = hss_hdlc_open,
	.ndo_stop       = hss_hdlc_close,
	.ndo_change_mtu = hdlc_change_mtu,
	.ndo_start_xmit = hdlc_start_xmit,
	.ndo_do_ioctl   = hss_hdlc_ioctl,
};
/* Platform probe: allocate per-port state, claim the NPE, then create
 * and register the HDLC net device.  Error labels unwind in reverse
 * order of acquisition. */
static int __devinit hss_init_one(struct platform_device *pdev)
{
	struct port *port;
	struct net_device *dev;
	hdlc_device *hdlc;
	int err;

	if ((port = kzalloc(sizeof(*port), GFP_KERNEL)) == NULL)
		return -ENOMEM;

	if ((port->npe = npe_request(0)) == NULL) {
		err = -ENODEV;
		goto err_free;
	}

	if ((port->netdev = dev = alloc_hdlcdev(port)) == NULL) {
		err = -ENOMEM;
		goto err_plat;
	}

	SET_NETDEV_DEV(dev, &pdev->dev);
	hdlc = dev_to_hdlc(dev);
	hdlc->attach = hss_hdlc_attach;
	hdlc->xmit = hss_hdlc_xmit;
	dev->netdev_ops = &hss_hdlc_ops;
	dev->tx_queue_len = 100;
	/* default to an external clock until configured via SIOCWANDEV */
	port->clock_type = CLOCK_EXT;
	port->clock_rate = 0;
	port->clock_reg = CLK42X_SPEED_2048KHZ;
	port->id = pdev->id;
	port->dev = &pdev->dev;
	port->plat = pdev->dev.platform_data;
	netif_napi_add(dev, &port->napi, hss_hdlc_poll, NAPI_WEIGHT);

	if ((err = register_hdlc_device(dev)))
		goto err_free_netdev;

	platform_set_drvdata(pdev, port);

	netdev_info(dev, "HSS-%i\n", port->id);
	return 0;

err_free_netdev:
	free_netdev(dev);
err_plat:
	npe_release(port->npe);
err_free:
	kfree(port);
	return err;
}
/* Platform remove: tear down in reverse order of hss_init_one(). */
static int __devexit hss_remove_one(struct platform_device *pdev)
{
	struct port *port = platform_get_drvdata(pdev);

	unregister_hdlc_device(port->netdev);
	free_netdev(port->netdev);
	npe_release(port->npe);
	platform_set_drvdata(pdev, NULL);
	kfree(port);
	return 0;
}
/* Platform driver glue; matched against devices named DRV_NAME. */
static struct platform_driver ixp4xx_hss_driver = {
	.driver.name	= DRV_NAME,
	.probe		= hss_init_one,
	.remove		= hss_remove_one,
};
/* Module init: require both the HDLC and HSS hardware feature bits
 * before registering the platform driver. */
static int __init hss_init_module(void)
{
	if ((ixp4xx_read_feature_bits() &
	     (IXP4XX_FEATURE_HDLC | IXP4XX_FEATURE_HSS)) !=
	    (IXP4XX_FEATURE_HDLC | IXP4XX_FEATURE_HSS))
		return -ENODEV;

	spin_lock_init(&npe_lock);

	return platform_driver_register(&ixp4xx_hss_driver);
}
/* Module exit: unregister the platform driver. */
static void __exit hss_cleanup_module(void)
{
	platform_driver_unregister(&ixp4xx_hss_driver);
}
/* Module metadata and entry points */
MODULE_AUTHOR("Krzysztof Halasa");
MODULE_DESCRIPTION("Intel IXP4xx HSS driver");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:ixp4xx_hss");
module_init(hss_init_module);
module_exit(hss_cleanup_module);
| gpl-2.0 |
hei1125/Nova_Kernel | drivers/hwmon/jz4740-hwmon.c | 2184 | 5489 | /*
* Copyright (C) 2009-2010, Lars-Peter Clausen <lars@metafoo.de>
* JZ4740 SoC HWMON driver
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 675 Mass Ave, Cambridge, MA 02139, USA.
*
*/
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/completion.h>
#include <linux/mfd/core.h>
#include <linux/hwmon.h>
/* Per-device state for the JZ4740 ADC-based hwmon device. */
struct jz4740_hwmon {
	struct resource *mem;		/* reserved MMIO region */
	void __iomem *base;		/* mapped ADC result register */

	int irq;			/* conversion-complete interrupt */

	const struct mfd_cell *cell;	/* MFD cell: enable()/disable() ADC */
	struct device *hwmon;		/* registered hwmon class device */

	struct completion read_completion; /* fired by jz4740_hwmon_irq() */

	struct mutex lock;		/* serializes ADC reads */
};
/* sysfs "name" attribute: report the fixed device name. */
static ssize_t jz4740_hwmon_show_name(struct device *dev,
	struct device_attribute *dev_attr, char *buf)
{
	static const char devname[] = "jz4740\n";

	return sprintf(buf, "%s", devname);
}
/* Conversion-complete interrupt: wake the reader blocked in
 * jz4740_hwmon_read_adcin(). */
static irqreturn_t jz4740_hwmon_irq(int irq, void *data)
{
	struct jz4740_hwmon *hwmon = data;

	complete(&hwmon->read_completion);
	return IRQ_HANDLED;
}
/* sysfs in0_input read: run one ADC conversion and return the result
 * scaled to millivolts (12-bit raw sample against a 3300 mV reference).
 * Serialized by ->lock; waits up to HZ jiffies (1 s) for the IRQ. */
static ssize_t jz4740_hwmon_read_adcin(struct device *dev,
	struct device_attribute *dev_attr, char *buf)
{
	struct jz4740_hwmon *hwmon = dev_get_drvdata(dev);
	struct completion *completion = &hwmon->read_completion;
	long t;
	unsigned long val;
	int ret;

	mutex_lock(&hwmon->lock);

	INIT_COMPLETION(*completion);	/* re-arm before starting conversion */

	enable_irq(hwmon->irq);
	hwmon->cell->enable(to_platform_device(dev));

	t = wait_for_completion_interruptible_timeout(completion, HZ);

	if (t > 0) {
		val = readw(hwmon->base) & 0xfff;
		val = (val * 3300) >> 12;	/* scale raw sample to mV */

		ret = sprintf(buf, "%lu\n", val);
	} else {
		/* t == 0: timed out; t < 0: interrupted by a signal */
		ret = t ? t : -ETIMEDOUT;
	}

	hwmon->cell->disable(to_platform_device(dev));
	disable_irq(hwmon->irq);

	mutex_unlock(&hwmon->lock);

	return ret;
}
/* Read-only sysfs attributes: device name and ADC input (millivolts). */
static DEVICE_ATTR(name, S_IRUGO, jz4740_hwmon_show_name, NULL);
static DEVICE_ATTR(in0_input, S_IRUGO, jz4740_hwmon_read_adcin, NULL);

static struct attribute *jz4740_hwmon_attributes[] = {
	&dev_attr_name.attr,
	&dev_attr_in0_input.attr,
	NULL
};

static const struct attribute_group jz4740_hwmon_attr_group = {
	.attrs = jz4740_hwmon_attributes,
};
/* Probe: map the ADC result register, install the (initially disabled)
 * conversion-complete IRQ, create the sysfs attributes and register
 * with the hwmon class.  Error labels unwind in reverse order. */
static int __devinit jz4740_hwmon_probe(struct platform_device *pdev)
{
	int ret;
	struct jz4740_hwmon *hwmon;

	hwmon = kmalloc(sizeof(*hwmon), GFP_KERNEL);
	if (!hwmon) {
		dev_err(&pdev->dev, "Failed to allocate driver structure\n");
		return -ENOMEM;
	}

	hwmon->cell = mfd_get_cell(pdev);

	hwmon->irq = platform_get_irq(pdev, 0);
	if (hwmon->irq < 0) {
		ret = hwmon->irq;
		dev_err(&pdev->dev, "Failed to get platform irq: %d\n", ret);
		goto err_free;
	}

	hwmon->mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!hwmon->mem) {
		ret = -ENOENT;
		dev_err(&pdev->dev, "Failed to get platform mmio resource\n");
		goto err_free;
	}

	hwmon->mem = request_mem_region(hwmon->mem->start,
					resource_size(hwmon->mem), pdev->name);
	if (!hwmon->mem) {
		ret = -EBUSY;
		dev_err(&pdev->dev, "Failed to request mmio memory region\n");
		goto err_free;
	}

	hwmon->base = ioremap_nocache(hwmon->mem->start,
				      resource_size(hwmon->mem));
	if (!hwmon->base) {
		ret = -EBUSY;
		dev_err(&pdev->dev, "Failed to ioremap mmio memory\n");
		goto err_release_mem_region;
	}

	init_completion(&hwmon->read_completion);
	mutex_init(&hwmon->lock);

	platform_set_drvdata(pdev, hwmon);

	ret = request_irq(hwmon->irq, jz4740_hwmon_irq, 0, pdev->name, hwmon);
	if (ret) {
		dev_err(&pdev->dev, "Failed to request irq: %d\n", ret);
		goto err_iounmap;
	}
	/* keep the IRQ masked until a read actually starts a conversion */
	disable_irq(hwmon->irq);

	ret = sysfs_create_group(&pdev->dev.kobj, &jz4740_hwmon_attr_group);
	if (ret) {
		dev_err(&pdev->dev, "Failed to create sysfs group: %d\n", ret);
		goto err_free_irq;
	}

	hwmon->hwmon = hwmon_device_register(&pdev->dev);
	if (IS_ERR(hwmon->hwmon)) {
		ret = PTR_ERR(hwmon->hwmon);
		goto err_remove_file;
	}

	return 0;

err_remove_file:
	sysfs_remove_group(&pdev->dev.kobj, &jz4740_hwmon_attr_group);
err_free_irq:
	free_irq(hwmon->irq, hwmon);
err_iounmap:
	platform_set_drvdata(pdev, NULL);
	iounmap(hwmon->base);
err_release_mem_region:
	release_mem_region(hwmon->mem->start, resource_size(hwmon->mem));
err_free:
	kfree(hwmon);
	return ret;
}
/* Remove: undo jz4740_hwmon_probe() in reverse order. */
static int __devexit jz4740_hwmon_remove(struct platform_device *pdev)
{
	struct jz4740_hwmon *hwmon = platform_get_drvdata(pdev);

	hwmon_device_unregister(hwmon->hwmon);
	sysfs_remove_group(&pdev->dev.kobj, &jz4740_hwmon_attr_group);

	free_irq(hwmon->irq, hwmon);

	iounmap(hwmon->base);
	release_mem_region(hwmon->mem->start, resource_size(hwmon->mem));

	platform_set_drvdata(pdev, NULL);
	kfree(hwmon);

	return 0;
}
/* Platform driver glue; registered from jz4740_hwmon_init() below.
 * Declared static: it is referenced only within this translation unit,
 * so it should not pollute the kernel's global symbol namespace. */
static struct platform_driver jz4740_hwmon_driver = {
	.probe	= jz4740_hwmon_probe,
	.remove	= __devexit_p(jz4740_hwmon_remove),
	.driver = {
		.name = "jz4740-hwmon",
		.owner = THIS_MODULE,
	},
};
/* Register the platform driver at module load. */
static int __init jz4740_hwmon_init(void)
{
	return platform_driver_register(&jz4740_hwmon_driver);
}
module_init(jz4740_hwmon_init);
/* Unregister the platform driver on module unload. */
static void __exit jz4740_hwmon_exit(void)
{
	platform_driver_unregister(&jz4740_hwmon_driver);
}
module_exit(jz4740_hwmon_exit);

/* Module metadata */
MODULE_DESCRIPTION("JZ4740 SoC HWMON driver");
MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:jz4740-hwmon");
| gpl-2.0 |
cdesjardins/DTS-Eagle-Integration_CAF-Android-kernel | sound/drivers/opl3/opl3_midi.c | 2440 | 22751 | /*
* Copyright (c) by Uros Bizjak <uros@kss-loka.si>
*
* Midi synth routines for OPL2/OPL3/OPL4 FM
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
*/
#undef DEBUG_ALLOC
#undef DEBUG_MIDI
#include "opl3_voice.h"
#include <sound/asoundef.h>
extern char snd_opl3_regmap[MAX_OPL2_VOICES][4];
extern bool use_internal_drums;
static void snd_opl3_note_off_unsafe(void *p, int note, int vel,
struct snd_midi_channel *chan);
/*
* The next table looks magical, but it certainly is not. Its values have
* been calculated as table[i]=8*log(i/64)/log(2) with an obvious exception
* for i=0. This log-table converts a linear volume-scaling (0..127) to a
* logarithmic scaling as present in the FM-synthesizer chips. so : Volume
* 64 = 0 db = relative volume 0 and: Volume 32 = -6 db = relative
* volume -8 it was implemented as a table because it is only 128 bytes and
* it saves a lot of log() calculations. (Rob Hooft <hooft@chem.ruu.nl>)
*/
/* Linear MIDI volume (0..127) -> relative attenuation offset applied to
 * the operator's total-level field; derivation in the comment above. */
static char opl3_volume_table[128] =
{
	-63, -48, -40, -35, -32, -29, -27, -26,
	-24, -23, -21, -20, -19, -18, -18, -17,
	-16, -15, -15, -14, -13, -13, -12, -12,
	-11, -11, -10, -10, -10, -9, -9, -8,
	-8, -8, -7, -7, -7, -6, -6, -6,
	-5, -5, -5, -5, -4, -4, -4, -4,
	-3, -3, -3, -3, -2, -2, -2, -2,
	-2, -1, -1, -1, -1, 0, 0, 0,
	0, 0, 0, 1, 1, 1, 1, 1,
	1, 2, 2, 2, 2, 2, 2, 2,
	3, 3, 3, 3, 3, 3, 3, 4,
	4, 4, 4, 4, 4, 4, 4, 5,
	5, 5, 5, 5, 5, 5, 5, 5,
	6, 6, 6, 6, 6, 6, 6, 6,
	6, 7, 7, 7, 7, 7, 7, 7,
	7, 7, 7, 8, 8, 8, 8, 8
};
/* Fold note velocity, channel volume and expression into the operator's
 * total-level byte pointed to by @volbyte, preserving its KSL bits.
 * The combined linear volume (0..127) is mapped through
 * opl3_volume_table and added to the current level, clamped to the
 * OPL3_TOTAL_LEVEL_MASK range. */
void snd_opl3_calc_volume(unsigned char *volbyte, int vel,
			  struct snd_midi_channel *chan)
{
	int vol = (vel * chan->gm_volume * chan->gm_expression) / (127 * 127);
	int level, inverted;

	if (vol > 127)
		vol = 127;

	/* current level, inverted so that bigger means louder */
	level = OPL3_TOTAL_LEVEL_MASK - (*volbyte & OPL3_TOTAL_LEVEL_MASK);
	level += opl3_volume_table[vol];
	if (level < 0)
		level = 0;
	else if (level > OPL3_TOTAL_LEVEL_MASK)
		level = OPL3_TOTAL_LEVEL_MASK;

	/* invert back into attenuation form and merge with the KSL bits */
	inverted = OPL3_TOTAL_LEVEL_MASK - (level & OPL3_TOTAL_LEVEL_MASK);
	*volbyte = (*volbyte & OPL3_KSL_MASK) |
		   (inverted & OPL3_TOTAL_LEVEL_MASK);
}
/*
* Converts the note frequency to block and fnum values for the FM chip
*/
/* F-numbers for the 12 semitones of one octave, with two extra entries
 * at each end so pitch bends of up to +/-2 semitones can interpolate. */
static short opl3_note_table[16] =
{
	305, 323,	/* for pitch bending, -2 semitones */
	343, 363, 385, 408, 432, 458, 485, 514, 544, 577, 611, 647,
	686, 726	/* for pitch bending, +2 semitones */
};
/* Convert a MIDI note (plus any channel pitch bend) into the chip's
 * F-number (*fnum) and block/octave register value (*blocknum). */
static void snd_opl3_calc_pitch(unsigned char *fnum, unsigned char *blocknum,
				int note, struct snd_midi_channel *chan)
{
	int block = ((note / 12) & 0x07) - 1;
	int idx = (note % 12) + 2;	/* +2 skips the bend-down entries */
	int freq;

	if (chan->midi_pitchbend) {
		int pitchbend = chan->midi_pitchbend;
		int segment;

		if (pitchbend > 0x1FFF)
			pitchbend = 0x1FFF;
		segment = pitchbend / 0x1000;	/* whole semitones of bend */
		freq = opl3_note_table[idx+segment];
		/* linear interpolation within the semitone */
		freq += ((opl3_note_table[idx+segment+1] - freq) *
			 (pitchbend % 0x1000)) / 0x1000;
	} else {
		freq = opl3_note_table[idx];
	}

	*fnum = (unsigned char) freq;
	*blocknum = ((freq >> 8) & OPL3_FNUM_HIGH_MASK) |
		    ((block << 2) & OPL3_BLOCKNUM_MASK);
}
#ifdef DEBUG_ALLOC
/* Dump the allocation state of all voices: one character per voice,
 * drawn from "x.24" indexed by the voice's state + 1. */
static void debug_alloc(struct snd_opl3 *opl3, char *s, int voice) {
	int i;
	char *str = "x.24";

	printk(KERN_DEBUG "time %.5i: %s [%.2i]: ", opl3->use_time, s, voice);
	for (i = 0; i < opl3->max_voices; i++)
		printk("%c", *(str + opl3->voices[i].state + 1));
	printk("\n");
}
#endif
/*
 * Get a FM voice (channel) to play a note on.
 *
 * All usable voices are scanned and ranked by allocation cost:
 * FREE (voice idle), CHEAP (one sounding voice must be cut off) or
 * EXPENSIVE (two sounding voices must be cut off, or a 4op-capable
 * channel is used for a 2op instrument).  Within each cost class the
 * least recently used voice wins.  Returns the chosen voice index,
 * or -1 if no suitable voice exists.
 */
static int opl3_get_voice(struct snd_opl3 *opl3, int instr_4op,
			  struct snd_midi_channel *chan) {
	int chan_4op_1;		/* first voice for 4op instrument */
	int chan_4op_2;		/* second voice for 4op instrument */

	struct snd_opl3_voice *vp, *vp2;
	unsigned int voice_time;
	int i;

#ifdef DEBUG_ALLOC
	char *alloc_type[3] = { "FREE ", "CHEAP ", "EXPENSIVE" };
#endif

	/* This is our "allocation cost" table */
	enum {
		FREE = 0, CHEAP, EXPENSIVE, END
	};

	/* Keeps track of the best candidate found per cost class */
	struct best {
		unsigned int time;
		int voice;
	} best[END];
	struct best *bp;

	for (i = 0; i < END; i++) {
		best[i].time = (unsigned int)(-1); /* XXX MAX_?INT really */
		best[i].voice = -1;
	}

	/* Look through all the channels for the most suitable. */
	for (i = 0; i < opl3->max_voices; i++) {
		vp = &opl3->voices[i];

		if (vp->state == SNDRV_OPL3_ST_NOT_AVAIL)
			/* skip unavailable channels, allocated by
			   drum voices or by bounded 4op voices) */
			continue;

		voice_time = vp->time;
		bp = best;

		chan_4op_1 = ((i < 3) || (i > 8 && i < 12));
		chan_4op_2 = ((i > 2 && i < 6) || (i > 11 && i < 15));
		if (instr_4op) {
			/* allocate 4op voice */
			/* skip channels unavailable to 4op instrument */
			if (!chan_4op_1)
				continue;

			if (vp->state)
				/* kill one voice, CHEAP */
				bp++;
			/* get state of bounded 2op channel
			   to be allocated for 4op instrument */
			vp2 = &opl3->voices[i + 3];
			if (vp2->state == SNDRV_OPL3_ST_ON_2OP) {
				/* kill two voices, EXPENSIVE */
				bp++;
				/* rank by the more recently used voice of
				 * the pair (was vp->time, which voice_time
				 * already held — a no-op comparison) */
				voice_time = (voice_time > vp2->time) ?
					voice_time : vp2->time;
			}
		} else {
			/* allocate 2op voice */
			if ((chan_4op_1) || (chan_4op_2))
				/* use bounded channels for 2op, CHEAP */
				bp++;
			else if (vp->state)
				/* kill one voice on 2op channel, CHEAP */
				bp++;
			/* raise kill cost to EXPENSIVE for all channels */
			if (vp->state)
				bp++;
		}

		if (voice_time < bp->time) {
			bp->time = voice_time;
			bp->voice = i;
		}
	}

	/* return the cheapest candidate found, FREE before CHEAP etc. */
	for (i = 0; i < END; i++) {
		if (best[i].voice >= 0) {
#ifdef DEBUG_ALLOC
			printk(KERN_DEBUG "%s %iop allocation on voice %i\n",
			       alloc_type[i], instr_4op ? 4 : 2,
			       best[i].voice);
#endif
			return best[i].voice;
		}
	}

	/* not found */
	return -1;
}
/* ------------------------------ */
/*
* System timer interrupt function
*/
/* System timer callback: issue deferred note-offs whose scheduled
 * jiffies value has arrived; re-arm the timer while any remain. */
void snd_opl3_timer_func(unsigned long data)
{
	struct snd_opl3 *opl3 = (struct snd_opl3 *)data;
	unsigned long flags;
	int again = 0;
	int i;

	spin_lock_irqsave(&opl3->voice_lock, flags);
	for (i = 0; i < opl3->max_voices; i++) {
		struct snd_opl3_voice *vp = &opl3->voices[i];
		if (vp->state > 0 && vp->note_off_check) {
			if (vp->note_off == jiffies)
				snd_opl3_note_off_unsafe(opl3, vp->note, 0,
							 vp->chan);
			else
				again++;	/* note-off still pending */
		}
	}
	spin_unlock_irqrestore(&opl3->voice_lock, flags);

	spin_lock_irqsave(&opl3->sys_timer_lock, flags);
	if (again) {
		opl3->tlist.expires = jiffies + 1;	/* invoke again */
		add_timer(&opl3->tlist);
	} else {
		opl3->sys_timer_status = 0;	/* timer now stopped */
	}
	spin_unlock_irqrestore(&opl3->sys_timer_lock, flags);
}
/*
* Start system timer
*/
/* Arm the note-off system timer unless it is already running. */
static void snd_opl3_start_timer(struct snd_opl3 *opl3)
{
	unsigned long flags;

	spin_lock_irqsave(&opl3->sys_timer_lock, flags);
	if (! opl3->sys_timer_status) {
		opl3->tlist.expires = jiffies + 1;
		add_timer(&opl3->tlist);
		opl3->sys_timer_status = 1;
	}
	spin_unlock_irqrestore(&opl3->sys_timer_lock, flags);
}
/* ------------------------------ */
/* OSS-mode channel number -> OPL3 voice number remapping table. */
static int snd_opl3_oss_map[MAX_OPL3_VOICES] = {
	0, 1, 2, 9, 10, 11, 6, 7, 8, 15, 16, 17, 3, 4 ,5, 12, 13, 14
};
/*
* Start a note.
*/
void snd_opl3_note_on(void *p, int note, int vel, struct snd_midi_channel *chan)
{
struct snd_opl3 *opl3;
int instr_4op;
int voice;
struct snd_opl3_voice *vp, *vp2;
unsigned short connect_mask;
unsigned char connection;
unsigned char vol_op[4];
int extra_prg = 0;
unsigned short reg_side;
unsigned char op_offset;
unsigned char voice_offset;
unsigned short opl3_reg;
unsigned char reg_val;
unsigned char prg, bank;
int key = note;
unsigned char fnum, blocknum;
int i;
struct fm_patch *patch;
struct fm_instrument *fm;
unsigned long flags;
opl3 = p;
#ifdef DEBUG_MIDI
snd_printk(KERN_DEBUG "Note on, ch %i, inst %i, note %i, vel %i\n",
chan->number, chan->midi_program, note, vel);
#endif
/* in SYNTH mode, application takes care of voices */
/* in SEQ mode, drum voice numbers are notes on drum channel */
if (opl3->synth_mode == SNDRV_OPL3_MODE_SEQ) {
if (chan->drum_channel) {
/* percussion instruments are located in bank 128 */
bank = 128;
prg = note;
} else {
bank = chan->gm_bank_select;
prg = chan->midi_program;
}
} else {
/* Prepare for OSS mode */
if (chan->number >= MAX_OPL3_VOICES)
return;
/* OSS instruments are located in bank 127 */
bank = 127;
prg = chan->midi_program;
}
spin_lock_irqsave(&opl3->voice_lock, flags);
if (use_internal_drums) {
snd_opl3_drum_switch(opl3, note, vel, 1, chan);
spin_unlock_irqrestore(&opl3->voice_lock, flags);
return;
}
__extra_prg:
patch = snd_opl3_find_patch(opl3, prg, bank, 0);
if (!patch) {
spin_unlock_irqrestore(&opl3->voice_lock, flags);
return;
}
fm = &patch->inst;
switch (patch->type) {
case FM_PATCH_OPL2:
instr_4op = 0;
break;
case FM_PATCH_OPL3:
if (opl3->hardware >= OPL3_HW_OPL3) {
instr_4op = 1;
break;
}
default:
spin_unlock_irqrestore(&opl3->voice_lock, flags);
return;
}
#ifdef DEBUG_MIDI
snd_printk(KERN_DEBUG " --> OPL%i instrument: %s\n",
instr_4op ? 3 : 2, patch->name);
#endif
/* in SYNTH mode, application takes care of voices */
/* in SEQ mode, allocate voice on free OPL3 channel */
if (opl3->synth_mode == SNDRV_OPL3_MODE_SEQ) {
voice = opl3_get_voice(opl3, instr_4op, chan);
} else {
/* remap OSS voice */
voice = snd_opl3_oss_map[chan->number];
}
if (voice < MAX_OPL2_VOICES) {
/* Left register block for voices 0 .. 8 */
reg_side = OPL3_LEFT;
voice_offset = voice;
connect_mask = (OPL3_LEFT_4OP_0 << voice_offset) & 0x07;
} else {
/* Right register block for voices 9 .. 17 */
reg_side = OPL3_RIGHT;
voice_offset = voice - MAX_OPL2_VOICES;
connect_mask = (OPL3_RIGHT_4OP_0 << voice_offset) & 0x38;
}
/* kill voice on channel */
vp = &opl3->voices[voice];
if (vp->state > 0) {
opl3_reg = reg_side | (OPL3_REG_KEYON_BLOCK + voice_offset);
reg_val = vp->keyon_reg & ~OPL3_KEYON_BIT;
opl3->command(opl3, opl3_reg, reg_val);
}
if (instr_4op) {
vp2 = &opl3->voices[voice + 3];
if (vp->state > 0) {
opl3_reg = reg_side | (OPL3_REG_KEYON_BLOCK +
voice_offset + 3);
reg_val = vp->keyon_reg & ~OPL3_KEYON_BIT;
opl3->command(opl3, opl3_reg, reg_val);
}
}
/* set connection register */
if (instr_4op) {
if ((opl3->connection_reg ^ connect_mask) & connect_mask) {
opl3->connection_reg |= connect_mask;
/* set connection bit */
opl3_reg = OPL3_RIGHT | OPL3_REG_CONNECTION_SELECT;
opl3->command(opl3, opl3_reg, opl3->connection_reg);
}
} else {
if ((opl3->connection_reg ^ ~connect_mask) & connect_mask) {
opl3->connection_reg &= ~connect_mask;
/* clear connection bit */
opl3_reg = OPL3_RIGHT | OPL3_REG_CONNECTION_SELECT;
opl3->command(opl3, opl3_reg, opl3->connection_reg);
}
}
#ifdef DEBUG_MIDI
snd_printk(KERN_DEBUG " --> setting OPL3 connection: 0x%x\n",
opl3->connection_reg);
#endif
/*
* calculate volume depending on connection
* between FM operators (see include/opl3.h)
*/
for (i = 0; i < (instr_4op ? 4 : 2); i++)
vol_op[i] = fm->op[i].ksl_level;
connection = fm->feedback_connection[0] & 0x01;
if (instr_4op) {
connection <<= 1;
connection |= fm->feedback_connection[1] & 0x01;
snd_opl3_calc_volume(&vol_op[3], vel, chan);
switch (connection) {
case 0x03:
snd_opl3_calc_volume(&vol_op[2], vel, chan);
/* fallthru */
case 0x02:
snd_opl3_calc_volume(&vol_op[0], vel, chan);
break;
case 0x01:
snd_opl3_calc_volume(&vol_op[1], vel, chan);
}
} else {
snd_opl3_calc_volume(&vol_op[1], vel, chan);
if (connection)
snd_opl3_calc_volume(&vol_op[0], vel, chan);
}
/* Program the FM voice characteristics */
for (i = 0; i < (instr_4op ? 4 : 2); i++) {
#ifdef DEBUG_MIDI
snd_printk(KERN_DEBUG " --> programming operator %i\n", i);
#endif
op_offset = snd_opl3_regmap[voice_offset][i];
/* Set OPL3 AM_VIB register of requested voice/operator */
reg_val = fm->op[i].am_vib;
opl3_reg = reg_side | (OPL3_REG_AM_VIB + op_offset);
opl3->command(opl3, opl3_reg, reg_val);
/* Set OPL3 KSL_LEVEL register of requested voice/operator */
reg_val = vol_op[i];
opl3_reg = reg_side | (OPL3_REG_KSL_LEVEL + op_offset);
opl3->command(opl3, opl3_reg, reg_val);
/* Set OPL3 ATTACK_DECAY register of requested voice/operator */
reg_val = fm->op[i].attack_decay;
opl3_reg = reg_side | (OPL3_REG_ATTACK_DECAY + op_offset);
opl3->command(opl3, opl3_reg, reg_val);
/* Set OPL3 SUSTAIN_RELEASE register of requested voice/operator */
reg_val = fm->op[i].sustain_release;
opl3_reg = reg_side | (OPL3_REG_SUSTAIN_RELEASE + op_offset);
opl3->command(opl3, opl3_reg, reg_val);
/* Select waveform */
reg_val = fm->op[i].wave_select;
opl3_reg = reg_side | (OPL3_REG_WAVE_SELECT + op_offset);
opl3->command(opl3, opl3_reg, reg_val);
}
/* Set operator feedback and 2op inter-operator connection */
reg_val = fm->feedback_connection[0];
/* Set output voice connection */
reg_val |= OPL3_STEREO_BITS;
if (chan->gm_pan < 43)
reg_val &= ~OPL3_VOICE_TO_RIGHT;
if (chan->gm_pan > 85)
reg_val &= ~OPL3_VOICE_TO_LEFT;
opl3_reg = reg_side | (OPL3_REG_FEEDBACK_CONNECTION + voice_offset);
opl3->command(opl3, opl3_reg, reg_val);
if (instr_4op) {
/* Set 4op inter-operator connection */
reg_val = fm->feedback_connection[1] & OPL3_CONNECTION_BIT;
/* Set output voice connection */
reg_val |= OPL3_STEREO_BITS;
if (chan->gm_pan < 43)
reg_val &= ~OPL3_VOICE_TO_RIGHT;
if (chan->gm_pan > 85)
reg_val &= ~OPL3_VOICE_TO_LEFT;
opl3_reg = reg_side | (OPL3_REG_FEEDBACK_CONNECTION +
voice_offset + 3);
opl3->command(opl3, opl3_reg, reg_val);
}
/*
* Special treatment of percussion notes for fm:
* Requested pitch is really program, and pitch for
* device is whatever was specified in the patch library.
*/
if (fm->fix_key)
note = fm->fix_key;
/*
* use transpose if defined in patch library
*/
if (fm->trnsps)
note += (fm->trnsps - 64);
snd_opl3_calc_pitch(&fnum, &blocknum, note, chan);
/* Set OPL3 FNUM_LOW register of requested voice */
opl3_reg = reg_side | (OPL3_REG_FNUM_LOW + voice_offset);
opl3->command(opl3, opl3_reg, fnum);
opl3->voices[voice].keyon_reg = blocknum;
/* Set output sound flag */
blocknum |= OPL3_KEYON_BIT;
#ifdef DEBUG_MIDI
snd_printk(KERN_DEBUG " --> trigger voice %i\n", voice);
#endif
/* Set OPL3 KEYON_BLOCK register of requested voice */
opl3_reg = reg_side | (OPL3_REG_KEYON_BLOCK + voice_offset);
opl3->command(opl3, opl3_reg, blocknum);
/* kill note after fixed duration (in centiseconds) */
if (fm->fix_dur) {
opl3->voices[voice].note_off = jiffies +
(fm->fix_dur * HZ) / 100;
snd_opl3_start_timer(opl3);
opl3->voices[voice].note_off_check = 1;
} else
opl3->voices[voice].note_off_check = 0;
/* get extra pgm, but avoid possible loops */
extra_prg = (extra_prg) ? 0 : fm->modes;
/* do the bookkeeping */
vp->time = opl3->use_time++;
vp->note = key;
vp->chan = chan;
if (instr_4op) {
vp->state = SNDRV_OPL3_ST_ON_4OP;
vp2 = &opl3->voices[voice + 3];
vp2->time = opl3->use_time++;
vp2->note = key;
vp2->chan = chan;
vp2->state = SNDRV_OPL3_ST_NOT_AVAIL;
} else {
if (vp->state == SNDRV_OPL3_ST_ON_4OP) {
/* 4op killed by 2op, release bounded voice */
vp2 = &opl3->voices[voice + 3];
vp2->time = opl3->use_time++;
vp2->state = SNDRV_OPL3_ST_OFF;
}
vp->state = SNDRV_OPL3_ST_ON_2OP;
}
#ifdef DEBUG_ALLOC
debug_alloc(opl3, "note on ", voice);
#endif
/* allocate extra program if specified in patch library */
if (extra_prg) {
if (extra_prg > 128) {
bank = 128;
/* percussions start at 35 */
prg = extra_prg - 128 + 35 - 1;
} else {
bank = 0;
prg = extra_prg - 1;
}
#ifdef DEBUG_MIDI
snd_printk(KERN_DEBUG " *** allocating extra program\n");
#endif
goto __extra_prg;
}
spin_unlock_irqrestore(&opl3->voice_lock, flags);
}
/*
 * Key off one OPL3 voice and mark it (and, for a 4op voice, its bound
 * partner) as free.  Caller must hold opl3->voice_lock.
 */
static void snd_opl3_kill_voice(struct snd_opl3 *opl3, int voice)
{
	unsigned short side;
	unsigned char offset;
	struct snd_opl3_voice *slot, *bound;

	if (snd_BUG_ON(voice >= MAX_OPL3_VOICES))
		return;

	slot = &opl3->voices[voice];
	/* voices 0..8 live in the left register block, 9..17 in the right */
	side = (voice < MAX_OPL2_VOICES) ? OPL3_LEFT : OPL3_RIGHT;
	offset = (voice < MAX_OPL2_VOICES) ? voice : voice - MAX_OPL2_VOICES;

#ifdef DEBUG_MIDI
	snd_printk(KERN_DEBUG " --> kill voice %i\n", voice);
#endif
	/* keyon_reg was stored without OPL3_KEYON_BIT, so writing it back
	 * clears Key ON */
	opl3->command(opl3, side | (OPL3_REG_KEYON_BLOCK + offset),
		      slot->keyon_reg);

	/* bookkeeping: stamp and free the slot(s) */
	slot->time = opl3->use_time++;
	if (slot->state == SNDRV_OPL3_ST_ON_4OP) {
		bound = &opl3->voices[voice + 3];
		bound->time = opl3->use_time++;
		bound->state = SNDRV_OPL3_ST_OFF;
	}
	slot->state = SNDRV_OPL3_ST_OFF;
#ifdef DEBUG_ALLOC
	debug_alloc(opl3, "note off", voice);
#endif
}
/*
 * Release a note in response to a midi note off.
 * Lockless core -- the caller (snd_opl3_note_off) holds voice_lock.
 */
static void snd_opl3_note_off_unsafe(void *p, int note, int vel,
				     struct snd_midi_channel *chan)
{
	struct snd_opl3 *opl3 = p;
	struct snd_opl3_voice *slot;
	int v;

#ifdef DEBUG_MIDI
	snd_printk(KERN_DEBUG "Note off, ch %i, inst %i, note %i\n",
		   chan->number, chan->midi_program, note);
#endif

	if (opl3->synth_mode != SNDRV_OPL3_MODE_SEQ) {
		/* OSS (SYNTH) mode: channel number maps to one fixed voice */
		if (chan->number < MAX_OPL3_VOICES)
			snd_opl3_kill_voice(opl3, snd_opl3_oss_map[chan->number]);
		return;
	}

	if (chan->drum_channel && use_internal_drums) {
		snd_opl3_drum_switch(opl3, note, vel, 0, chan);
		return;
	}

	/* Kill every voice carrying this channel/note pair; extra programs
	 * share the same pair, so this hopefully catches them all too. */
	for (v = 0; v < opl3->max_voices; v++) {
		slot = &opl3->voices[v];
		if (slot->state > 0 && slot->chan == chan && slot->note == note)
			snd_opl3_kill_voice(opl3, v);
	}
}
/*
 * MIDI note-off entry point: takes the voice lock and delegates to
 * snd_opl3_note_off_unsafe().
 */
void snd_opl3_note_off(void *p, int note, int vel,
		       struct snd_midi_channel *chan)
{
	struct snd_opl3 *opl3 = p;
	unsigned long flags;

	spin_lock_irqsave(&opl3->voice_lock, flags);
	snd_opl3_note_off_unsafe(p, note, vel, chan);
	spin_unlock_irqrestore(&opl3->voice_lock, flags);
}
/*
 * Key pressure (polyphonic aftertouch) change.
 * Not implemented for OPL3 -- only logs when DEBUG_MIDI is defined.
 */
void snd_opl3_key_press(void *p, int note, int vel, struct snd_midi_channel *chan)
{
	struct snd_opl3 *opl3;

	opl3 = p;
#ifdef DEBUG_MIDI
	snd_printk(KERN_DEBUG "Key pressure, ch#: %i, inst#: %i\n",
		   chan->number, chan->midi_program);
#endif
}
/*
 * Terminate a note immediately.
 * Not implemented for OPL3 -- only logs when DEBUG_MIDI is defined.
 */
void snd_opl3_terminate_note(void *p, int note, struct snd_midi_channel *chan)
{
	struct snd_opl3 *opl3;

	opl3 = p;
#ifdef DEBUG_MIDI
	snd_printk(KERN_DEBUG "Terminate note, ch#: %i, inst#: %i\n",
		   chan->number, chan->midi_program);
#endif
}
/*
 * Reprogram the pitch (FNUM low byte + BLOCK) of a sounding voice after
 * a pitch controller change on its channel, re-asserting KEYON.
 * Caller holds opl3->voice_lock (see snd_opl3_pitch_ctrl).
 */
static void snd_opl3_update_pitch(struct snd_opl3 *opl3, int voice)
{
	unsigned short reg_side;
	unsigned char voice_offset;
	unsigned short opl3_reg;
	unsigned char fnum, blocknum;
	struct snd_opl3_voice *vp;

	if (snd_BUG_ON(voice >= MAX_OPL3_VOICES))
		return;

	vp = &opl3->voices[voice];
	if (vp->chan == NULL)
		return; /* not allocated? */

	if (voice < MAX_OPL2_VOICES) {
		/* Left register block for voices 0 .. 8 */
		reg_side = OPL3_LEFT;
		voice_offset = voice;
	} else {
		/* Right register block for voices 9 .. 17 */
		reg_side = OPL3_RIGHT;
		voice_offset = voice - MAX_OPL2_VOICES;
	}

	snd_opl3_calc_pitch(&fnum, &blocknum, vp->note, vp->chan);

	/* Set OPL3 FNUM_LOW register of requested voice */
	opl3_reg = reg_side | (OPL3_REG_FNUM_LOW + voice_offset);
	opl3->command(opl3, opl3_reg, fnum);

	/* store block/fnum-high WITHOUT the key-on bit, for later key-off */
	vp->keyon_reg = blocknum;

	/* Set output sound flag */
	blocknum |= OPL3_KEYON_BIT;

	/* Set OPL3 KEYON_BLOCK register of requested voice */
	opl3_reg = reg_side | (OPL3_REG_KEYON_BLOCK + voice_offset);
	opl3->command(opl3, opl3_reg, blocknum);

	vp->time = opl3->use_time++;
}
/*
 * Propagate a pitch controller change to every sounding voice on the
 * given channel.
 */
static void snd_opl3_pitch_ctrl(struct snd_opl3 *opl3, struct snd_midi_channel *chan)
{
	struct snd_opl3_voice *slot;
	unsigned long flags;
	int v;

	spin_lock_irqsave(&opl3->voice_lock, flags);
	if (opl3->synth_mode != SNDRV_OPL3_MODE_SEQ) {
		/* OSS (SYNTH) mode: one fixed voice per channel */
		if (chan->number < MAX_OPL3_VOICES)
			snd_opl3_update_pitch(opl3,
					      snd_opl3_oss_map[chan->number]);
	} else {
		/* SEQ mode: retune every active voice bound to this channel */
		for (v = 0; v < opl3->max_voices; v++) {
			slot = &opl3->voices[v];
			if (slot->state > 0 && slot->chan == chan)
				snd_opl3_update_pitch(opl3, v);
		}
	}
	spin_unlock_irqrestore(&opl3->voice_lock, flags);
}
/*
 * Deal with a controller type event. This includes all types of
 * control events, not just the midi controllers.
 * Mod wheel / tremolo depth toggle the chip-global vibrato/tremolo
 * depth bits in the percussion register; pitch bend retunes the
 * channel's voices.
 */
void snd_opl3_control(void *p, int type, struct snd_midi_channel *chan)
{
	struct snd_opl3 *opl3;

	opl3 = p;
#ifdef DEBUG_MIDI
	snd_printk(KERN_DEBUG "Controller, TYPE = %i, ch#: %i, inst#: %i\n",
		   type, chan->number, chan->midi_program);
#endif

	switch (type) {
	case MIDI_CTL_MSB_MODWHEEL:
		/* controller value >= 64 switches the global vibrato depth */
		if (chan->control[MIDI_CTL_MSB_MODWHEEL] > 63)
			opl3->drum_reg |= OPL3_VIBRATO_DEPTH;
		else
			opl3->drum_reg &= ~OPL3_VIBRATO_DEPTH;
		opl3->command(opl3, OPL3_LEFT | OPL3_REG_PERCUSSION,
			      opl3->drum_reg);
		break;
	case MIDI_CTL_E2_TREMOLO_DEPTH:
		/* same scheme for the global tremolo (AM) depth bit */
		if (chan->control[MIDI_CTL_E2_TREMOLO_DEPTH] > 63)
			opl3->drum_reg |= OPL3_TREMOLO_DEPTH;
		else
			opl3->drum_reg &= ~OPL3_TREMOLO_DEPTH;
		opl3->command(opl3, OPL3_LEFT | OPL3_REG_PERCUSSION,
			      opl3->drum_reg);
		break;
	case MIDI_CTL_PITCHBEND:
		/* reprogram pitch of all sounding voices on this channel */
		snd_opl3_pitch_ctrl(opl3, chan);
		break;
	}
}
/*
 * NRPN events.
 * Not implemented for OPL3 -- only logs when DEBUG_MIDI is defined.
 */
void snd_opl3_nrpn(void *p, struct snd_midi_channel *chan,
		   struct snd_midi_channel_set *chset)
{
	struct snd_opl3 *opl3;

	opl3 = p;
#ifdef DEBUG_MIDI
	snd_printk(KERN_DEBUG "NRPN, ch#: %i, inst#: %i\n",
		   chan->number, chan->midi_program);
#endif
}
/*
 * Receive sysex.
 * Not implemented for OPL3 -- only logs when DEBUG_MIDI is defined.
 */
void snd_opl3_sysex(void *p, unsigned char *buf, int len,
		    int parsed, struct snd_midi_channel_set *chset)
{
	struct snd_opl3 *opl3;

	opl3 = p;
#ifdef DEBUG_MIDI
	snd_printk(KERN_DEBUG "SYSEX\n");
#endif
}
| gpl-2.0 |
playfulgod/kernel_lge_dory | drivers/gpu/drm/ast/ast_post.c | 2696 | 46150 | /*
* Copyright 2012 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
*/
/*
* Authors: Dave Airlie <airlied@redhat.com>
*/
#include <drm/drmP.h>
#include "ast_drv.h"
#include "ast_dram_tables.h"
static void ast_init_dram_2300(struct drm_device *dev);
/*
 * Enable the VGA core by writing 0x01 to I/O ports 0x43 and 0x42.
 * NOTE(review): port semantics are chip-specific (ASPEED datasheet) --
 * confirm before reordering the two writes.
 */
static void
ast_enable_vga(struct drm_device *dev)
{
	struct ast_private *ast = dev->dev_private;

	ast_io_write8(ast, 0x43, 0x01);
	ast_io_write8(ast, 0x42, 0x01);
}
#if 0 /* will use later */
/*
 * Check whether the VGA core is already enabled: port 0x43 must be
 * non-zero and CRTC index 0xb6 bit 2 set.  AST1180 handling is a TODO;
 * compiled out until needed.
 */
static bool
ast_is_vga_enabled(struct drm_device *dev)
{
	struct ast_private *ast = dev->dev_private;
	u8 ch;

	if (ast->chip == AST1180) {
		/* TODO 1180 */
	} else {
		ch = ast_io_read8(ast, 0x43);
		if (ch) {
			ast_open_key(ast);
			ch = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb6, 0xff);
			return ch & 0x04;
		}
	}
	return 0;
}
#endif
/* Per-chip extended-register mask tables applied to CRTC indexes
 * 0xa0.. by ast_set_def_ext_reg(); 0xff terminates each table. */
static const u8 extreginfo[] = { 0x0f, 0x04, 0x1c, 0xff };
static const u8 extreginfo_ast2300a0[] = { 0x0f, 0x04, 0x1c, 0xff };
static const u8 extreginfo_ast2300[] = { 0x0f, 0x04, 0x1f, 0xff };
/*
 * Program default values into the extended (CRTC-indexed) registers:
 * clear the 0x81..0x8f scratch registers, apply the per-chip mask
 * table starting at index 0xa0, set the extended defaults and enable
 * the RAMDAC.
 */
static void
ast_set_def_ext_reg(struct drm_device *dev)
{
	struct ast_private *ast = dev->dev_private;
	u8 i, index, reg;
	const u8 *ext_reg_info;

	/* reset scratch */
	for (i = 0x81; i <= 0x8f; i++)
		ast_set_index_reg(ast, AST_IO_CRTC_PORT, i, 0x00);

	/* pick the mask table for this chip (A0 stepping differs) */
	if (ast->chip == AST2300) {
		if (dev->pdev->revision >= 0x20)
			ext_reg_info = extreginfo_ast2300;
		else
			ext_reg_info = extreginfo_ast2300a0;
	} else
		ext_reg_info = extreginfo;

	index = 0xa0;
	while (*ext_reg_info != 0xff) {
		ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, index, 0x00, *ext_reg_info);
		index++;
		ext_reg_info++;
	}

	/* disable standard IO/MEM decode if secondary */
	/* ast_set_index_reg-mask(ast, AST_IO_CRTC_PORT, 0xa1, 0xff, 0x3); */

	/* Set Ext. Default */
	ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x8c, 0x00, 0x01);
	ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0x00, 0x00);

	/* Enable RAMDAC for A1 */
	reg = 0x04;
	if (ast->chip == AST2300)
		reg |= 0x20;
	ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb6, 0xff, reg);
}
/*
 * Read a DWORD from SOC register address r through the 0xf004/0xf000
 * bridge window: the upper 16 bits of r select the window, the lower
 * 16 bits the offset inside the 0x10000 aperture.
 */
static inline u32 mindwm(struct ast_private *ast, u32 r)
{
	ast_write32(ast, 0xf004, r & 0xffff0000);
	ast_write32(ast, 0xf000, 0x1);

	return ast_read32(ast, 0x10000 + (r & 0x0000ffff));
}
/*
 * Write DWORD v to SOC register address r through the same
 * 0xf004/0xf000 bridge window as mindwm().
 */
static inline void moutdwm(struct ast_private *ast, u32 r, u32 v)
{
	ast_write32(ast, 0xf004, r & 0xffff0000);
	ast_write32(ast, 0xf000, 0x1);
	ast_write32(ast, 0x10000 + (r & 0x0000ffff), v);
}
/*
 * AST2100/2150 DLL CBR Setting
 */
#define CBR_SIZE_AST2150	((16 << 10) - 1)
#define CBR_PASSNUM_AST2150	5
#define CBR_THRESHOLD_AST2150	10
#define CBR_THRESHOLD2_AST2150	10
#define TIMEOUT_AST2150		5000000
#define CBR_PATNUM_AST2150	8

/* test patterns written to 0x1e6e007c during AST2150 CBR calibration
 * (only the first CBR_PATNUM_AST2150 entries are used by cbrscan) */
static const u32 pattern_AST2150[14] = {
	0xFF00FF00,
	0xCC33CC33,
	0xAA55AA55,
	0xFFFE0001,
	0x683501FE,
	0x0F1929B0,
	0x2D0B4346,
	0x60767F02,
	0x6FBE36A6,
	0x3A253035,
	0x3019686D,
	0x41C6167E,
	0x620152BF,
	0x20F050E0
};
/*
 * Run the two-phase AST2150 MMC burst test for data generator 'datagen'
 * (mode 1, then mode 3), polling completion bit 0x40 of 0x1e6e0070 with
 * a timeout each time.  Returns bit 7 of the status register shifted
 * down (presumably 1 = fail: callers treat non-zero as failure), or
 * 0xffffffff on timeout.
 */
static u32 mmctestburst2_ast2150(struct ast_private *ast, u32 datagen)
{
	u32 data, timeout;

	moutdwm(ast, 0x1e6e0070, 0x00000000);
	moutdwm(ast, 0x1e6e0070, 0x00000001 | (datagen << 3));
	timeout = 0;
	do {
		data = mindwm(ast, 0x1e6e0070) & 0x40;
		if (++timeout > TIMEOUT_AST2150) {
			moutdwm(ast, 0x1e6e0070, 0x00000000);
			return 0xffffffff;
		}
	} while (!data);
	moutdwm(ast, 0x1e6e0070, 0x00000000);
	moutdwm(ast, 0x1e6e0070, 0x00000003 | (datagen << 3));
	timeout = 0;
	do {
		data = mindwm(ast, 0x1e6e0070) & 0x40;
		if (++timeout > TIMEOUT_AST2150) {
			moutdwm(ast, 0x1e6e0070, 0x00000000);
			return 0xffffffff;
		}
	} while (!data);
	data = (mindwm(ast, 0x1e6e0070) & 0x80) >> 7;
	moutdwm(ast, 0x1e6e0070, 0x00000000);
	return data;
}
#if 0 /* unused in DDX driver - here for completeness */
/*
 * Single-beat variant of mmctestburst2_ast2150 (mode 5); same status /
 * timeout convention.  Compiled out.
 */
static u32 mmctestsingle2_ast2150(struct ast_private *ast, u32 datagen)
{
	u32 data, timeout;

	moutdwm(ast, 0x1e6e0070, 0x00000000);
	moutdwm(ast, 0x1e6e0070, 0x00000005 | (datagen << 3));
	timeout = 0;
	do {
		data = mindwm(ast, 0x1e6e0070) & 0x40;
		if (++timeout > TIMEOUT_AST2150) {
			moutdwm(ast, 0x1e6e0070, 0x00000000);
			return 0xffffffff;
		}
	} while (!data);
	data = (mindwm(ast, 0x1e6e0070) & 0x80) >> 7;
	moutdwm(ast, 0x1e6e0070, 0x00000000);
	return data;
}
#endif
/*
 * One calibration test pass: all eight burst data generators must
 * report success (mmctestburst2 == 0, which also rejects the
 * 0xffffffff timeout value).  Returns 1 on pass, 0 on any failure.
 */
static int cbrtest_ast2150(struct ast_private *ast)
{
	int gen = 0;

	while (gen < 8) {
		if (mmctestburst2_ast2150(ast, gen) != 0)
			return 0;
		gen++;
	}
	return 1;
}
/*
 * Scan all CBR patterns at the current DLL setting; each pattern gets
 * up to CBR_PASSNUM_AST2150 attempts.  Returns 1 if every pattern
 * eventually passed, 0 as soon as one pattern exhausts its attempts.
 */
static int cbrscan_ast2150(struct ast_private *ast, int busw)
{
	u32 pat, tries;

	for (pat = 0; pat < CBR_PATNUM_AST2150; pat++) {
		moutdwm(ast, 0x1e6e007c, pattern_AST2150[pat]);
		tries = 0;
		while (tries < CBR_PASSNUM_AST2150 && !cbrtest_ast2150(ast))
			tries++;
		if (tries == CBR_PASSNUM_AST2150)
			return 0;	/* this pattern never passed */
	}
	return 1;
}
/*
 * AST2100/2150 DLL calibration: sweep the DLL delay 0..99 (replicated
 * into all four bytes of 0x1e6e0068), record the window where the CBR
 * pattern scan passes, then program a point at roughly 7/16 into that
 * window.  Restarts from scratch whenever the window is too small.
 * NOTE(review): only dll_min[0]/dll_max[0] are ever updated; the other
 * three slots are initialised but unused -- matches the vendor code.
 */
static void cbrdlli_ast2150(struct ast_private *ast, int busw)
{
	u32 dll_min[4], dll_max[4], dlli, data, passcnt;

cbr_start:
	dll_min[0] = dll_min[1] = dll_min[2] = dll_min[3] = 0xff;
	dll_max[0] = dll_max[1] = dll_max[2] = dll_max[3] = 0x0;
	passcnt = 0;

	for (dlli = 0; dlli < 100; dlli++) {
		moutdwm(ast, 0x1e6e0068, dlli | (dlli << 8) | (dlli << 16) | (dlli << 24));
		data = cbrscan_ast2150(ast, busw);
		if (data != 0) {
			if (data & 0x1) {
				if (dll_min[0] > dlli)
					dll_min[0] = dlli;
				if (dll_max[0] < dlli)
					dll_max[0] = dlli;
			}
			passcnt++;
		} else if (passcnt >= CBR_THRESHOLD_AST2150)
			goto cbr_start;
	}
	/* window absent or narrower than the threshold: try again */
	if (dll_max[0] == 0 || (dll_max[0]-dll_min[0]) < CBR_THRESHOLD_AST2150)
		goto cbr_start;

	dlli = dll_min[0] + (((dll_max[0] - dll_min[0]) * 7) >> 4);
	moutdwm(ast, 0x1e6e0068, dlli | (dlli << 8) | (dlli << 16) | (dlli << 24));
}
/*
 * Legacy (pre-AST2300) DRAM controller initialisation.
 *
 * If scratch register 0xd0 bit 7 says the controller is not yet up,
 * replay the per-chip register/value table (ast_dram_tables.h) into the
 * memory controller through the 0x1e6e0000 window, run the AST2100/2150
 * DLL calibration for 266MHz parts, then wait for the ready flag.
 */
static void ast_init_dram_reg(struct drm_device *dev)
{
	struct ast_private *ast = dev->dev_private;
	u8 j;
	u32 data, temp, i;
	const struct ast_dramstruct *dram_reg_info;

	j = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xd0, 0xff);

	if ((j & 0x80) == 0) { /* VGA only */
		if (ast->chip == AST2000) {
			dram_reg_info = ast2000_dram_table_data;
			ast_write32(ast, 0xf004, 0x1e6e0000);
			ast_write32(ast, 0xf000, 0x1);
			ast_write32(ast, 0x10100, 0xa8);

			do {
				;
			} while (ast_read32(ast, 0x10100) != 0xa8);
		} else {/* AST2100/1100 */
			/* FIX: was "ast->chip == 2200" -- comparing the chip
			 * enum with the bare integer 2200 is always false, so
			 * AST2200 wrongly used the AST1100 table.  Use the
			 * AST2200 enumerator (cf. the switch below). */
			if (ast->chip == AST2100 || ast->chip == AST2200)
				dram_reg_info = ast2100_dram_table_data;
			else
				dram_reg_info = ast1100_dram_table_data;

			ast_write32(ast, 0xf004, 0x1e6e0000);
			ast_write32(ast, 0xf000, 0x1);
			ast_write32(ast, 0x12000, 0x1688A8A8);
			do {
				;
			} while (ast_read32(ast, 0x12000) != 0x01);

			ast_write32(ast, 0x10000, 0xfc600309);
			do {
				;
			} while (ast_read32(ast, 0x10000) != 0x01);
		}

		while (dram_reg_info->index != 0xffff) {
			if (dram_reg_info->index == 0xff00) {/* delay fn */
				for (i = 0; i < 15; i++)
					udelay(dram_reg_info->data);
			} else if (dram_reg_info->index == 0x4 && ast->chip != AST2000) {
				/* register 0x04: override per DRAM type and
				 * merge bits from 0x12070 */
				data = dram_reg_info->data;
				if (ast->dram_type == AST_DRAM_1Gx16)
					data = 0x00000d89;
				else if (ast->dram_type == AST_DRAM_1Gx32)
					data = 0x00000c8d;

				temp = ast_read32(ast, 0x12070);
				temp &= 0xc;
				temp <<= 2;
				ast_write32(ast, 0x10000 + dram_reg_info->index, data | temp);
			} else
				ast_write32(ast, 0x10000 + dram_reg_info->index, dram_reg_info->data);
			dram_reg_info++;
		}

		/* AST 2100/2150 DRAM calibration */
		data = ast_read32(ast, 0x10120);
		if (data == 0x5061) { /* 266Mhz */
			data = ast_read32(ast, 0x10004);
			if (data & 0x40)
				cbrdlli_ast2150(ast, 16); /* 16 bits */
			else
				cbrdlli_ast2150(ast, 32); /* 32 bits */
		}

		switch (ast->chip) {
		case AST2000:
			temp = ast_read32(ast, 0x10140);
			ast_write32(ast, 0x10140, temp | 0x40);
			break;
		case AST1100:
		case AST2100:
		case AST2200:
		case AST2150:
			temp = ast_read32(ast, 0x1200c);
			ast_write32(ast, 0x1200c, temp & 0xfffffffd);
			temp = ast_read32(ast, 0x12040);
			ast_write32(ast, 0x12040, temp | 0x40);
			break;
		default:
			break;
		}
	}

	/* wait ready */
	do {
		j = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xd0, 0xff);
	} while ((j & 0x40) == 0);
}
/*
 * POST the GPU: enable PCI IO/memory decode and bus mastering, switch
 * the VGA core on, program the extended register defaults, then run
 * the DRAM controller init appropriate for the chip generation.
 */
void ast_post_gpu(struct drm_device *dev)
{
	u32 reg;
	struct ast_private *ast = dev->dev_private;

	/* FIX: the third argument had been garbled to the mis-encoded HTML
	 * entity "&reg;" -- it must be the address of 'reg'. */
	pci_read_config_dword(ast->dev->pdev, 0x04, &reg);
	reg |= 0x3;	/* PCI command register: IO + memory space enable */
	pci_write_config_dword(ast->dev->pdev, 0x04, reg);

	ast_enable_vga(dev);
	ast_open_key(ast);
	ast_set_def_ext_reg(dev);

	if (ast->chip == AST2300)
		ast_init_dram_2300(dev);
	else
		ast_init_dram_reg(dev);
}
/* AST 2300 DRAM settings */
#define AST_DDR3 0
#define AST_DDR2 1

/*
 * Working set for the AST2300 DRAM POST: detected memory parameters
 * plus the pre-computed memory-controller register values programmed
 * during initialisation (reg_* fields mirror MCR register names).
 */
struct ast2300_dram_param {
	u32 dram_type;
	u32 dram_chipid;
	u32 dram_freq;
	u32 vram_size;
	u32 odt;
	u32 wodt;
	u32 rodt;
	u32 dram_config;
	u32 reg_PERIOD;
	u32 reg_MADJ;
	u32 reg_SADJ;
	u32 reg_MRS;
	u32 reg_EMRS;
	u32 reg_AC1;
	u32 reg_AC2;
	u32 reg_DQSIC;
	u32 reg_DRV;
	u32 reg_IOZ;
	u32 reg_DQIDLY;
	u32 reg_FREQ;
	u32 madj_max;
	u32 dll2_finetune_step;
};
/*
 * DQSI DLL CBR Setting
 */
#define CBR_SIZE1	((4 << 10) - 1)
#define CBR_SIZE2	((64 << 10) - 1)
#define CBR_PASSNUM	5
#define CBR_PASSNUM2	5
#define CBR_THRESHOLD	10
#define CBR_THRESHOLD2	10
#define TIMEOUT		5000000
#define CBR_PATNUM	8

/* test patterns written to 0x1e6e007c during AST2300 CBR calibration */
static const u32 pattern[8] = {
	0xFF00FF00,
	0xCC33CC33,
	0xAA55AA55,
	0x88778877,
	0x92CC4D6E,
	0x543D3CDE,
	0xF1E843C7,
	0x7C61D253
};
#if 0 /* unused in DDX, included for completeness */
/*
 * Burst MMC test (mode 0xc1): returns 0 when bit 13 is raised
 * (presumably the fail flag) or on timeout, 1 on clean completion.
 * Compiled out.
 */
static int mmc_test_burst(struct ast_private *ast, u32 datagen)
{
	u32 data, timeout;

	moutdwm(ast, 0x1e6e0070, 0x00000000);
	moutdwm(ast, 0x1e6e0070, 0x000000c1 | (datagen << 3));
	timeout = 0;
	do {
		data = mindwm(ast, 0x1e6e0070) & 0x3000;
		if (data & 0x2000) {
			return 0;
		}
		if (++timeout > TIMEOUT) {
			moutdwm(ast, 0x1e6e0070, 0x00000000);
			return 0;
		}
	} while (!data);
	moutdwm(ast, 0x1e6e0070, 0x00000000);
	return 1;
}
#endif
/*
 * Burst MMC test (mode 0x41): wait for completion bit 0x1000, then
 * fetch the 32-bit result from 0x1e6e0078 and fold it to a 16-bit
 * per-bit mask (high and low halves OR-ed) -- cbr_test2() inverts it,
 * so set bits appear to mark failing DQ bits.  Returns -1 on timeout
 * (0xffffffff when assigned to the callers' u32).
 */
static int mmc_test_burst2(struct ast_private *ast, u32 datagen)
{
	u32 data, timeout;

	moutdwm(ast, 0x1e6e0070, 0x00000000);
	moutdwm(ast, 0x1e6e0070, 0x00000041 | (datagen << 3));
	timeout = 0;
	do {
		data = mindwm(ast, 0x1e6e0070) & 0x1000;
		if (++timeout > TIMEOUT) {
			moutdwm(ast, 0x1e6e0070, 0x0);
			return -1;
		}
	} while (!data);
	data = mindwm(ast, 0x1e6e0078);
	data = (data | (data >> 16)) & 0xffff;
	moutdwm(ast, 0x1e6e0070, 0x0);
	return data;
}
#if 0 /* Unused in DDX here for completeness */
/*
 * Single-beat MMC test (mode 0xc5): returns 0 when bit 13 is raised
 * (presumably the fail flag) or on timeout, 1 on clean completion.
 * Compiled out.
 */
static int mmc_test_single(struct ast_private *ast, u32 datagen)
{
	u32 data, timeout;

	moutdwm(ast, 0x1e6e0070, 0x00000000);
	moutdwm(ast, 0x1e6e0070, 0x000000c5 | (datagen << 3));
	timeout = 0;
	do {
		data = mindwm(ast, 0x1e6e0070) & 0x3000;
		if (data & 0x2000)
			return 0;
		if (++timeout > TIMEOUT) {
			moutdwm(ast, 0x1e6e0070, 0x0);
			return 0;
		}
	} while (!data);
	moutdwm(ast, 0x1e6e0070, 0x0);
	return 1;
}
#endif
/*
 * Single-beat MMC test (mode 0x05); same result folding and timeout
 * convention as mmc_test_burst2().
 */
static int mmc_test_single2(struct ast_private *ast, u32 datagen)
{
	u32 data, timeout;

	moutdwm(ast, 0x1e6e0070, 0x00000000);
	moutdwm(ast, 0x1e6e0070, 0x00000005 | (datagen << 3));
	timeout = 0;
	do {
		data = mindwm(ast, 0x1e6e0070) & 0x1000;
		if (++timeout > TIMEOUT) {
			moutdwm(ast, 0x1e6e0070, 0x0);
			return -1;
		}
	} while (!data);
	data = mindwm(ast, 0x1e6e0078);
	data = (data | (data >> 16)) & 0xffff;
	moutdwm(ast, 0x1e6e0070, 0x0);
	return data;
}
/*
 * Single + eight burst MMC tests at the current delay setting.
 * Returns 0 when both byte lanes show failures, otherwise a 2-bit lane
 * pass mask (bit0 = low byte OK, bit1 = high byte OK, 3 = both).
 * NOTE(review): the final mask is derived from the LAST burst result
 * only -- apparently intentional, since mixed failures in earlier
 * iterations already return 0.
 */
static int cbr_test(struct ast_private *ast)
{
	u32 data;
	int i;
	data = mmc_test_single2(ast, 0);
	if ((data & 0xff) && (data & 0xff00))
		return 0;
	for (i = 0; i < 8; i++) {
		data = mmc_test_burst2(ast, i);
		if ((data & 0xff) && (data & 0xff00))
			return 0;
	}
	if (!data)
		return 3;
	else if (data & 0xff)
		return 2;	/* low byte failed, high byte clean */
	return 1;
}
/*
 * Run cbr_test() over all CBR_PATNUM patterns, giving each pattern up
 * to CBR_PASSNUM2 attempts.  The 2-bit lane pass mask is AND-ed across
 * patterns; returns that mask, or 0 as soon as no lane passes or a
 * pattern exhausts its attempts.
 */
static int cbr_scan(struct ast_private *ast)
{
	u32 data, data2, patcnt, loop;

	data2 = 3;
	for (patcnt = 0; patcnt < CBR_PATNUM; patcnt++) {
		moutdwm(ast, 0x1e6e007c, pattern[patcnt]);
		for (loop = 0; loop < CBR_PASSNUM2; loop++) {
			if ((data = cbr_test(ast)) != 0) {
				data2 &= data;
				if (!data2)
					return 0;
				break;
			}
		}
		if (loop == CBR_PASSNUM2)
			return 0;
	}
	return data2;
}
/*
 * Burst + single test at the current DQSI delay; combines the fail
 * masks and returns the inverted 16-bit per-bit PASS mask (0 when
 * every bit failed).
 * NOTE(review): mmc_test_*2() return -1 (0xffffffff here) on timeout,
 * which the 0xffff comparisons do not catch -- verify against the
 * vendor POST code before relying on the timeout path.
 */
static u32 cbr_test2(struct ast_private *ast)
{
	u32 data;

	data = mmc_test_burst2(ast, 0);
	if (data == 0xffff)
		return 0;
	data |= mmc_test_single2(ast, 0);
	if (data == 0xffff)
		return 0;

	return ~data & 0xffff;
}
/*
 * Run cbr_test2() over all CBR_PATNUM patterns, giving each pattern up
 * to CBR_PASSNUM2 attempts, AND-ing the 16-bit per-bit pass masks.
 * Returns the mask of DQ bits that passed every pattern, or 0 when a
 * pattern never passes or no bit survives all patterns.
 */
static u32 cbr_scan2(struct ast_private *ast)
{
	u32 data, data2, patcnt, loop;

	data2 = 0xffff;
	for (patcnt = 0; patcnt < CBR_PATNUM; patcnt++) {
		moutdwm(ast, 0x1e6e007c, pattern[patcnt]);
		for (loop = 0; loop < CBR_PASSNUM2; loop++) {
			if ((data = cbr_test2(ast)) != 0) {
				data2 &= data;
				/* FIX: was "if (!data)", dead code since the
				 * enclosing condition guarantees data != 0.
				 * Check the accumulated mask instead (as
				 * cbr_scan() does) so the scan stops early
				 * once no bit can still pass everywhere.
				 * The final result is unchanged. */
				if (!data2)
					return 0;
				break;
			}
		}
		if (loop == CBR_PASSNUM2)
			return 0;
	}
	return data2;
}
#if 0 /* unused in DDX - added for completeness */
/*
 * Reference (short) per-bit DQ input delay calibration: derives the
 * golden delay from register 0x1E6E0024, sweeps DQSI delay 0..75
 * recording each bit's passing window, then writes the 3-bit fine
 * delays into 0x1E6E0080 (bits 0-7) and 0x1E6E0084 (bits 8-15).
 * Superseded by finetuneDQI_L/_L2; compiled out.
 */
static void finetuneDQI(struct ast_private *ast, struct ast2300_dram_param *param)
{
	u32 gold_sadj[2], dllmin[16], dllmax[16], dlli, data, cnt, mask, passcnt;

	gold_sadj[0] = (mindwm(ast, 0x1E6E0024) >> 16) & 0xffff;
	gold_sadj[1] = gold_sadj[0] >> 8;
	gold_sadj[0] = gold_sadj[0] & 0xff;
	gold_sadj[0] = (gold_sadj[0] + gold_sadj[1]) >> 1;
	gold_sadj[1] = gold_sadj[0];

	for (cnt = 0; cnt < 16; cnt++) {
		dllmin[cnt] = 0xff;
		dllmax[cnt] = 0x0;
	}
	passcnt = 0;
	for (dlli = 0; dlli < 76; dlli++) {
		moutdwm(ast, 0x1E6E0068, 0x00001400 | (dlli << 16) | (dlli << 24));
		/* Wait DQSI latch phase calibration */
		moutdwm(ast, 0x1E6E0074, 0x00000010);
		moutdwm(ast, 0x1E6E0070, 0x00000003);
		do {
			data = mindwm(ast, 0x1E6E0070);
		} while (!(data & 0x00001000));
		moutdwm(ast, 0x1E6E0070, 0x00000000);

		moutdwm(ast, 0x1E6E0074, CBR_SIZE1);
		data = cbr_scan2(ast);
		if (data != 0) {
			/* widen each passing bit's delay window */
			mask = 0x00010001;
			for (cnt = 0; cnt < 16; cnt++) {
				if (data & mask) {
					if (dllmin[cnt] > dlli) {
						dllmin[cnt] = dlli;
					}
					if (dllmax[cnt] < dlli) {
						dllmax[cnt] = dlli;
					}
				}
				mask <<= 1;
			}
			passcnt++;
		} else if (passcnt >= CBR_THRESHOLD) {
			break;
		}
	}
	/* bits 0-7 -> 0x1E6E0080, 3-bit field per bit */
	data = 0;
	for (cnt = 0; cnt < 8; cnt++) {
		data >>= 3;
		if ((dllmax[cnt] > dllmin[cnt]) && ((dllmax[cnt] - dllmin[cnt]) >= CBR_THRESHOLD)) {
			dlli = (dllmin[cnt] + dllmax[cnt]) >> 1;
			if (gold_sadj[0] >= dlli) {
				dlli = (gold_sadj[0] - dlli) >> 1;
				if (dlli > 3) {
					dlli = 3;
				}
			} else {
				dlli = (dlli - gold_sadj[0]) >> 1;
				if (dlli > 4) {
					dlli = 4;
				}
				dlli = (8 - dlli) & 0x7;
			}
			data |= dlli << 21;
		}
	}
	moutdwm(ast, 0x1E6E0080, data);

	/* bits 8-15 -> 0x1E6E0084 */
	data = 0;
	for (cnt = 8; cnt < 16; cnt++) {
		data >>= 3;
		if ((dllmax[cnt] > dllmin[cnt]) && ((dllmax[cnt] - dllmin[cnt]) >= CBR_THRESHOLD)) {
			dlli = (dllmin[cnt] + dllmax[cnt]) >> 1;
			if (gold_sadj[1] >= dlli) {
				dlli = (gold_sadj[1] - dlli) >> 1;
				if (dlli > 3) {
					dlli = 3;
				} else {
					dlli = (dlli - 1) & 0x7;
				}
			} else {
				dlli = (dlli - gold_sadj[1]) >> 1;
				dlli += 1;
				if (dlli > 4) {
					dlli = 4;
				}
				dlli = (8 - dlli) & 0x7;
			}
			data |= dlli << 21;
		}
	}
	moutdwm(ast, 0x1E6E0084, data);
} /* finetuneDQI */
#endif
/*
 * Per-bit DQ input delay calibration, long version: sweep the DQSI
 * delay 0..75, record for each of the 16 DQ bits the [dllmin, dllmax]
 * window in which the CBR pattern scan passes, average the window
 * starts into a golden value, then program a 3-bit fine delay per bit
 * into 0x1E6E0080 (bits 0-7) and 0x1E6E0084 (bits 8-15).  Restarts
 * from FINETUNE_START until all 16 bits show a usable window.
 */
static void finetuneDQI_L(struct ast_private *ast, struct ast2300_dram_param *param)
{
	u32 gold_sadj[2], dllmin[16], dllmax[16], dlli, data, cnt, mask, passcnt;

FINETUNE_START:
	for (cnt = 0; cnt < 16; cnt++) {
		dllmin[cnt] = 0xff;
		dllmax[cnt] = 0x0;
	}
	passcnt = 0;
	for (dlli = 0; dlli < 76; dlli++) {
		moutdwm(ast, 0x1E6E0068, 0x00001400 | (dlli << 16) | (dlli << 24));
		/* Wait DQSI latch phase calibration */
		moutdwm(ast, 0x1E6E0074, 0x00000010);
		moutdwm(ast, 0x1E6E0070, 0x00000003);
		do {
			data = mindwm(ast, 0x1E6E0070);
		} while (!(data & 0x00001000));
		moutdwm(ast, 0x1E6E0070, 0x00000000);

		moutdwm(ast, 0x1E6E0074, CBR_SIZE1);
		data = cbr_scan2(ast);
		if (data != 0) {
			/* widen each passing bit's delay window */
			mask = 0x00010001;
			for (cnt = 0; cnt < 16; cnt++) {
				if (data & mask) {
					if (dllmin[cnt] > dlli) {
						dllmin[cnt] = dlli;
					}
					if (dllmax[cnt] < dlli) {
						dllmax[cnt] = dlli;
					}
				}
				mask <<= 1;
			}
			passcnt++;
		} else if (passcnt >= CBR_THRESHOLD2) {
			break;
		}
	}
	/* golden value = mean of the 16 window starts; all bits must have
	 * a window at least CBR_THRESHOLD2 wide or we start over */
	gold_sadj[0] = 0x0;
	passcnt = 0;
	for (cnt = 0; cnt < 16; cnt++) {
		if ((dllmax[cnt] > dllmin[cnt]) && ((dllmax[cnt] - dllmin[cnt]) >= CBR_THRESHOLD2)) {
			gold_sadj[0] += dllmin[cnt];
			passcnt++;
		}
	}
	if (passcnt != 16) {
		goto FINETUNE_START;
	}
	gold_sadj[0] = gold_sadj[0] >> 4;
	gold_sadj[1] = gold_sadj[0];

	/* bits 0-7 -> 0x1E6E0080, one 3-bit field per bit */
	data = 0;
	for (cnt = 0; cnt < 8; cnt++) {
		data >>= 3;
		if ((dllmax[cnt] > dllmin[cnt]) && ((dllmax[cnt] - dllmin[cnt]) >= CBR_THRESHOLD2)) {
			dlli = dllmin[cnt];
			if (gold_sadj[0] >= dlli) {
				dlli = ((gold_sadj[0] - dlli) * 19) >> 5;
				if (dlli > 3) {
					dlli = 3;
				}
			} else {
				dlli = ((dlli - gold_sadj[0]) * 19) >> 5;
				if (dlli > 4) {
					dlli = 4;
				}
				dlli = (8 - dlli) & 0x7;
			}
			data |= dlli << 21;
		}
	}
	moutdwm(ast, 0x1E6E0080, data);

	/* bits 8-15 -> 0x1E6E0084 */
	data = 0;
	for (cnt = 8; cnt < 16; cnt++) {
		data >>= 3;
		if ((dllmax[cnt] > dllmin[cnt]) && ((dllmax[cnt] - dllmin[cnt]) >= CBR_THRESHOLD2)) {
			dlli = dllmin[cnt];
			if (gold_sadj[1] >= dlli) {
				dlli = ((gold_sadj[1] - dlli) * 19) >> 5;
				if (dlli > 3) {
					dlli = 3;
				} else {
					dlli = (dlli - 1) & 0x7;
				}
			} else {
				dlli = ((dlli - gold_sadj[1]) * 19) >> 5;
				dlli += 1;
				if (dlli > 4) {
					dlli = 4;
				}
				dlli = (8 - dlli) & 0x7;
			}
			data |= dlli << 21;
		}
	}
	moutdwm(ast, 0x1E6E0084, data);
} /* finetuneDQI_L */
/*
 * Second per-bit fine-tune pass over a larger CBR region (CBR_SIZE2):
 * re-sweeps the DQSI delay, derives a golden centre from the tightest
 * common window (max of window starts / min of window ends per byte
 * half), then nudges the 3-bit fine delay fields already programmed in
 * 0x1E6E0080/0x1E6E0084 by at most one step towards that centre.
 */
static void finetuneDQI_L2(struct ast_private *ast, struct ast2300_dram_param *param)
{
	u32 gold_sadj[2], dllmin[16], dllmax[16], dlli, data, cnt, mask, passcnt, data2;

	for (cnt = 0; cnt < 16; cnt++) {
		dllmin[cnt] = 0xff;
		dllmax[cnt] = 0x0;
	}
	passcnt = 0;
	for (dlli = 0; dlli < 76; dlli++) {
		moutdwm(ast, 0x1E6E0068, 0x00001400 | (dlli << 16) | (dlli << 24));
		/* Wait DQSI latch phase calibration */
		moutdwm(ast, 0x1E6E0074, 0x00000010);
		moutdwm(ast, 0x1E6E0070, 0x00000003);
		do {
			data = mindwm(ast, 0x1E6E0070);
		} while (!(data & 0x00001000));
		moutdwm(ast, 0x1E6E0070, 0x00000000);

		moutdwm(ast, 0x1E6E0074, CBR_SIZE2);
		data = cbr_scan2(ast);
		if (data != 0) {
			/* widen each passing bit's delay window */
			mask = 0x00010001;
			for (cnt = 0; cnt < 16; cnt++) {
				if (data & mask) {
					if (dllmin[cnt] > dlli) {
						dllmin[cnt] = dlli;
					}
					if (dllmax[cnt] < dlli) {
						dllmax[cnt] = dlli;
					}
				}
				mask <<= 1;
			}
			passcnt++;
		} else if (passcnt >= CBR_THRESHOLD2) {
			break;
		}
	}
	/* low byte half: centre of the intersection of all bit windows */
	gold_sadj[0] = 0x0;
	gold_sadj[1] = 0xFF;
	for (cnt = 0; cnt < 8; cnt++) {
		if ((dllmax[cnt] > dllmin[cnt]) && ((dllmax[cnt] - dllmin[cnt]) >= CBR_THRESHOLD2)) {
			if (gold_sadj[0] < dllmin[cnt]) {
				gold_sadj[0] = dllmin[cnt];
			}
			if (gold_sadj[1] > dllmax[cnt]) {
				gold_sadj[1] = dllmax[cnt];
			}
		}
	}
	gold_sadj[0] = (gold_sadj[1] + gold_sadj[0]) >> 1;
	/* gold_sadj[1] now holds the current fine-delay fields of 0x80 */
	gold_sadj[1] = mindwm(ast, 0x1E6E0080);

	data = 0;
	for (cnt = 0; cnt < 8; cnt++) {
		data >>= 3;
		data2 = gold_sadj[1] & 0x7;
		gold_sadj[1] >>= 3;
		if ((dllmax[cnt] > dllmin[cnt]) && ((dllmax[cnt] - dllmin[cnt]) >= CBR_THRESHOLD2)) {
			dlli = (dllmin[cnt] + dllmax[cnt]) >> 1;
			if (gold_sadj[0] >= dlli) {
				dlli = (gold_sadj[0] - dlli) >> 1;
				if (dlli > 0) {
					dlli = 1;
				}
				if (data2 != 3) {
					data2 = (data2 + dlli) & 0x7;
				}
			} else {
				dlli = (dlli - gold_sadj[0]) >> 1;
				if (dlli > 0) {
					dlli = 1;
				}
				if (data2 != 4) {
					data2 = (data2 - dlli) & 0x7;
				}
			}
		}
		data |= data2 << 21;
	}
	moutdwm(ast, 0x1E6E0080, data);

	/* high byte half: same procedure against 0x1E6E0084 */
	gold_sadj[0] = 0x0;
	gold_sadj[1] = 0xFF;
	for (cnt = 8; cnt < 16; cnt++) {
		if ((dllmax[cnt] > dllmin[cnt]) && ((dllmax[cnt] - dllmin[cnt]) >= CBR_THRESHOLD2)) {
			if (gold_sadj[0] < dllmin[cnt]) {
				gold_sadj[0] = dllmin[cnt];
			}
			if (gold_sadj[1] > dllmax[cnt]) {
				gold_sadj[1] = dllmax[cnt];
			}
		}
	}
	gold_sadj[0] = (gold_sadj[1] + gold_sadj[0]) >> 1;
	gold_sadj[1] = mindwm(ast, 0x1E6E0084);

	data = 0;
	for (cnt = 8; cnt < 16; cnt++) {
		data >>= 3;
		data2 = gold_sadj[1] & 0x7;
		gold_sadj[1] >>= 3;
		if ((dllmax[cnt] > dllmin[cnt]) && ((dllmax[cnt] - dllmin[cnt]) >= CBR_THRESHOLD2)) {
			dlli = (dllmin[cnt] + dllmax[cnt]) >> 1;
			if (gold_sadj[0] >= dlli) {
				dlli = (gold_sadj[0] - dlli) >> 1;
				if (dlli > 0) {
					dlli = 1;
				}
				if (data2 != 3) {
					data2 = (data2 + dlli) & 0x7;
				}
			} else {
				dlli = (dlli - gold_sadj[0]) >> 1;
				if (dlli > 0) {
					dlli = 1;
				}
				if (data2 != 4) {
					data2 = (data2 - dlli) & 0x7;
				}
			}
		}
		data |= data2 << 21;
	}
	moutdwm(ast, 0x1E6E0084, data);
} /* finetuneDQI_L2 */
/*
 * Run the DQSI (DQS input) delay calibration ("CBR DLL2") for both
 * byte lanes.
 *
 * After fine-tuning the per-bit DQ input delays, sweep the candidate
 * DQSI delay value (0..75) written into MCR68 [0x1E6E0068], run the
 * built-in calibration test for each step, and record the passing
 * window per lane in dllmin[]/dllmax[].  The final setting is the
 * midpoint of each lane's window.  The whole sweep restarts from
 * CBR_START2 until both lanes show a window of at least CBR_THRESHOLD.
 *
 * NOTE(review): neither the retry loop nor the status-poll loops have
 * an iteration limit, so this can spin forever on bad hardware.
 */
static void cbr_dll2(struct ast_private *ast, struct ast2300_dram_param *param)
{
	u32 dllmin[2], dllmax[2], dlli, data, data2, passcnt;

	finetuneDQI_L(ast, param);
	finetuneDQI_L2(ast, param);

CBR_START2:
	dllmin[0] = dllmin[1] = 0xff;
	dllmax[0] = dllmax[1] = 0x0;
	passcnt = 0;
	for (dlli = 0; dlli < 76; dlli++) {
		/* Program the candidate delay for both lanes ([23:16]/[31:24]). */
		moutdwm(ast, 0x1E6E0068, 0x00001300 | (dlli << 16) | (dlli << 24));
		/* Wait DQSI latch phase calibration */
		moutdwm(ast, 0x1E6E0074, 0x00000010);
		moutdwm(ast, 0x1E6E0070, 0x00000003);
		do {
			data = mindwm(ast, 0x1E6E0070);
		} while (!(data & 0x00001000));
		moutdwm(ast, 0x1E6E0070, 0x00000000);
		/* Run the calibration test over a CBR_SIZE2 region. */
		moutdwm(ast, 0x1E6E0074, CBR_SIZE2);
		data = cbr_scan(ast);
		if (data != 0) {
			/* Bits 0/1 of the scan result: lane 0/1 passed. */
			if (data & 0x1) {
				if (dllmin[0] > dlli) {
					dllmin[0] = dlli;
				}
				if (dllmax[0] < dlli) {
					dllmax[0] = dlli;
				}
			}
			if (data & 0x2) {
				if (dllmin[1] > dlli) {
					dllmin[1] = dlli;
				}
				if (dllmax[1] < dlli) {
					dllmax[1] = dlli;
				}
			}
			passcnt++;
		} else if (passcnt >= CBR_THRESHOLD) {
			/* Already saw enough passes; we fell off the window. */
			break;
		}
	}
	/* Restart the sweep if either lane's passing window is too narrow. */
	if (dllmax[0] == 0 || (dllmax[0]-dllmin[0]) < CBR_THRESHOLD) {
		goto CBR_START2;
	}
	if (dllmax[1] == 0 || (dllmax[1]-dllmin[1]) < CBR_THRESHOLD) {
		goto CBR_START2;
	}
	/* Pack the midpoint of each lane's window into bits [15:8] / [7:0]. */
	dlli = (dllmin[1] + dllmax[1]) >> 1;
	dlli <<= 8;
	dlli += (dllmin[0] + dllmax[0]) >> 1;
	moutdwm(ast, 0x1E6E0068, (mindwm(ast, 0x1E6E0068) & 0xFFFF) | (dlli << 16));

	data = (mindwm(ast, 0x1E6E0080) >> 24) & 0x1F;
	data2 = (mindwm(ast, 0x1E6E0018) & 0xff80ffff) | (data << 16);
	moutdwm(ast, 0x1E6E0018, data2);
	moutdwm(ast, 0x1E6E0024, 0x8001 | (data << 1) | (param->dll2_finetune_step << 8));

	/* Wait DQSI latch phase calibration (run twice back to back). */
	moutdwm(ast, 0x1E6E0074, 0x00000010);
	moutdwm(ast, 0x1E6E0070, 0x00000003);
	do {
		data = mindwm(ast, 0x1E6E0070);
	} while (!(data & 0x00001000));
	moutdwm(ast, 0x1E6E0070, 0x00000000);
	moutdwm(ast, 0x1E6E0070, 0x00000003);
	do {
		data = mindwm(ast, 0x1E6E0070);
	} while (!(data & 0x00001000));
	moutdwm(ast, 0x1E6E0070, 0x00000000);
} /* CBRDLL2 */
/*
 * Fill the DDR3 parameter table for the configured DRAM frequency,
 * chip density and VRAM size.
 *
 * The per-frequency register values (AC timing, DQS input control,
 * mode registers, drive strength, ...) are fixed tables from the
 * vendor init code.  trap_AC2/trap_MRS fold in the 2-bit board strap
 * read from SCU register 0x1E6E2070 for the frequencies that honor it.
 * Unlisted frequencies fall into the "default" (396 MHz) case.
 */
static void get_ddr3_info(struct ast_private *ast, struct ast2300_dram_param *param)
{
	u32 trap, trap_AC2, trap_MRS;

	/* Unlock the SDRAM controller register set. */
	moutdwm(ast, 0x1E6E2000, 0x1688A8A8);

	/* Ger trap info */
	trap = (mindwm(ast, 0x1E6E2070) >> 25) & 0x3;
	trap_AC2 = 0x00020000 + (trap << 16);
	trap_AC2 |= 0x00300000 + ((trap & 0x2) << 19);
	trap_MRS = 0x00000010 + (trap << 4);
	trap_MRS |= ((trap & 0x2) << 18);

	/* Defaults; individual frequency cases may override these. */
	param->reg_MADJ = 0x00034C4C;
	param->reg_SADJ = 0x00001800;
	param->reg_DRV = 0x000000F0;
	param->reg_PERIOD = param->dram_freq;
	param->rodt = 0;

	switch (param->dram_freq) {
	case 336:
		moutdwm(ast, 0x1E6E2020, 0x0190);
		param->wodt = 0;
		param->reg_AC1 = 0x22202725;
		param->reg_AC2 = 0xAA007613 | trap_AC2;
		param->reg_DQSIC = 0x000000BA;
		param->reg_MRS = 0x04001400 | trap_MRS;
		param->reg_EMRS = 0x00000000;
		param->reg_IOZ = 0x00000034;
		param->reg_DQIDLY = 0x00000074;
		param->reg_FREQ = 0x00004DC0;
		param->madj_max = 96;
		param->dll2_finetune_step = 3;
		break;
	default:	/* unknown frequencies use the 396 MHz settings */
	case 396:
		moutdwm(ast, 0x1E6E2020, 0x03F1);
		param->wodt = 1;
		param->reg_AC1 = 0x33302825;
		param->reg_AC2 = 0xCC009617 | trap_AC2;
		param->reg_DQSIC = 0x000000E2;
		param->reg_MRS = 0x04001600 | trap_MRS;
		param->reg_EMRS = 0x00000000;
		param->reg_IOZ = 0x00000034;
		param->reg_DRV = 0x000000FA;
		param->reg_DQIDLY = 0x00000089;
		param->reg_FREQ = 0x000050C0;
		param->madj_max = 96;
		param->dll2_finetune_step = 4;

		/* AC2 timing additionally depends on the chip density. */
		switch (param->dram_chipid) {
		default:
		case AST_DRAM_512Mx16:
		case AST_DRAM_1Gx16:
			param->reg_AC2 = 0xCC009617 | trap_AC2;
			break;
		case AST_DRAM_2Gx16:
			param->reg_AC2 = 0xCC009622 | trap_AC2;
			break;
		case AST_DRAM_4Gx16:
			param->reg_AC2 = 0xCC00963F | trap_AC2;
			break;
		}
		break;

	case 408:
		moutdwm(ast, 0x1E6E2020, 0x01F0);
		param->wodt = 1;
		param->reg_AC1 = 0x33302825;
		param->reg_AC2 = 0xCC009617 | trap_AC2;
		param->reg_DQSIC = 0x000000E2;
		param->reg_MRS = 0x04001600 | trap_MRS;
		param->reg_EMRS = 0x00000000;
		param->reg_IOZ = 0x00000034;
		param->reg_DRV = 0x000000FA;
		param->reg_DQIDLY = 0x00000089;
		param->reg_FREQ = 0x000050C0;
		param->madj_max = 96;
		param->dll2_finetune_step = 4;

		/* AC2 timing additionally depends on the chip density. */
		switch (param->dram_chipid) {
		default:
		case AST_DRAM_512Mx16:
		case AST_DRAM_1Gx16:
			param->reg_AC2 = 0xCC009617 | trap_AC2;
			break;
		case AST_DRAM_2Gx16:
			param->reg_AC2 = 0xCC009622 | trap_AC2;
			break;
		case AST_DRAM_4Gx16:
			param->reg_AC2 = 0xCC00963F | trap_AC2;
			break;
		}

		break;
	case 456:
		moutdwm(ast, 0x1E6E2020, 0x0230);
		param->wodt = 0;
		param->reg_AC1 = 0x33302926;
		param->reg_AC2 = 0xCD44961A;
		param->reg_DQSIC = 0x000000FC;
		param->reg_MRS = 0x00081830;
		param->reg_EMRS = 0x00000000;
		param->reg_IOZ = 0x00000045;
		param->reg_DQIDLY = 0x00000097;
		param->reg_FREQ = 0x000052C0;
		param->madj_max = 88;
		param->dll2_finetune_step = 4;
		break;
	case 504:
		moutdwm(ast, 0x1E6E2020, 0x0270);
		param->wodt = 1;
		param->reg_AC1 = 0x33302926;
		param->reg_AC2 = 0xDE44A61D;
		param->reg_DQSIC = 0x00000117;
		param->reg_MRS = 0x00081A30;
		param->reg_EMRS = 0x00000000;
		param->reg_IOZ = 0x070000BB;
		param->reg_DQIDLY = 0x000000A0;
		param->reg_FREQ = 0x000054C0;
		param->madj_max = 79;
		param->dll2_finetune_step = 4;
		break;
	case 528:
		moutdwm(ast, 0x1E6E2020, 0x0290);
		param->wodt = 1;
		param->rodt = 1;
		param->reg_AC1 = 0x33302926;
		param->reg_AC2 = 0xEF44B61E;
		param->reg_DQSIC = 0x00000125;
		param->reg_MRS = 0x00081A30;
		param->reg_EMRS = 0x00000040;
		param->reg_DRV = 0x000000F5;
		param->reg_IOZ = 0x00000023;
		param->reg_DQIDLY = 0x00000088;
		param->reg_FREQ = 0x000055C0;
		param->madj_max = 76;
		param->dll2_finetune_step = 3;
		break;
	case 576:
		moutdwm(ast, 0x1E6E2020, 0x0140);
		param->reg_MADJ = 0x00136868;
		param->reg_SADJ = 0x00004534;
		param->wodt = 1;
		param->rodt = 1;
		param->reg_AC1 = 0x33302A37;
		param->reg_AC2 = 0xEF56B61E;
		param->reg_DQSIC = 0x0000013F;
		param->reg_MRS = 0x00101A50;
		param->reg_EMRS = 0x00000040;
		param->reg_DRV = 0x000000FA;
		param->reg_IOZ = 0x00000023;
		param->reg_DQIDLY = 0x00000078;
		param->reg_FREQ = 0x000057C0;
		param->madj_max = 136;
		param->dll2_finetune_step = 3;
		break;
	case 600:
		moutdwm(ast, 0x1E6E2020, 0x02E1);
		param->reg_MADJ = 0x00136868;
		param->reg_SADJ = 0x00004534;
		param->wodt = 1;
		param->rodt = 1;
		param->reg_AC1 = 0x32302A37;
		param->reg_AC2 = 0xDF56B61F;
		param->reg_DQSIC = 0x0000014D;
		param->reg_MRS = 0x00101A50;
		param->reg_EMRS = 0x00000004;
		param->reg_DRV = 0x000000F5;
		param->reg_IOZ = 0x00000023;
		param->reg_DQIDLY = 0x00000078;
		param->reg_FREQ = 0x000058C0;
		param->madj_max = 132;
		param->dll2_finetune_step = 3;
		break;
	case 624:
		moutdwm(ast, 0x1E6E2020, 0x0160);
		param->reg_MADJ = 0x00136868;
		param->reg_SADJ = 0x00004534;
		param->wodt = 1;
		param->rodt = 1;
		param->reg_AC1 = 0x32302A37;
		param->reg_AC2 = 0xEF56B621;
		param->reg_DQSIC = 0x0000015A;
		param->reg_MRS = 0x02101A50;
		param->reg_EMRS = 0x00000004;
		param->reg_DRV = 0x000000F5;
		param->reg_IOZ = 0x00000034;
		param->reg_DQIDLY = 0x00000078;
		param->reg_FREQ = 0x000059C0;
		param->madj_max = 128;
		param->dll2_finetune_step = 3;
		break;
	} /* switch freq */

	/* Base DRAM config word by chip density (unknowns treated as 1Gx16). */
	switch (param->dram_chipid) {
	case AST_DRAM_512Mx16:
		param->dram_config = 0x130;
		break;
	default:
	case AST_DRAM_1Gx16:
		param->dram_config = 0x131;
		break;
	case AST_DRAM_2Gx16:
		param->dram_config = 0x132;
		break;
	case AST_DRAM_4Gx16:
		param->dram_config = 0x133;
		break;
	}; /* switch size */	/* NOTE(review): stray ';' - harmless null statement */

	/* Fold the VRAM aperture size into the low config bits. */
	switch (param->vram_size) {
	default:
	case AST_VIDMEM_SIZE_8M:
		param->dram_config |= 0x00;
		break;
	case AST_VIDMEM_SIZE_16M:
		param->dram_config |= 0x04;
		break;
	case AST_VIDMEM_SIZE_32M:
		param->dram_config |= 0x08;
		break;
	case AST_VIDMEM_SIZE_64M:
		param->dram_config |= 0x0c;
		break;
	}
}
/*
 * Program and train the SDRAM controller for DDR3 using the values
 * prepared by get_ddr3_info().
 *
 * Sequence: reset the controller, load clock/delay (MADJ/SADJ) and
 * timing registers, wait for MCLK2X lock, then iteratively bump the
 * master delay until the sampled phase in MCR1C[15:8] is acceptable,
 * issue the DDR3 mode-register-set commands, enable ODT and refresh,
 * and finally calibrate the DQS input delays via cbr_dll2().
 *
 * NOTE(review): all status-poll loops here spin without a timeout.
 */
static void ddr3_init(struct ast_private *ast, struct ast2300_dram_param *param)
{
	u32 data, data2;

	/* Controller reset and clear of delay/ODT control. */
	moutdwm(ast, 0x1E6E0000, 0xFC600309);
	moutdwm(ast, 0x1E6E0018, 0x00000100);
	moutdwm(ast, 0x1E6E0024, 0x00000000);
	moutdwm(ast, 0x1E6E0034, 0x00000000);
	udelay(10);
	moutdwm(ast, 0x1E6E0064, param->reg_MADJ);
	moutdwm(ast, 0x1E6E0068, param->reg_SADJ);
	udelay(10);
	moutdwm(ast, 0x1E6E0064, param->reg_MADJ | 0xC0000);
	udelay(10);

	/* Static configuration from the parameter table. */
	moutdwm(ast, 0x1E6E0004, param->dram_config);
	moutdwm(ast, 0x1E6E0008, 0x90040f);
	moutdwm(ast, 0x1E6E0010, param->reg_AC1);
	moutdwm(ast, 0x1E6E0014, param->reg_AC2);
	moutdwm(ast, 0x1E6E0020, param->reg_DQSIC);
	moutdwm(ast, 0x1E6E0080, 0x00000000);
	moutdwm(ast, 0x1E6E0084, 0x00000000);
	moutdwm(ast, 0x1E6E0088, param->reg_DQIDLY);
	moutdwm(ast, 0x1E6E0018, 0x4040A170);
	moutdwm(ast, 0x1E6E0018, 0x20402370);
	moutdwm(ast, 0x1E6E0038, 0x00000000);
	moutdwm(ast, 0x1E6E0040, 0xFF444444);
	moutdwm(ast, 0x1E6E0044, 0x22222222);
	moutdwm(ast, 0x1E6E0048, 0x22222222);
	moutdwm(ast, 0x1E6E004C, 0x00000002);
	moutdwm(ast, 0x1E6E0050, 0x80000000);
	moutdwm(ast, 0x1E6E0050, 0x00000000);
	moutdwm(ast, 0x1E6E0054, 0);
	moutdwm(ast, 0x1E6E0060, param->reg_DRV);
	moutdwm(ast, 0x1E6E006C, param->reg_IOZ);
	moutdwm(ast, 0x1E6E0070, 0x00000000);
	moutdwm(ast, 0x1E6E0074, 0x00000000);
	moutdwm(ast, 0x1E6E0078, 0x00000000);
	moutdwm(ast, 0x1E6E007C, 0x00000000);

	/* Wait MCLK2X lock to MCLK */
	do {
		data = mindwm(ast, 0x1E6E001C);
	} while (!(data & 0x08000000));
	moutdwm(ast, 0x1E6E0034, 0x00000001);
	moutdwm(ast, 0x1E6E000C, 0x00005C04);
	udelay(10);
	moutdwm(ast, 0x1E6E000C, 0x00000000);
	moutdwm(ast, 0x1E6E0034, 0x00000000);
	data = mindwm(ast, 0x1E6E001C);
	data = (data >> 8) & 0xff;
	/* Step the master delay up until the sampled phase is in range,
	 * re-locking MCLK2X after every adjustment; give up once the
	 * delay would exceed madj_max.
	 */
	while ((data & 0x08) || ((data & 0x7) < 2) || (data < 4)) {
		data2 = (mindwm(ast, 0x1E6E0064) & 0xfff3ffff) + 4;
		if ((data2 & 0xff) > param->madj_max) {
			break;
		}
		moutdwm(ast, 0x1E6E0064, data2);
		/* Derive the matching slave delay from the new master delay. */
		if (data2 & 0x00100000) {
			data2 = ((data2 & 0xff) >> 3) + 3;
		} else {
			data2 = ((data2 & 0xff) >> 2) + 5;
		}
		data = mindwm(ast, 0x1E6E0068) & 0xffff00ff;
		data2 += data & 0xff;
		data = data | (data2 << 8);
		moutdwm(ast, 0x1E6E0068, data);
		udelay(10);
		moutdwm(ast, 0x1E6E0064, mindwm(ast, 0x1E6E0064) | 0xC0000);
		udelay(10);
		data = mindwm(ast, 0x1E6E0018) & 0xfffff1ff;
		moutdwm(ast, 0x1E6E0018, data);
		data = data | 0x200;
		moutdwm(ast, 0x1E6E0018, data);
		do {
			data = mindwm(ast, 0x1E6E001C);
		} while (!(data & 0x08000000));

		moutdwm(ast, 0x1E6E0034, 0x00000001);
		moutdwm(ast, 0x1E6E000C, 0x00005C04);
		udelay(10);
		moutdwm(ast, 0x1E6E000C, 0x00000000);
		moutdwm(ast, 0x1E6E0034, 0x00000000);
		data = mindwm(ast, 0x1E6E001C);
		data = (data >> 8) & 0xff;
	}
	data = mindwm(ast, 0x1E6E0018) | 0xC00;
	moutdwm(ast, 0x1E6E0018, data);

	moutdwm(ast, 0x1E6E0034, 0x00000001);
	moutdwm(ast, 0x1E6E000C, 0x00000040);
	udelay(50);
	/* Mode Register Setting */
	moutdwm(ast, 0x1E6E002C, param->reg_MRS | 0x100);
	moutdwm(ast, 0x1E6E0030, param->reg_EMRS);
	moutdwm(ast, 0x1E6E0028, 0x00000005);
	moutdwm(ast, 0x1E6E0028, 0x00000007);
	moutdwm(ast, 0x1E6E0028, 0x00000003);
	moutdwm(ast, 0x1E6E0028, 0x00000001);
	moutdwm(ast, 0x1E6E002C, param->reg_MRS);
	moutdwm(ast, 0x1E6E000C, 0x00005C08);
	moutdwm(ast, 0x1E6E0028, 0x00000001);

	moutdwm(ast, 0x1E6E000C, 0x7FFF5C01);
	/* Assemble the ODT enable word from the parameter table. */
	data = 0;
	if (param->wodt) {
		data = 0x300;
	}
	if (param->rodt) {
		data = data | 0x3000 | ((param->reg_AC2 & 0x60000) >> 3);
	}
	moutdwm(ast, 0x1E6E0034, data | 0x3);

	/* Wait DQI delay lock */
	do {
		data = mindwm(ast, 0x1E6E0080);
	} while (!(data & 0x40000000));
	/* Wait DQSI delay lock */
	do {
		data = mindwm(ast, 0x1E6E0020);
	} while (!(data & 0x00000800));
	/* Calibrate the DQSI delay */
	cbr_dll2(ast, param);

	moutdwm(ast, 0x1E6E0120, param->reg_FREQ);
	/* ECC Memory Initialization */
#ifdef ECC
	moutdwm(ast, 0x1E6E007C, 0x00000000);
	moutdwm(ast, 0x1E6E0070, 0x221);
	do {
		data = mindwm(ast, 0x1E6E0070);
	} while (!(data & 0x00001000));
	moutdwm(ast, 0x1E6E0070, 0x00000000);
	moutdwm(ast, 0x1E6E0050, 0x80000000);
	moutdwm(ast, 0x1E6E0050, 0x00000000);
#endif
}
/*
 * Fill the DDR2 parameter table for the configured DRAM frequency,
 * chip density and VRAM size.  DDR2 counterpart of get_ddr3_info():
 * fixed per-frequency vendor tables, with trap_AC2/trap_MRS folding
 * in the 2-bit board strap from SCU register 0x1E6E2070 where used.
 * Unlisted frequencies fall into the "default" (396 MHz) case.
 */
static void get_ddr2_info(struct ast_private *ast, struct ast2300_dram_param *param)
{
	u32 trap, trap_AC2, trap_MRS;

	/* Unlock the SDRAM controller register set. */
	moutdwm(ast, 0x1E6E2000, 0x1688A8A8);

	/* Ger trap info */
	trap = (mindwm(ast, 0x1E6E2070) >> 25) & 0x3;
	trap_AC2 = (trap << 20) | (trap << 16);
	trap_AC2 += 0x00110000;
	trap_MRS = 0x00000040 | (trap << 4);

	/* Defaults; individual frequency cases may override these. */
	param->reg_MADJ = 0x00034C4C;
	param->reg_SADJ = 0x00001800;
	param->reg_DRV = 0x000000F0;
	param->reg_PERIOD = param->dram_freq;
	param->rodt = 0;

	switch (param->dram_freq) {
	case 264:
		moutdwm(ast, 0x1E6E2020, 0x0130);
		param->wodt = 0;
		param->reg_AC1 = 0x11101513;
		param->reg_AC2 = 0x78117011;
		param->reg_DQSIC = 0x00000092;
		param->reg_MRS = 0x00000842;
		param->reg_EMRS = 0x00000000;
		param->reg_DRV = 0x000000F0;
		param->reg_IOZ = 0x00000034;
		param->reg_DQIDLY = 0x0000005A;
		param->reg_FREQ = 0x00004AC0;
		param->madj_max = 138;
		param->dll2_finetune_step = 3;
		break;
	case 336:
		moutdwm(ast, 0x1E6E2020, 0x0190);
		param->wodt = 1;
		param->reg_AC1 = 0x22202613;
		param->reg_AC2 = 0xAA009016 | trap_AC2;
		param->reg_DQSIC = 0x000000BA;
		param->reg_MRS = 0x00000A02 | trap_MRS;
		param->reg_EMRS = 0x00000040;
		param->reg_DRV = 0x000000FA;
		param->reg_IOZ = 0x00000034;
		param->reg_DQIDLY = 0x00000074;
		param->reg_FREQ = 0x00004DC0;
		param->madj_max = 96;
		param->dll2_finetune_step = 3;
		break;
	default:	/* unknown frequencies use the 396 MHz settings */
	case 396:
		moutdwm(ast, 0x1E6E2020, 0x03F1);
		param->wodt = 1;
		param->rodt = 0;
		param->reg_AC1 = 0x33302714;
		param->reg_AC2 = 0xCC00B01B | trap_AC2;
		param->reg_DQSIC = 0x000000E2;
		param->reg_MRS = 0x00000C02 | trap_MRS;
		param->reg_EMRS = 0x00000040;
		param->reg_DRV = 0x000000FA;
		param->reg_IOZ = 0x00000034;
		param->reg_DQIDLY = 0x00000089;
		param->reg_FREQ = 0x000050C0;
		param->madj_max = 96;
		param->dll2_finetune_step = 4;

		/* AC2 timing additionally depends on the chip density. */
		switch (param->dram_chipid) {
		case AST_DRAM_512Mx16:
			param->reg_AC2 = 0xCC00B016 | trap_AC2;
			break;
		default:
		case AST_DRAM_1Gx16:
			param->reg_AC2 = 0xCC00B01B | trap_AC2;
			break;
		case AST_DRAM_2Gx16:
			param->reg_AC2 = 0xCC00B02B | trap_AC2;
			break;
		case AST_DRAM_4Gx16:
			param->reg_AC2 = 0xCC00B03F | trap_AC2;
			break;
		}

		break;
	case 408:
		moutdwm(ast, 0x1E6E2020, 0x01F0);
		param->wodt = 1;
		param->rodt = 0;
		param->reg_AC1 = 0x33302714;
		param->reg_AC2 = 0xCC00B01B | trap_AC2;
		param->reg_DQSIC = 0x000000E2;
		param->reg_MRS = 0x00000C02 | trap_MRS;
		param->reg_EMRS = 0x00000040;
		param->reg_DRV = 0x000000FA;
		param->reg_IOZ = 0x00000034;
		param->reg_DQIDLY = 0x00000089;
		param->reg_FREQ = 0x000050C0;
		param->madj_max = 96;
		param->dll2_finetune_step = 4;

		/* AC2 timing additionally depends on the chip density. */
		switch (param->dram_chipid) {
		case AST_DRAM_512Mx16:
			param->reg_AC2 = 0xCC00B016 | trap_AC2;
			break;
		default:
		case AST_DRAM_1Gx16:
			param->reg_AC2 = 0xCC00B01B | trap_AC2;
			break;
		case AST_DRAM_2Gx16:
			param->reg_AC2 = 0xCC00B02B | trap_AC2;
			break;
		case AST_DRAM_4Gx16:
			param->reg_AC2 = 0xCC00B03F | trap_AC2;
			break;
		}

		break;
	case 456:
		moutdwm(ast, 0x1E6E2020, 0x0230);
		param->wodt = 0;
		param->reg_AC1 = 0x33302815;
		param->reg_AC2 = 0xCD44B01E;
		param->reg_DQSIC = 0x000000FC;
		param->reg_MRS = 0x00000E72;
		param->reg_EMRS = 0x00000000;
		param->reg_DRV = 0x00000000;
		param->reg_IOZ = 0x00000034;
		param->reg_DQIDLY = 0x00000097;
		param->reg_FREQ = 0x000052C0;
		param->madj_max = 88;
		param->dll2_finetune_step = 3;
		break;
	case 504:
		moutdwm(ast, 0x1E6E2020, 0x0261);
		param->wodt = 1;
		param->rodt = 1;
		param->reg_AC1 = 0x33302815;
		param->reg_AC2 = 0xDE44C022;
		param->reg_DQSIC = 0x00000117;
		param->reg_MRS = 0x00000E72;
		param->reg_EMRS = 0x00000040;
		param->reg_DRV = 0x0000000A;
		param->reg_IOZ = 0x00000045;
		param->reg_DQIDLY = 0x000000A0;
		param->reg_FREQ = 0x000054C0;
		param->madj_max = 79;
		param->dll2_finetune_step = 3;
		break;
	case 528:
		moutdwm(ast, 0x1E6E2020, 0x0120);
		param->wodt = 1;
		param->rodt = 1;
		param->reg_AC1 = 0x33302815;
		param->reg_AC2 = 0xEF44D024;
		param->reg_DQSIC = 0x00000125;
		param->reg_MRS = 0x00000E72;
		param->reg_EMRS = 0x00000004;
		param->reg_DRV = 0x000000F9;
		param->reg_IOZ = 0x00000045;
		param->reg_DQIDLY = 0x000000A7;
		param->reg_FREQ = 0x000055C0;
		param->madj_max = 76;
		param->dll2_finetune_step = 3;
		break;
	case 552:
		moutdwm(ast, 0x1E6E2020, 0x02A1);
		param->wodt = 1;
		param->rodt = 1;
		param->reg_AC1 = 0x43402915;
		param->reg_AC2 = 0xFF44E025;
		param->reg_DQSIC = 0x00000132;
		param->reg_MRS = 0x00000E72;
		param->reg_EMRS = 0x00000040;
		param->reg_DRV = 0x0000000A;
		param->reg_IOZ = 0x00000045;
		param->reg_DQIDLY = 0x000000AD;
		param->reg_FREQ = 0x000056C0;
		param->madj_max = 76;
		param->dll2_finetune_step = 3;
		break;
	case 576:
		moutdwm(ast, 0x1E6E2020, 0x0140);
		param->wodt = 1;
		param->rodt = 1;
		param->reg_AC1 = 0x43402915;
		param->reg_AC2 = 0xFF44E027;
		param->reg_DQSIC = 0x0000013F;
		param->reg_MRS = 0x00000E72;
		param->reg_EMRS = 0x00000004;
		param->reg_DRV = 0x000000F5;
		param->reg_IOZ = 0x00000045;
		param->reg_DQIDLY = 0x000000B3;
		param->reg_FREQ = 0x000057C0;
		param->madj_max = 76;
		param->dll2_finetune_step = 3;
		break;
	}

	/* Base DRAM config word by chip density (unknowns treated as 1Gx16). */
	switch (param->dram_chipid) {
	case AST_DRAM_512Mx16:
		param->dram_config = 0x100;
		break;
	default:
	case AST_DRAM_1Gx16:
		param->dram_config = 0x121;
		break;
	case AST_DRAM_2Gx16:
		param->dram_config = 0x122;
		break;
	case AST_DRAM_4Gx16:
		param->dram_config = 0x123;
		break;
	}; /* switch size */	/* NOTE(review): stray ';' - harmless null statement */

	/* Fold the VRAM aperture size into the low config bits. */
	switch (param->vram_size) {
	default:
	case AST_VIDMEM_SIZE_8M:
		param->dram_config |= 0x00;
		break;
	case AST_VIDMEM_SIZE_16M:
		param->dram_config |= 0x04;
		break;
	case AST_VIDMEM_SIZE_32M:
		param->dram_config |= 0x08;
		break;
	case AST_VIDMEM_SIZE_64M:
		param->dram_config |= 0x0c;
		break;
	}
}
/*
 * Program and train the SDRAM controller for DDR2 using the values
 * prepared by get_ddr2_info().  Mirrors ddr3_init() but with DDR2
 * calibration constants and the DDR2 mode/extended-mode register
 * sequence (including the OCD default 0x380 / clear steps on EMRS).
 *
 * NOTE(review): all status-poll loops here spin without a timeout.
 */
static void ddr2_init(struct ast_private *ast, struct ast2300_dram_param *param)
{
	u32 data, data2;

	/* Controller reset, then clock/delay (MADJ/SADJ) programming. */
	moutdwm(ast, 0x1E6E0000, 0xFC600309);
	moutdwm(ast, 0x1E6E0018, 0x00000100);
	moutdwm(ast, 0x1E6E0024, 0x00000000);
	moutdwm(ast, 0x1E6E0064, param->reg_MADJ);
	moutdwm(ast, 0x1E6E0068, param->reg_SADJ);
	udelay(10);
	moutdwm(ast, 0x1E6E0064, param->reg_MADJ | 0xC0000);
	udelay(10);

	/* Static configuration from the parameter table. */
	moutdwm(ast, 0x1E6E0004, param->dram_config);
	moutdwm(ast, 0x1E6E0008, 0x90040f);
	moutdwm(ast, 0x1E6E0010, param->reg_AC1);
	moutdwm(ast, 0x1E6E0014, param->reg_AC2);
	moutdwm(ast, 0x1E6E0020, param->reg_DQSIC);
	moutdwm(ast, 0x1E6E0080, 0x00000000);
	moutdwm(ast, 0x1E6E0084, 0x00000000);
	moutdwm(ast, 0x1E6E0088, param->reg_DQIDLY);
	moutdwm(ast, 0x1E6E0018, 0x4040A130);
	moutdwm(ast, 0x1E6E0018, 0x20402330);
	moutdwm(ast, 0x1E6E0038, 0x00000000);
	moutdwm(ast, 0x1E6E0040, 0xFF808000);
	moutdwm(ast, 0x1E6E0044, 0x88848466);
	moutdwm(ast, 0x1E6E0048, 0x44440008);
	moutdwm(ast, 0x1E6E004C, 0x00000000);
	moutdwm(ast, 0x1E6E0050, 0x80000000);
	moutdwm(ast, 0x1E6E0050, 0x00000000);
	moutdwm(ast, 0x1E6E0054, 0);
	moutdwm(ast, 0x1E6E0060, param->reg_DRV);
	moutdwm(ast, 0x1E6E006C, param->reg_IOZ);
	moutdwm(ast, 0x1E6E0070, 0x00000000);
	moutdwm(ast, 0x1E6E0074, 0x00000000);
	moutdwm(ast, 0x1E6E0078, 0x00000000);
	moutdwm(ast, 0x1E6E007C, 0x00000000);

	/* Wait MCLK2X lock to MCLK */
	do {
		data = mindwm(ast, 0x1E6E001C);
	} while (!(data & 0x08000000));
	moutdwm(ast, 0x1E6E0034, 0x00000001);
	moutdwm(ast, 0x1E6E000C, 0x00005C04);
	udelay(10);
	moutdwm(ast, 0x1E6E000C, 0x00000000);
	moutdwm(ast, 0x1E6E0034, 0x00000000);
	data = mindwm(ast, 0x1E6E001C);
	data = (data >> 8) & 0xff;
	/* Step the master delay up until the sampled phase is in range,
	 * re-locking MCLK2X after every adjustment; give up once the
	 * delay would exceed madj_max.
	 */
	while ((data & 0x08) || ((data & 0x7) < 2) || (data < 4)) {
		data2 = (mindwm(ast, 0x1E6E0064) & 0xfff3ffff) + 4;
		if ((data2 & 0xff) > param->madj_max) {
			break;
		}
		moutdwm(ast, 0x1E6E0064, data2);
		/* Derive the matching slave delay from the new master delay. */
		if (data2 & 0x00100000) {
			data2 = ((data2 & 0xff) >> 3) + 3;
		} else {
			data2 = ((data2 & 0xff) >> 2) + 5;
		}
		data = mindwm(ast, 0x1E6E0068) & 0xffff00ff;
		data2 += data & 0xff;
		data = data | (data2 << 8);
		moutdwm(ast, 0x1E6E0068, data);
		udelay(10);
		moutdwm(ast, 0x1E6E0064, mindwm(ast, 0x1E6E0064) | 0xC0000);
		udelay(10);
		data = mindwm(ast, 0x1E6E0018) & 0xfffff1ff;
		moutdwm(ast, 0x1E6E0018, data);
		data = data | 0x200;
		moutdwm(ast, 0x1E6E0018, data);
		do {
			data = mindwm(ast, 0x1E6E001C);
		} while (!(data & 0x08000000));

		moutdwm(ast, 0x1E6E0034, 0x00000001);
		moutdwm(ast, 0x1E6E000C, 0x00005C04);
		udelay(10);
		moutdwm(ast, 0x1E6E000C, 0x00000000);
		moutdwm(ast, 0x1E6E0034, 0x00000000);
		data = mindwm(ast, 0x1E6E001C);
		data = (data >> 8) & 0xff;
	}
	data = mindwm(ast, 0x1E6E0018) | 0xC00;
	moutdwm(ast, 0x1E6E0018, data);

	moutdwm(ast, 0x1E6E0034, 0x00000001);
	moutdwm(ast, 0x1E6E000C, 0x00000000);
	udelay(50);
	/* Mode Register Setting */
	moutdwm(ast, 0x1E6E002C, param->reg_MRS | 0x100);
	moutdwm(ast, 0x1E6E0030, param->reg_EMRS);
	moutdwm(ast, 0x1E6E0028, 0x00000005);
	moutdwm(ast, 0x1E6E0028, 0x00000007);
	moutdwm(ast, 0x1E6E0028, 0x00000003);
	moutdwm(ast, 0x1E6E0028, 0x00000001);

	moutdwm(ast, 0x1E6E000C, 0x00005C08);
	moutdwm(ast, 0x1E6E002C, param->reg_MRS);
	moutdwm(ast, 0x1E6E0028, 0x00000001);
	moutdwm(ast, 0x1E6E0030, param->reg_EMRS | 0x380);
	moutdwm(ast, 0x1E6E0028, 0x00000003);
	moutdwm(ast, 0x1E6E0030, param->reg_EMRS);
	moutdwm(ast, 0x1E6E0028, 0x00000003);

	moutdwm(ast, 0x1E6E000C, 0x7FFF5C01);
	/* Assemble the ODT enable word from the parameter table. */
	data = 0;
	if (param->wodt) {
		data = 0x500;
	}
	if (param->rodt) {
		data = data | 0x3000 | ((param->reg_AC2 & 0x60000) >> 3);
	}
	moutdwm(ast, 0x1E6E0034, data | 0x3);
	moutdwm(ast, 0x1E6E0120, param->reg_FREQ);

	/* Wait DQI delay lock */
	do {
		data = mindwm(ast, 0x1E6E0080);
	} while (!(data & 0x40000000));
	/* Wait DQSI delay lock */
	do {
		data = mindwm(ast, 0x1E6E0020);
	} while (!(data & 0x00000800));
	/* Calibrate the DQSI delay */
	cbr_dll2(ast, param);

	/* ECC Memory Initialization */
#ifdef ECC
	moutdwm(ast, 0x1E6E007C, 0x00000000);
	moutdwm(ast, 0x1E6E0070, 0x221);
	do {
		data = mindwm(ast, 0x1E6E0070);
	} while (!(data & 0x00001000));
	moutdwm(ast, 0x1E6E0070, 0x00000000);
	moutdwm(ast, 0x1E6E0050, 0x80000000);
	moutdwm(ast, 0x1E6E0050, 0x00000000);
#endif
}
/*
 * Initialize the DRAM controller on AST2300-class hardware.
 *
 * When scratch register CRD0 bit 7 is clear (VGA-only configuration,
 * i.e. firmware has not already trained the DRAM), unlock the AHB
 * bridge and the SCU/SDRAM register sets, slow the CPU/AHB clocks,
 * select DDR2 vs DDR3 from the SCU strap read back at 0x12008, build
 * the parameter table and run the matching init sequence, then set the
 * "DRAM initialized" flag in SCU register 0x1e6e2040.  In all cases,
 * finish by polling CRD0 bit 6 until DRAM is reported ready.
 *
 * Fix: the four get_/init calls were corrupted by a mojibake encoding
 * artifact ("&param" had collapsed into the single character U+00B6
 * followed by "m"), which does not compile; restored "&param".
 */
static void ast_init_dram_2300(struct drm_device *dev)
{
	struct ast_private *ast = dev->dev_private;
	struct ast2300_dram_param param;
	u32 temp;
	u8 reg;

	reg = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xd0, 0xff);
	if ((reg & 0x80) == 0) {	/* vga only */
		/* Unlock the AHB bridge, then the SCU register set. */
		ast_write32(ast, 0xf004, 0x1e6e0000);
		ast_write32(ast, 0xf000, 0x1);
		ast_write32(ast, 0x12000, 0x1688a8a8);
		do {
			;
		} while (ast_read32(ast, 0x12000) != 0x1);

		/* Unlock the SDRAM controller register set. */
		ast_write32(ast, 0x10000, 0xfc600309);
		do {
			;
		} while (ast_read32(ast, 0x10000) != 0x1);

		/* Slow down CPU/AHB CLK in VGA only mode */
		temp = ast_read32(ast, 0x12008);
		temp |= 0x73;
		ast_write32(ast, 0x12008, temp);

		/* Bit 24 of the strap readback selects DDR2, else DDR3. */
		param.dram_type = AST_DDR3;
		if (temp & 0x01000000)
			param.dram_type = AST_DDR2;
		param.dram_chipid = ast->dram_type;
		param.dram_freq = ast->mclk;
		param.vram_size = ast->vram_size;

		if (param.dram_type == AST_DDR3) {
			get_ddr3_info(ast, &param);
			ddr3_init(ast, &param);
		} else {
			get_ddr2_info(ast, &param);
			ddr2_init(ast, &param);
		}

		/* Mark DRAM as initialized for subsequent boots/resumes. */
		temp = mindwm(ast, 0x1e6e2040);
		moutdwm(ast, 0x1e6e2040, temp | 0x40);
	}

	/* wait ready */
	do {
		reg = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xd0, 0xff);
	} while ((reg & 0x40) == 0);
}
| gpl-2.0 |
gproj-m/lge-kernel-gproj | net/sctp/input.c | 2952 | 31281 | /* SCTP kernel implementation
* Copyright (c) 1999-2000 Cisco, Inc.
* Copyright (c) 1999-2001 Motorola, Inc.
* Copyright (c) 2001-2003 International Business Machines, Corp.
* Copyright (c) 2001 Intel Corp.
* Copyright (c) 2001 Nokia, Inc.
* Copyright (c) 2001 La Monte H.P. Yarroll
*
* This file is part of the SCTP kernel implementation
*
* These functions handle all input from the IP layer into SCTP.
*
* This SCTP implementation is free software;
* you can redistribute it and/or modify it under the terms of
* the GNU General Public License as published by
* the Free Software Foundation; either version 2, or (at your option)
* any later version.
*
* This SCTP implementation is distributed in the hope that it
* will be useful, but WITHOUT ANY WARRANTY; without even the implied
* ************************
* warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
* See the GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with GNU CC; see the file COPYING. If not, write to
* the Free Software Foundation, 59 Temple Place - Suite 330,
* Boston, MA 02111-1307, USA.
*
* Please send any bug reports or fixes you make to the
* email address(es):
* lksctp developers <lksctp-developers@lists.sourceforge.net>
*
* Or submit a bug report through the following website:
* http://www.sf.net/projects/lksctp
*
* Written or modified by:
* La Monte H.P. Yarroll <piggy@acm.org>
* Karl Knutson <karl@athena.chicago.il.us>
* Xingang Guo <xingang.guo@intel.com>
* Jon Grimm <jgrimm@us.ibm.com>
* Hui Huang <hui.huang@nokia.com>
* Daisy Chang <daisyc@us.ibm.com>
* Sridhar Samudrala <sri@us.ibm.com>
* Ardelle Fan <ardelle.fan@intel.com>
*
* Any bugs reported given to us we will try to fix... any fixes shared will
* be incorporated into the next SCTP release.
*/
#include <linux/types.h>
#include <linux/list.h> /* For struct list_head */
#include <linux/socket.h>
#include <linux/ip.h>
#include <linux/time.h> /* For struct timeval */
#include <linux/slab.h>
#include <net/ip.h>
#include <net/icmp.h>
#include <net/snmp.h>
#include <net/sock.h>
#include <net/xfrm.h>
#include <net/sctp/sctp.h>
#include <net/sctp/sm.h>
#include <net/sctp/checksum.h>
#include <net/net_namespace.h>
/* Forward declarations for internal helpers. */
static int sctp_rcv_ootb(struct sk_buff *);
/* Association lookup by (local, peer) address; also returns the transport. */
static struct sctp_association *__sctp_rcv_lookup(struct sk_buff *skb,
				      const union sctp_addr *laddr,
				      const union sctp_addr *paddr,
				      struct sctp_transport **transportp);
/* Endpoint lookup by local address. */
static struct sctp_endpoint *__sctp_rcv_lookup_endpoint(const union sctp_addr *laddr);
static struct sctp_association *__sctp_lookup_association(
					const union sctp_addr *local,
					const union sctp_addr *peer,
					struct sctp_transport **pt);
/* Queue an skb on a user-owned socket's backlog, taking asoc/ep refs. */
static int sctp_add_backlog(struct sock *sk, struct sk_buff *skb);
/* Calculate the SCTP checksum of an SCTP packet. */
static inline int sctp_rcv_checksum(struct sk_buff *skb)
{
struct sctphdr *sh = sctp_hdr(skb);
__le32 cmp = sh->checksum;
struct sk_buff *list;
__le32 val;
__u32 tmp = sctp_start_cksum((__u8 *)sh, skb_headlen(skb));
skb_walk_frags(skb, list)
tmp = sctp_update_cksum((__u8 *)list->data, skb_headlen(list),
tmp);
val = sctp_end_cksum(tmp);
if (val != cmp) {
/* CRC failure, dump it. */
SCTP_INC_STATS_BH(SCTP_MIB_CHECKSUMERRORS);
return -1;
}
return 0;
}
/*
 * Per-skb control block used by SCTP on the input path.  It is overlaid
 * on skb->cb[] via SCTP_INPUT_CB(); the header union comes first so the
 * IP layer's inet{,6}_skb_parm occupies the same bytes it already used.
 */
struct sctp_input_cb {
	union {
		struct inet_skb_parm h4;
#if IS_ENABLED(CONFIG_IPV6)
		struct inet6_skb_parm h6;
#endif
	} header;			/* space shared with the IP layer's cb */
	struct sctp_chunk *chunk;	/* chunk built for this skb in sctp_rcv() */
};
/* Access an skb's cb[] area as an sctp_input_cb. */
#define SCTP_INPUT_CB(__skb)	((struct sctp_input_cb *)&((__skb)->cb[0]))
/*
 * This is the routine which IP calls when receiving an SCTP packet.
 *
 * Validates the packet (host-addressed, linearizable, minimum length,
 * checksum, unicast addresses), looks up the owning association or
 * endpoint (both lookups return a reference that is dropped before
 * returning), applies OOTB/XFRM/socket-filter policy, wraps the skb in
 * an sctp_chunk and either pushes it onto the receiver's inqueue or,
 * if the socket is owned by a user context, onto the socket backlog.
 *
 * Always returns 0; discarded packets are freed here.
 */
int sctp_rcv(struct sk_buff *skb)
{
	struct sock *sk;
	struct sctp_association *asoc;
	struct sctp_endpoint *ep = NULL;
	struct sctp_ep_common *rcvr;
	struct sctp_transport *transport = NULL;
	struct sctp_chunk *chunk;
	struct sctphdr *sh;
	union sctp_addr src;
	union sctp_addr dest;
	int family;
	struct sctp_af *af;

	/* Ignore frames not addressed to this host. */
	if (skb->pkt_type!=PACKET_HOST)
		goto discard_it;

	SCTP_INC_STATS_BH(SCTP_MIB_INSCTPPACKS);

	if (skb_linearize(skb))
		goto discard_it;

	sh = sctp_hdr(skb);

	/* Pull up the IP and SCTP headers. */
	__skb_pull(skb, skb_transport_offset(skb));
	if (skb->len < sizeof(struct sctphdr))
		goto discard_it;
	if (!sctp_checksum_disable && !skb_csum_unnecessary(skb) &&
		  sctp_rcv_checksum(skb) < 0)
		goto discard_it;

	skb_pull(skb, sizeof(struct sctphdr));

	/* Make sure we at least have chunk headers worth of data left. */
	if (skb->len < sizeof(struct sctp_chunkhdr))
		goto discard_it;

	family = ipver2af(ip_hdr(skb)->version);
	af = sctp_get_af_specific(family);
	if (unlikely(!af))
		goto discard_it;

	/* Initialize local addresses for lookups. */
	af->from_skb(&src, skb, 1);
	af->from_skb(&dest, skb, 0);

	/* If the packet is to or from a non-unicast address,
	 * silently discard the packet.
	 *
	 * This is not clearly defined in the RFC except in section
	 * 8.4 - OOTB handling.  However, based on the book "Stream Control
	 * Transmission Protocol" 2.1, "It is important to note that the
	 * IP address of an SCTP transport address must be a routable
	 * unicast address.  In other words, IP multicast addresses and
	 * IP broadcast addresses cannot be used in an SCTP transport
	 * address."
	 */
	if (!af->addr_valid(&src, NULL, skb) ||
	    !af->addr_valid(&dest, NULL, skb))
		goto discard_it;

	/* Both lookups take a reference on the object they return. */
	asoc = __sctp_rcv_lookup(skb, &src, &dest, &transport);

	if (!asoc)
		ep = __sctp_rcv_lookup_endpoint(&dest);

	/* Retrieve the common input handling substructure. */
	rcvr = asoc ? &asoc->base : &ep->base;
	sk = rcvr->sk;

	/*
	 * If a frame arrives on an interface and the receiving socket is
	 * bound to another interface, via SO_BINDTODEVICE, treat it as OOTB
	 */
	if (sk->sk_bound_dev_if && (sk->sk_bound_dev_if != af->skb_iif(skb)))
	{
		/* Drop the lookup ref and fall back to the control
		 * socket's endpoint so OOTB handling applies below.
		 */
		if (asoc) {
			sctp_association_put(asoc);
			asoc = NULL;
		} else {
			sctp_endpoint_put(ep);
			ep = NULL;
		}
		sk = sctp_get_ctl_sock();
		ep = sctp_sk(sk)->ep;
		sctp_endpoint_hold(ep);
		rcvr = &ep->base;
	}

	/*
	 * RFC 2960, 8.4 - Handle "Out of the blue" Packets.
	 * An SCTP packet is called an "out of the blue" (OOTB)
	 * packet if it is correctly formed, i.e., passed the
	 * receiver's checksum check, but the receiver is not
	 * able to identify the association to which this
	 * packet belongs.
	 */
	if (!asoc) {
		if (sctp_rcv_ootb(skb)) {
			SCTP_INC_STATS_BH(SCTP_MIB_OUTOFBLUES);
			goto discard_release;
		}
	}

	if (!xfrm_policy_check(sk, XFRM_POLICY_IN, skb, family))
		goto discard_release;
	nf_reset(skb);

	if (sk_filter(sk, skb))
		goto discard_release;

	/* Create an SCTP packet structure. */
	chunk = sctp_chunkify(skb, asoc, sk);
	if (!chunk)
		goto discard_release;
	SCTP_INPUT_CB(skb)->chunk = chunk;

	/* Remember what endpoint is to handle this packet. */
	chunk->rcvr = rcvr;

	/* Remember the SCTP header. */
	chunk->sctp_hdr = sh;

	/* Set the source and destination addresses of the incoming chunk. */
	sctp_init_addrs(chunk, &src, &dest);

	/* Remember where we came from. */
	chunk->transport = transport;

	/* Acquire access to the sock lock. Note:  We are safe from other
	 * bottom halves on this lock, but a user may be in the lock too,
	 * so check if it is busy.
	 */
	sctp_bh_lock_sock(sk);

	if (sk != rcvr->sk) {
		/* Our cached sk is different from the rcvr->sk.  This is
		 * because migrate()/accept() may have moved the association
		 * to a new socket and released all the sockets.  So now we
		 * are holding a lock on the old socket while the user may
		 * be doing something with the new socket.  Switch our veiw
		 * of the current sk.
		 */
		sctp_bh_unlock_sock(sk);
		sk = rcvr->sk;
		sctp_bh_lock_sock(sk);
	}

	if (sock_owned_by_user(sk)) {
		if (sctp_add_backlog(sk, skb)) {
			sctp_bh_unlock_sock(sk);
			sctp_chunk_free(chunk);
			skb = NULL; /* sctp_chunk_free already freed the skb */
			goto discard_release;
		}
		SCTP_INC_STATS_BH(SCTP_MIB_IN_PKT_BACKLOG);
	} else {
		SCTP_INC_STATS_BH(SCTP_MIB_IN_PKT_SOFTIRQ);
		sctp_inq_push(&chunk->rcvr->inqueue, chunk);
	}

	sctp_bh_unlock_sock(sk);

	/* Release the asoc/ep ref we took in the lookup calls. */
	if (asoc)
		sctp_association_put(asoc);
	else
		sctp_endpoint_put(ep);

	return 0;

discard_it:
	SCTP_INC_STATS_BH(SCTP_MIB_IN_PKT_DISCARDS);
	/* skb may be NULL here (already freed via sctp_chunk_free). */
	kfree_skb(skb);
	return 0;

discard_release:
	/* Release the asoc/ep ref we took in the lookup calls. */
	if (asoc)
		sctp_association_put(asoc);
	else
		sctp_endpoint_put(ep);

	goto discard_it;
}
/* Process the backlog queue of the socket.  Every skb on
 * the backlog holds a ref on an association or endpoint.
 * We hold this ref throughout the state machine to make
 * sure that the structure we need is still around.
 *
 * Called with the socket lock held, from process context.  Drops the
 * asoc/ep reference taken in sctp_add_backlog() unless the chunk was
 * re-queued to another socket's backlog.  Always returns 0.
 */
int sctp_backlog_rcv(struct sock *sk, struct sk_buff *skb)
{
	struct sctp_chunk *chunk = SCTP_INPUT_CB(skb)->chunk;
	struct sctp_inq *inqueue = &chunk->rcvr->inqueue;
	struct sctp_ep_common *rcvr = NULL;
	int backloged = 0;

	rcvr = chunk->rcvr;

	/* If the rcvr is dead then the association or endpoint
	 * has been deleted and we can safely drop the chunk
	 * and refs that we are holding.
	 */
	if (rcvr->dead) {
		sctp_chunk_free(chunk);
		goto done;
	}

	if (unlikely(rcvr->sk != sk)) {
		/* In this case, the association moved from one socket to
		 * another.  We are currently sitting on the backlog of the
		 * old socket, so we need to move.
		 * However, since we are here in the process context we
		 * need to take make sure that the user doesn't own
		 * the new socket when we process the packet.
		 * If the new socket is user-owned, queue the chunk to the
		 * backlog of the new socket without dropping any refs.
		 * Otherwise, we can safely push the chunk on the inqueue.
		 */

		sk = rcvr->sk;
		sctp_bh_lock_sock(sk);

		if (sock_owned_by_user(sk)) {
			/* On backlog failure, free the chunk; on success,
			 * keep the refs alive for the next backlog pass.
			 */
			if (sk_add_backlog(sk, skb))
				sctp_chunk_free(chunk);
			else
				backloged = 1;
		} else
			sctp_inq_push(inqueue, chunk);

		sctp_bh_unlock_sock(sk);

		/* If the chunk was backloged again, don't drop refs */
		if (backloged)
			return 0;
	} else {
		sctp_inq_push(inqueue, chunk);
	}

done:
	/* Release the refs we took in sctp_add_backlog */
	if (SCTP_EP_TYPE_ASSOCIATION == rcvr->type)
		sctp_association_put(sctp_assoc(rcvr));
	else if (SCTP_EP_TYPE_SOCKET == rcvr->type)
		sctp_endpoint_put(sctp_ep(rcvr));
	else
		BUG();

	return 0;
}
/* Queue a chunk's skb onto a user-owned socket's backlog.
 *
 * On success, take a reference on the chunk's receiver (association or
 * endpoint) so it cannot disappear while the skb sits on the backlog;
 * sctp_backlog_rcv() drops that reference later.  Returns the result
 * of sk_add_backlog() (0 on success, an error if the backlog is full).
 */
static int sctp_add_backlog(struct sock *sk, struct sk_buff *skb)
{
	struct sctp_chunk *chunk = SCTP_INPUT_CB(skb)->chunk;
	struct sctp_ep_common *rcvr = chunk->rcvr;
	int err = sk_add_backlog(sk, skb);

	if (err)
		return err;

	/* Hold the assoc/ep while hanging on the backlog queue.
	 * This way, we know structures we need will not disappear
	 * from us.
	 */
	switch (rcvr->type) {
	case SCTP_EP_TYPE_ASSOCIATION:
		sctp_association_hold(sctp_assoc(rcvr));
		break;
	case SCTP_EP_TYPE_SOCKET:
		sctp_endpoint_hold(sctp_ep(rcvr));
		break;
	default:
		BUG();
	}

	return 0;
}
/* Handle icmp frag needed error.
 *
 * Called with the socket lock held by sctp_v4_err()/sctp_err_lookup().
 * Lowers the transport's path MTU to @pmtu and retransmits queued data
 * so oversized messages get re-fragmented.  If the user owns the socket
 * we cannot touch the transport now, so just set the pending flags.
 */
void sctp_icmp_frag_needed(struct sock *sk, struct sctp_association *asoc,
			   struct sctp_transport *t, __u32 pmtu)
{
	/* Nothing to do if there's no transport or the MTU isn't shrinking. */
	if (!t || (t->pathmtu <= pmtu))
		return;

	if (sock_owned_by_user(sk)) {
		/* Defer: flags are consumed later when the lock is released. */
		asoc->pmtu_pending = 1;
		t->pmtu_pending = 1;
		return;
	}

	if (t->param_flags & SPP_PMTUD_ENABLE) {
		/* Update transports view of the MTU */
		sctp_transport_update_pmtu(t, pmtu);

		/* Update association pmtu. */
		sctp_assoc_sync_pmtu(asoc);
	}

	/* Retransmit with the new pmtu setting.
	 * Normally, if PMTU discovery is disabled, an ICMP Fragmentation
	 * Needed will never be sent, but if a message was sent before
	 * PMTU discovery was disabled that was larger than the PMTU, it
	 * would not be fragmented, so it must be re-transmitted fragmented.
	 */
	sctp_retransmit(&asoc->outqueue, t, SCTP_RTXR_PMTUD);
}
/*
 * SCTP Implementer's Guide, 2.37 ICMP handling procedures
 *
 * ICMP8) If the ICMP code is a "Unrecognized next header type encountered"
 *        or a "Protocol Unreachable" treat this message as an abort
 *        with the T bit set.
 *
 * This function sends an event to the state machine, which will abort the
 * association.
 *
 * If the user currently owns the socket, the abort cannot be processed
 * immediately, so a short timer (HZ/20) is armed to retry; the timer
 * holds its own reference on the association.
 */
void sctp_icmp_proto_unreachable(struct sock *sk,
				 struct sctp_association *asoc,
				 struct sctp_transport *t)
{
	SCTP_DEBUG_PRINTK("%s\n", __func__);

	if (sock_owned_by_user(sk)) {
		/* Timer already armed: an earlier ICMP scheduled the work. */
		if (timer_pending(&t->proto_unreach_timer))
			return;
		else {
			/* mod_timer() returning 0 means the timer was not
			 * previously pending, so take a ref for it.
			 */
			if (!mod_timer(&t->proto_unreach_timer,
						jiffies + (HZ/20)))
				sctp_association_hold(asoc);
		}

	} else {
		/* We own the socket: cancel any pending deferred work
		 * (dropping its ref) and run the state machine directly.
		 */
		if (timer_pending(&t->proto_unreach_timer) &&
		    del_timer(&t->proto_unreach_timer))
			sctp_association_put(asoc);

		sctp_do_sm(SCTP_EVENT_T_OTHER,
			   SCTP_ST_OTHER(SCTP_EVENT_ICMP_PROTO_UNREACH),
			   asoc->state, asoc->ep, asoc, t,
			   GFP_ATOMIC);
	}
}
/* Common lookup code for icmp/icmpv6 error handler.
 *
 * Looks up the association matching the embedded packet of an ICMP
 * error, validates the verification tag per RFC 4960 Appendix C, and
 * returns the owning socket LOCKED (bh) with a reference held on the
 * association (released by sctp_err_finish()).  On success *app/*tpp
 * are set; on failure returns NULL with *app/*tpp NULL and no refs held.
 */
struct sock *sctp_err_lookup(int family, struct sk_buff *skb,
			     struct sctphdr *sctphdr,
			     struct sctp_association **app,
			     struct sctp_transport **tpp)
{
	union sctp_addr saddr;
	union sctp_addr daddr;
	struct sctp_af *af;
	struct sock *sk = NULL;
	struct sctp_association *asoc;
	struct sctp_transport *transport = NULL;
	struct sctp_init_chunk *chunkhdr;
	__u32 vtag = ntohl(sctphdr->vtag);
	/* Bytes available from the embedded SCTP header to the skb end;
	 * used to bounds-check the INIT chunk peek below.
	 */
	int len = skb->len - ((void *)sctphdr - (void *)skb->data);

	*app = NULL; *tpp = NULL;

	af = sctp_get_af_specific(family);
	if (unlikely(!af)) {
		return NULL;
	}

	/* Initialize local addresses for lookups. */
	af->from_skb(&saddr, skb, 1);
	af->from_skb(&daddr, skb, 0);

	/* Look for an association that matches the incoming ICMP error
	 * packet.
	 */
	asoc = __sctp_lookup_association(&saddr, &daddr, &transport);
	if (!asoc)
		return NULL;

	sk = asoc->base.sk;

	/* RFC 4960, Appendix C. ICMP Handling
	 *
	 *  ICMP6) An implementation MUST validate that the Verification Tag
	 *  contained in the ICMP message matches the Verification Tag of
	 *  the peer.  If the Verification Tag is not 0 and does NOT
	 *  match, discard the ICMP message.  If it is 0 and the ICMP
	 *  message contains enough bytes to verify that the chunk type is
	 *  an INIT chunk and that the Initiate Tag matches the tag of the
	 *  peer, continue with ICMP7.  If the ICMP message is too short
	 *  or the chunk type or the Initiate Tag does not match, silently
	 *  discard the packet.
	 */
	if (vtag == 0) {
		chunkhdr = (void *)sctphdr + sizeof(struct sctphdr);
		if (len < sizeof(struct sctphdr) + sizeof(sctp_chunkhdr_t)
			  + sizeof(__be32) ||
		    chunkhdr->chunk_hdr.type != SCTP_CID_INIT ||
		    ntohl(chunkhdr->init_hdr.init_tag) != asoc->c.my_vtag) {
			goto out;
		}
	} else if (vtag != asoc->c.peer_vtag) {
		goto out;
	}

	sctp_bh_lock_sock(sk);

	/* If too many ICMPs get dropped on busy
	 * servers this needs to be solved differently.
	 */
	if (sock_owned_by_user(sk))
		NET_INC_STATS_BH(&init_net, LINUX_MIB_LOCKDROPPEDICMPS);

	*app = asoc;
	*tpp = transport;
	return sk;

out:
	/* Drop the ref taken by __sctp_lookup_association() on failure. */
	if (asoc)
		sctp_association_put(asoc);
	return NULL;
}
/* Common cleanup code for icmp/icmpv6 error handler.
 *
 * Counterpart of sctp_err_lookup(): releases the bh socket lock and
 * drops the association reference taken during lookup.  Must be called
 * in this order -- unlock before put -- matching the lookup path.
 */
void sctp_err_finish(struct sock *sk, struct sctp_association *asoc)
{
	sctp_bh_unlock_sock(sk);
	if (asoc)
		sctp_association_put(asoc);
}
/*
 * This routine is called by the ICMP module when it gets some
 * sort of error condition.  If err < 0 then the socket should
 * be closed and the error returned to the user.  If err > 0
 * it's just the icmp type << 8 | icmp code.  After adjustment
 * header points to the first 8 bytes of the sctp header.  We need
 * to find the appropriate port.
 *
 * The locking strategy used here is very "optimistic". When
 * someone else accesses the socket the ICMP is just dropped
 * and for some paths there is no check at all.
 * A more general error queue to queue errors for later handling
 * is probably better.
 *
 */
void sctp_v4_err(struct sk_buff *skb, __u32 info)
{
	const struct iphdr *iph = (const struct iphdr *)skb->data;
	const int ihlen = iph->ihl * 4;
	const int type = icmp_hdr(skb)->type;
	const int code = icmp_hdr(skb)->code;
	struct sock *sk;
	struct sctp_association *asoc = NULL;
	struct sctp_transport *transport;
	struct inet_sock *inet;
	sk_buff_data_t saveip, savesctp;
	int err;

	/* Need at least the embedded IP header plus 8 bytes of SCTP header. */
	if (skb->len < ihlen + 8) {
		ICMP_INC_STATS_BH(&init_net, ICMP_MIB_INERRORS);
		return;
	}

	/* Fix up skb to look at the embedded net header. */
	saveip = skb->network_header;
	savesctp = skb->transport_header;
	skb_reset_network_header(skb);
	skb_set_transport_header(skb, ihlen);
	sk = sctp_err_lookup(AF_INET, skb, sctp_hdr(skb), &asoc, &transport);
	/* Put back, the original values. */
	skb->network_header = saveip;
	skb->transport_header = savesctp;
	if (!sk) {
		ICMP_INC_STATS_BH(&init_net, ICMP_MIB_INERRORS);
		return;
	}
	/* Warning:  The sock lock is held.  Remember to call
	 * sctp_err_finish!
	 */

	switch (type) {
	case ICMP_PARAMETERPROB:
		err = EPROTO;
		break;
	case ICMP_DEST_UNREACH:
		if (code > NR_ICMP_UNREACH)
			goto out_unlock;

		/* PMTU discovery (RFC1191) */
		if (ICMP_FRAG_NEEDED == code) {
			sctp_icmp_frag_needed(sk, asoc, transport, info);
			goto out_unlock;
		}
		else {
			if (ICMP_PROT_UNREACH == code) {
				sctp_icmp_proto_unreachable(sk, asoc,
							    transport);
				goto out_unlock;
			}
		}
		err = icmp_err_convert[code].errno;
		break;
	case ICMP_TIME_EXCEEDED:
		/* Ignore any time exceeded errors due to fragment reassembly
		 * timeouts.
		 */
		if (ICMP_EXC_FRAGTIME == code)
			goto out_unlock;

		err = EHOSTUNREACH;
		break;
	default:
		goto out_unlock;
	}

	inet = inet_sk(sk);
	if (!sock_owned_by_user(sk) && inet->recverr) {
		/* Application asked for errors: report immediately. */
		sk->sk_err = err;
		sk->sk_error_report(sk);
	} else {	/* Only an error on timeout */
		sk->sk_err_soft = err;
	}

out_unlock:
	sctp_err_finish(sk, asoc);
}
/*
 * RFC 2960, 8.4 - Handle "Out of the blue" Packets.
 *
 * This function scans all the chunks in the OOTB packet to determine if
 * the packet should be discarded right away.  If a response might be needed
 * for this packet, or, if further processing is possible, the packet will
 * be queued to a proper inqueue for the next phase of handling.
 *
 * Output:
 * Return 0 - If further processing is needed.
 * Return 1 - If the packet can be discarded right away.
 */
static int sctp_rcv_ootb(struct sk_buff *skb)
{
	sctp_chunkhdr_t *ch;
	__u8 *ch_end;

	ch = (sctp_chunkhdr_t *) skb->data;

	/* Scan through all the chunks in the packet.  */
	do {
		/* Break out if chunk length is less then minimal. */
		if (ntohs(ch->length) < sizeof(sctp_chunkhdr_t))
			break;

		/* Word-align the chunk length and make sure the chunk
		 * fits within the skb before dereferencing further.
		 */
		ch_end = ((__u8 *)ch) + WORD_ROUND(ntohs(ch->length));
		if (ch_end > skb_tail_pointer(skb))
			break;

		/* RFC 8.4, 2) If the OOTB packet contains an ABORT chunk, the
		 * receiver MUST silently discard the OOTB packet and take no
		 * further action.
		 */
		if (SCTP_CID_ABORT == ch->type)
			goto discard;

		/* RFC 8.4, 6) If the packet contains a SHUTDOWN COMPLETE
		 * chunk, the receiver should silently discard the packet
		 * and take no further action.
		 */
		if (SCTP_CID_SHUTDOWN_COMPLETE == ch->type)
			goto discard;

		/* RFC 4460, 2.11.2
		 * This will discard packets with INIT chunk bundled as
		 * subsequent chunks in the packet.  When INIT is first,
		 * the normal INIT processing will discard the chunk.
		 */
		if (SCTP_CID_INIT == ch->type && (void *)ch != skb->data)
			goto discard;

		ch = (sctp_chunkhdr_t *) ch_end;
	} while (ch_end < skb_tail_pointer(skb));

	return 0;

discard:
	return 1;
}
/* Insert endpoint into the hash table.
 * Caller must have disabled BHs; the bucket write lock serializes
 * against concurrent lookups.
 */
static void __sctp_hash_endpoint(struct sctp_endpoint *ep)
{
	struct sctp_ep_common *epb;
	struct sctp_hashbucket *head;

	epb = &ep->base;

	/* Bucket is chosen by local port only. */
	epb->hashent = sctp_ep_hashfn(epb->bind_addr.port);
	head = &sctp_ep_hashtable[epb->hashent];

	sctp_write_lock(&head->lock);
	hlist_add_head(&epb->node, &head->chain);
	sctp_write_unlock(&head->lock);
}

/* Add an endpoint to the hash. Local BH-safe. */
void sctp_hash_endpoint(struct sctp_endpoint *ep)
{
	sctp_local_bh_disable();
	__sctp_hash_endpoint(ep);
	sctp_local_bh_enable();
}

/* Remove endpoint from the hash table.
 * Safe to call on an endpoint that was never hashed (early return).
 */
static void __sctp_unhash_endpoint(struct sctp_endpoint *ep)
{
	struct sctp_hashbucket *head;
	struct sctp_ep_common *epb;

	epb = &ep->base;

	if (hlist_unhashed(&epb->node))
		return;

	/* Recompute the bucket from the bound port, same as on insert. */
	epb->hashent = sctp_ep_hashfn(epb->bind_addr.port);

	head = &sctp_ep_hashtable[epb->hashent];

	sctp_write_lock(&head->lock);
	__hlist_del(&epb->node);
	sctp_write_unlock(&head->lock);
}

/* Remove endpoint from the hash.  Local BH-safe. */
void sctp_unhash_endpoint(struct sctp_endpoint *ep)
{
	sctp_local_bh_disable();
	__sctp_unhash_endpoint(ep);
	sctp_local_bh_enable();
}
/* Look up an endpoint by local address/port.
 *
 * Always returns a held endpoint: if no bound endpoint matches, the
 * control socket's endpoint is returned so OOTB handling has a
 * receiver to attach to.  Caller must sctp_endpoint_put() the result.
 */
static struct sctp_endpoint *__sctp_rcv_lookup_endpoint(const union sctp_addr *laddr)
{
	struct sctp_hashbucket *head;
	struct sctp_ep_common *epb;
	struct sctp_endpoint *ep;
	struct hlist_node *node;
	int hash;

	hash = sctp_ep_hashfn(ntohs(laddr->v4.sin_port));
	head = &sctp_ep_hashtable[hash];
	read_lock(&head->lock);
	sctp_for_each_hentry(epb, node, &head->chain) {
		ep = sctp_ep(epb);
		if (sctp_endpoint_is_match(ep, laddr))
			goto hit;
	}

	/* No match: fall back to the control socket's endpoint. */
	ep = sctp_sk((sctp_get_ctl_sock()))->ep;

hit:
	/* Take the ref under the bucket lock so the ep can't vanish. */
	sctp_endpoint_hold(ep);
	read_unlock(&head->lock);
	return ep;
}
/* Insert association into the hash table.
 * Caller must have disabled BHs.  The bucket is derived from the
 * (local port, peer port) pair, matching __sctp_lookup_association().
 */
static void __sctp_hash_established(struct sctp_association *asoc)
{
	struct sctp_ep_common *epb;
	struct sctp_hashbucket *head;

	epb = &asoc->base;

	/* Calculate which chain this entry will belong to. */
	epb->hashent = sctp_assoc_hashfn(epb->bind_addr.port, asoc->peer.port);

	head = &sctp_assoc_hashtable[epb->hashent];

	sctp_write_lock(&head->lock);
	hlist_add_head(&epb->node, &head->chain);
	sctp_write_unlock(&head->lock);
}

/* Add an association to the hash. Local BH-safe. */
void sctp_hash_established(struct sctp_association *asoc)
{
	/* Temporary associations (e.g. for peeloff) are never hashed. */
	if (asoc->temp)
		return;

	sctp_local_bh_disable();
	__sctp_hash_established(asoc);
	sctp_local_bh_enable();
}

/* Remove association from the hash table.
 * Caller must have disabled BHs and must only call this for an
 * association that was previously hashed.
 */
static void __sctp_unhash_established(struct sctp_association *asoc)
{
	struct sctp_hashbucket *head;
	struct sctp_ep_common *epb;

	epb = &asoc->base;

	/* Recompute the bucket exactly as on insert. */
	epb->hashent = sctp_assoc_hashfn(epb->bind_addr.port,
					 asoc->peer.port);

	head = &sctp_assoc_hashtable[epb->hashent];

	sctp_write_lock(&head->lock);
	__hlist_del(&epb->node);
	sctp_write_unlock(&head->lock);
}

/* Remove association from the hash table.  Local BH-safe. */
void sctp_unhash_established(struct sctp_association *asoc)
{
	/* Temp associations were never hashed; see sctp_hash_established(). */
	if (asoc->temp)
		return;

	sctp_local_bh_disable();
	__sctp_unhash_established(asoc);
	sctp_local_bh_enable();
}
/* Look up an association by (local, peer) address pair.
 *
 * On a hit, returns the association with a reference held and stores
 * the matching transport in *pt; the caller must sctp_association_put()
 * when done.  Returns NULL (and leaves *pt untouched) on a miss.
 * Caller must have BHs disabled; the bucket read lock protects the walk.
 */
static struct sctp_association *__sctp_lookup_association(
					const union sctp_addr *local,
					const union sctp_addr *peer,
					struct sctp_transport **pt)
{
	struct sctp_hashbucket *head;
	struct sctp_ep_common *epb;
	struct sctp_association *asoc;
	struct sctp_transport *transport;
	struct hlist_node *node;
	int hash;

	/* Optimize here for direct hit, only listening connections can
	 * have wildcards anyways.
	 */
	hash = sctp_assoc_hashfn(ntohs(local->v4.sin_port), ntohs(peer->v4.sin_port));
	head = &sctp_assoc_hashtable[hash];
	read_lock(&head->lock);
	sctp_for_each_hentry(epb, node, &head->chain) {
		asoc = sctp_assoc(epb);
		transport = sctp_assoc_is_match(asoc, local, peer);
		if (transport)
			goto hit;
	}

	read_unlock(&head->lock);

	return NULL;

hit:
	*pt = transport;

	/* Take the ref while still holding the bucket lock. */
	sctp_association_hold(asoc);
	read_unlock(&head->lock);
	return asoc;
}

/* Look up an association. BH-safe.
 * Thin wrapper that disables BHs around __sctp_lookup_association();
 * same return/reference semantics as the unlocked variant.
 */
SCTP_STATIC
struct sctp_association *sctp_lookup_association(const union sctp_addr *laddr,
						 const union sctp_addr *paddr,
					    struct sctp_transport **transportp)
{
	struct sctp_association *asoc;

	sctp_local_bh_disable();
	asoc = __sctp_lookup_association(laddr, paddr, transportp);
	sctp_local_bh_enable();

	return asoc;
}
/* Is there an association matching the given local and peer addresses?
 * Returns 1 if one exists, 0 otherwise.  The reference taken by the
 * lookup is dropped before returning.
 */
int sctp_has_association(const union sctp_addr *laddr,
			 const union sctp_addr *paddr)
{
	struct sctp_transport *transport;
	struct sctp_association *asoc;

	asoc = sctp_lookup_association(laddr, paddr, &transport);
	if (!asoc)
		return 0;

	sctp_association_put(asoc);
	return 1;
}
/*
 * SCTP Implementors Guide, 2.18 Handling of address
 * parameters within the INIT or INIT-ACK.
 *
 * D) When searching for a matching TCB upon reception of an INIT
 *    or INIT-ACK chunk the receiver SHOULD use not only the
 *    source address of the packet (containing the INIT or
 *    INIT-ACK) but the receiver SHOULD also use all valid
 *    address parameters contained within the chunk.
 *
 * 2.18.3 Solution description
 *
 * This new text clearly specifies to an implementor the need
 * to look within the INIT or INIT-ACK. Any implementation that
 * does not do this, may not be able to establish associations
 * in certain circumstances.
 *
 * NOTE(review): the caller (__sctp_rcv_lookup_harder) has already
 * verified that the chunk length fits in the skb; sctp_walk_params
 * relies on that bound -- confirm before reusing this elsewhere.
 */
static struct sctp_association *__sctp_rcv_init_lookup(struct sk_buff *skb,
	const union sctp_addr *laddr, struct sctp_transport **transportp)
{
	struct sctp_association *asoc;
	union sctp_addr addr;
	union sctp_addr *paddr = &addr;
	struct sctphdr *sh = sctp_hdr(skb);
	union sctp_params params;
	sctp_init_chunk_t *init;
	struct sctp_transport *transport;
	struct sctp_af *af;

	/*
	 * This code will NOT touch anything inside the chunk--it is
	 * strictly READ-ONLY.
	 *
	 * RFC 2960 3  SCTP packet Format
	 *
	 * Multiple chunks can be bundled into one SCTP packet up to
	 * the MTU size, except for the INIT, INIT ACK, and SHUTDOWN
	 * COMPLETE chunks.  These chunks MUST NOT be bundled with any
	 * other chunk in a packet.  See Section 6.10 for more details
	 * on chunk bundling.
	 */

	/* Find the start of the TLVs and the end of the chunk.  This is
	 * the region we search for address parameters.
	 */
	init = (sctp_init_chunk_t *)skb->data;

	/* Walk the parameters looking for embedded addresses. */
	sctp_walk_params(params, init, init_hdr.params) {

		/* Note: Ignoring hostname addresses. */
		af = sctp_get_af_specific(param_type2af(params.p->type));
		if (!af)
			continue;

		/* Build a candidate peer address from the parameter and
		 * the source port of the packet, then try a lookup.
		 */
		af->from_addr_param(paddr, params.addr, sh->source, 0);

		asoc = __sctp_lookup_association(laddr, paddr, &transport);
		if (asoc)
			return asoc;
	}

	return NULL;
}
/* ADD-IP, Section 5.2
 * When an endpoint receives an ASCONF Chunk from the remote peer
 * special procedures may be needed to identify the association the
 * ASCONF Chunk is associated with. To properly find the association
 * the following procedures SHOULD be followed:
 *
 * D2) If the association is not found, use the address found in the
 * Address Parameter TLV combined with the port number found in the
 * SCTP common header. If found proceed to rule D4.
 *
 * D2-ext) If more than one ASCONF Chunks are packed together, use the
 * address found in the ASCONF Address Parameter TLV of each of the
 * subsequent ASCONF Chunks. If found, proceed to rule D4.
 *
 * NOTE(review): the address parameter's own length is not validated
 * here before from_addr_param() reads it; the caller's chunk-length
 * check bounds the chunk but not the inner TLV -- verify upstream
 * guarantees before relying on this with hostile input.
 */
static struct sctp_association *__sctp_rcv_asconf_lookup(
					sctp_chunkhdr_t *ch,
					const union sctp_addr *laddr,
					__be16 peer_port,
					struct sctp_transport **transportp)
{
	sctp_addip_chunk_t *asconf = (struct sctp_addip_chunk *)ch;
	struct sctp_af *af;
	union sctp_addr_param *param;
	union sctp_addr paddr;

	/* Skip over the ADDIP header and find the Address parameter */
	param = (union sctp_addr_param *)(asconf + 1);

	af = sctp_get_af_specific(param_type2af(param->p.type));
	if (unlikely(!af))
		return NULL;

	af->from_addr_param(&paddr, param, peer_port, 0);

	return __sctp_lookup_association(laddr, &paddr, transportp);
}
/* SCTP-AUTH, Section 6.3:
 *    If the receiver does not find a STCB for a packet containing an AUTH
 *    chunk as the first chunk and not a COOKIE-ECHO chunk as the second
 *    chunk, it MUST use the chunks after the AUTH chunk to look up an existing
 *    association.
 *
 * This means that any chunks that can help us identify the association need
 * to be looked at to find this association.
 *
 * Returns a held association (via __sctp_rcv_asconf_lookup) or NULL.
 */
static struct sctp_association *__sctp_rcv_walk_lookup(struct sk_buff *skb,
				      const union sctp_addr *laddr,
				      struct sctp_transport **transportp)
{
	struct sctp_association *asoc = NULL;
	sctp_chunkhdr_t *ch;
	int have_auth = 0;
	unsigned int chunk_num = 1;
	__u8 *ch_end;

	/* Walk through the chunks looking for AUTH or ASCONF chunks
	 * to help us find the association.
	 */
	ch = (sctp_chunkhdr_t *) skb->data;
	do {
		/* Break out if chunk length is less then minimal. */
		if (ntohs(ch->length) < sizeof(sctp_chunkhdr_t))
			break;

		/* Stop if the rounded chunk end would run past the skb. */
		ch_end = ((__u8 *)ch) + WORD_ROUND(ntohs(ch->length));
		if (ch_end > skb_tail_pointer(skb))
			break;

		switch (ch->type) {
		case SCTP_CID_AUTH:
			/* Remember at which position the AUTH chunk sat. */
			have_auth = chunk_num;
			break;

		case SCTP_CID_COOKIE_ECHO:
			/* If a packet arrives containing an AUTH chunk as
			 * a first chunk, a COOKIE-ECHO chunk as the second
			 * chunk, and possibly more chunks after them, and
			 * the receiver does not have an STCB for that
			 * packet, then authentication is based on
			 * the contents of the COOKIE-ECHO chunk.
			 */
			if (have_auth == 1 && chunk_num == 2)
				return NULL;
			break;

		case SCTP_CID_ASCONF:
			if (have_auth || sctp_addip_noauth)
				asoc = __sctp_rcv_asconf_lookup(ch, laddr,
							sctp_hdr(skb)->source,
							transportp);
			/* Explicit break added: this case previously fell
			 * through into "default: break;".  Behavior is
			 * identical, but the fallthrough was implicit and
			 * warning-prone.
			 */
			break;

		default:
			break;
		}

		if (asoc)
			break;

		ch = (sctp_chunkhdr_t *) ch_end;
		chunk_num++;
	} while (ch_end < skb_tail_pointer(skb));

	return asoc;
}
/*
 * There are circumstances when we need to look inside the SCTP packet
 * for information to help us find the association.  Examples
 * include looking inside of INIT/INIT-ACK chunks or after the AUTH
 * chunks.
 *
 * Returns a held association or NULL.  The chunk-length bound checked
 * here is what makes the deeper walks in the helpers safe.
 */
static struct sctp_association *__sctp_rcv_lookup_harder(struct sk_buff *skb,
				      const union sctp_addr *laddr,
				      struct sctp_transport **transportp)
{
	sctp_chunkhdr_t *ch;

	ch = (sctp_chunkhdr_t *) skb->data;

	/* The code below will attempt to walk the chunk and extract
	 * parameter information.  Before we do that, we need to verify
	 * that the chunk length doesn't cause overflow.  Otherwise, we'll
	 * walk off the end.
	 */
	if (WORD_ROUND(ntohs(ch->length)) > skb->len)
		return NULL;

	/* If this is INIT/INIT-ACK look inside the chunk too.
	 * Dead code removed: the "break"s after each "return" and the
	 * trailing "return NULL" were unreachable -- every switch arm
	 * already returns.
	 */
	switch (ch->type) {
	case SCTP_CID_INIT:
	case SCTP_CID_INIT_ACK:
		return __sctp_rcv_init_lookup(skb, laddr, transportp);

	default:
		return __sctp_rcv_walk_lookup(skb, laddr, transportp);
	}
}
/* Lookup an association for an inbound skb.
 *
 * First tries the direct (local, peer) address lookup; if that misses,
 * digs into the packet itself (INIT/INIT-ACK parameters, AUTH/ASCONF
 * walk) via __sctp_rcv_lookup_harder().  Returns a held association
 * or NULL; *transportp is set on success.
 */
static struct sctp_association *__sctp_rcv_lookup(struct sk_buff *skb,
				      const union sctp_addr *paddr,
				      const union sctp_addr *laddr,
				      struct sctp_transport **transportp)
{
	struct sctp_association *asoc;

	asoc = __sctp_lookup_association(laddr, paddr, transportp);

	/* Further lookup for INIT/INIT-ACK packets.
	 * SCTP Implementors Guide, 2.18 Handling of address
	 * parameters within the INIT or INIT-ACK.
	 */
	if (!asoc)
		asoc = __sctp_rcv_lookup_harder(skb, laddr, transportp);

	return asoc;
}
| gpl-2.0 |
icebluechao/stuttgart_kernel | net/netfilter/nf_conntrack_standalone.c | 2952 | 13748 | /* (C) 1999-2001 Paul `Rusty' Russell
* (C) 2002-2004 Netfilter Core Team <coreteam@netfilter.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/types.h>
#include <linux/netfilter.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/percpu.h>
#include <linux/netdevice.h>
#include <linux/security.h>
#include <net/net_namespace.h>
#ifdef CONFIG_SYSCTL
#include <linux/sysctl.h>
#endif
#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_core.h>
#include <net/netfilter/nf_conntrack_l3proto.h>
#include <net/netfilter/nf_conntrack_l4proto.h>
#include <net/netfilter/nf_conntrack_expect.h>
#include <net/netfilter/nf_conntrack_helper.h>
#include <net/netfilter/nf_conntrack_acct.h>
#include <net/netfilter/nf_conntrack_zones.h>
#include <net/netfilter/nf_conntrack_timestamp.h>
#include <linux/rculist_nulls.h>
MODULE_LICENSE("GPL");
#ifdef CONFIG_PROC_FS
/* Print a conntrack tuple using the l3/l4 protocol helpers.
 * Returns nonzero (1) if either helper reported an error/overflow,
 * 0 on success -- matching the truth value of the short-circuit OR.
 */
int
print_tuple(struct seq_file *s, const struct nf_conntrack_tuple *tuple,
	    const struct nf_conntrack_l3proto *l3proto,
	    const struct nf_conntrack_l4proto *l4proto)
{
	if (l3proto->print_tuple(s, tuple))
		return 1;
	return l4proto->print_tuple(s, tuple) ? 1 : 0;
}
EXPORT_SYMBOL_GPL(print_tuple);
/* Per-open iterator state for the /proc/net/nf_conntrack seq_file. */
struct ct_iter_state {
	struct seq_net_private p;	/* must be first: seq_file_net() expects it */
	unsigned int bucket;		/* current hash bucket being walked */
	u_int64_t time_now;		/* snapshot taken at seq start; used for delta-time */
};
/* Find the first conntrack entry for the seq walk: scan buckets from 0
 * until a non-empty chain is found.  Caller holds rcu_read_lock()
 * (taken in ct_seq_start).  Returns NULL if the table is empty.
 */
static struct hlist_nulls_node *ct_get_first(struct seq_file *seq)
{
	struct net *net = seq_file_net(seq);
	struct ct_iter_state *st = seq->private;
	struct hlist_nulls_node *n;

	for (st->bucket = 0;
	     st->bucket < net->ct.htable_size;
	     st->bucket++) {
		n = rcu_dereference(hlist_nulls_first_rcu(&net->ct.hash[st->bucket]));
		/* A nulls marker means the chain is empty; keep scanning. */
		if (!is_a_nulls(n))
			return n;
	}
	return NULL;
}
/* Advance to the next entry in the RCU nulls-list walk.
 *
 * When a nulls marker is hit, its embedded value tells us which bucket
 * the chain claims to end in.  If it matches our bucket we really are
 * done with it and move on; if not, the entry was moved under us by a
 * concurrent resize/rehash and we re-read the current bucket's head.
 * Returns NULL when the whole table is exhausted.
 */
static struct hlist_nulls_node *ct_get_next(struct seq_file *seq,
				      struct hlist_nulls_node *head)
{
	struct net *net = seq_file_net(seq);
	struct ct_iter_state *st = seq->private;

	head = rcu_dereference(hlist_nulls_next_rcu(head));
	while (is_a_nulls(head)) {
		if (likely(get_nulls_value(head) == st->bucket)) {
			if (++st->bucket >= net->ct.htable_size)
				return NULL;
		}
		head = rcu_dereference(
				hlist_nulls_first_rcu(
					&net->ct.hash[st->bucket]));
	}
	return head;
}
/* Position the iterator at entry number @pos (0-based) from the start
 * of the table.  Returns NULL if the table runs out before @pos.
 */
static struct hlist_nulls_node *ct_get_idx(struct seq_file *seq, loff_t pos)
{
	struct hlist_nulls_node *node = ct_get_first(seq);

	while (node != NULL && pos > 0) {
		node = ct_get_next(seq, node);
		pos--;
	}
	return pos ? NULL : node;
}
/* seq_file start: snapshot the wall clock (for delta-time output),
 * take rcu_read_lock for the duration of the walk (released in
 * ct_seq_stop), and position at *pos.
 */
static void *ct_seq_start(struct seq_file *seq, loff_t *pos)
	__acquires(RCU)
{
	struct ct_iter_state *st = seq->private;

	st->time_now = ktime_to_ns(ktime_get_real());
	rcu_read_lock();
	return ct_get_idx(seq, *pos);
}

/* seq_file next: advance one entry and bump the position. */
static void *ct_seq_next(struct seq_file *s, void *v, loff_t *pos)
{
	(*pos)++;
	return ct_get_next(s, v);
}

/* seq_file stop: drop the RCU read lock taken in ct_seq_start. */
static void ct_seq_stop(struct seq_file *s, void *v)
	__releases(RCU)
{
	rcu_read_unlock();
}
#ifdef CONFIG_NF_CONNTRACK_SECMARK
/* Append "secctx=..." for the conntrack's secmark.  A failed
 * secid->secctx translation is silently skipped (returns 0), so a
 * missing security module never aborts the listing.
 */
static int ct_show_secctx(struct seq_file *s, const struct nf_conn *ct)
{
	int ret;
	u32 len;
	char *secctx;

	ret = security_secid_to_secctx(ct->secmark, &secctx, &len);
	if (ret)
		return 0;

	ret = seq_printf(s, "secctx=%s ", secctx);

	security_release_secctx(secctx, len);
	return ret;
}
#else
/* Stub when SECMARK support is compiled out: print nothing, succeed. */
static inline int ct_show_secctx(struct seq_file *s, const struct nf_conn *ct)
{
	return 0;
}
#endif

#ifdef CONFIG_NF_CONNTRACK_TIMESTAMP
/* Append "delta-time=<seconds>" -- the age of the connection relative
 * to the time snapshot taken in ct_seq_start().  Entries created after
 * the snapshot (negative delta) are clamped to 0.
 */
static int ct_show_delta_time(struct seq_file *s, const struct nf_conn *ct)
{
	struct ct_iter_state *st = s->private;
	struct nf_conn_tstamp *tstamp;
	s64 delta_time;

	tstamp = nf_conn_tstamp_find(ct);
	if (tstamp) {
		delta_time = st->time_now - tstamp->start;
		if (delta_time > 0)
			delta_time = div_s64(delta_time, NSEC_PER_SEC);
		else
			delta_time = 0;

		return seq_printf(s, "delta-time=%llu ",
				  (unsigned long long)delta_time);
	}
	return 0;
}
#else
/* Stub when timestamping is compiled out: print nothing, succeed. */
static inline int
ct_show_delta_time(struct seq_file *s, const struct nf_conn *ct)
{
	return 0;
}
#endif
/* Render one conntrack entry as a /proc/net/nf_conntrack line.
 * return 0 on success, 1 in case of error
 * (every seq_printf is checked: a nonzero result means the seq buffer
 * overflowed and the caller will retry with a bigger buffer).
 */
static int ct_seq_show(struct seq_file *s, void *v)
{
	struct nf_conntrack_tuple_hash *hash = v;
	struct nf_conn *ct = nf_ct_tuplehash_to_ctrack(hash);
	const struct nf_conntrack_l3proto *l3proto;
	const struct nf_conntrack_l4proto *l4proto;
	int ret = 0;

	NF_CT_ASSERT(ct);
	/* Pin the entry; if the refcount already hit zero it is being
	 * destroyed under RCU and we just skip it.
	 */
	if (unlikely(!atomic_inc_not_zero(&ct->ct_general.use)))
		return 0;

	/* we only want to print DIR_ORIGINAL */
	if (NF_CT_DIRECTION(hash))
		goto release;

	l3proto = __nf_ct_l3proto_find(nf_ct_l3num(ct));
	NF_CT_ASSERT(l3proto);
	l4proto = __nf_ct_l4proto_find(nf_ct_l3num(ct), nf_ct_protonum(ct));
	NF_CT_ASSERT(l4proto);

	ret = -ENOSPC;
	if (seq_printf(s, "%-8s %u %-8s %u %ld ",
		       l3proto->name, nf_ct_l3num(ct),
		       l4proto->name, nf_ct_protonum(ct),
		       timer_pending(&ct->timeout)
		       ? (long)(ct->timeout.expires - jiffies)/HZ : 0) != 0)
		goto release;

	if (l4proto->print_conntrack && l4proto->print_conntrack(s, ct))
		goto release;

	if (print_tuple(s, &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple,
			l3proto, l4proto))
		goto release;

	if (seq_print_acct(s, ct, IP_CT_DIR_ORIGINAL))
		goto release;

	if (!(test_bit(IPS_SEEN_REPLY_BIT, &ct->status)))
		if (seq_printf(s, "[UNREPLIED] "))
			goto release;

	if (print_tuple(s, &ct->tuplehash[IP_CT_DIR_REPLY].tuple,
			l3proto, l4proto))
		goto release;

	if (seq_print_acct(s, ct, IP_CT_DIR_REPLY))
		goto release;

	if (test_bit(IPS_ASSURED_BIT, &ct->status))
		if (seq_printf(s, "[ASSURED] "))
			goto release;

#if defined(CONFIG_NF_CONNTRACK_MARK)
	if (seq_printf(s, "mark=%u ", ct->mark))
		goto release;
#endif

	if (ct_show_secctx(s, ct))
		goto release;

#ifdef CONFIG_NF_CONNTRACK_ZONES
	if (seq_printf(s, "zone=%u ", nf_ct_zone(ct)))
		goto release;
#endif

	if (ct_show_delta_time(s, ct))
		goto release;

	if (seq_printf(s, "use=%u\n", atomic_read(&ct->ct_general.use)))
		goto release;

	ret = 0;
release:
	/* Drop the ref taken by atomic_inc_not_zero above. */
	nf_ct_put(ct);
	return ret;
}
/* seq_file iterator for /proc/net/nf_conntrack. */
static const struct seq_operations ct_seq_ops = {
	.start = ct_seq_start,
	.next  = ct_seq_next,
	.stop  = ct_seq_stop,
	.show  = ct_seq_show
};

/* open(): attach the per-net seq iterator with our private state. */
static int ct_open(struct inode *inode, struct file *file)
{
	return seq_open_net(inode, file, &ct_seq_ops,
			sizeof(struct ct_iter_state));
}

/* /proc/net/nf_conntrack file operations. */
static const struct file_operations ct_file_ops = {
	.owner   = THIS_MODULE,
	.open    = ct_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release_net,
};
/* Per-CPU stats iterator for /proc/net/stat/nf_conntrack.
 * Position 0 is the header token; position N+1 maps to possible CPU N.
 */
static void *ct_cpu_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct net *net = seq_file_net(seq);
	int cpu;

	if (*pos == 0)
		return SEQ_START_TOKEN;

	/* Skip holes in the possible-CPU space. */
	for (cpu = *pos-1; cpu < nr_cpu_ids; ++cpu) {
		if (!cpu_possible(cpu))
			continue;
		*pos = cpu + 1;
		return per_cpu_ptr(net->ct.stat, cpu);
	}

	return NULL;
}

/* Advance to the next possible CPU's stats block. */
static void *ct_cpu_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct net *net = seq_file_net(seq);
	int cpu;

	for (cpu = *pos; cpu < nr_cpu_ids; ++cpu) {
		if (!cpu_possible(cpu))
			continue;
		*pos = cpu + 1;
		return per_cpu_ptr(net->ct.stat, cpu);
	}

	return NULL;
}

/* Nothing to release: the walk takes no locks. */
static void ct_cpu_seq_stop(struct seq_file *seq, void *v)
{
}
/* Emit one line of per-CPU conntrack statistics (hex columns), or the
 * column-header line for the SEQ_START_TOKEN position.
 */
static int ct_cpu_seq_show(struct seq_file *seq, void *v)
{
	struct net *net = seq_file_net(seq);
	unsigned int nr_conntracks = atomic_read(&net->ct.count);
	const struct ip_conntrack_stat *st = v;

	if (v == SEQ_START_TOKEN) {
		seq_printf(seq, "entries  searched found new invalid ignore delete delete_list insert insert_failed drop early_drop icmp_error  expect_new expect_create expect_delete search_restart\n");
		return 0;
	}

	/* First column is the global entry count, repeated on every row. */
	seq_printf(seq, "%08x  %08x %08x %08x %08x %08x %08x %08x "
			"%08x %08x %08x %08x %08x  %08x %08x %08x %08x\n",
		   nr_conntracks,
		   st->searched,
		   st->found,
		   st->new,
		   st->invalid,
		   st->ignore,
		   st->delete,
		   st->delete_list,
		   st->insert,
		   st->insert_failed,
		   st->drop,
		   st->early_drop,
		   st->error,

		   st->expect_new,
		   st->expect_create,
		   st->expect_delete,
		   st->search_restart
		);
	return 0;
}
/* seq_file iterator for /proc/net/stat/nf_conntrack. */
static const struct seq_operations ct_cpu_seq_ops = {
	.start	= ct_cpu_seq_start,
	.next	= ct_cpu_seq_next,
	.stop	= ct_cpu_seq_stop,
	.show	= ct_cpu_seq_show,
};

/* open(): attach the per-net stats iterator (no extra private state). */
static int ct_cpu_seq_open(struct inode *inode, struct file *file)
{
	return seq_open_net(inode, file, &ct_cpu_seq_ops,
			    sizeof(struct seq_net_private));
}

/* /proc/net/stat/nf_conntrack file operations. */
static const struct file_operations ct_cpu_seq_fops = {
	.owner	 = THIS_MODULE,
	.open	 = ct_cpu_seq_open,
	.read	 = seq_read,
	.llseek	 = seq_lseek,
	.release = seq_release_net,
};
/* Create the two proc entries for this namespace:
 * /proc/net/nf_conntrack (table dump, mode 0440) and
 * /proc/net/stat/nf_conntrack (per-CPU stats).
 * Returns 0 or -ENOMEM, unwinding the first entry on partial failure.
 */
static int nf_conntrack_standalone_init_proc(struct net *net)
{
	struct proc_dir_entry *pde;

	pde = proc_net_fops_create(net, "nf_conntrack", 0440, &ct_file_ops);
	if (!pde)
		goto out_nf_conntrack;

	pde = proc_create("nf_conntrack", S_IRUGO, net->proc_net_stat,
			  &ct_cpu_seq_fops);
	if (!pde)
		goto out_stat_nf_conntrack;
	return 0;

out_stat_nf_conntrack:
	proc_net_remove(net, "nf_conntrack");
out_nf_conntrack:
	return -ENOMEM;
}

/* Remove both proc entries (reverse of init). */
static void nf_conntrack_standalone_fini_proc(struct net *net)
{
	remove_proc_entry("nf_conntrack", net->proc_net_stat);
	proc_net_remove(net, "nf_conntrack");
}
#else
/* procfs disabled: nothing to create or tear down. */
static int nf_conntrack_standalone_init_proc(struct net *net)
{
	return 0;
}

static void nf_conntrack_standalone_fini_proc(struct net *net)
{
}
#endif /* CONFIG_PROC_FS */
/* Sysctl support */

#ifdef CONFIG_SYSCTL
/* Log invalid packets of a given protocol */
static int log_invalid_proto_min = 0;
static int log_invalid_proto_max = 255;	/* protocol numbers are 8-bit */

static struct ctl_table_header *nf_ct_netfilter_header;

/* Template for the per-net net.netfilter.* table.  NOTE: the entries
 * at indices 1-4 are rewritten with per-net data pointers in
 * nf_conntrack_standalone_init_sysctl() -- keep the order in sync.
 */
static ctl_table nf_ct_sysctl_table[] = {
	{
		.procname	= "nf_conntrack_max",
		.data		= &nf_conntrack_max,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "nf_conntrack_count",
		.data		= &init_net.ct.count,
		.maxlen		= sizeof(int),
		.mode		= 0444,	/* read-only: kernel-maintained counter */
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "nf_conntrack_buckets",
		.data		= &init_net.ct.htable_size,
		.maxlen		= sizeof(unsigned int),
		.mode		= 0444,	/* read-only: table size fixed at init */
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "nf_conntrack_checksum",
		.data		= &init_net.ct.sysctl_checksum,
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "nf_conntrack_log_invalid",
		.data		= &init_net.ct.sysctl_log_invalid,
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= &log_invalid_proto_min,
		.extra2		= &log_invalid_proto_max,
	},
	{
		.procname	= "nf_conntrack_expect_max",
		.data		= &nf_ct_expect_max,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{ }
};

#define NET_NF_CONNTRACK_MAX 2089

/* Legacy net.nf_conntrack_max alias, registered only for init_net. */
static ctl_table nf_ct_netfilter_table[] = {
	{
		.procname	= "nf_conntrack_max",
		.data		= &nf_conntrack_max,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{ }
};

static struct ctl_path nf_ct_path[] = {
	{ .procname = "net", },
	{ }
};
/* Register the sysctl tables for a namespace: the legacy net.* alias
 * (init_net only) and a per-net copy of nf_ct_sysctl_table with its
 * data pointers redirected to this net's counters.  The table indices
 * patched below (1-4) must match the template's order.
 * Returns 0 or -ENOMEM, unwinding on failure.
 */
static int nf_conntrack_standalone_init_sysctl(struct net *net)
{
	struct ctl_table *table;

	if (net_eq(net, &init_net)) {
		nf_ct_netfilter_header =
		       register_sysctl_paths(nf_ct_path, nf_ct_netfilter_table);
		if (!nf_ct_netfilter_header)
			goto out;
	}

	table = kmemdup(nf_ct_sysctl_table, sizeof(nf_ct_sysctl_table),
			GFP_KERNEL);
	if (!table)
		goto out_kmemdup;

	/* Redirect the template's init_net pointers to this net. */
	table[1].data = &net->ct.count;
	table[2].data = &net->ct.htable_size;
	table[3].data = &net->ct.sysctl_checksum;
	table[4].data = &net->ct.sysctl_log_invalid;

	net->ct.sysctl_header = register_net_sysctl_table(net,
					nf_net_netfilter_sysctl_path, table);
	if (!net->ct.sysctl_header)
		goto out_unregister_netfilter;

	return 0;

out_unregister_netfilter:
	kfree(table);
out_kmemdup:
	if (net_eq(net, &init_net))
		unregister_sysctl_table(nf_ct_netfilter_header);
out:
	printk(KERN_ERR "nf_conntrack: can't register to sysctl.\n");
	return -ENOMEM;
}

/* Unregister sysctls and free the per-net table copy (reverse of init). */
static void nf_conntrack_standalone_fini_sysctl(struct net *net)
{
	struct ctl_table *table;

	if (net_eq(net, &init_net))
		unregister_sysctl_table(nf_ct_netfilter_header);
	table = net->ct.sysctl_header->ctl_table_arg;
	unregister_net_sysctl_table(net->ct.sysctl_header);
	kfree(table);
}
#else
/* sysctl disabled: nothing to register or tear down. */
static int nf_conntrack_standalone_init_sysctl(struct net *net)
{
	return 0;
}

static void nf_conntrack_standalone_fini_sysctl(struct net *net)
{
}
#endif /* CONFIG_SYSCTL */
static int nf_conntrack_net_init(struct net *net)
{
int ret;
ret = nf_conntrack_init(net);
if (ret < 0)
goto out_init;
ret = nf_conntrack_standalone_init_proc(net);
if (ret < 0)
goto out_proc;
net->ct.sysctl_checksum = 1;
net->ct.sysctl_log_invalid = 0;
ret = nf_conntrack_standalone_init_sysctl(net);
if (ret < 0)
goto out_sysctl;
return 0;
out_sysctl:
nf_conntrack_standalone_fini_proc(net);
out_proc:
nf_conntrack_cleanup(net);
out_init:
return ret;
}
static void nf_conntrack_net_exit(struct net *net)
{
nf_conntrack_standalone_fini_sysctl(net);
nf_conntrack_standalone_fini_proc(net);
nf_conntrack_cleanup(net);
}
static struct pernet_operations nf_conntrack_net_ops = {
.init = nf_conntrack_net_init,
.exit = nf_conntrack_net_exit,
};
/* Module entry point: register the per-namespace init/exit hooks,
 * which also runs them for all currently existing namespaces. */
static int __init nf_conntrack_standalone_init(void)
{
        return register_pernet_subsys(&nf_conntrack_net_ops);
}
/* Module exit: unregistering also runs the exit hook per namespace. */
static void __exit nf_conntrack_standalone_fini(void)
{
        unregister_pernet_subsys(&nf_conntrack_net_ops);
}
module_init(nf_conntrack_standalone_init);
module_exit(nf_conntrack_standalone_fini);
/* Some modules need us, but don't depend directly on any symbol.
They should call this. */
void need_conntrack(void)
{
        /* Intentionally empty: exists only as a link-time dependency
         * anchor for modules that need conntrack loaded (see the
         * comment above). */
}
EXPORT_SYMBOL_GPL(need_conntrack);
| gpl-2.0 |
DutchDanny/Holiday-ICE | arch/um/kernel/skas/mmu.c | 3208 | 3950 | /*
* Copyright (C) 2002 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
* Licensed under the GPL
*/
#include "linux/mm.h"
#include "linux/sched.h"
#include "linux/slab.h"
#include "asm/pgalloc.h"
#include "asm/pgtable.h"
#include "as-layout.h"
#include "os.h"
#include "skas.h"
extern int __syscall_stub_start;
/*
 * Map the kernel page at virtual address @kernel into @mm at the
 * userspace address @proc, allocating page-table levels as needed.
 * The resulting PTE is present and read-only (used for the syscall
 * stub code/data pages).
 *
 * Returns 0 on success, -ENOMEM if a page-table level could not be
 * allocated.
 */
static int init_stub_pte(struct mm_struct *mm, unsigned long proc,
                         unsigned long kernel)
{
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;

        pgd = pgd_offset(mm, proc);
        pud = pud_alloc(mm, pgd, proc);
        if (!pud)
                goto out;

        pmd = pmd_alloc(mm, pud, proc);
        if (!pmd)
                goto out_pmd;

        pte = pte_alloc_map(mm, NULL, pmd, proc);
        if (!pte)
                goto out_pte;

        *pte = mk_pte(virt_to_page(kernel), __pgprot(_PAGE_PRESENT));
        *pte = pte_mkread(*pte);
        return 0;

        /* Unwind the levels walked above, innermost first.
         * NOTE(review): this frees the pmd/pud regardless of whether
         * they were freshly allocated here -- presumably safe because
         * the stub addresses live in their own dedicated region;
         * confirm against STUB_START/STUB_END layout. */
 out_pte:
        pmd_free(mm, pmd);
 out_pmd:
        pud_free(mm, pud);
 out:
        return -ENOMEM;
}
/*
 * Create the host-side context for a new address space: optionally a
 * zeroed stub-stack page, a backing host process (or /proc/mm fd when
 * the host supports it), and an LDT inherited from the current mm.
 *
 * Returns 0 on success or a negative errno; on failure the stub-stack
 * page (if any) is released.
 */
int init_new_context(struct task_struct *task, struct mm_struct *mm)
{
        struct mm_context *from_mm = NULL;
        struct mm_context *to_mm = &mm->context;
        unsigned long stack = 0;
        int ret = -ENOMEM;

        if (skas_needs_stub) {
                stack = get_zeroed_page(GFP_KERNEL);
                if (stack == 0)
                        goto out;
        }

        to_mm->id.stack = stack;
        /* Inherit from the current mm unless it is absent or the init mm.
         * (Fixed: "&current" had been corrupted into the "¤t" mojibake
         * glyph, which does not compile.) */
        if (current->mm != NULL && current->mm != &init_mm)
                from_mm = &current->mm->context;

        if (proc_mm) {
                /* Host supports /proc/mm: address space is a file */
                ret = new_mm(stack);
                if (ret < 0) {
                        printk(KERN_ERR "init_new_context_skas - "
                               "new_mm failed, errno = %d\n", ret);
                        goto out_free;
                }
                to_mm->id.u.mm_fd = ret;
        }
        else {
                /* Otherwise the address space is backed by a host process */
                if (from_mm)
                        to_mm->id.u.pid = copy_context_skas0(stack,
                                                             from_mm->id.u.pid);
                else to_mm->id.u.pid = start_userspace(stack);

                if (to_mm->id.u.pid < 0) {
                        ret = to_mm->id.u.pid;
                        goto out_free;
                }
        }

        ret = init_new_ldt(to_mm, from_mm);
        if (ret < 0) {
                printk(KERN_ERR "init_new_context_skas - init_ldt"
                       " failed, errno = %d\n", ret);
                goto out_free;
        }

        to_mm->stub_pages = NULL;
        return 0;

 out_free:
        if (to_mm->id.stack != 0)
                free_page(to_mm->id.stack);
 out:
        return ret;
}
/*
 * Install the syscall stub (code + data/stack pages) into a newly
 * duplicated address space. On any failure the process cannot run, so
 * it is killed with SIGSEGV.
 */
void arch_dup_mmap(struct mm_struct *oldmm, struct mm_struct *mm)
{
        struct page **pages;
        int err, ret;

        if (!skas_needs_stub)
                return;

        /* Map the stub code page (read-only) ... */
        ret = init_stub_pte(mm, STUB_CODE,
                            (unsigned long) &__syscall_stub_start);
        if (ret)
                goto out;

        /* ... and the stub data/stack page allocated in init_new_context() */
        ret = init_stub_pte(mm, STUB_DATA, mm->context.id.stack);
        if (ret)
                goto out;

        /* Page array handed to (and referenced by) the special mapping;
         * freed either below on failure or in arch_exit_mmap(). */
        pages = kmalloc(2 * sizeof(struct page *), GFP_KERNEL);
        if (pages == NULL) {
                printk(KERN_ERR "arch_dup_mmap failed to allocate 2 page "
                       "pointers\n");
                goto out;
        }

        pages[0] = virt_to_page(&__syscall_stub_start);
        pages[1] = virt_to_page(mm->context.id.stack);
        mm->context.stub_pages = pages;

        /* dup_mmap already holds mmap_sem */
        err = install_special_mapping(mm, STUB_START, STUB_END - STUB_START,
                                      VM_READ | VM_MAYREAD | VM_EXEC |
                                      VM_MAYEXEC | VM_DONTCOPY, pages);
        if (err) {
                printk(KERN_ERR "install_special_mapping returned %d\n", err);
                goto out_free;
        }
        return;

out_free:
        kfree(pages);
out:
        force_sigsegv(SIGSEGV, current);
}
/*
 * Undo arch_dup_mmap(): release the stub page array and clear the
 * stub code/data PTEs (if they were ever installed).
 */
void arch_exit_mmap(struct mm_struct *mm)
{
        pte_t *pte;

        /* kfree(NULL) is a no-op, so the NULL test the old code did
         * before freeing stub_pages was redundant. */
        kfree(mm->context.stub_pages);

        pte = virt_to_pte(mm, STUB_CODE);
        if (pte != NULL)
                pte_clear(mm, STUB_CODE, pte);

        pte = virt_to_pte(mm, STUB_DATA);
        if (pte == NULL)
                return;
        pte_clear(mm, STUB_DATA, pte);
}
/*
 * Tear down the host-side context created by init_new_context():
 * close the /proc/mm fd or kill the backing host process, release the
 * stub stack page and the LDT.
 */
void destroy_context(struct mm_struct *mm)
{
        struct mm_context *mmu = &mm->context;

        if (proc_mm)
                os_close_file(mmu->id.u.mm_fd);
        else {
                /*
                 * If init_new_context wasn't called, this will be
                 * zero, resulting in a kill(0), which will result in the
                 * whole UML suddenly dying. Also, cover negative and
                 * 1 cases, since they shouldn't happen either.
                 */
                if (mmu->id.u.pid < 2) {
                        printk(KERN_ERR "corrupt mm_context - pid = %d\n",
                               mmu->id.u.pid);
                        return;
                }
                os_kill_ptraced_process(mmu->id.u.pid, 1);
        }

        if (skas_needs_stub)
                free_page(mmu->id.stack);

        free_ldt(mmu);
}
| gpl-2.0 |
erikvanzijst/flatlinux | drivers/net/wimax/i2400m/fw.c | 4488 | 51038 | /*
* Intel Wireless WiMAX Connection 2400m
* Firmware uploader
*
*
* Copyright (C) 2007-2008 Intel Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*
* Intel Corporation <linux-wimax@intel.com>
* Yanir Lubetkin <yanirx.lubetkin@intel.com>
* Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
* - Initial implementation
*
*
* THE PROCEDURE
*
* The 2400m and derived devices work in two modes: boot-mode or
* normal mode. In boot mode we can execute only a handful of commands
* targeted at uploading the firmware and launching it.
*
* The 2400m enters boot mode when it is first connected to the
* system, when it crashes and when you ask it to reboot. There are
* two submodes of the boot mode: signed and non-signed. Signed takes
* firmwares signed with a certain private key, non-signed takes any
* firmware. Normal hardware takes only signed firmware.
*
* On boot mode, in USB, we write to the device using the bulk out
* endpoint and read from it in the notification endpoint.
*
* Upon entrance to boot mode, the device sends (preceded with a few
* zero length packets (ZLPs) on the notification endpoint in USB) a
* reboot barker (4 le32 words with the same value). We ack it by
* sending the same barker to the device. The device acks with a
* reboot ack barker (4 le32 words with value I2400M_ACK_BARKER) and
* then is fully booted. At this point we can upload the firmware.
*
* Note that different iterations of the device and EEPROM
* configurations will send different [re]boot barkers; these are
* collected in i2400m_barker_db along with the firmware
* characteristics they require.
*
* This process is accomplished by the i2400m_bootrom_init()
* function. All the device interaction happens through the
* i2400m_bm_cmd() [boot mode command]. Special return values will
* indicate if the device did reset during the process.
*
* After this, we read the MAC address and then (if needed)
* reinitialize the device. We need to read it ahead of time because
* in the future, we might not upload the firmware until userspace
* 'ifconfig up's the device.
*
* We can then upload the firmware file. The file is composed of a BCF
* header (basic data, keys and signatures) and a list of write
* commands and payloads. Optionally more BCF headers might follow the
* main payload. We first upload the header [i2400m_dnload_init()] and
* then pass the commands and payloads verbatim to the i2400m_bm_cmd()
* function [i2400m_dnload_bcf()]. Then we tell the device to jump to
* the new firmware [i2400m_dnload_finalize()].
*
* Once firmware is uploaded, we are good to go :)
*
* When we don't know in which mode we are, we first try by sending a
* warm reset request that will take us to boot-mode. If we time out
* waiting for a reboot barker, that means maybe we are already in
* boot mode, so we send a reboot barker.
*
* COMMAND EXECUTION
*
* This code (and process) is single threaded; for executing commands,
* we post a URB to the notification endpoint, post the command, wait
* for data on the notification buffer. We don't need to worry about
* others as we know we are the only ones in there.
*
* BACKEND IMPLEMENTATION
*
* This code is bus-generic; the bus-specific driver provides back end
* implementations to send a boot mode command to the device and to
* read an acknolwedgement from it (or an asynchronous notification)
* from it.
*
* FIRMWARE LOADING
*
* Note that in some cases, we can't just load a firmware file (for
* example, when resuming). For that, we might cache the firmware
* file. Thus, when doing the bootstrap, if there is a cache firmware
* file, it is used; if not, loading from disk is attempted.
*
* ROADMAP
*
* i2400m_barker_db_init Called by i2400m_driver_init()
* i2400m_barker_db_add
*
* i2400m_barker_db_exit Called by i2400m_driver_exit()
*
* i2400m_dev_bootstrap Called by __i2400m_dev_start()
* request_firmware
* i2400m_fw_bootstrap
* i2400m_fw_check
* i2400m_fw_hdr_check
* i2400m_fw_dnload
* release_firmware
*
* i2400m_fw_dnload
* i2400m_bootrom_init
* i2400m_bm_cmd
* i2400m_reset
* i2400m_dnload_init
* i2400m_dnload_init_signed
* i2400m_dnload_init_nonsigned
* i2400m_download_chunk
* i2400m_bm_cmd
* i2400m_dnload_bcf
* i2400m_bm_cmd
* i2400m_dnload_finalize
* i2400m_bm_cmd
*
* i2400m_bm_cmd
* i2400m->bus_bm_cmd_send()
* i2400m->bus_bm_wait_for_ack
* __i2400m_bm_ack_verify
* i2400m_is_boot_barker
*
* i2400m_bm_cmd_prepare Used by bus-drivers to prep
* commands before sending
*
* i2400m_pm_notifier Called on Power Management events
* i2400m_fw_cache
* i2400m_fw_uncache
*/
#include <linux/firmware.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/usb.h>
#include <linux/export.h>
#include "i2400m.h"
#define D_SUBMODULE fw
#include "debug-levels.h"
/* Reboot-ack barker the device sends once fully booted: the same
 * 32-bit word repeated four times, in little-endian wire order. */
static const __le32 i2400m_ACK_BARKER[4] = {
        cpu_to_le32(I2400M_ACK_BARKER),
        cpu_to_le32(I2400M_ACK_BARKER),
        cpu_to_le32(I2400M_ACK_BARKER),
        cpu_to_le32(I2400M_ACK_BARKER)
};
/**
* Prepare a boot-mode command for delivery
*
* @cmd: pointer to bootrom header to prepare
*
* Computes checksum if so needed. After calling this function, DO NOT
* modify the command or header as the checksum won't work anymore.
*
* We do it from here because some times we cannot do it in the
* original context the command was sent (it is a const), so when we
* copy it to our staging buffer, we add the checksum there.
*/
void i2400m_bm_cmd_prepare(struct i2400m_bootrom_header *cmd)
{
        if (i2400m_brh_get_use_checksum(cmd)) {
                int i;
                u32 checksum = 0;
                const u32 *checksum_ptr = (void *) cmd->payload;

                /* Sum the payload one 32-bit word at a time; data_size
                 * is in bytes, hence the /4.
                 * NOTE(review): accumulating cpu_to_le32() values into a
                 * host-order u32 looks endian-suspect on big-endian
                 * hosts -- confirm against the device protocol. */
                for (i = 0; i < cmd->data_size / 4; i++)
                        checksum += cpu_to_le32(*checksum_ptr++);
                /* The header fields also participate in the checksum */
                checksum += cmd->command + cmd->target_addr + cmd->data_size;
                cmd->block_checksum = cpu_to_le32(checksum);
        }
}
EXPORT_SYMBOL_GPL(i2400m_bm_cmd_prepare);
/*
* Database of known barkers.
*
* A barker is what the device sends indicating he is ready to be
* bootloaded. Different versions of the device will send different
* barkers. Depending on the barker, it might mean the device wants
* some kind of firmware or the other.
*/
/* One entry per known barker: the 16-byte wire image (a word repeated
 * four times). The array grows on demand via i2400m_zrealloc_2x(). */
static struct i2400m_barker_db {
        __le32 data[4];
} *i2400m_barker_db;
/* Entries in use vs. allocated capacity of i2400m_barker_db */
static size_t i2400m_barker_db_used, i2400m_barker_db_size;
/*
 * Double the capacity of a zero-initialized dynamic array.
 *
 * @ptr: in/out pointer to the array storage
 * @_count: in/out element capacity; doubled on success (2 if it was 0)
 * @el_size: size of a single element, in bytes
 * @gfp_flags: allocation flags for krealloc()
 *
 * Returns: 0 on success, with *ptr and *_count updated and the new
 *     tail of the array zeroed; -ENOMEM on allocation failure, with
 *     the original array left untouched.
 */
static
int i2400m_zrealloc_2x(void **ptr, size_t *_count, size_t el_size,
                       gfp_t gfp_flags)
{
        size_t count = *_count;
        size_t grown_count = count == 0 ? 2 : 2 * count;
        size_t bytes = el_size * count;
        size_t grown_bytes = el_size * grown_count;
        void *grown = krealloc(*ptr, grown_bytes, gfp_flags);

        if (grown == NULL)
                return -ENOMEM;

        /* Zero only what was added; a brand-new allocation is cleared
         * in full (krealloc does not zero fresh memory). */
        if (bytes == 0)
                memset(grown, 0, grown_bytes);
        else
                memset(grown + bytes, 0, grown_bytes - bytes);

        *ptr = grown;
        *_count = grown_count;
        return 0;
}
/*
* Add a barker to the database
*
* This cannot used outside of this module and only at at module_init
* time. This is to avoid the need to do locking.
*/
/*
 * Add a barker to the database
 *
 * @barker_id: barker word, in host order; it is stored replicated in
 *     all four slots, matching what the device sends on the wire.
 *
 * This cannot be used outside of this module and only at module_init
 * time. This is to avoid the need to do locking.
 *
 * Returns: 0 on success, -ENOMEM if the database could not be grown.
 */
static
int i2400m_barker_db_add(u32 barker_id)
{
        int result;
        struct i2400m_barker_db *barker;

        /* Grow the array (doubling) when full */
        if (i2400m_barker_db_used >= i2400m_barker_db_size) {
                result = i2400m_zrealloc_2x(
                        (void **) &i2400m_barker_db, &i2400m_barker_db_size,
                        sizeof(i2400m_barker_db[0]), GFP_KERNEL);
                if (result < 0)
                        return result;
        }
        barker = i2400m_barker_db + i2400m_barker_db_used++;
        /* data[] is little-endian wire format, so convert FROM host
         * order with cpu_to_le32(). The old code used le32_to_cpu(),
         * which performs the identical byteswap but carries the wrong
         * sparse annotation direction. */
        barker->data[0] = cpu_to_le32(barker_id);
        barker->data[1] = cpu_to_le32(barker_id);
        barker->data[2] = cpu_to_le32(barker_id);
        barker->data[3] = cpu_to_le32(barker_id);
        return 0;
}
void i2400m_barker_db_exit(void)
{
kfree(i2400m_barker_db);
i2400m_barker_db = NULL;
i2400m_barker_db_size = 0;
i2400m_barker_db_used = 0;
}
/*
* Helper function to add all the known stable barkers to the barker
* database.
*/
static
int i2400m_barker_db_known_barkers(void)
{
int result;
result = i2400m_barker_db_add(I2400M_NBOOT_BARKER);
if (result < 0)
goto error_add;
result = i2400m_barker_db_add(I2400M_SBOOT_BARKER);
if (result < 0)
goto error_add;
result = i2400m_barker_db_add(I2400M_SBOOT_BARKER_6050);
if (result < 0)
goto error_add;
error_add:
return result;
}
/*
* Initialize the barker database
*
* This can only be used from the module_init function for this
* module; this is to avoid the need to do locking.
*
* @options: command line argument with extra barkers to
* recognize. This is a comma-separated list of 32-bit hex
* numbers. They are appended to the existing list. Setting 0
* cleans the existing list and starts a new one.
*/
int i2400m_barker_db_init(const char *_options)
{
        int result;
        char *options = NULL, *options_orig = NULL, *token;

        i2400m_barker_db = NULL;
        i2400m_barker_db_size = 0;
        i2400m_barker_db_used = 0;

        result = i2400m_barker_db_known_barkers();
        if (result < 0)
                goto error_add;
        /* parse command line options from i2400m.barkers */
        if (_options != NULL) {
                unsigned barker;

                options_orig = kstrdup(_options, GFP_KERNEL);
                if (options_orig == NULL) {
                        result = -ENOMEM;
                        goto error_parse;
                }
                options = options_orig;

                while ((token = strsep(&options, ",")) != NULL) {
                        if (*token == '\0')	/* eat joint commas */
                                continue;
                        /* 'barker' is 32 bits wide, so the value read by
                         * sscanf(%x) can never exceed 0xffffffff; the old
                         * 'barker > 0xffffffff' test was always false and
                         * has been dropped. */
                        if (sscanf(token, "%x", &barker) != 1) {
                                printk(KERN_ERR "%s: can't recognize "
                                       "i2400m.barkers value '%s' as "
                                       "a 32-bit number\n",
                                       __func__, token);
                                result = -EINVAL;
                                goto error_parse;
                        }
                        if (barker == 0) {
                                /* clean list and start new */
                                i2400m_barker_db_exit();
                                continue;
                        }
                        result = i2400m_barker_db_add(barker);
                        if (result < 0)
                                goto error_add;
                }
                kfree(options_orig);
        }
        return 0;

error_parse:
error_add:
        /* Fix: options_orig used to be leaked on parse/add failures;
         * kfree(NULL) is fine for paths where it was never allocated. */
        kfree(options_orig);
        /* Fix: reset the database via _exit() instead of a bare kfree()
         * that left i2400m_barker_db dangling (and the size/used counts
         * stale), which made a later i2400m_barker_db_exit() from the
         * caller a double free. */
        i2400m_barker_db_exit();
        return result;
}
/*
* Recognize a boot barker
*
* @buf: buffer where the boot barker.
* @buf_size: size of the buffer (has to be 16 bytes). It is passed
* here so the function can check it for the caller.
*
* Note that as a side effect, upon identifying the obtained boot
* barker, this function will set i2400m->barker to point to the right
* barker database entry. Subsequent calls to the function will result
* in verifying that the same type of boot barker is returned when the
* device [re]boots (as long as the same device instance is used).
*
* Return: 0 if @buf matches a known boot barker. -ENOENT if the
* buffer in @buf doesn't match any boot barker in the database or
* -EILSEQ if the buffer doesn't have the right size.
*/
int i2400m_is_boot_barker(struct i2400m *i2400m,
                          const void *buf, size_t buf_size)
{
        int result;
        struct device *dev = i2400m_dev(i2400m);
        struct i2400m_barker_db *barker;
        int i;

        result = -ENOENT;
        /* Barkers are exactly 16 bytes (checked at build time below);
         * use element [0] -- the old code took sizeof() of element [i]
         * with 'i' still uninitialized (harmless, as sizeof does not
         * evaluate, but needlessly alarming). */
        if (buf_size != sizeof(i2400m_barker_db[0].data))
                return result;

        /* Short circuit if we have already discovered the barker
         * associated with the device. */
        if (i2400m->barker
            && !memcmp(buf, i2400m->barker, sizeof(i2400m->barker->data))) {
                /* Fix: pointer subtraction already yields an element
                 * index; the old code divided by the element size again,
                 * so the debug message printed a bogus index. */
                unsigned index = i2400m->barker - i2400m_barker_db;
                d_printf(2, dev, "boot barker cache-confirmed #%u/%08x\n",
                         index, le32_to_cpu(i2400m->barker->data[0]));
                return 0;
        }

        for (i = 0; i < i2400m_barker_db_used; i++) {
                barker = &i2400m_barker_db[i];
                BUILD_BUG_ON(sizeof(barker->data) != 16);
                if (memcmp(buf, barker->data, sizeof(barker->data)))
                        continue;

                if (i2400m->barker == NULL) {
                        /* First match: latch the device's barker type and
                         * derive the signed/non-signed boot mode from it */
                        i2400m->barker = barker;
                        d_printf(1, dev, "boot barker set to #%u/%08x\n",
                                 i, le32_to_cpu(barker->data[0]));
                        if (barker->data[0] == le32_to_cpu(I2400M_NBOOT_BARKER))
                                i2400m->sboot = 0;
                        else
                                i2400m->sboot = 1;
                } else if (i2400m->barker != barker) {
                        dev_err(dev, "HW inconsistency: device "
                                "reports a different boot barker "
                                "than set (from %08x to %08x)\n",
                                le32_to_cpu(i2400m->barker->data[0]),
                                le32_to_cpu(barker->data[0]));
                        result = -EIO;
                } else
                        d_printf(2, dev, "boot barker confirmed #%u/%08x\n",
                                 i, le32_to_cpu(barker->data[0]));
                result = 0;
                break;
        }
        return result;
}
EXPORT_SYMBOL_GPL(i2400m_is_boot_barker);
/*
* Verify the ack data received
*
* Given a reply to a boot mode command, chew it and verify everything
* is ok.
*
* @opcode: opcode which generated this ack. For error messages.
* @ack: pointer to ack data we received
* @ack_size: size of that data buffer
* @flags: I2400M_BM_CMD_* flags we called the command with.
*
* Way too long function -- maybe it should be further split
*/
static
ssize_t __i2400m_bm_ack_verify(struct i2400m *i2400m, int opcode,
                               struct i2400m_bootrom_header *ack,
                               size_t ack_size, int flags)
{
        ssize_t result = -ENOMEM;
        struct device *dev = i2400m_dev(i2400m);

        d_fnstart(8, dev, "(i2400m %p opcode %d ack %p size %zu)\n",
                  i2400m, opcode, ack, ack_size);
        /* Must at least contain a full bootrom header */
        if (ack_size < sizeof(*ack)) {
                result = -EIO;
                dev_err(dev, "boot-mode cmd %d: HW BUG? notification didn't "
                        "return enough data (%zu bytes vs %zu expected)\n",
                        opcode, ack_size, sizeof(*ack));
                goto error_ack_short;
        }
        /* A reboot barker instead of an ack means the device reset;
         * -ERESTARTSYS tells the caller to restart the boot sequence */
        result = i2400m_is_boot_barker(i2400m, ack, ack_size);
        if (result >= 0) {
                result = -ERESTARTSYS;
                d_printf(6, dev, "boot-mode cmd %d: HW boot barker\n", opcode);
                goto error_reboot;
        }
        /* A reboot-ack barker means the device just finished booting */
        if (ack_size == sizeof(i2400m_ACK_BARKER)
            && memcmp(ack, i2400m_ACK_BARKER, sizeof(*ack)) == 0) {
                result = -EISCONN;
                d_printf(3, dev, "boot-mode cmd %d: HW reboot ack barker\n",
                         opcode);
                goto error_reboot_ack;
        }
        result = 0;
        /* Raw mode: hand the ack back untouched, no field validation */
        if (flags & I2400M_BM_CMD_RAW)
                goto out_raw;
        /* Convert the multi-byte fields to host order in place; from
         * here on the caller sees native-endian values */
        ack->data_size = le32_to_cpu(ack->data_size);
        ack->target_addr = le32_to_cpu(ack->target_addr);
        ack->block_checksum = le32_to_cpu(ack->block_checksum);
        d_printf(5, dev, "boot-mode cmd %d: notification for opcode %u "
                 "response %u csum %u rr %u da %u\n",
                 opcode, i2400m_brh_get_opcode(ack),
                 i2400m_brh_get_response(ack),
                 i2400m_brh_get_use_checksum(ack),
                 i2400m_brh_get_response_required(ack),
                 i2400m_brh_get_direct_access(ack));
        result = -EIO;
        /* Validate signature, matching opcode and response code */
        if (i2400m_brh_get_signature(ack) != 0xcbbc) {
                dev_err(dev, "boot-mode cmd %d: HW BUG? wrong signature "
                        "0x%04x\n", opcode, i2400m_brh_get_signature(ack));
                goto error_ack_signature;
        }
        /* opcode == -1 means "no command was sent, accept any opcode" */
        if (opcode != -1 && opcode != i2400m_brh_get_opcode(ack)) {
                dev_err(dev, "boot-mode cmd %d: HW BUG? "
                        "received response for opcode %u, expected %u\n",
                        opcode, i2400m_brh_get_opcode(ack), opcode);
                goto error_ack_opcode;
        }
        if (i2400m_brh_get_response(ack) != 0) {	/* failed? */
                dev_err(dev, "boot-mode cmd %d: error; hw response %u\n",
                        opcode, i2400m_brh_get_response(ack));
                goto error_ack_failed;
        }
        /* The buffer must be big enough for the payload the header claims */
        if (ack_size < ack->data_size + sizeof(*ack)) {
                dev_err(dev, "boot-mode cmd %d: SW BUG "
                        "driver provided only %zu bytes for %zu bytes "
                        "of data\n", opcode, ack_size,
                        (size_t) le32_to_cpu(ack->data_size) + sizeof(*ack));
                goto error_ack_short_buffer;
        }
        result = ack_size;
        /* Don't you love this stack of empty targets? Well, I don't
         * either, but it helps track exactly who comes in here and
         * why :) */
error_ack_short_buffer:
error_ack_failed:
error_ack_opcode:
error_ack_signature:
out_raw:
error_reboot_ack:
error_reboot:
error_ack_short:
        d_fnend(8, dev, "(i2400m %p opcode %d ack %p size %zu) = %d\n",
                i2400m, opcode, ack, ack_size, (int) result);
        return result;
}
/**
* i2400m_bm_cmd - Execute a boot mode command
*
* @cmd: buffer containing the command data (pointing at the header).
* This data can be ANYWHERE (for USB, we will copy it to an
* specific buffer). Make sure everything is in proper little
* endian.
*
* A raw buffer can be also sent, just cast it and set flags to
* I2400M_BM_CMD_RAW.
*
* This function will generate a checksum for you if the
* checksum bit in the command is set (unless I2400M_BM_CMD_RAW
* is set).
*
* You can use the i2400m->bm_cmd_buf to stage your commands and
* send them.
*
* If NULL, no command is sent (we just wait for an ack).
*
* @cmd_size: size of the command. Will be auto padded to the
* bus-specific drivers padding requirements.
*
* @ack: buffer where to place the acknowledgement. If it is a regular
* command response, all fields will be returned with the right,
* native endianess.
*
* You *cannot* use i2400m->bm_ack_buf for this buffer.
*
* @ack_size: size of @ack, 16 aligned; you need to provide at least
* sizeof(*ack) bytes and then enough to contain the return data
* from the command
*
* @flags: see I2400M_BM_CMD_* above.
*
* @returns: bytes received by the notification; if < 0, an errno code
* denoting an error or:
*
* -ERESTARTSYS The device has rebooted
*
* Executes a boot-mode command and waits for a response, doing basic
* validation on it; if a zero length response is received, it retries
* waiting for a response until a non-zero one is received (timing out
* after %I2400M_BOOT_RETRIES retries).
*/
static
ssize_t i2400m_bm_cmd(struct i2400m *i2400m,
                      const struct i2400m_bootrom_header *cmd, size_t cmd_size,
                      struct i2400m_bootrom_header *ack, size_t ack_size,
                      int flags)
{
        ssize_t result = -ENOMEM, rx_bytes;
        struct device *dev = i2400m_dev(i2400m);
        /* -1 means "wait-only" mode: no command was sent, so
         * __i2400m_bm_ack_verify() will accept any opcode in the ack */
        int opcode = cmd == NULL ? -1 : i2400m_brh_get_opcode(cmd);

        d_fnstart(6, dev, "(i2400m %p cmd %p size %zu ack %p size %zu)\n",
                  i2400m, cmd, cmd_size, ack, ack_size);
        BUG_ON(ack_size < sizeof(*ack));
        BUG_ON(i2400m->boot_mode == 0);

        if (cmd != NULL) {		/* send the command */
                result = i2400m->bus_bm_cmd_send(i2400m, cmd, cmd_size, flags);
                if (result < 0)
                        goto error_cmd_send;
                if ((flags & I2400M_BM_CMD_RAW) == 0)
                        d_printf(5, dev,
                                 "boot-mode cmd %d csum %u rr %u da %u: "
                                 "addr 0x%04x size %u block csum 0x%04x\n",
                                 opcode, i2400m_brh_get_use_checksum(cmd),
                                 i2400m_brh_get_response_required(cmd),
                                 i2400m_brh_get_direct_access(cmd),
                                 cmd->target_addr, cmd->data_size,
                                 cmd->block_checksum);
        }
        /* Wait for the device's reply (or an async barker) */
        result = i2400m->bus_bm_wait_for_ack(i2400m, ack, ack_size);
        if (result < 0) {
                dev_err(dev, "boot-mode cmd %d: error waiting for an ack: %d\n",
                        opcode, (int) result);	/* bah, %zd doesn't work */
                goto error_wait_for_ack;
        }
        rx_bytes = result;
        /* verify the ack and read more if necessary [result is the
         * final amount of bytes we get in the ack]  */
        result = __i2400m_bm_ack_verify(i2400m, opcode, ack, ack_size, flags);
        if (result < 0)
                goto error_bad_ack;
        /* Don't you love this stack of empty targets? Well, I don't
         * either, but it helps track exactly who comes in here and
         * why :) */
        result = rx_bytes;
error_bad_ack:
error_wait_for_ack:
error_cmd_send:
        d_fnend(6, dev, "(i2400m %p cmd %p size %zu ack %p size %zu) = %d\n",
                i2400m, cmd, cmd_size, ack, ack_size, (int) result);
        return result;
}
/**
* i2400m_download_chunk - write a single chunk of data to the device's memory
*
* @i2400m: device descriptor
* @buf: the buffer to write
* @buf_len: length of the buffer to write
* @addr: address in the device memory space
* @direct: bootrom write mode
* @do_csum: should a checksum validation be performed
*/
static int i2400m_download_chunk(struct i2400m *i2400m, const void *chunk,
                                 size_t __chunk_len, unsigned long addr,
                                 unsigned int direct, unsigned int do_csum)
{
        int ret;
        /* Pad the payload up to the bus alignment requirement */
        size_t chunk_len = ALIGN(__chunk_len, I2400M_PL_ALIGN);
        struct device *dev = i2400m_dev(i2400m);
        /* Header + payload laid out contiguously in the staging buffer;
         * cmd_payload is a VLA-in-struct (GCC extension) sized to the
         * aligned chunk length */
        struct {
                struct i2400m_bootrom_header cmd;
                u8 cmd_payload[chunk_len];
        } __packed *buf;
        struct i2400m_bootrom_header ack;

        d_fnstart(5, dev, "(i2400m %p chunk %p __chunk_len %zu addr 0x%08lx "
                  "direct %u do_csum %u)\n", i2400m, chunk, __chunk_len,
                  addr, direct, do_csum);
        buf = i2400m->bm_cmd_buf;
        memcpy(buf->cmd_payload, chunk, __chunk_len);
        /* Fill the alignment padding with a recognizable pattern */
        memset(buf->cmd_payload + __chunk_len, 0xad, chunk_len - __chunk_len);

        /* Checksum/direct-access are only usable on suitably aligned
         * lengths (4-byte for csum, 16-byte for direct) */
        buf->cmd.command = i2400m_brh_command(I2400M_BRH_WRITE,
                                              __chunk_len & 0x3 ? 0 : do_csum,
                                              __chunk_len & 0xf ? 0 : direct);
        buf->cmd.target_addr = cpu_to_le32(addr);
        buf->cmd.data_size = cpu_to_le32(__chunk_len);
        ret = i2400m_bm_cmd(i2400m, &buf->cmd, sizeof(buf->cmd) + chunk_len,
                            &ack, sizeof(ack), 0);
        if (ret >= 0)	/* callers only care about success/failure */
                ret = 0;
        d_fnend(5, dev, "(i2400m %p chunk %p __chunk_len %zu addr 0x%08lx "
                "direct %u do_csum %u) = %d\n", i2400m, chunk, __chunk_len,
                addr, direct, do_csum, ret);
        return ret;
}
/*
* Download a BCF file's sections to the device
*
* @i2400m: device descriptor
* @bcf: pointer to firmware data (first header followed by the
* payloads). Assumed verified and consistent.
* @bcf_len: length (in bytes) of the @bcf buffer.
*
* Returns: < 0 errno code on error or the offset to the jump instruction.
*
* Given a BCF file, downloads each section (a command and a payload)
* to the device's address space. Actually, it just executes each
* command i the BCF file.
*
* The section size has to be aligned to 4 bytes AND the padding has
* to be taken from the firmware file, as the signature takes it into
* account.
*/
static
ssize_t i2400m_dnload_bcf(struct i2400m *i2400m,
                          const struct i2400m_bcf_hdr *bcf, size_t bcf_len)
{
        ssize_t ret;
        struct device *dev = i2400m_dev(i2400m);
        size_t offset,		/* iterator offset */
                data_size,	/* Size of the data payload */
                section_size,	/* Size of the whole section (cmd + payload) */
                section = 1;
        const struct i2400m_bootrom_header *bh;
        struct i2400m_bootrom_header ack;

        d_fnstart(3, dev, "(i2400m %p bcf %p bcf_len %zu)\n",
                  i2400m, bcf, bcf_len);
        /* Iterate over the command blocks in the BCF file that start
         * after the header (header_len is in 32-bit words) */
        offset = le32_to_cpu(bcf->header_len) * sizeof(u32);
        while (1) {	/* start sending the file */
                bh = (void *) bcf + offset;
                data_size = le32_to_cpu(bh->data_size);
                /* Sections are padded to 4-byte multiples in the file */
                section_size = ALIGN(sizeof(*bh) + data_size, 4);
                d_printf(7, dev,
                         "downloading section #%zu (@%zu %zu B) to 0x%08x\n",
                         section, offset, sizeof(*bh) + data_size,
                         le32_to_cpu(bh->target_addr));
                /*
                 * We look for JUMP cmd from the bootmode header,
                 * either I2400M_BRH_SIGNED_JUMP for secure boot
                 * or I2400M_BRH_JUMP for unsecure boot, the last chunk
                 * should be the bootmode header with JUMP cmd.
                 */
                if (i2400m_brh_get_opcode(bh) == I2400M_BRH_SIGNED_JUMP ||
                    i2400m_brh_get_opcode(bh) == I2400M_BRH_JUMP) {
                        d_printf(5, dev, "jump found @%zu\n", offset);
                        break;
                }
                /* Guard against a truncated/corrupt file */
                if (offset + section_size > bcf_len) {
                        dev_err(dev, "fw %s: bad section #%zu, "
                                "end (@%zu) beyond EOF (@%zu)\n",
                                i2400m->fw_name, section,
                                offset + section_size, bcf_len);
                        ret = -EINVAL;
                        goto error_section_beyond_eof;
                }
                /* Small pause between sections; presumably gives the
                 * device time to commit the previous write -- confirm */
                __i2400m_msleep(20);
                /* Sections are sent verbatim (RAW), signature intact */
                ret = i2400m_bm_cmd(i2400m, bh, section_size,
                                    &ack, sizeof(ack), I2400M_BM_CMD_RAW);
                if (ret < 0) {
                        dev_err(dev, "fw %s: section #%zu (@%zu %zu B) "
                                "failed %d\n", i2400m->fw_name, section,
                                offset, sizeof(*bh) + data_size, (int) ret);
                        goto error_send;
                }
                offset += section_size;
                section++;
        }
        /* On success, return the offset of the JUMP command for
         * i2400m_dnload_finalize() */
        ret = offset;
error_section_beyond_eof:
error_send:
        d_fnend(3, dev, "(i2400m %p bcf %p bcf_len %zu) = %d\n",
                i2400m, bcf, bcf_len, (int) ret);
        return ret;
}
/*
* Indicate if the device emitted a reboot barker that indicates
* "signed boot"
*/
/* Nonzero when the device's reboot barker indicated "signed boot"
 * (i2400m->sboot is set in i2400m_is_boot_barker()); production
 * hardware is signed, hence the likely(). */
static
unsigned i2400m_boot_is_signed(struct i2400m *i2400m)
{
        return likely(i2400m->sboot);
}
/*
* Do the final steps of uploading firmware
*
* @bcf_hdr: BCF header we are actually using
* @bcf: pointer to the firmware image (which matches the first header
* that is followed by the actual payloads).
* @offset: [byte] offset into @bcf for the command we need to send.
*
* Depending on the boot mode (signed vs non-signed), different
* actions need to be taken.
*/
static
int i2400m_dnload_finalize(struct i2400m *i2400m,
                           const struct i2400m_bcf_hdr *bcf_hdr,
                           const struct i2400m_bcf_hdr *bcf, size_t offset)
{
        int ret = 0;
        struct device *dev = i2400m_dev(i2400m);
        struct i2400m_bootrom_header *cmd, ack;
        /* Staging layout: bootrom header followed by an optional
         * signature payload (flexible array) */
        struct {
                struct i2400m_bootrom_header cmd;
                u8 cmd_pl[0];
        } __packed *cmd_buf;
        size_t signature_block_offset, signature_block_size;

        d_fnstart(3, dev, "offset %zu\n", offset);
        /* The JUMP command found by i2400m_dnload_bcf() lives at
         * @offset inside the firmware image */
        cmd = (void *) bcf + offset;
        if (i2400m_boot_is_signed(i2400m) == 0) {
                struct i2400m_bootrom_header jump_ack;

                d_printf(1, dev, "unsecure boot, jumping to 0x%08x\n",
                         le32_to_cpu(cmd->target_addr));
                /* Copy to the staging buffer: the source is const and
                 * we need to rewrite the opcode/size fields */
                cmd_buf = i2400m->bm_cmd_buf;
                memcpy(&cmd_buf->cmd, cmd, sizeof(*cmd));
                cmd = &cmd_buf->cmd;
                /* now cmd points to the actual bootrom_header in cmd_buf */
                i2400m_brh_set_opcode(cmd, I2400M_BRH_JUMP);
                cmd->data_size = 0;
                ret = i2400m_bm_cmd(i2400m, cmd, sizeof(*cmd),
                                    &jump_ack, sizeof(jump_ack), 0);
        } else {
                d_printf(1, dev, "secure boot, jumping to 0x%08x\n",
                         le32_to_cpu(cmd->target_addr));
                cmd_buf = i2400m->bm_cmd_buf;
                memcpy(&cmd_buf->cmd, cmd, sizeof(*cmd));
                /* The signature (RSA modulus block) sits in the BCF
                 * header after the key and exponent, all sized in
                 * 32-bit words; append it to the SIGNED_JUMP command */
                signature_block_offset =
                        sizeof(*bcf_hdr)
                        + le32_to_cpu(bcf_hdr->key_size) * sizeof(u32)
                        + le32_to_cpu(bcf_hdr->exponent_size) * sizeof(u32);
                signature_block_size =
                        le32_to_cpu(bcf_hdr->modulus_size) * sizeof(u32);
                memcpy(cmd_buf->cmd_pl,
                       (void *) bcf_hdr + signature_block_offset,
                       signature_block_size);
                /* RAW: the command is already wire-format and signed */
                ret = i2400m_bm_cmd(i2400m, &cmd_buf->cmd,
                                    sizeof(cmd_buf->cmd) + signature_block_size,
                                    &ack, sizeof(ack), I2400M_BM_CMD_RAW);
        }
        d_fnend(3, dev, "returning %d\n", ret);
        return ret;
}
/**
* i2400m_bootrom_init - Reboots a powered device into boot mode
*
* @i2400m: device descriptor
* @flags:
* I2400M_BRI_SOFT: a reboot barker has been seen
* already, so don't wait for it.
*
* I2400M_BRI_NO_REBOOT: Don't send a reboot command, but wait
* for a reboot barker notification. This is a one shot; if
* the state machine needs to send a reboot command it will.
*
* Returns:
*
* < 0 errno code on error, 0 if ok.
*
* Description:
*
* Tries hard enough to put the device in boot-mode. There are two
* main phases to this:
*
* a. (1) send a reboot command and (2) get a reboot barker
*
* b. (1) echo/ack the reboot sending the reboot barker back and (2)
* getting an ack barker in return
*
* We want to skip (a) in some cases [soft]. The state machine is
* horrible, but it is basically: on each phase, send what has to be
* sent (if any), wait for the answer and act on the answer. We might
* have to backtrack and retry, so we keep a max tries counter for
* that.
*
* It sucks because we don't know ahead of time which is going to be
* the reboot barker (the device might send different ones depending
* on its EEPROM config) and once the device reboots and waits for the
* echo/ack reboot barker being sent back, it doesn't understand
* anything else. So we can be left at the point where we don't know
* what to send to it -- cold reset and bus reset seem to have little
* effect. So the function iterates (in this case) through all the
* known barkers and tries them all until an ACK is
* received. Otherwise, it gives up.
*
* If we get a timeout after sending a warm reset, we do it again.
*/
int i2400m_bootrom_init(struct i2400m *i2400m, enum i2400m_bri flags)
{
	int result;
	struct device *dev = i2400m_dev(i2400m);
	struct i2400m_bootrom_header *cmd;
	struct i2400m_bootrom_header ack;
	int count = i2400m->bus_bm_retries;	/* max reboot attempts */
	int ack_timeout_cnt = 1;
	unsigned i;

	/* Barkers and ACKs travel as raw bootrom headers, so the sizes
	 * must match exactly. */
	BUILD_BUG_ON(sizeof(*cmd) != sizeof(i2400m_barker_db[0].data));
	BUILD_BUG_ON(sizeof(ack) != sizeof(i2400m_ACK_BARKER));

	d_fnstart(4, dev, "(i2400m %p flags 0x%08x)\n", i2400m, flags);
	result = -ENOMEM;
	cmd = i2400m->bm_cmd_buf;
	if (flags & I2400M_BRI_SOFT)
		goto do_reboot_ack;	/* reboot barker already seen: skip phase (a) */
do_reboot:
	/* Phase (a): (warm) reboot the device and wait for its reboot barker */
	ack_timeout_cnt = 1;
	if (--count < 0)
		goto error_timeout;
	d_printf(4, dev, "device reboot: reboot command [%d # left]\n",
		 count);
	if ((flags & I2400M_BRI_NO_REBOOT) == 0)
		i2400m_reset(i2400m, I2400M_RT_WARM);
	/* NULL command: only wait for the device's notification */
	result = i2400m_bm_cmd(i2400m, NULL, 0, &ack, sizeof(ack),
			       I2400M_BM_CMD_RAW);
	flags &= ~I2400M_BRI_NO_REBOOT;	/* one-shot flag */
	switch (result) {
	case -ERESTARTSYS:
		/*
		 * at this point, i2400m_bm_cmd(), through
		 * __i2400m_bm_ack_process(), has updated
		 * i2400m->barker and we are good to go.
		 */
		d_printf(4, dev, "device reboot: got reboot barker\n");
		break;
	case -EISCONN: /* we don't know how it got here...but we follow it */
		d_printf(4, dev, "device reboot: got ack barker - whatever\n");
		goto do_reboot;
	case -ETIMEDOUT:
		/*
		 * Device has timed out, we might be in boot mode
		 * already and expecting an ack; if we don't know what
		 * the barker is, we just send them all. Cold reset
		 * and bus reset don't work. Beats me.
		 */
		if (i2400m->barker != NULL) {
			dev_err(dev, "device boot: reboot barker timed out, "
				"trying (set) %08x echo/ack\n",
				le32_to_cpu(i2400m->barker->data[0]));
			goto do_reboot_ack;
		}
		/* Barker unknown: try every known one until an ACK arrives */
		for (i = 0; i < i2400m_barker_db_used; i++) {
			struct i2400m_barker_db *barker = &i2400m_barker_db[i];

			memcpy(cmd, barker->data, sizeof(barker->data));
			result = i2400m_bm_cmd(i2400m, cmd, sizeof(*cmd),
					       &ack, sizeof(ack),
					       I2400M_BM_CMD_RAW);
			if (result == -EISCONN) {
				dev_warn(dev, "device boot: got ack barker "
					 "after sending echo/ack barker "
					 "#%d/%08x; rebooting j.i.c.\n",
					 i, le32_to_cpu(barker->data[0]));
				flags &= ~I2400M_BRI_NO_REBOOT;
				goto do_reboot;
			}
		}
		dev_err(dev, "device boot: tried all the echo/acks, could "
			"not get device to respond; giving up");
		result = -ESHUTDOWN;
		/* fall through -- give up through the dev-gone exit */
	case -EPROTO:
	case -ESHUTDOWN: /* dev is gone */
	case -EINTR: /* user cancelled */
		goto error_dev_gone;
	default:
		dev_err(dev, "device reboot: error %d while waiting "
			"for reboot barker - rebooting\n", result);
		d_dump(1, dev, &ack, result);
		goto do_reboot;
	}
	/* At this point we ack back with 4 REBOOT barkers and expect
	 * 4 ACK barkers. This is ugly, as we send a raw command --
	 * hence the cast. _bm_cmd() will catch the reboot ack
	 * notification and report it as -EISCONN. */
do_reboot_ack:
	/* Phase (b): echo the reboot barker, expect an ACK barker back */
	d_printf(4, dev, "device reboot ack: sending ack [%d # left]\n", count);
	memcpy(cmd, i2400m->barker->data, sizeof(i2400m->barker->data));
	result = i2400m_bm_cmd(i2400m, cmd, sizeof(*cmd),
			       &ack, sizeof(ack), I2400M_BM_CMD_RAW);
	switch (result) {
	case -ERESTARTSYS:
		d_printf(4, dev, "reboot ack: got reboot barker - retrying\n");
		if (--count < 0)
			goto error_timeout;
		goto do_reboot_ack;
	case -EISCONN:
		d_printf(4, dev, "reboot ack: got ack barker - good\n");
		break;
	case -ETIMEDOUT: /* no response, maybe it is the other type? */
		/* NOTE(review): with ack_timeout_cnt starting at 1 (and
		 * reset to 1 at do_reboot), the "retrying" branch below
		 * can never be taken -- the condition's polarity looks
		 * inverted relative to the messages; confirm intent. */
		if (ack_timeout_cnt-- < 0) {
			d_printf(4, dev, "reboot ack timedout: retrying\n");
			goto do_reboot_ack;
		} else {
			dev_err(dev, "reboot ack timedout too long: "
				"trying reboot\n");
			goto do_reboot;
		}
		break;
	case -EPROTO:
	case -ESHUTDOWN: /* dev is gone */
		goto error_dev_gone;
	default:
		dev_err(dev, "device reboot ack: error %d while waiting for "
			"reboot ack barker - rebooting\n", result);
		goto do_reboot;
	}
	d_printf(2, dev, "device reboot ack: got ack barker - boot done\n");
	result = 0;
exit_timeout:
error_dev_gone:
	d_fnend(4, dev, "(i2400m %p flags 0x%08x) = %d\n",
		i2400m, flags, result);
	return result;

error_timeout:
	dev_err(dev, "Timed out waiting for reboot ack\n");
	result = -ETIMEDOUT;
	goto exit_timeout;
}
/*
* Read the MAC addr
*
* The position this function reads is fixed in device memory and
* always available, even without firmware.
*
* Note we specify we want to read only six bytes, but provide space
* for 16, as we always get it rounded up.
*/
int i2400m_read_mac_addr(struct i2400m *i2400m)
{
int result;
struct device *dev = i2400m_dev(i2400m);
struct net_device *net_dev = i2400m->wimax_dev.net_dev;
struct i2400m_bootrom_header *cmd;
struct {
struct i2400m_bootrom_header ack;
u8 ack_pl[16];
} __packed ack_buf;
d_fnstart(5, dev, "(i2400m %p)\n", i2400m);
cmd = i2400m->bm_cmd_buf;
cmd->command = i2400m_brh_command(I2400M_BRH_READ, 0, 1);
cmd->target_addr = cpu_to_le32(0x00203fe8);
cmd->data_size = cpu_to_le32(6);
result = i2400m_bm_cmd(i2400m, cmd, sizeof(*cmd),
&ack_buf.ack, sizeof(ack_buf), 0);
if (result < 0) {
dev_err(dev, "BM: read mac addr failed: %d\n", result);
goto error_read_mac;
}
d_printf(2, dev, "mac addr is %pM\n", ack_buf.ack_pl);
if (i2400m->bus_bm_mac_addr_impaired == 1) {
ack_buf.ack_pl[0] = 0x00;
ack_buf.ack_pl[1] = 0x16;
ack_buf.ack_pl[2] = 0xd3;
get_random_bytes(&ack_buf.ack_pl[3], 3);
dev_err(dev, "BM is MAC addr impaired, faking MAC addr to "
"mac addr is %pM\n", ack_buf.ack_pl);
result = 0;
}
net_dev->addr_len = ETH_ALEN;
memcpy(net_dev->dev_addr, ack_buf.ack_pl, ETH_ALEN);
error_read_mac:
d_fnend(5, dev, "(i2400m %p) = %d\n", i2400m, result);
return result;
}
/*
* Initialize a non signed boot
*
* This implies sending some magic values to the device's memory. Note
* we convert the values to little endian in the same array
* declaration.
*/
/*
 * Initialize a non signed boot by writing the bus-provided poke table
 * (address/data pairs, terminated by a zero address) to the device.
 */
static
int i2400m_dnload_init_nonsigned(struct i2400m *i2400m)
{
	struct device *dev = i2400m_dev(i2400m);
	int result = 0;
	unsigned idx;

	d_fnstart(5, dev, "(i2400m %p)\n", i2400m);
	if (i2400m->bus_bm_pokes_table) {
		for (idx = 0; i2400m->bus_bm_pokes_table[idx].address; idx++) {
			result = i2400m_download_chunk(
				i2400m,
				&i2400m->bus_bm_pokes_table[idx].data,
				sizeof(i2400m->bus_bm_pokes_table[idx].data),
				i2400m->bus_bm_pokes_table[idx].address, 1, 1);
			if (result < 0)
				break;
		}
	}
	d_fnend(5, dev, "(i2400m %p) = %d\n", i2400m, result);
	return result;
}
/*
* Initialize the signed boot process
*
* @i2400m: device descriptor
*
* @bcf_hdr: pointer to the firmware header; assumes it is fully in
* memory (it has gone through basic validation).
*
* Returns: 0 if ok, < 0 errno code on error, -ERESTARTSYS if the hw
* rebooted.
*
* This writes the firmware BCF header to the device using the
* HASH_PAYLOAD_ONLY command.
*/
/*
 * Initialize the signed boot process by sending the firmware's BCF
 * header to the device as the payload of a HASH_PAYLOAD_ONLY command.
 *
 * Returns 0 if ok, < 0 errno code on error (-ERESTARTSYS if the hw
 * rebooted).
 */
static
int i2400m_dnload_init_signed(struct i2400m *i2400m,
			      const struct i2400m_bcf_hdr *bcf_hdr)
{
	struct device *dev = i2400m_dev(i2400m);
	struct {
		struct i2400m_bootrom_header cmd;
		struct i2400m_bcf_hdr cmd_pl;
	} __packed *cmd_buf;
	struct i2400m_bootrom_header ack;
	int result;

	d_fnstart(5, dev, "(i2400m %p bcf_hdr %p)\n", i2400m, bcf_hdr);

	cmd_buf = i2400m->bm_cmd_buf;
	cmd_buf->cmd.command =
		i2400m_brh_command(I2400M_BRH_HASH_PAYLOAD_ONLY, 0, 0);
	cmd_buf->cmd.target_addr = 0;
	cmd_buf->cmd.data_size = cpu_to_le32(sizeof(cmd_buf->cmd_pl));
	memcpy(&cmd_buf->cmd_pl, bcf_hdr, sizeof(*bcf_hdr));
	/* send command header plus payload in one raw buffer */
	result = i2400m_bm_cmd(i2400m, &cmd_buf->cmd, sizeof(*cmd_buf),
			       &ack, sizeof(ack), 0);
	/* positive values (bytes handled) count as success */
	if (result > 0)
		result = 0;
	d_fnend(5, dev, "(i2400m %p bcf_hdr %p) = %d\n", i2400m, bcf_hdr, result);
	return result;
}
/*
* Initialize the firmware download at the device size
*
* Multiplex to the one that matters based on the device's mode
* (signed or non-signed).
*/
static
int i2400m_dnload_init(struct i2400m *i2400m,
const struct i2400m_bcf_hdr *bcf_hdr)
{
int result;
struct device *dev = i2400m_dev(i2400m);
if (i2400m_boot_is_signed(i2400m)) {
d_printf(1, dev, "signed boot\n");
result = i2400m_dnload_init_signed(i2400m, bcf_hdr);
if (result == -ERESTARTSYS)
return result;
if (result < 0)
dev_err(dev, "firmware %s: signed boot download "
"initialization failed: %d\n",
i2400m->fw_name, result);
} else {
/* non-signed boot process without pokes */
d_printf(1, dev, "non-signed boot\n");
result = i2400m_dnload_init_nonsigned(i2400m);
if (result == -ERESTARTSYS)
return result;
if (result < 0)
dev_err(dev, "firmware %s: non-signed download "
"initialization failed: %d\n",
i2400m->fw_name, result);
}
return result;
}
/*
* Run consistency tests on the firmware file and load up headers
*
* Check for the firmware being made for the i2400m device,
* etc...These checks are mostly informative, as the device will make
* them too; but the driver's response is more informative on what
* went wrong.
*
* This will also look at all the headers present on the firmware
* file, and update i2400m->fw_bcf_hdr to point to them.
*/
/*
 * Sanity-check one BCF header from the firmware file.
 *
 * @index/@offset identify the header (for log messages only). Returns
 * 0 if the header is usable, -EBADF on a hard mismatch (version, module
 * type or vendor); an old build date only warns.
 */
static
int i2400m_fw_hdr_check(struct i2400m *i2400m,
			const struct i2400m_bcf_hdr *bcf_hdr,
			size_t index, size_t offset)
{
	struct device *dev = i2400m_dev(i2400m);
	u32 hdr_version = le32_to_cpu(bcf_hdr->header_version);
	unsigned mod_type = le32_to_cpu(bcf_hdr->module_type);
	unsigned hdr_len = sizeof(u32) * le32_to_cpu(bcf_hdr->header_len);
	unsigned ver_major = (hdr_version >> 16) & 0xffff;
	unsigned ver_minor = hdr_version & 0x0000ffff;
	unsigned mod_id = le32_to_cpu(bcf_hdr->module_id);
	unsigned mod_vendor = le32_to_cpu(bcf_hdr->module_vendor);
	unsigned build_date = le32_to_cpu(bcf_hdr->date);
	unsigned total_size = sizeof(u32) * le32_to_cpu(bcf_hdr->size);

	d_printf(1, dev, "firmware %s #%zd@%08zx: BCF header "
		 "type:vendor:id 0x%x:%x:%x v%u.%u (%u/%u B) built %08x\n",
		 i2400m->fw_name, index, offset,
		 mod_type, mod_vendor, mod_id,
		 ver_major, ver_minor, hdr_len, total_size, build_date);

	/* Hard errors */
	if (ver_major != 1) {
		dev_err(dev, "firmware %s #%zd@%08zx: major header version "
			"v%u.%u not supported\n",
			i2400m->fw_name, index, offset,
			ver_major, ver_minor);
		return -EBADF;
	}
	if (mod_type != 6) { /* built for the right hardware? */
		dev_err(dev, "firmware %s #%zd@%08zx: unexpected module "
			"type 0x%x; aborting\n",
			i2400m->fw_name, index, offset,
			mod_type);
		return -EBADF;
	}
	if (mod_vendor != 0x8086) {
		dev_err(dev, "firmware %s #%zd@%08zx: unexpected module "
			"vendor 0x%x; aborting\n",
			i2400m->fw_name, index, offset, mod_vendor);
		return -EBADF;
	}
	/* Soft error: old builds only get a warning */
	if (build_date < 0x20080300)
		dev_warn(dev, "firmware %s #%zd@%08zx: build date %08x "
			 "too old; unsupported\n",
			 i2400m->fw_name, index, offset, build_date);
	return 0;
}
/*
* Run consistency tests on the firmware file and load up headers
*
* Check for the firmware being made for the i2400m device,
* etc...These checks are mostly informative, as the device will make
* them too; but the driver's response is more informative on what
* went wrong.
*
* This will also look at all the headers present on the firmware
* file, and update i2400m->fw_hdrs to point to them.
*/
static
int i2400m_fw_check(struct i2400m *i2400m, const void *bcf, size_t bcf_size)
{
	int result;
	struct device *dev = i2400m_dev(i2400m);
	size_t headers = 0;	/* headers scanned (good or bad) */
	const struct i2400m_bcf_hdr *bcf_hdr;
	const void *itr, *next, *top;
	size_t slots = 0, used_slots = 0;

	/* Walk every BCF header in the image; collect pointers to the
	 * usable ones into i2400m->fw_hdrs, a NULL-terminated array that
	 * is grown on demand via i2400m_zrealloc_2x(). */
	for (itr = bcf, top = itr + bcf_size;
	     itr < top;
	     headers++, itr = next) {
		size_t leftover, offset, header_len, size;

		leftover = top - itr;
		offset = itr - bcf;
		if (leftover <= sizeof(*bcf_hdr)) {
			dev_err(dev, "firmware %s: %zu B left at @%zx, "
				"not enough for BCF header\n",
				i2400m->fw_name, leftover, offset);
			break;
		}
		bcf_hdr = itr;
		/* Only the first header is supposed to be followed by
		 * payload */
		header_len = sizeof(u32) * le32_to_cpu(bcf_hdr->header_len);
		size = sizeof(u32) * le32_to_cpu(bcf_hdr->size);
		if (headers == 0)
			next = itr + size;	/* skip header + payload */
		else
			next = itr + header_len;	/* bare header only */
		result = i2400m_fw_hdr_check(i2400m, bcf_hdr, headers, offset);
		if (result < 0)
			continue;	/* bad header: skip, keep scanning */
		if (used_slots + 1 >= slots) {
			/* +1 -> we need to account for the one we'll
			 * occupy and at least an extra one for
			 * always being NULL */
			result = i2400m_zrealloc_2x(
				(void **) &i2400m->fw_hdrs, &slots,
				sizeof(i2400m->fw_hdrs[0]),
				GFP_KERNEL);
			if (result < 0)
				goto error_zrealloc;
		}
		i2400m->fw_hdrs[used_slots] = bcf_hdr;
		used_slots++;
	}
	if (headers == 0) {
		dev_err(dev, "firmware %s: no usable headers found\n",
			i2400m->fw_name);
		result = -EBADF;
	} else
		result = 0;
error_zrealloc:
	return result;
}
/*
* Match a barker to a BCF header module ID
*
* The device sends a barker which tells the firmware loader which
* header in the BCF file has to be used. This does the matching.
*/
/*
 * Match a barker to a BCF header module ID.
 *
 * The device's reboot barker selects which header in the BCF file must
 * be used; returns non-zero on a match.
 */
static
unsigned i2400m_bcf_hdr_match(struct i2400m *i2400m,
			      const struct i2400m_bcf_hdr *bcf_hdr)
{
	/* high bit of both fields is used for something else; mask it */
	u32 barker = le32_to_cpu(i2400m->barker->data[0]) & 0x7fffffff;
	u32 module_id = le32_to_cpu(bcf_hdr->module_id) & 0x7fffffff;

	/* special case: 5x50 devices pair the SBOOT barker with ID 0 */
	return (barker == I2400M_SBOOT_BARKER && module_id == 0)
		|| module_id == barker;
}
/*
 * Find the BCF header that matches the barker the device sent, or the
 * first header for the non-signed-boot barker. Returns NULL if no
 * header matches.
 */
static
const struct i2400m_bcf_hdr *i2400m_bcf_hdr_find(struct i2400m *i2400m)
{
	struct device *dev = i2400m_dev(i2400m);
	const struct i2400m_bcf_hdr *hdr;
	unsigned idx;
	u32 barker = le32_to_cpu(i2400m->barker->data[0]);

	d_printf(2, dev, "finding BCF header for barker %08x\n", barker);
	if (barker == I2400M_NBOOT_BARKER) {
		hdr = i2400m->fw_hdrs[0];
		d_printf(1, dev, "using BCF header #%u/%08x for non-signed "
			 "barker\n", 0, le32_to_cpu(hdr->module_id));
		return hdr;
	}
	/* fw_hdrs is a NULL-terminated array */
	for (idx = 0; (hdr = i2400m->fw_hdrs[idx]) != NULL; idx++) {
		if (i2400m_bcf_hdr_match(i2400m, hdr)) {
			d_printf(1, dev, "hit on BCF hdr #%u/%08x\n",
				 idx, le32_to_cpu(hdr->module_id));
			return hdr;
		}
		d_printf(1, dev, "miss on BCF hdr #%u/%08x\n",
			 idx, le32_to_cpu(hdr->module_id));
	}
	dev_err(dev, "cannot find a matching BCF header for barker %08x\n",
		barker);
	return NULL;
}
/*
* Download the firmware to the device
*
* @i2400m: device descriptor
* @bcf: pointer to loaded (and minimally verified for consistency)
* firmware
* @bcf_size: size of the @bcf buffer (header plus payloads)
*
* The process for doing this is described in this file's header.
*
* Note we only reinitialize boot-mode if the flags say so. Some hw
* iterations need it, some don't. In any case, if we loop, we always
* need to reinitialize the boot room, hence the flags modification.
*/
static
int i2400m_fw_dnload(struct i2400m *i2400m, const struct i2400m_bcf_hdr *bcf,
		     size_t fw_size, enum i2400m_bri flags)
{
	int ret = 0;
	struct device *dev = i2400m_dev(i2400m);
	int count = i2400m->bus_bm_retries;	/* max device reboots tolerated */
	const struct i2400m_bcf_hdr *bcf_hdr;
	size_t bcf_size;

	d_fnstart(5, dev, "(i2400m %p bcf %p fw size %zu)\n",
		  i2400m, bcf, fw_size);
	i2400m->boot_mode = 1;
	wmb();		/* Make sure other readers see it */
hw_reboot:
	if (count-- == 0) {
		ret = -ERESTARTSYS;
		dev_err(dev, "device rebooted too many times, aborting\n");
		goto error_too_many_reboots;
	}
	if (flags & I2400M_BRI_MAC_REINIT) {
		ret = i2400m_bootrom_init(i2400m, flags);
		if (ret < 0) {
			dev_err(dev, "bootrom init failed: %d\n", ret);
			goto error_bootrom_init;
		}
	}
	/* if we loop, always reinitialize the boot rom on the retry */
	flags |= I2400M_BRI_MAC_REINIT;

	/*
	 * Initialize the download, push the bytes to the device and
	 * then jump to the new firmware. Note @ret is passed with the
	 * offset of the jump instruction to _dnload_finalize()
	 *
	 * Note we need to use the BCF header in the firmware image
	 * that matches the barker that the device sent when it
	 * rebooted, so it has to be passed along.
	 */
	ret = -EBADF;
	bcf_hdr = i2400m_bcf_hdr_find(i2400m);
	if (bcf_hdr == NULL)
		goto error_bcf_hdr_find;

	ret = i2400m_dnload_init(i2400m, bcf_hdr);
	if (ret == -ERESTARTSYS)
		goto error_dev_rebooted;
	if (ret < 0)
		goto error_dnload_init;

	/*
	 * bcf_size refers to one header size plus the fw sections size
	 * indicated by the header,ie. if there are other extended headers
	 * at the tail, they are not counted
	 */
	bcf_size = sizeof(u32) * le32_to_cpu(bcf_hdr->size);

	ret = i2400m_dnload_bcf(i2400m, bcf, bcf_size);
	if (ret == -ERESTARTSYS)
		goto error_dev_rebooted;
	if (ret < 0) {
		dev_err(dev, "fw %s: download failed: %d\n",
			i2400m->fw_name, ret);
		goto error_dnload_bcf;
	}

	/* a non-negative @ret here is the jump instruction offset */
	ret = i2400m_dnload_finalize(i2400m, bcf_hdr, bcf, ret);
	if (ret == -ERESTARTSYS)
		goto error_dev_rebooted;
	if (ret < 0) {
		dev_err(dev, "fw %s: "
			"download finalization failed: %d\n",
			i2400m->fw_name, ret);
		goto error_dnload_finalize;
	}

	d_printf(2, dev, "fw %s successfully uploaded\n",
		 i2400m->fw_name);
	i2400m->boot_mode = 0;
	wmb();		/* Make sure i2400m_msg_to_dev() sees boot_mode */
error_dnload_finalize:
error_dnload_bcf:
error_dnload_init:
error_bcf_hdr_find:
error_bootrom_init:
error_too_many_reboots:
	d_fnend(5, dev, "(i2400m %p bcf %p size %zu) = %d\n",
		i2400m, bcf, fw_size, ret);
	return ret;

error_dev_rebooted:
	dev_err(dev, "device rebooted, %d tries left\n", count);
	/* we got the notification already, no need to wait for it again */
	flags |= I2400M_BRI_SOFT;
	goto hw_reboot;
}
/*
 * Verify one firmware image and, if usable, download it to the device.
 *
 * Always frees the header index built by i2400m_fw_check(), whether or
 * not the download succeeded.
 */
static
int i2400m_fw_bootstrap(struct i2400m *i2400m, const struct firmware *fw,
			enum i2400m_bri flags)
{
	struct device *dev = i2400m_dev(i2400m);
	const struct i2400m_bcf_hdr *bcf = (const void *) fw->data;
	int result;

	d_fnstart(5, dev, "(i2400m %p)\n", i2400m);
	result = i2400m_fw_check(i2400m, bcf, fw->size);
	if (result >= 0)
		result = i2400m_fw_dnload(i2400m, bcf, fw->size, flags);
	if (result < 0)
		dev_err(dev, "%s: cannot use: %d, skipping\n",
			i2400m->fw_name, result);
	kfree(i2400m->fw_hdrs);
	i2400m->fw_hdrs = NULL;
	d_fnend(5, dev, "(i2400m %p) = %d\n", i2400m, result);
	return result;
}
/* Refcounted container for firmware data */
struct i2400m_fw {
	struct kref kref;		/* refcount; released via i2400m_fw_destroy() */
	const struct firmware *fw;	/* firmware blob owned by this container */
};
/* kref release callback: drop the firmware and free the container. */
static
void i2400m_fw_destroy(struct kref *kref)
{
	struct i2400m_fw *fw_ref = container_of(kref, struct i2400m_fw, kref);

	release_firmware(fw_ref->fw);
	kfree(fw_ref);
}
static
struct i2400m_fw *i2400m_fw_get(struct i2400m_fw *i2400m_fw)
{
if (i2400m_fw != NULL && i2400m_fw != (void *) ~0)
kref_get(&i2400m_fw->kref);
return i2400m_fw;
}
static
void i2400m_fw_put(struct i2400m_fw *i2400m_fw)
{
	/* Drop a reference; i2400m_fw_destroy() runs when the count hits
	 * zero. Callers must not pass the NULL or ~0 sentinels. */
	kref_put(&i2400m_fw->kref, i2400m_fw_destroy);
}
/**
* i2400m_dev_bootstrap - Bring the device to a known state and upload firmware
*
* @i2400m: device descriptor
*
* Returns: >= 0 if ok, < 0 errno code on error.
*
* This sets up the firmware upload environment, loads the firmware
* file from disk, verifies and then calls the firmware upload process
* per se.
*
* Can be called either from probe, or after a warm reset. Can not be
* called from within an interrupt. All the flow in this code is
* single-threaded; all I/Os are synchronous.
*/
int i2400m_dev_bootstrap(struct i2400m *i2400m, enum i2400m_bri flags)
{
	int ret, itr;
	struct device *dev = i2400m_dev(i2400m);
	struct i2400m_fw *i2400m_fw;
	const struct i2400m_bcf_hdr *bcf;	/* Firmware data */
	const struct firmware *fw;
	const char *fw_name;

	d_fnstart(5, dev, "(i2400m %p)\n", i2400m);

	ret = -ENODEV;
	/* take a reference on the cached fw under the lock protecting
	 * the fw_cached slot */
	spin_lock(&i2400m->rx_lock);
	i2400m_fw = i2400m_fw_get(i2400m->fw_cached);
	spin_unlock(&i2400m->rx_lock);
	if (i2400m_fw == (void *) ~0) {
		/* ~0 sentinel: caching failed, loading not possible now */
		dev_err(dev, "can't load firmware now!");
		goto out;
	} else if (i2400m_fw != NULL) {
		dev_info(dev, "firmware %s: loading from cache\n",
			 i2400m->fw_name);
		ret = i2400m_fw_bootstrap(i2400m, i2400m_fw->fw, flags);
		i2400m_fw_put(i2400m_fw);
		goto out;
	}

	/* Load firmware files to memory. */
	/* NOTE(review): @bcf is initialized in the loop header but never
	 * used afterwards -- looks vestigial. */
	for (itr = 0, bcf = NULL, ret = -ENOENT; ; itr++) {
		fw_name = i2400m->bus_fw_names[itr];	/* NULL-terminated list */
		if (fw_name == NULL) {
			dev_err(dev, "Could not find a usable firmware image\n");
			break;
		}
		d_printf(1, dev, "trying firmware %s (%d)\n", fw_name, itr);
		ret = request_firmware(&fw, fw_name, dev);
		if (ret < 0) {
			dev_err(dev, "fw %s: cannot load file: %d\n",
				fw_name, ret);
			continue;	/* try the next name in the list */
		}
		i2400m->fw_name = fw_name;
		ret = i2400m_fw_bootstrap(i2400m, fw, flags);
		release_firmware(fw);
		if (ret >= 0)	/* firmware loaded successfully */
			break;
		i2400m->fw_name = NULL;
	}
out:
	d_fnend(5, dev, "(i2400m %p) = %d\n", i2400m, ret);
	return ret;
}
EXPORT_SYMBOL_GPL(i2400m_dev_bootstrap);
/*
 * Cache the current firmware image in memory so a later bootstrap can
 * reload it without touching the filesystem. On failure, the cache
 * slot is set to the ~0 sentinel, which i2400m_dev_bootstrap() takes
 * to mean "can't load firmware now".
 */
void i2400m_fw_cache(struct i2400m *i2400m)
{
	int result;
	struct i2400m_fw *i2400m_fw;
	struct device *dev = i2400m_dev(i2400m);

	/* if there is anything there, free it -- now, this'd be weird */
	spin_lock(&i2400m->rx_lock);
	i2400m_fw = i2400m->fw_cached;
	spin_unlock(&i2400m->rx_lock);
	if (i2400m_fw != NULL && i2400m_fw != (void *) ~0) {
		i2400m_fw_put(i2400m_fw);
		WARN(1, "%s:%u: still cached fw still present?\n",
		     __func__, __LINE__);
	}

	if (i2400m->fw_name == NULL) {
		dev_err(dev, "firmware n/a: can't cache\n");
		i2400m_fw = (void *) ~0;	/* sentinel: caching impossible */
		goto out;
	}

	i2400m_fw = kzalloc(sizeof(*i2400m_fw), GFP_ATOMIC);
	if (i2400m_fw == NULL)
		goto out;	/* slot becomes NULL: nothing cached */
	kref_init(&i2400m_fw->kref);
	result = request_firmware(&i2400m_fw->fw, i2400m->fw_name, dev);
	if (result < 0) {
		dev_err(dev, "firmware %s: failed to cache: %d\n",
			i2400m->fw_name, result);
		kfree(i2400m_fw);
		i2400m_fw = (void *) ~0;	/* sentinel: caching failed */
	} else
		dev_info(dev, "firmware %s: cached\n", i2400m->fw_name);
out:
	/* publish the result (real container, NULL or sentinel) */
	spin_lock(&i2400m->rx_lock);
	i2400m->fw_cached = i2400m_fw;
	spin_unlock(&i2400m->rx_lock);
}
/* Detach and release the cached firmware image, if any. */
void i2400m_fw_uncache(struct i2400m *i2400m)
{
	struct i2400m_fw *fw_ref;

	/* swap the cache slot to NULL under the lock, drop the
	 * reference outside of it */
	spin_lock(&i2400m->rx_lock);
	fw_ref = i2400m->fw_cached;
	i2400m->fw_cached = NULL;
	spin_unlock(&i2400m->rx_lock);

	if (fw_ref != NULL && fw_ref != (void *) ~0)
		i2400m_fw_put(fw_ref);
}
/*
* Glue Code for SSE2 assembler versions of Serpent Cipher
*
* Copyright (c) 2011 Jussi Kivilinna <jussi.kivilinna@mbnet.fi>
*
* Glue code based on aesni-intel_glue.c by:
* Copyright (C) 2008, Intel Corp.
* Author: Huang Ying <ying.huang@intel.com>
*
* CBC & ECB parts based on code (crypto/cbc.c,ecb.c) by:
* Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
* CTR part based on code (crypto/ctr.c) by:
* (C) Copyright IBM Corp. 2007 - Joy Latten <latten@us.ibm.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
* USA
*
*/
#include <linux/module.h>
#include <linux/hardirq.h>
#include <linux/types.h>
#include <linux/crypto.h>
#include <linux/err.h>
#include <crypto/algapi.h>
#include <crypto/serpent.h>
#include <crypto/cryptd.h>
#include <crypto/b128ops.h>
#include <crypto/ctr.h>
#include <crypto/lrw.h>
#include <crypto/xts.h>
#include <asm/i387.h>
#include <asm/serpent.h>
#include <crypto/scatterwalk.h>
#include <linux/workqueue.h>
#include <linux/spinlock.h>
/* Per-tfm context for the async (cryptd-backed) serpent algorithms. */
struct async_serpent_ctx {
	struct cryptd_ablkcipher *cryptd_tfm;	/* backing cryptd transform */
};
/*
 * Lazily take the FPU for SSE2 processing. Returns the new
 * "FPU held" state; a no-op when it is already held, and chunks
 * smaller than one parallel batch never take it (not worth the cost).
 */
static inline bool serpent_fpu_begin(bool fpu_enabled, unsigned int nbytes)
{
	if (fpu_enabled)
		return true;

	if (nbytes >= SERPENT_BLOCK_SIZE * SERPENT_PARALLEL_BLOCKS) {
		kernel_fpu_begin();
		return true;
	}
	return false;
}
static inline void serpent_fpu_end(bool fpu_enabled)
{
	/* Counterpart of serpent_fpu_begin(): release the FPU only if it
	 * was actually taken. */
	if (fpu_enabled)
		kernel_fpu_end();
}
/*
 * ECB encrypt or decrypt (selected by @enc) the data described by
 * @walk: full SERPENT_PARALLEL_BLOCKS batches go through the SSE2
 * x-way path, the tail is handled one block at a time.
 */
static int ecb_crypt(struct blkcipher_desc *desc, struct blkcipher_walk *walk,
		     bool enc)
{
	bool fpu_enabled = false;
	struct serpent_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
	const unsigned int bsize = SERPENT_BLOCK_SIZE;
	unsigned int nbytes;
	int err;

	err = blkcipher_walk_virt(desc, walk);
	/* must not sleep while the FPU may be held */
	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

	while ((nbytes = walk->nbytes)) {
		u8 *wsrc = walk->src.virt.addr;
		u8 *wdst = walk->dst.virt.addr;

		fpu_enabled = serpent_fpu_begin(fpu_enabled, nbytes);

		/* Process multi-block batch */
		if (nbytes >= bsize * SERPENT_PARALLEL_BLOCKS) {
			do {
				if (enc)
					serpent_enc_blk_xway(ctx, wdst, wsrc);
				else
					serpent_dec_blk_xway(ctx, wdst, wsrc);

				wsrc += bsize * SERPENT_PARALLEL_BLOCKS;
				wdst += bsize * SERPENT_PARALLEL_BLOCKS;
				nbytes -= bsize * SERPENT_PARALLEL_BLOCKS;
			} while (nbytes >= bsize * SERPENT_PARALLEL_BLOCKS);

			if (nbytes < bsize)
				goto done;
		}

		/* Handle leftovers */
		do {
			if (enc)
				__serpent_encrypt(ctx, wdst, wsrc);
			else
				__serpent_decrypt(ctx, wdst, wsrc);

			wsrc += bsize;
			wdst += bsize;
			nbytes -= bsize;
		} while (nbytes >= bsize);

done:
		err = blkcipher_walk_done(desc, walk, nbytes);
	}

	serpent_fpu_end(fpu_enabled);
	return err;
}
static int ecb_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
struct scatterlist *src, unsigned int nbytes)
{
struct blkcipher_walk walk;
blkcipher_walk_init(&walk, dst, src, nbytes);
return ecb_crypt(desc, &walk, true);
}
static int ecb_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
struct scatterlist *src, unsigned int nbytes)
{
struct blkcipher_walk walk;
blkcipher_walk_init(&walk, dst, src, nbytes);
return ecb_crypt(desc, &walk, false);
}
/*
 * CBC-encrypt one contiguous walk segment, block by block (CBC
 * encryption is inherently serial: each ciphertext feeds the next
 * block). Returns the number of unprocessed tail bytes (< one block).
 */
static unsigned int __cbc_encrypt(struct blkcipher_desc *desc,
				  struct blkcipher_walk *walk)
{
	struct serpent_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
	const unsigned int bsize = SERPENT_BLOCK_SIZE;
	unsigned int nbytes = walk->nbytes;
	u128 *src = (u128 *)walk->src.virt.addr;
	u128 *dst = (u128 *)walk->dst.virt.addr;
	u128 *iv = (u128 *)walk->iv;

	do {
		u128_xor(dst, src, iv);		/* P XOR IV (or previous C) */
		__serpent_encrypt(ctx, (u8 *)dst, (u8 *)dst);
		iv = dst;	/* chain: this ciphertext is the next IV */

		src += 1;
		dst += 1;
		nbytes -= bsize;
	} while (nbytes >= bsize);

	/* NOTE(review): the IV for the next segment is produced by
	 * XORing the old IV with the last ciphertext block instead of
	 * copying the last ciphertext block; confirm this matches the
	 * intended CBC chaining (cf. crypto/cbc.c). */
	u128_xor((u128 *)walk->iv, (u128 *)walk->iv, iv);
	return nbytes;
}
static int cbc_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
struct scatterlist *src, unsigned int nbytes)
{
struct blkcipher_walk walk;
int err;
blkcipher_walk_init(&walk, dst, src, nbytes);
err = blkcipher_walk_virt(desc, &walk);
while ((nbytes = walk.nbytes)) {
nbytes = __cbc_encrypt(desc, &walk);
err = blkcipher_walk_done(desc, &walk, nbytes);
}
return err;
}
/*
 * CBC-decrypt one contiguous walk segment.
 *
 * Works back-to-front so that in-place operation (dst == src) never
 * overwrites a ciphertext block while it is still needed as the IV of
 * its successor. Returns the number of unprocessed tail bytes.
 */
static unsigned int __cbc_decrypt(struct blkcipher_desc *desc,
				  struct blkcipher_walk *walk)
{
	struct serpent_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
	const unsigned int bsize = SERPENT_BLOCK_SIZE;
	unsigned int nbytes = walk->nbytes;
	u128 *src = (u128 *)walk->src.virt.addr;
	u128 *dst = (u128 *)walk->dst.virt.addr;
	u128 ivs[SERPENT_PARALLEL_BLOCKS - 1];	/* saved ciphertexts-as-IVs */
	u128 last_iv;
	int i;

	/* Start of the last block. */
	src += nbytes / bsize - 1;
	dst += nbytes / bsize - 1;

	last_iv = *src;		/* becomes walk->iv for the next segment */

	/* Process multi-block batch */
	if (nbytes >= bsize * SERPENT_PARALLEL_BLOCKS) {
		do {
			nbytes -= bsize * (SERPENT_PARALLEL_BLOCKS - 1);
			src -= SERPENT_PARALLEL_BLOCKS - 1;
			dst -= SERPENT_PARALLEL_BLOCKS - 1;

			/* save the ciphertexts the in-place decrypt below
			 * would clobber */
			for (i = 0; i < SERPENT_PARALLEL_BLOCKS - 1; i++)
				ivs[i] = src[i];

			serpent_dec_blk_xway(ctx, (u8 *)dst, (u8 *)src);

			for (i = 0; i < SERPENT_PARALLEL_BLOCKS - 1; i++)
				u128_xor(dst + (i + 1), dst + (i + 1), ivs + i);

			nbytes -= bsize;
			if (nbytes < bsize)
				goto done;

			u128_xor(dst, dst, src - 1);
			src -= 1;
			dst -= 1;
		} while (nbytes >= bsize * SERPENT_PARALLEL_BLOCKS);

		if (nbytes < bsize)
			goto done;
	}

	/* Handle leftovers */
	for (;;) {
		__serpent_decrypt(ctx, (u8 *)dst, (u8 *)src);

		nbytes -= bsize;
		if (nbytes < bsize)
			break;

		u128_xor(dst, dst, src - 1);
		src -= 1;
		dst -= 1;
	}

done:
	/* the segment's first block is XORed with the incoming IV */
	u128_xor(dst, dst, (u128 *)walk->iv);
	*(u128 *)walk->iv = last_iv;

	return nbytes;
}
static int cbc_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
struct scatterlist *src, unsigned int nbytes)
{
bool fpu_enabled = false;
struct blkcipher_walk walk;
int err;
blkcipher_walk_init(&walk, dst, src, nbytes);
err = blkcipher_walk_virt(desc, &walk);
desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
while ((nbytes = walk.nbytes)) {
fpu_enabled = serpent_fpu_begin(fpu_enabled, nbytes);
nbytes = __cbc_decrypt(desc, &walk);
err = blkcipher_walk_done(desc, &walk, nbytes);
}
serpent_fpu_end(fpu_enabled);
return err;
}
/* Store a native-endian 128-bit value as big-endian. */
static inline void u128_to_be128(be128 *dst, const u128 *src)
{
	dst->a = cpu_to_be64(src->a);
	dst->b = cpu_to_be64(src->b);
}
/* Load a big-endian 128-bit value into native byte order. */
static inline void be128_to_u128(u128 *dst, const be128 *src)
{
	dst->a = be64_to_cpu(src->a);
	dst->b = be64_to_cpu(src->b);
}
/* 128-bit increment: carry from the low half (b) into the high (a). */
static inline void u128_inc(u128 *i)
{
	if (++i->b == 0)
		i->a++;
}
/*
 * Handle the final partial block in CTR mode: encrypt the counter into
 * a keystream block, XOR only walk->nbytes of it into the output, then
 * bump the counter for the next request.
 */
static void ctr_crypt_final(struct blkcipher_desc *desc,
			    struct blkcipher_walk *walk)
{
	struct serpent_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
	u8 *ctrblk = walk->iv;
	u8 keystream[SERPENT_BLOCK_SIZE];
	u8 *src = walk->src.virt.addr;
	u8 *dst = walk->dst.virt.addr;
	unsigned int nbytes = walk->nbytes;	/* < SERPENT_BLOCK_SIZE here */

	__serpent_encrypt(ctx, keystream, ctrblk);
	crypto_xor(keystream, src, nbytes);
	memcpy(dst, keystream, nbytes);

	crypto_inc(ctrblk, SERPENT_BLOCK_SIZE);
}
/*
 * CTR encrypt/decrypt one contiguous walk segment (the operations are
 * identical in CTR mode). The counter is kept in native byte order in
 * @ctrblk and converted to big-endian per block; the advanced counter
 * is written back to walk->iv at the end. Returns the unprocessed
 * tail byte count.
 */
static unsigned int __ctr_crypt(struct blkcipher_desc *desc,
				struct blkcipher_walk *walk)
{
	struct serpent_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
	const unsigned int bsize = SERPENT_BLOCK_SIZE;
	unsigned int nbytes = walk->nbytes;
	u128 *src = (u128 *)walk->src.virt.addr;
	u128 *dst = (u128 *)walk->dst.virt.addr;
	u128 ctrblk;
	be128 ctrblocks[SERPENT_PARALLEL_BLOCKS];
	int i;

	be128_to_u128(&ctrblk, (be128 *)walk->iv);

	/* Process multi-block batch */
	if (nbytes >= bsize * SERPENT_PARALLEL_BLOCKS) {
		do {
			/* create ctrblks for parallel encrypt */
			for (i = 0; i < SERPENT_PARALLEL_BLOCKS; i++) {
				if (dst != src)	/* not in place: copy data */
					dst[i] = src[i];

				u128_to_be128(&ctrblocks[i], &ctrblk);
				u128_inc(&ctrblk);
			}
			/* presumably encrypts the counters and XORs them
			 * into dst (per the _xor suffix) */
			serpent_enc_blk_xway_xor(ctx, (u8 *)dst,
						 (u8 *)ctrblocks);

			src += SERPENT_PARALLEL_BLOCKS;
			dst += SERPENT_PARALLEL_BLOCKS;
			nbytes -= bsize * SERPENT_PARALLEL_BLOCKS;
		} while (nbytes >= bsize * SERPENT_PARALLEL_BLOCKS);

		if (nbytes < bsize)
			goto done;
	}

	/* Handle leftovers */
	do {
		if (dst != src)
			*dst = *src;

		u128_to_be128(&ctrblocks[0], &ctrblk);
		u128_inc(&ctrblk);

		__serpent_encrypt(ctx, (u8 *)ctrblocks, (u8 *)ctrblocks);
		u128_xor(dst, dst, (u128 *)ctrblocks);

		src += 1;
		dst += 1;
		nbytes -= bsize;
	} while (nbytes >= bsize);

done:
	/* store the advanced counter back as the IV */
	u128_to_be128((be128 *)walk->iv, &ctrblk);
	return nbytes;
}
/*
 * CTR entry point: walk in at-least-block-sized steps, process full
 * blocks via __ctr_crypt() (with lazy FPU), then handle the final
 * partial block, if any, with ctr_crypt_final().
 */
static int ctr_crypt(struct blkcipher_desc *desc, struct scatterlist *dst,
		     struct scatterlist *src, unsigned int nbytes)
{
	bool fpu_enabled = false;
	struct blkcipher_walk walk;
	int err;

	blkcipher_walk_init(&walk, dst, src, nbytes);
	err = blkcipher_walk_virt_block(desc, &walk, SERPENT_BLOCK_SIZE);
	/* must not sleep while the FPU may be held */
	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

	while ((nbytes = walk.nbytes) >= SERPENT_BLOCK_SIZE) {
		fpu_enabled = serpent_fpu_begin(fpu_enabled, nbytes);
		nbytes = __ctr_crypt(desc, &walk);
		err = blkcipher_walk_done(desc, &walk, nbytes);
	}

	serpent_fpu_end(fpu_enabled);

	/* final partial block, if any */
	if (walk.nbytes) {
		ctr_crypt_final(desc, &walk);
		err = blkcipher_walk_done(desc, &walk, 0);
	}

	return err;
}
/* State threaded through the LRW/XTS crypt callbacks. */
struct crypt_priv {
	struct serpent_ctx *ctx;	/* serpent key schedule */
	bool fpu_enabled;		/* set lazily by serpent_fpu_begin() */
};
/* LRW/XTS encrypt callback: full parallel batches take the SSE2 x-way
 * path, anything smaller is encrypted block by block (in place). */
static void encrypt_callback(void *priv, u8 *srcdst, unsigned int nbytes)
{
	struct crypt_priv *ctx = priv;
	unsigned int blk;

	ctx->fpu_enabled = serpent_fpu_begin(ctx->fpu_enabled, nbytes);

	if (nbytes == SERPENT_BLOCK_SIZE * SERPENT_PARALLEL_BLOCKS) {
		serpent_enc_blk_xway(ctx->ctx, srcdst, srcdst);
		return;
	}

	for (blk = 0; blk < nbytes / SERPENT_BLOCK_SIZE; blk++)
		__serpent_encrypt(ctx->ctx,
				  srcdst + blk * SERPENT_BLOCK_SIZE,
				  srcdst + blk * SERPENT_BLOCK_SIZE);
}
/* LRW/XTS decrypt callback: mirror of encrypt_callback(). */
static void decrypt_callback(void *priv, u8 *srcdst, unsigned int nbytes)
{
	struct crypt_priv *ctx = priv;
	unsigned int blk;

	ctx->fpu_enabled = serpent_fpu_begin(ctx->fpu_enabled, nbytes);

	if (nbytes == SERPENT_BLOCK_SIZE * SERPENT_PARALLEL_BLOCKS) {
		serpent_dec_blk_xway(ctx->ctx, srcdst, srcdst);
		return;
	}

	for (blk = 0; blk < nbytes / SERPENT_BLOCK_SIZE; blk++)
		__serpent_decrypt(ctx->ctx,
				  srcdst + blk * SERPENT_BLOCK_SIZE,
				  srcdst + blk * SERPENT_BLOCK_SIZE);
}
/* tfm context for lrw(serpent): LRW tweak table plus the cipher key. */
struct serpent_lrw_ctx {
	struct lrw_table_ctx lrw_table;	/* precomputed LRW tweak table */
	struct serpent_ctx serpent_ctx;	/* serpent key schedule */
};
/*
 * LRW setkey: the last SERPENT_BLOCK_SIZE bytes of @key are the LRW
 * tweak key, the leading bytes are the serpent cipher key.
 */
static int lrw_serpent_setkey(struct crypto_tfm *tfm, const u8 *key,
			      unsigned int keylen)
{
	struct serpent_lrw_ctx *ctx = crypto_tfm_ctx(tfm);
	unsigned int cipher_keylen = keylen - SERPENT_BLOCK_SIZE;
	int err;

	err = __serpent_setkey(&ctx->serpent_ctx, key, cipher_keylen);
	if (err)
		return err;

	return lrw_init_table(&ctx->lrw_table, key + cipher_keylen);
}
/*
 * LRW encrypt: delegate block handling to the generic lrw_crypt()
 * helper, with encrypt_callback() doing the serpent work (and taking
 * the FPU lazily via crypt_ctx.fpu_enabled).
 */
static int lrw_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
		       struct scatterlist *src, unsigned int nbytes)
{
	struct serpent_lrw_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
	be128 buf[SERPENT_PARALLEL_BLOCKS];	/* scratch for lrw_crypt() */
	struct crypt_priv crypt_ctx = {
		.ctx = &ctx->serpent_ctx,
		.fpu_enabled = false,
	};
	struct lrw_crypt_req req = {
		.tbuf = buf,
		.tbuflen = sizeof(buf),

		.table_ctx = &ctx->lrw_table,
		.crypt_ctx = &crypt_ctx,
		.crypt_fn = encrypt_callback,
	};
	int ret;

	/* callback may take the FPU: no sleeping allowed then */
	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
	ret = lrw_crypt(desc, dst, src, nbytes, &req);
	serpent_fpu_end(crypt_ctx.fpu_enabled);

	return ret;
}
/*
 * LRW decryption entry point: identical structure to lrw_encrypt() but
 * with decrypt_callback() doing the block work.
 */
static int lrw_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
		       struct scatterlist *src, unsigned int nbytes)
{
	struct serpent_lrw_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
	be128 buf[SERPENT_PARALLEL_BLOCKS];	/* on-stack tweak buffer */
	struct crypt_priv crypt_ctx = {
		.ctx = &ctx->serpent_ctx,
		.fpu_enabled = false,
	};
	struct lrw_crypt_req req = {
		.tbuf = buf,
		.tbuflen = sizeof(buf),
		.table_ctx = &ctx->lrw_table,
		.crypt_ctx = &crypt_ctx,
		.crypt_fn = decrypt_callback,
	};
	int ret;

	/* FPU state may be held across callbacks, so sleeping is forbidden. */
	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
	ret = lrw_crypt(desc, dst, src, nbytes, &req);
	/* Release the FPU if any callback claimed it. */
	serpent_fpu_end(crypt_ctx.fpu_enabled);

	return ret;
}
/* Tfm teardown for the LRW mode: free the per-key multiplication table. */
static void lrw_exit_tfm(struct crypto_tfm *tfm)
{
	struct serpent_lrw_ctx *ctx = crypto_tfm_ctx(tfm);

	lrw_free_table(&ctx->lrw_table);
}
/* XTS tfm context: independent key schedules for tweak and data halves. */
struct serpent_xts_ctx {
	struct serpent_ctx tweak_ctx;
	struct serpent_ctx crypt_ctx;
};
/*
 * XTS setkey: the key is two equal-size serpent keys back to back,
 * one for data encryption and one for tweak generation.
 */
static int xts_serpent_setkey(struct crypto_tfm *tfm, const u8 *key,
			      unsigned int keylen)
{
	struct serpent_xts_ctx *ctx = crypto_tfm_ctx(tfm);
	u32 *flags = &tfm->crt_flags;
	int err;

	/* key consists of keys of equal size concatenated, therefore
	 * the length must be even
	 */
	if (keylen % 2) {
		*flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
		return -EINVAL;
	}

	/* first half of xts-key is for crypt */
	err = __serpent_setkey(&ctx->crypt_ctx, key, keylen / 2);
	if (err)
		return err;

	/* second half of xts-key is for tweak */
	return __serpent_setkey(&ctx->tweak_ctx, key + keylen / 2, keylen / 2);
}
/*
 * XTS encryption entry point: xts_crypt() walks the scatterlists and
 * generates tweaks (always via __serpent_encrypt on the tweak key),
 * calling encrypt_callback() for the data blocks.
 */
static int xts_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
		       struct scatterlist *src, unsigned int nbytes)
{
	struct serpent_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
	be128 buf[SERPENT_PARALLEL_BLOCKS];	/* on-stack tweak buffer */
	struct crypt_priv crypt_ctx = {
		.ctx = &ctx->crypt_ctx,
		.fpu_enabled = false,
	};
	struct xts_crypt_req req = {
		.tbuf = buf,
		.tbuflen = sizeof(buf),
		.tweak_ctx = &ctx->tweak_ctx,
		.tweak_fn = XTS_TWEAK_CAST(__serpent_encrypt),
		.crypt_ctx = &crypt_ctx,
		.crypt_fn = encrypt_callback,
	};
	int ret;

	/* FPU state may be held across callbacks, so sleeping is forbidden. */
	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
	ret = xts_crypt(desc, dst, src, nbytes, &req);
	/* Release the FPU if any callback claimed it. */
	serpent_fpu_end(crypt_ctx.fpu_enabled);

	return ret;
}
/*
 * XTS decryption entry point: tweaks are still generated with
 * __serpent_encrypt (per the XTS definition); only the data blocks go
 * through decrypt_callback().
 */
static int xts_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
		       struct scatterlist *src, unsigned int nbytes)
{
	struct serpent_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
	be128 buf[SERPENT_PARALLEL_BLOCKS];	/* on-stack tweak buffer */
	struct crypt_priv crypt_ctx = {
		.ctx = &ctx->crypt_ctx,
		.fpu_enabled = false,
	};
	struct xts_crypt_req req = {
		.tbuf = buf,
		.tbuflen = sizeof(buf),
		.tweak_ctx = &ctx->tweak_ctx,
		.tweak_fn = XTS_TWEAK_CAST(__serpent_encrypt),
		.crypt_ctx = &crypt_ctx,
		.crypt_fn = decrypt_callback,
	};
	int ret;

	/* FPU state may be held across callbacks, so sleeping is forbidden. */
	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
	ret = xts_crypt(desc, dst, src, nbytes, &req);
	/* Release the FPU if any callback claimed it. */
	serpent_fpu_end(crypt_ctx.fpu_enabled);

	return ret;
}
/*
 * Async setkey: forward the key to the cryptd child tfm, mirroring the
 * request flags down and the result flags (e.g. bad key length) back up.
 */
static int ablk_set_key(struct crypto_ablkcipher *tfm, const u8 *key,
			unsigned int key_len)
{
	struct async_serpent_ctx *ctx = crypto_ablkcipher_ctx(tfm);
	struct crypto_ablkcipher *child = &ctx->cryptd_tfm->base;
	int err;

	crypto_ablkcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
	crypto_ablkcipher_set_flags(child, crypto_ablkcipher_get_flags(tfm)
				    & CRYPTO_TFM_REQ_MASK);
	err = crypto_ablkcipher_setkey(child, key, key_len);
	/* Propagate the child's result flags so callers see the real error. */
	crypto_ablkcipher_set_flags(tfm, crypto_ablkcipher_get_flags(child)
				    & CRYPTO_TFM_RES_MASK);
	return err;
}
/*
 * Synchronous encrypt path: run the underlying blkcipher directly in
 * the caller's context (used when the FPU is available, and as the
 * cbc(serpent) .encrypt since CBC encryption is inherently serial).
 */
static int __ablk_encrypt(struct ablkcipher_request *req)
{
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
	struct async_serpent_ctx *ctx = crypto_ablkcipher_ctx(tfm);
	struct blkcipher_desc desc;

	desc.tfm = cryptd_ablkcipher_child(ctx->cryptd_tfm);
	desc.info = req->info;
	desc.flags = 0;

	return crypto_blkcipher_crt(desc.tfm)->encrypt(
		&desc, req->dst, req->src, req->nbytes);
}
/*
 * Async encrypt entry point: run synchronously when the FPU can be used
 * in this context, otherwise defer the request to the cryptd workqueue.
 */
static int ablk_encrypt(struct ablkcipher_request *req)
{
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
	struct async_serpent_ctx *ctx = crypto_ablkcipher_ctx(tfm);
	struct ablkcipher_request *cryptd_req;

	if (irq_fpu_usable())
		return __ablk_encrypt(req);

	/* FPU unavailable (irq/softirq context): hand off to cryptd. */
	cryptd_req = ablkcipher_request_ctx(req);
	memcpy(cryptd_req, req, sizeof(*req));
	ablkcipher_request_set_tfm(cryptd_req, &ctx->cryptd_tfm->base);

	return crypto_ablkcipher_encrypt(cryptd_req);
}
/*
 * Async decrypt entry point: defer to cryptd when the FPU is not usable,
 * otherwise run the child blkcipher's decrypt synchronously.
 */
static int ablk_decrypt(struct ablkcipher_request *req)
{
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
	struct async_serpent_ctx *ctx = crypto_ablkcipher_ctx(tfm);

	if (!irq_fpu_usable()) {
		struct ablkcipher_request *cryptd_req =
			ablkcipher_request_ctx(req);

		memcpy(cryptd_req, req, sizeof(*req));
		ablkcipher_request_set_tfm(cryptd_req, &ctx->cryptd_tfm->base);

		return crypto_ablkcipher_decrypt(cryptd_req);
	} else {
		/* Inline equivalent of __ablk_encrypt(), but for decrypt. */
		struct blkcipher_desc desc;

		desc.tfm = cryptd_ablkcipher_child(ctx->cryptd_tfm);
		desc.info = req->info;
		desc.flags = 0;

		return crypto_blkcipher_crt(desc.tfm)->decrypt(
			&desc, req->dst, req->src, req->nbytes);
	}
}
/* Tfm teardown for the async wrappers: release the cryptd child tfm. */
static void ablk_exit(struct crypto_tfm *tfm)
{
	struct async_serpent_ctx *ctx = crypto_tfm_ctx(tfm);

	cryptd_free_ablkcipher(ctx->cryptd_tfm);
}
/*
 * Tfm init for the async wrappers: allocate a cryptd tfm wrapping the
 * matching internal "__driver-<name>" implementation and size the
 * request context to hold a nested ablkcipher request.
 */
static int ablk_init(struct crypto_tfm *tfm)
{
	struct async_serpent_ctx *ctx = crypto_tfm_ctx(tfm);
	struct cryptd_ablkcipher *cryptd_tfm;
	char drv_name[CRYPTO_MAX_ALG_NAME];

	/* Map e.g. "ecb-serpent-sse2" -> "__driver-ecb-serpent-sse2". */
	snprintf(drv_name, sizeof(drv_name), "__driver-%s",
		 crypto_tfm_alg_driver_name(tfm));

	cryptd_tfm = cryptd_alloc_ablkcipher(drv_name, 0, 0);
	if (IS_ERR(cryptd_tfm))
		return PTR_ERR(cryptd_tfm);

	ctx->cryptd_tfm = cryptd_tfm;
	/* Reserve room for the forwarded request plus the child's context. */
	tfm->crt_ablkcipher.reqsize = sizeof(struct ablkcipher_request) +
		crypto_ablkcipher_reqsize(&cryptd_tfm->base);

	return 0;
}
/*
 * Algorithm table.  Entries [0]-[4] are the internal synchronous
 * "__driver-*" implementations (priority 0, never selected directly);
 * entries [5]-[9] are the user-visible async wrappers that route
 * through cryptd when the FPU is unavailable.
 */
static struct crypto_alg serpent_algs[10] = { {
	/* [0] internal ECB */
	.cra_name = "__ecb-serpent-sse2",
	.cra_driver_name = "__driver-ecb-serpent-sse2",
	.cra_priority = 0,
	.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
	.cra_blocksize = SERPENT_BLOCK_SIZE,
	.cra_ctxsize = sizeof(struct serpent_ctx),
	.cra_alignmask = 0,
	.cra_type = &crypto_blkcipher_type,
	.cra_module = THIS_MODULE,
	.cra_list = LIST_HEAD_INIT(serpent_algs[0].cra_list),
	.cra_u = {
		.blkcipher = {
			.min_keysize = SERPENT_MIN_KEY_SIZE,
			.max_keysize = SERPENT_MAX_KEY_SIZE,
			.setkey = serpent_setkey,
			.encrypt = ecb_encrypt,
			.decrypt = ecb_decrypt,
		},
	},
}, {
	/* [1] internal CBC */
	.cra_name = "__cbc-serpent-sse2",
	.cra_driver_name = "__driver-cbc-serpent-sse2",
	.cra_priority = 0,
	.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
	.cra_blocksize = SERPENT_BLOCK_SIZE,
	.cra_ctxsize = sizeof(struct serpent_ctx),
	.cra_alignmask = 0,
	.cra_type = &crypto_blkcipher_type,
	.cra_module = THIS_MODULE,
	.cra_list = LIST_HEAD_INIT(serpent_algs[1].cra_list),
	.cra_u = {
		.blkcipher = {
			.min_keysize = SERPENT_MIN_KEY_SIZE,
			.max_keysize = SERPENT_MAX_KEY_SIZE,
			.setkey = serpent_setkey,
			.encrypt = cbc_encrypt,
			.decrypt = cbc_decrypt,
		},
	},
}, {
	/* [2] internal CTR (stream mode, blocksize 1) */
	.cra_name = "__ctr-serpent-sse2",
	.cra_driver_name = "__driver-ctr-serpent-sse2",
	.cra_priority = 0,
	.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
	.cra_blocksize = 1,
	.cra_ctxsize = sizeof(struct serpent_ctx),
	.cra_alignmask = 0,
	.cra_type = &crypto_blkcipher_type,
	.cra_module = THIS_MODULE,
	.cra_list = LIST_HEAD_INIT(serpent_algs[2].cra_list),
	.cra_u = {
		.blkcipher = {
			.min_keysize = SERPENT_MIN_KEY_SIZE,
			.max_keysize = SERPENT_MAX_KEY_SIZE,
			.ivsize = SERPENT_BLOCK_SIZE,
			.setkey = serpent_setkey,
			/* CTR is symmetric: same routine both directions. */
			.encrypt = ctr_crypt,
			.decrypt = ctr_crypt,
		},
	},
}, {
	/* [3] internal LRW (key carries an extra tweak block) */
	.cra_name = "__lrw-serpent-sse2",
	.cra_driver_name = "__driver-lrw-serpent-sse2",
	.cra_priority = 0,
	.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
	.cra_blocksize = SERPENT_BLOCK_SIZE,
	.cra_ctxsize = sizeof(struct serpent_lrw_ctx),
	.cra_alignmask = 0,
	.cra_type = &crypto_blkcipher_type,
	.cra_module = THIS_MODULE,
	.cra_list = LIST_HEAD_INIT(serpent_algs[3].cra_list),
	.cra_exit = lrw_exit_tfm,
	.cra_u = {
		.blkcipher = {
			.min_keysize = SERPENT_MIN_KEY_SIZE +
				       SERPENT_BLOCK_SIZE,
			.max_keysize = SERPENT_MAX_KEY_SIZE +
				       SERPENT_BLOCK_SIZE,
			.ivsize = SERPENT_BLOCK_SIZE,
			.setkey = lrw_serpent_setkey,
			.encrypt = lrw_encrypt,
			.decrypt = lrw_decrypt,
		},
	},
}, {
	/* [4] internal XTS (double-length key) */
	.cra_name = "__xts-serpent-sse2",
	.cra_driver_name = "__driver-xts-serpent-sse2",
	.cra_priority = 0,
	.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
	.cra_blocksize = SERPENT_BLOCK_SIZE,
	.cra_ctxsize = sizeof(struct serpent_xts_ctx),
	.cra_alignmask = 0,
	.cra_type = &crypto_blkcipher_type,
	.cra_module = THIS_MODULE,
	.cra_list = LIST_HEAD_INIT(serpent_algs[4].cra_list),
	.cra_u = {
		.blkcipher = {
			.min_keysize = SERPENT_MIN_KEY_SIZE * 2,
			.max_keysize = SERPENT_MAX_KEY_SIZE * 2,
			.ivsize = SERPENT_BLOCK_SIZE,
			.setkey = xts_serpent_setkey,
			.encrypt = xts_encrypt,
			.decrypt = xts_decrypt,
		},
	},
}, {
	/* [5] public async ECB wrapper */
	.cra_name = "ecb(serpent)",
	.cra_driver_name = "ecb-serpent-sse2",
	.cra_priority = 400,
	.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize = SERPENT_BLOCK_SIZE,
	.cra_ctxsize = sizeof(struct async_serpent_ctx),
	.cra_alignmask = 0,
	.cra_type = &crypto_ablkcipher_type,
	.cra_module = THIS_MODULE,
	.cra_list = LIST_HEAD_INIT(serpent_algs[5].cra_list),
	.cra_init = ablk_init,
	.cra_exit = ablk_exit,
	.cra_u = {
		.ablkcipher = {
			.min_keysize = SERPENT_MIN_KEY_SIZE,
			.max_keysize = SERPENT_MAX_KEY_SIZE,
			.setkey = ablk_set_key,
			.encrypt = ablk_encrypt,
			.decrypt = ablk_decrypt,
		},
	},
}, {
	/* [6] public async CBC wrapper */
	.cra_name = "cbc(serpent)",
	.cra_driver_name = "cbc-serpent-sse2",
	.cra_priority = 400,
	.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize = SERPENT_BLOCK_SIZE,
	.cra_ctxsize = sizeof(struct async_serpent_ctx),
	.cra_alignmask = 0,
	.cra_type = &crypto_ablkcipher_type,
	.cra_module = THIS_MODULE,
	.cra_list = LIST_HEAD_INIT(serpent_algs[6].cra_list),
	.cra_init = ablk_init,
	.cra_exit = ablk_exit,
	.cra_u = {
		.ablkcipher = {
			.min_keysize = SERPENT_MIN_KEY_SIZE,
			.max_keysize = SERPENT_MAX_KEY_SIZE,
			.ivsize = SERPENT_BLOCK_SIZE,
			.setkey = ablk_set_key,
			/* CBC encryption is serial, so it is run synchronously. */
			.encrypt = __ablk_encrypt,
			.decrypt = ablk_decrypt,
		},
	},
}, {
	/* [7] public async CTR wrapper */
	.cra_name = "ctr(serpent)",
	.cra_driver_name = "ctr-serpent-sse2",
	.cra_priority = 400,
	.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize = 1,
	.cra_ctxsize = sizeof(struct async_serpent_ctx),
	.cra_alignmask = 0,
	.cra_type = &crypto_ablkcipher_type,
	.cra_module = THIS_MODULE,
	.cra_list = LIST_HEAD_INIT(serpent_algs[7].cra_list),
	.cra_init = ablk_init,
	.cra_exit = ablk_exit,
	.cra_u = {
		.ablkcipher = {
			.min_keysize = SERPENT_MIN_KEY_SIZE,
			.max_keysize = SERPENT_MAX_KEY_SIZE,
			.ivsize = SERPENT_BLOCK_SIZE,
			.setkey = ablk_set_key,
			/* CTR is symmetric: encrypt path serves both. */
			.encrypt = ablk_encrypt,
			.decrypt = ablk_encrypt,
			.geniv = "chainiv",
		},
	},
}, {
	/* [8] public async LRW wrapper */
	.cra_name = "lrw(serpent)",
	.cra_driver_name = "lrw-serpent-sse2",
	.cra_priority = 400,
	.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize = SERPENT_BLOCK_SIZE,
	.cra_ctxsize = sizeof(struct async_serpent_ctx),
	.cra_alignmask = 0,
	.cra_type = &crypto_ablkcipher_type,
	.cra_module = THIS_MODULE,
	.cra_list = LIST_HEAD_INIT(serpent_algs[8].cra_list),
	.cra_init = ablk_init,
	.cra_exit = ablk_exit,
	.cra_u = {
		.ablkcipher = {
			.min_keysize = SERPENT_MIN_KEY_SIZE +
				       SERPENT_BLOCK_SIZE,
			.max_keysize = SERPENT_MAX_KEY_SIZE +
				       SERPENT_BLOCK_SIZE,
			.ivsize = SERPENT_BLOCK_SIZE,
			.setkey = ablk_set_key,
			.encrypt = ablk_encrypt,
			.decrypt = ablk_decrypt,
		},
	},
}, {
	/* [9] public async XTS wrapper */
	.cra_name = "xts(serpent)",
	.cra_driver_name = "xts-serpent-sse2",
	.cra_priority = 400,
	.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize = SERPENT_BLOCK_SIZE,
	.cra_ctxsize = sizeof(struct async_serpent_ctx),
	.cra_alignmask = 0,
	.cra_type = &crypto_ablkcipher_type,
	.cra_module = THIS_MODULE,
	.cra_list = LIST_HEAD_INIT(serpent_algs[9].cra_list),
	.cra_init = ablk_init,
	.cra_exit = ablk_exit,
	.cra_u = {
		.ablkcipher = {
			.min_keysize = SERPENT_MIN_KEY_SIZE * 2,
			.max_keysize = SERPENT_MAX_KEY_SIZE * 2,
			.ivsize = SERPENT_BLOCK_SIZE,
			.setkey = ablk_set_key,
			.encrypt = ablk_encrypt,
			.decrypt = ablk_decrypt,
		},
	},
} };
/* Module init: require SSE2 before registering the whole algorithm set. */
static int __init serpent_sse2_init(void)
{
	if (!cpu_has_xmm2) {
		printk(KERN_INFO "SSE2 instructions are not detected.\n");
		return -ENODEV;
	}

	return crypto_register_algs(serpent_algs, ARRAY_SIZE(serpent_algs));
}
/* Module exit: unregister everything registered at init. */
static void __exit serpent_sse2_exit(void)
{
	crypto_unregister_algs(serpent_algs, ARRAY_SIZE(serpent_algs));
}
/* Module boilerplate: entry points and identification. */
module_init(serpent_sse2_init);
module_exit(serpent_sse2_exit);

MODULE_DESCRIPTION("Serpent Cipher Algorithm, SSE2 optimized");
MODULE_LICENSE("GPL");
MODULE_ALIAS("serpent");
| gpl-2.0 |
crimsonthunder/kernel_samsung_trlte | sound/pci/asihpi/hpimsginit.c | 8328 | 3612 | /******************************************************************************
AudioScience HPI driver
Copyright (C) 1997-2011 AudioScience Inc. <support@audioscience.com>
This program is free software; you can redistribute it and/or modify
it under the terms of version 2 of the GNU General Public License as
published by the Free Software Foundation;
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
Hardware Programming Interface (HPI) Utility functions.
(C) Copyright AudioScience Inc. 2007
*******************************************************************************/
#include "hpi_internal.h"
#include "hpimsginit.h"
/* The actual message size for each object type */
static u16 msg_size[HPI_OBJ_MAXINDEX + 1] = HPI_MESSAGE_SIZE_BY_OBJECT;

/* The actual response size for each object type */
static u16 res_size[HPI_OBJ_MAXINDEX + 1] = HPI_RESPONSE_SIZE_BY_OBJECT;

/* Flag to enable alternate message type for SSX2 bypass. */
/* NOTE(review): nothing in this file sets gwSSX2_bypass; presumably
 * toggled elsewhere in the driver - confirm before relying on it. */
static u16 gwSSX2_bypass;
/** \internal
 * initialize the HPI message structure
 *
 * Zeroes *phm, picks the per-object message size from msg_size[] (or
 * the full struct size for out-of-range objects), and selects the
 * SSX2-bypass message type when gwSSX2_bypass is set.
 */
static void hpi_init_message(struct hpi_message *phm, u16 object,
	u16 function)
{
	memset(phm, 0, sizeof(*phm));
	if ((object > 0) && (object <= HPI_OBJ_MAXINDEX))
		phm->size = msg_size[object];
	else
		phm->size = sizeof(*phm);

	if (gwSSX2_bypass)
		phm->type = HPI_TYPE_SSX2BYPASS_MESSAGE;
	else
		phm->type = HPI_TYPE_REQUEST;

	phm->object = object;
	phm->function = function;
	phm->version = 0;
	phm->adapter_index = HPI_ADAPTER_INDEX_INVALID;
	/* Expect actual adapter index to be set by caller */
}
/** \internal
 * initialize the HPI response structure
 *
 * Zeroes *phr, sizes it from res_size[] (full struct size for
 * out-of-range objects) and pre-loads the given error code.
 */
void hpi_init_response(struct hpi_response *phr, u16 object, u16 function,
	u16 error)
{
	int valid = (object > 0) && (object <= HPI_OBJ_MAXINDEX);

	memset(phr, 0, sizeof(*phr));
	phr->size = valid ? res_size[object] : sizeof(*phr);
	phr->type = HPI_TYPE_RESPONSE;
	phr->object = object;
	phr->function = function;
	phr->version = 0;
	phr->error = error;
	phr->specific_error = 0;
}
/* Initialize a message/response pair for one HPI call. */
void hpi_init_message_response(struct hpi_message *phm,
	struct hpi_response *phr, u16 object, u16 function)
{
	hpi_init_message(phm, object, function);
	/* default error return if the response is
	   not filled in by the callee */
	hpi_init_response(phr, object, function,
		HPI_ERROR_PROCESSING_MESSAGE);
}
/*
 * Initialize a version-1 message header with a caller-supplied size.
 * NOTE(review): for an out-of-range object the header is left all-zero
 * (size 0, type 0) - presumably callers never pass one; confirm.
 */
static void hpi_init_messageV1(struct hpi_message_header *phm, u16 size,
	u16 object, u16 function)
{
	memset(phm, 0, sizeof(*phm));
	if ((object > 0) && (object <= HPI_OBJ_MAXINDEX)) {
		phm->size = size;
		phm->type = HPI_TYPE_REQUEST;
		phm->object = object;
		phm->function = function;
		phm->version = 1;
		/* Expect adapter index to be set by caller */
	}
}
/*
 * Initialize a version-1 response header.  Pre-loaded with
 * HPI_ERROR_PROCESSING_MESSAGE so an unanswered call reads as failed.
 * The object/function arguments are accepted but unused (the v1
 * response header does not carry them).
 */
void hpi_init_responseV1(struct hpi_response_header *phr, u16 size,
	u16 object, u16 function)
{
	memset(phr, 0, sizeof(*phr));
	phr->type = HPI_TYPE_RESPONSE;
	phr->version = 1;
	phr->size = size;
	phr->error = HPI_ERROR_PROCESSING_MESSAGE;
}
/* Initialize a version-1 message/response pair for one HPI call. */
void hpi_init_message_responseV1(struct hpi_message_header *phm, u16 msg_size,
	struct hpi_response_header *phr, u16 res_size, u16 object,
	u16 function)
{
	hpi_init_messageV1(phm, msg_size, object, function);
	hpi_init_responseV1(phr, res_size, object, function);
}
| gpl-2.0 |
kevin0100/android_kernel_qcom_msm8916 | drivers/parport/parport_ax88796.c | 8328 | 9517 | /* linux/drivers/parport/parport_ax88796.c
*
* (c) 2005,2006 Simtec Electronics
* Ben Dooks <ben@simtec.co.uk>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/parport.h>
#include <linux/interrupt.h>
#include <linux/errno.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <asm/io.h>
#include <asm/irq.h>
/* Status port (SPR) bits. */
#define AX_SPR_BUSY (1<<7)
#define AX_SPR_ACK (1<<6)
#define AX_SPR_PE (1<<5)
#define AX_SPR_SLCT (1<<4)
#define AX_SPR_ERR (1<<3)

/* Control port (CPR) bits; nDOE gates the data output drivers. */
#define AX_CPR_nDOE (1<<5)
#define AX_CPR_SLCTIN (1<<3)
#define AX_CPR_nINIT (1<<2)
#define AX_CPR_ATFD (1<<1)
#define AX_CPR_STRB (1<<0)
/* Per-device state, stored as the parport's private_data. */
struct ax_drvdata {
	struct parport *parport;
	struct parport_state suspend;	/* saved CPR across suspend/resume */
	struct device *dev;
	struct resource *io;		/* claimed MMIO region */
	unsigned char irq_enabled;
	void __iomem *base;		/* ioremapped register window */
	void __iomem *spp_data;		/* data port */
	void __iomem *spp_spr;		/* status port */
	void __iomem *spp_cpr;		/* control port */
};
/* Recover the driver data stashed in the parport's private_data. */
static inline struct ax_drvdata *pp_to_drv(struct parport *p)
{
	return p->private_data;
}
/* Read the 8-bit data port. */
static unsigned char
parport_ax88796_read_data(struct parport *p)
{
	struct ax_drvdata *dd = pp_to_drv(p);

	return readb(dd->spp_data);
}
/* Write the 8-bit data port. */
static void
parport_ax88796_write_data(struct parport *p, unsigned char data)
{
	struct ax_drvdata *dd = pp_to_drv(p);

	writeb(data, dd->spp_data);
}
/*
 * Read the control port and translate the hardware CPR bits into
 * PARPORT_CONTROL_* flags.  STROBE, AUTOFD and SELECT are active-low
 * in the register; INIT is active-high.
 */
static unsigned char
parport_ax88796_read_control(struct parport *p)
{
	struct ax_drvdata *dd = pp_to_drv(p);
	unsigned int cpr = readb(dd->spp_cpr);
	unsigned int ret = 0;

	ret |= (cpr & AX_CPR_STRB) ? 0 : PARPORT_CONTROL_STROBE;
	ret |= (cpr & AX_CPR_ATFD) ? 0 : PARPORT_CONTROL_AUTOFD;
	ret |= (cpr & AX_CPR_nINIT) ? PARPORT_CONTROL_INIT : 0;
	ret |= (cpr & AX_CPR_SLCTIN) ? 0 : PARPORT_CONTROL_SELECT;

	return ret;
}
/*
 * Translate PARPORT_CONTROL_* flags into CPR bits and write them,
 * preserving the data-direction bit (nDOE).  Read-back verifies the
 * register accepted the value.
 */
static void
parport_ax88796_write_control(struct parport *p, unsigned char control)
{
	struct ax_drvdata *dd = pp_to_drv(p);
	unsigned int cpr = readb(dd->spp_cpr);

	cpr &= AX_CPR_nDOE;	/* keep only the direction bit */

	if (!(control & PARPORT_CONTROL_STROBE))
		cpr |= AX_CPR_STRB;
	if (!(control & PARPORT_CONTROL_AUTOFD))
		cpr |= AX_CPR_ATFD;
	if (control & PARPORT_CONTROL_INIT)
		cpr |= AX_CPR_nINIT;
	if (!(control & PARPORT_CONTROL_SELECT))
		cpr |= AX_CPR_SLCTIN;

	dev_dbg(dd->dev, "write_control: ctrl=%02x, cpr=%02x\n", control, cpr);
	writeb(cpr, dd->spp_cpr);

	/* Sanity check: the lines should now read back what was set. */
	if (parport_ax88796_read_control(p) != control) {
		dev_err(dd->dev, "write_control: read != set (%02x, %02x)\n",
			parport_ax88796_read_control(p), control);
	}
}
/*
 * Read the status port and translate the hardware SPR bits into
 * PARPORT_STATUS_* flags (all active-high in this register).
 */
static unsigned char
parport_ax88796_read_status(struct parport *p)
{
	struct ax_drvdata *dd = pp_to_drv(p);
	unsigned int status = readb(dd->spp_spr);
	unsigned int ret = 0;

	ret |= (status & AX_SPR_BUSY) ? PARPORT_STATUS_BUSY : 0;
	ret |= (status & AX_SPR_ACK) ? PARPORT_STATUS_ACK : 0;
	ret |= (status & AX_SPR_ERR) ? PARPORT_STATUS_ERROR : 0;
	ret |= (status & AX_SPR_SLCT) ? PARPORT_STATUS_SELECT : 0;
	ret |= (status & AX_SPR_PE) ? PARPORT_STATUS_PAPEROUT : 0;

	return ret;
}
/*
 * Read-modify-write of the control lines: clear the bits in mask,
 * set the bits in val, return the previous value.
 */
static unsigned char
parport_ax88796_frob_control(struct parport *p, unsigned char mask,
			     unsigned char val)
{
	struct ax_drvdata *dd = pp_to_drv(p);
	unsigned char old = parport_ax88796_read_control(p);

	dev_dbg(dd->dev, "frob: mask=%02x, val=%02x, old=%02x\n",
		mask, val, old);

	parport_ax88796_write_control(p, (old & ~mask) | val);
	return old;
}
/*
 * Enable the port interrupt.  irq_enabled guards against unbalanced
 * enable_irq() calls; local irqs are masked to keep check-and-set atomic.
 */
static void
parport_ax88796_enable_irq(struct parport *p)
{
	struct ax_drvdata *dd = pp_to_drv(p);
	unsigned long flags;

	local_irq_save(flags);
	if (!dd->irq_enabled) {
		enable_irq(p->irq);
		dd->irq_enabled = 1;
	}
	local_irq_restore(flags);
}
/* Disable the port interrupt; mirror image of enable_irq above. */
static void
parport_ax88796_disable_irq(struct parport *p)
{
	struct ax_drvdata *dd = pp_to_drv(p);
	unsigned long flags;

	local_irq_save(flags);
	if (dd->irq_enabled) {
		disable_irq(p->irq);
		dd->irq_enabled = 0;
	}
	local_irq_restore(flags);
}
/* Switch the data port to output: clear nDOE to enable the drivers. */
static void
parport_ax88796_data_forward(struct parport *p)
{
	struct ax_drvdata *dd = pp_to_drv(p);
	void __iomem *cpr = dd->spp_cpr;

	writeb((readb(cpr) & ~AX_CPR_nDOE), cpr);
}
/* Switch the data port to input: set nDOE to tristate the drivers. */
static void
parport_ax88796_data_reverse(struct parport *p)
{
	struct ax_drvdata *dd = pp_to_drv(p);
	void __iomem *cpr = dd->spp_cpr;

	writeb(readb(cpr) | AX_CPR_nDOE, cpr);
}
/* Initialize a pardevice's saved state from the current control register. */
static void
parport_ax88796_init_state(struct pardevice *d, struct parport_state *s)
{
	struct ax_drvdata *dd = pp_to_drv(d->port);

	memset(s, 0, sizeof(struct parport_state));
	dev_dbg(dd->dev, "init_state: %p: state=%p\n", d, s);
	s->u.ax88796.cpr = readb(dd->spp_cpr);
}
/* Snapshot the control register into the given state. */
static void
parport_ax88796_save_state(struct parport *p, struct parport_state *s)
{
	struct ax_drvdata *dd = pp_to_drv(p);

	dev_dbg(dd->dev, "save_state: %p: state=%p\n", p, s);
	s->u.ax88796.cpr = readb(dd->spp_cpr);
}
/* Write a previously-saved control register value back to the hardware. */
static void
parport_ax88796_restore_state(struct parport *p, struct parport_state *s)
{
	struct ax_drvdata *dd = pp_to_drv(p);

	dev_dbg(dd->dev, "restore_state: %p: state=%p\n", p, s);
	writeb(s->u.ax88796.cpr, dd->spp_cpr);
}
/*
 * Low-level parport operations.  SPP primitives are implemented above;
 * EPP/ECP/compat/nibble/byte transfers fall back to the generic
 * software IEEE 1284 helpers.
 */
static struct parport_operations parport_ax88796_ops = {
	.write_data	= parport_ax88796_write_data,
	.read_data	= parport_ax88796_read_data,

	.write_control	= parport_ax88796_write_control,
	.read_control	= parport_ax88796_read_control,
	.frob_control	= parport_ax88796_frob_control,

	.read_status	= parport_ax88796_read_status,

	.enable_irq	= parport_ax88796_enable_irq,
	.disable_irq	= parport_ax88796_disable_irq,

	.data_forward	= parport_ax88796_data_forward,
	.data_reverse	= parport_ax88796_data_reverse,

	.init_state	= parport_ax88796_init_state,
	.save_state	= parport_ax88796_save_state,
	.restore_state	= parport_ax88796_restore_state,

	.epp_write_data	= parport_ieee1284_epp_write_data,
	.epp_read_data	= parport_ieee1284_epp_read_data,
	.epp_write_addr	= parport_ieee1284_epp_write_addr,
	.epp_read_addr	= parport_ieee1284_epp_read_addr,

	.ecp_write_data	= parport_ieee1284_ecp_write_data,
	.ecp_read_data	= parport_ieee1284_ecp_read_data,
	.ecp_write_addr	= parport_ieee1284_ecp_write_addr,

	.compat_write_data	= parport_ieee1284_write_compat,
	.nibble_read_data	= parport_ieee1284_read_nibble,
	.byte_read_data		= parport_ieee1284_read_byte,

	.owner		= THIS_MODULE,
};
/*
 * Platform probe: claim and map the MMIO window (split into three
 * equal-sized data/status/control sub-regions), register a parport,
 * hook the IRQ if one was supplied, and announce the port.  Resources
 * are unwound in reverse order via the goto chain on failure.
 */
static int parport_ax88796_probe(struct platform_device *pdev)
{
	struct device *_dev = &pdev->dev;
	struct ax_drvdata *dd;
	struct parport *pp = NULL;
	struct resource *res;
	unsigned long size;
	int spacing;
	int irq;
	int ret;

	dd = kzalloc(sizeof(struct ax_drvdata), GFP_KERNEL);
	if (dd == NULL) {
		dev_err(_dev, "no memory for private data\n");
		return -ENOMEM;
	}

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (res == NULL) {
		dev_err(_dev, "no MEM specified\n");
		ret = -ENXIO;
		goto exit_mem;
	}

	size = resource_size(res);
	spacing = size / 3;	/* data, status, control at equal offsets */

	dd->io = request_mem_region(res->start, size, pdev->name);
	if (dd->io == NULL) {
		dev_err(_dev, "cannot reserve memory\n");
		ret = -ENXIO;
		goto exit_mem;
	}

	dd->base = ioremap(res->start, size);
	if (dd->base == NULL) {
		dev_err(_dev, "cannot ioremap region\n");
		ret = -ENXIO;
		goto exit_res;
	}

	irq = platform_get_irq(pdev, 0);
	if (irq <= 0)
		irq = PARPORT_IRQ_NONE;	/* port works polled without an IRQ */

	pp = parport_register_port((unsigned long)dd->base, irq,
				   PARPORT_DMA_NONE,
				   &parport_ax88796_ops);
	if (pp == NULL) {
		dev_err(_dev, "failed to register parallel port\n");
		ret = -ENOMEM;
		goto exit_unmap;
	}

	pp->private_data = dd;

	dd->parport = pp;
	dd->dev = _dev;

	dd->spp_data = dd->base;
	dd->spp_spr = dd->base + (spacing * 1);
	dd->spp_cpr = dd->base + (spacing * 2);

	/* initialise the port controls */
	writeb(AX_CPR_STRB, dd->spp_cpr);

	if (irq >= 0) {
		/* request irq */
		ret = request_irq(irq, parport_irq_handler,
				  IRQF_TRIGGER_FALLING, pdev->name, pp);
		if (ret < 0)
			goto exit_port;

		dd->irq_enabled = 1;
	}

	platform_set_drvdata(pdev, pp);

	dev_info(_dev, "attached parallel port driver\n");
	parport_announce_port(pp);

	return 0;

exit_port:
	parport_remove_port(pp);
exit_unmap:
	iounmap(dd->base);
exit_res:
	release_resource(dd->io);
	kfree(dd->io);
exit_mem:
	kfree(dd);
	return ret;
}
/* Platform remove: release everything acquired in probe, reverse order. */
static int parport_ax88796_remove(struct platform_device *pdev)
{
	struct parport *p = platform_get_drvdata(pdev);
	struct ax_drvdata *dd = pp_to_drv(p);

	free_irq(p->irq, p);
	parport_remove_port(p);
	iounmap(dd->base);
	release_resource(dd->io);
	kfree(dd->io);
	kfree(dd);

	return 0;
}
#ifdef CONFIG_PM

/*
 * Suspend: save the control register, then tristate the data drivers
 * (nDOE) and deassert strobe while the device sleeps.
 */
static int parport_ax88796_suspend(struct platform_device *dev,
				   pm_message_t state)
{
	struct parport *p = platform_get_drvdata(dev);
	struct ax_drvdata *dd = pp_to_drv(p);

	parport_ax88796_save_state(p, &dd->suspend);
	writeb(AX_CPR_nDOE | AX_CPR_STRB, dd->spp_cpr);
	return 0;
}

/* Resume: restore the control register saved at suspend. */
static int parport_ax88796_resume(struct platform_device *dev)
{
	struct parport *p = platform_get_drvdata(dev);
	struct ax_drvdata *dd = pp_to_drv(p);

	parport_ax88796_restore_state(p, &dd->suspend);
	return 0;
}

#else
#define parport_ax88796_suspend NULL
#define parport_ax88796_resume  NULL
#endif
MODULE_ALIAS("platform:ax88796-pp");

/* Platform driver binding for the "ax88796-pp" device. */
static struct platform_driver axdrv = {
	.driver	= {
		.name	= "ax88796-pp",
		.owner	= THIS_MODULE,
	},
	.probe	= parport_ax88796_probe,
	.remove	= parport_ax88796_remove,
	.suspend = parport_ax88796_suspend,
	.resume	= parport_ax88796_resume,
};
/* Module boilerplate: registration and identification. */
module_platform_driver(axdrv);

MODULE_AUTHOR("Ben Dooks <ben@simtec.co.uk>");
MODULE_DESCRIPTION("AX88796 Parport parallel port driver");
MODULE_LICENSE("GPL");
| gpl-2.0 |
teleofis/OpenWRT | DLpatch/linux-3.18.29/arch/sparc/prom/ranges.c | 8840 | 3703 | /*
* ranges.c: Handle ranges in newer proms for obio/sbus.
*
* Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
* Copyright (C) 1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
*/
#include <linux/init.h>
#include <linux/module.h>
#include <asm/openprom.h>
#include <asm/oplib.h>
#include <asm/types.h>
/* Cached "ranges" entries of the obio node, filled by prom_ranges_init(). */
static struct linux_prom_ranges promlib_obio_ranges[PROMREG_MAX];
static int num_obio_ranges;
/* Adjust register values based upon the ranges parameters. */
/*
 * Translate each register's (which_io, phys_addr) from the child bus
 * address space into the parent's, using the matching "ranges" entry.
 *
 * Fix: the original fell through after failing to find a matching
 * range and then indexed rangep[nranges] - one past the end of the
 * array.  Skip the register instead of reading out of bounds.
 */
static void
prom_adjust_regs(struct linux_prom_registers *regp, int nregs,
		 struct linux_prom_ranges *rangep, int nranges)
{
	int regc, rngc;

	for (regc = 0; regc < nregs; regc++) {
		for (rngc = 0; rngc < nranges; rngc++)
			if (regp[regc].which_io == rangep[rngc].ot_child_space)
				break; /* Found it */
		if (rngc == nranges) { /* oops */
			prom_printf("adjust_regs: Could not find range with matching bus type...\n");
			continue; /* avoid out-of-bounds rangep[nranges] */
		}
		regp[regc].which_io = rangep[rngc].ot_parent_space;
		regp[regc].phys_addr -= rangep[rngc].ot_child_base;
		regp[regc].phys_addr += rangep[rngc].ot_parent_base;
	}
}
/*
 * Compose a node's "ranges" with its parent's: rewrite each entry of
 * ranges1 into the parent's address space, clamping or_size to the
 * containing parent range.
 *
 * Fix: the original fell through after failing to find a containing
 * parent range and then indexed ranges2[nranges2] - one past the end.
 * Skip the entry instead of reading out of bounds.
 */
static void
prom_adjust_ranges(struct linux_prom_ranges *ranges1, int nranges1,
		   struct linux_prom_ranges *ranges2, int nranges2)
{
	int rng1c, rng2c;

	for (rng1c = 0; rng1c < nranges1; rng1c++) {
		for (rng2c = 0; rng2c < nranges2; rng2c++)
			if (ranges1[rng1c].ot_parent_space == ranges2[rng2c].ot_child_space &&
			    ranges1[rng1c].ot_parent_base >= ranges2[rng2c].ot_child_base &&
			    ranges2[rng2c].ot_child_base + ranges2[rng2c].or_size - ranges1[rng1c].ot_parent_base > 0U)
				break;
		if (rng2c == nranges2) { /* oops */
			prom_printf("adjust_ranges: Could not find matching bus type...\n");
			continue; /* avoid out-of-bounds ranges2[nranges2] */
		}
		if (ranges1[rng1c].ot_parent_base + ranges1[rng1c].or_size > ranges2[rng2c].ot_child_base + ranges2[rng2c].or_size)
			ranges1[rng1c].or_size =
				ranges2[rng2c].ot_child_base + ranges2[rng2c].or_size - ranges1[rng1c].ot_parent_base;
		ranges1[rng1c].ot_parent_space = ranges2[rng2c].ot_parent_space;
		ranges1[rng1c].ot_parent_base += ranges2[rng2c].ot_parent_base;
	}
}
/* Apply probed obio ranges to registers passed, if no ranges return. */
void
prom_apply_obio_ranges(struct linux_prom_registers *regs, int nregs)
{
	/* No-op until prom_ranges_init() has populated the obio table. */
	if (num_obio_ranges)
		prom_adjust_regs(regs, nregs, promlib_obio_ranges, num_obio_ranges);
}
EXPORT_SYMBOL(prom_apply_obio_ranges);
/*
 * Boot-time init: locate the "obio" node under the PROM root and cache
 * its "ranges" property for later prom_apply_obio_ranges() calls.
 */
void __init prom_ranges_init(void)
{
	phandle node, obio_node;
	int success;

	num_obio_ranges = 0;

	/* Check for obio and sbus ranges. */
	node = prom_getchild(prom_root_node);
	obio_node = prom_searchsiblings(node, "obio");

	if (obio_node) {
		success = prom_getproperty(obio_node, "ranges",
					   (char *) promlib_obio_ranges,
					   sizeof(promlib_obio_ranges));
		/* prom_getproperty returns -1 on failure, byte count on success. */
		if (success != -1)
			num_obio_ranges = (success/sizeof(struct linux_prom_ranges));
	}

	if (num_obio_ranges)
		prom_printf("PROMLIB: obio_ranges %d\n", num_obio_ranges);
}
/*
 * Apply a node's "ranges" (composed with the parent's "ranges" when one
 * exists) to the given registers.  Nodes without a "ranges" property are
 * left untouched.
 */
void prom_apply_generic_ranges(phandle node, phandle parent,
			       struct linux_prom_registers *regs, int nregs)
{
	int success;
	int num_ranges;
	struct linux_prom_ranges ranges[PROMREG_MAX];

	success = prom_getproperty(node, "ranges",
				   (char *) ranges,
				   sizeof (ranges));
	if (success != -1) {
		num_ranges = (success/sizeof(struct linux_prom_ranges));
		if (parent) {
			struct linux_prom_ranges parent_ranges[PROMREG_MAX];
			int num_parent_ranges;

			success = prom_getproperty(parent, "ranges",
						   (char *) parent_ranges,
						   sizeof (parent_ranges));
			if (success != -1) {
				num_parent_ranges = (success/sizeof(struct linux_prom_ranges));
				/* Rebase our ranges into the parent's space first. */
				prom_adjust_ranges (ranges, num_ranges, parent_ranges, num_parent_ranges);
			}
		}
		prom_adjust_regs(regs, nregs, ranges, num_ranges);
	}
}
| gpl-2.0 |
francescodiotalevi/linux-zynq-stable | arch/sparc/prom/ranges.c | 8840 | 3703 | /*
* ranges.c: Handle ranges in newer proms for obio/sbus.
*
* Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
* Copyright (C) 1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
*/
#include <linux/init.h>
#include <linux/module.h>
#include <asm/openprom.h>
#include <asm/oplib.h>
#include <asm/types.h>
/* Cached "ranges" entries of the obio node, filled by prom_ranges_init(). */
static struct linux_prom_ranges promlib_obio_ranges[PROMREG_MAX];
static int num_obio_ranges;
/* Adjust register values based upon the ranges parameters. */
/*
 * Translate each register's (which_io, phys_addr) from the child bus
 * address space into the parent's, using the matching "ranges" entry.
 *
 * Fix: the original fell through after failing to find a matching
 * range and then indexed rangep[nranges] - one past the end of the
 * array.  Skip the register instead of reading out of bounds.
 */
static void
prom_adjust_regs(struct linux_prom_registers *regp, int nregs,
		 struct linux_prom_ranges *rangep, int nranges)
{
	int regc, rngc;

	for (regc = 0; regc < nregs; regc++) {
		for (rngc = 0; rngc < nranges; rngc++)
			if (regp[regc].which_io == rangep[rngc].ot_child_space)
				break; /* Found it */
		if (rngc == nranges) { /* oops */
			prom_printf("adjust_regs: Could not find range with matching bus type...\n");
			continue; /* avoid out-of-bounds rangep[nranges] */
		}
		regp[regc].which_io = rangep[rngc].ot_parent_space;
		regp[regc].phys_addr -= rangep[rngc].ot_child_base;
		regp[regc].phys_addr += rangep[rngc].ot_parent_base;
	}
}
/*
 * Compose a node's "ranges" with its parent's: rewrite each entry of
 * ranges1 into the parent's address space, clamping or_size to the
 * containing parent range.
 *
 * Fix: the original fell through after failing to find a containing
 * parent range and then indexed ranges2[nranges2] - one past the end.
 * Skip the entry instead of reading out of bounds.
 */
static void
prom_adjust_ranges(struct linux_prom_ranges *ranges1, int nranges1,
		   struct linux_prom_ranges *ranges2, int nranges2)
{
	int rng1c, rng2c;

	for (rng1c = 0; rng1c < nranges1; rng1c++) {
		for (rng2c = 0; rng2c < nranges2; rng2c++)
			if (ranges1[rng1c].ot_parent_space == ranges2[rng2c].ot_child_space &&
			    ranges1[rng1c].ot_parent_base >= ranges2[rng2c].ot_child_base &&
			    ranges2[rng2c].ot_child_base + ranges2[rng2c].or_size - ranges1[rng1c].ot_parent_base > 0U)
				break;
		if (rng2c == nranges2) { /* oops */
			prom_printf("adjust_ranges: Could not find matching bus type...\n");
			continue; /* avoid out-of-bounds ranges2[nranges2] */
		}
		if (ranges1[rng1c].ot_parent_base + ranges1[rng1c].or_size > ranges2[rng2c].ot_child_base + ranges2[rng2c].or_size)
			ranges1[rng1c].or_size =
				ranges2[rng2c].ot_child_base + ranges2[rng2c].or_size - ranges1[rng1c].ot_parent_base;
		ranges1[rng1c].ot_parent_space = ranges2[rng2c].ot_parent_space;
		ranges1[rng1c].ot_parent_base += ranges2[rng2c].ot_parent_base;
	}
}
/* Apply probed obio ranges to registers passed, if no ranges return. */
void
prom_apply_obio_ranges(struct linux_prom_registers *regs, int nregs)
{
	/* No-op until prom_ranges_init() has populated the obio table. */
	if (num_obio_ranges)
		prom_adjust_regs(regs, nregs, promlib_obio_ranges, num_obio_ranges);
}
EXPORT_SYMBOL(prom_apply_obio_ranges);
/*
 * Boot-time init: locate the "obio" node under the PROM root and cache
 * its "ranges" property for later prom_apply_obio_ranges() calls.
 */
void __init prom_ranges_init(void)
{
	phandle node, obio_node;
	int success;

	num_obio_ranges = 0;

	/* Check for obio and sbus ranges. */
	node = prom_getchild(prom_root_node);
	obio_node = prom_searchsiblings(node, "obio");

	if (obio_node) {
		success = prom_getproperty(obio_node, "ranges",
					   (char *) promlib_obio_ranges,
					   sizeof(promlib_obio_ranges));
		/* prom_getproperty returns -1 on failure, byte count on success. */
		if (success != -1)
			num_obio_ranges = (success/sizeof(struct linux_prom_ranges));
	}

	if (num_obio_ranges)
		prom_printf("PROMLIB: obio_ranges %d\n", num_obio_ranges);
}
/*
 * Apply a node's "ranges" (composed with the parent's "ranges" when one
 * exists) to the given registers.  Nodes without a "ranges" property are
 * left untouched.
 */
void prom_apply_generic_ranges(phandle node, phandle parent,
			       struct linux_prom_registers *regs, int nregs)
{
	int success;
	int num_ranges;
	struct linux_prom_ranges ranges[PROMREG_MAX];

	success = prom_getproperty(node, "ranges",
				   (char *) ranges,
				   sizeof (ranges));
	if (success != -1) {
		num_ranges = (success/sizeof(struct linux_prom_ranges));
		if (parent) {
			struct linux_prom_ranges parent_ranges[PROMREG_MAX];
			int num_parent_ranges;

			success = prom_getproperty(parent, "ranges",
						   (char *) parent_ranges,
						   sizeof (parent_ranges));
			if (success != -1) {
				num_parent_ranges = (success/sizeof(struct linux_prom_ranges));
				/* Rebase our ranges into the parent's space first. */
				prom_adjust_ranges (ranges, num_ranges, parent_ranges, num_parent_ranges);
			}
		}
		prom_adjust_regs(regs, nregs, ranges, num_ranges);
	}
}
| gpl-2.0 |
eagleeyetom/android_kernel_oppo_msm8974 | arch/frv/kernel/debug-stub.c | 9096 | 6097 | /* debug-stub.c: debug-mode stub
*
* Copyright (C) 2004 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/serial_reg.h>
#include <linux/start_kernel.h>
#include <asm/serial-regs.h>
#include <asm/timer-regs.h>
#include <asm/irc-regs.h>
#include <asm/gdb-stub.h>
#include "gdb-io.h"
/* CPU board CON5 */
/* Byte-wide access to a UART0 register, named by its <linux/serial_reg.h>
 * offset suffix (LSR, MSR, MCR, TX, ...). */
#define __UART0(X) (*(volatile uint8_t *)(UART0_BASE + (UART_##X)))
/* Spin until the given line-status bit (e.g. THRE) is set */
#define LSR_WAIT_FOR0(STATE)			\
do {						\
} while (!(__UART0(LSR) & UART_LSR_##STATE))
/* Query / clear / set a modem-control or modem-status line on UART0 */
#define FLOWCTL_QUERY0(LINE)	({ __UART0(MSR) & UART_MSR_##LINE; })
#define FLOWCTL_CLEAR0(LINE)	do { __UART0(MCR) &= ~UART_MCR_##LINE; } while (0)
#define FLOWCTL_SET0(LINE)	do { __UART0(MCR) |= UART_MCR_##LINE; } while (0)
/* Poll gdbstub RX while waiting for a flow-control line.
 * Fixed: the loop condition previously expanded the undefined macro
 * FLOWCTL_QUERY() instead of FLOWCTL_QUERY0(), which would break the
 * build of any future caller of this (currently unused) macro. */
#define FLOWCTL_WAIT_FOR0(LINE)			\
do {						\
	gdbstub_do_rx();			\
} while(!FLOWCTL_QUERY0(LINE))
/* Debug-mode bookkeeping shared with the gdb stub */
struct frv_debug_status __debug_status;
static void __init debug_stub_init(void);
/*****************************************************************************/
/*
 * debug mode handler stub
 * - we come here with the CPU in debug mode and with exceptions disabled
 * - handle debugging services for userspace
 */
asmlinkage void debug_stub(void)
{
	unsigned long hsr0;
	int type = 0;
	/* one-shot lazy initialisation on the first debug-mode entry */
	static u8 inited = 0;
	if (!inited) {
		debug_stub_init();
		type = -1;
		inited = 1;
	}
	/* temporarily suppress external trace-mode while the stub runs */
	hsr0 = __get_HSR(0);
	if (hsr0 & HSR0_ETMD)
		__set_HSR(0, hsr0 & ~HSR0_ETMD);
	/* disable single stepping */
	__debug_status.dcr &= ~DCR_SE;
	/* kernel mode can propose an exception be handled in debug mode by jumping to a special
	 * location */
	if (__debug_frame->pc == (unsigned long) __break_hijack_kernel_event_breaks_here) {
		/* replace the debug frame with the kernel frame and discard
		 * the top kernel context */
		*__debug_frame = *__frame;
		__frame = __debug_frame->next_frame;
		/* synthesise a break-reason from the hijacked trap number */
		__debug_status.brr = (__debug_frame->tbr & TBR_TT) << 12;
		__debug_status.brr |= BRR_EB;
	}
	/* a BUG() trap lands just past __debug_bug_trap; unwind to the
	 * caller and pass the trap code (gr8) on to the gdb stub */
	if (__debug_frame->pc == (unsigned long) __debug_bug_trap + 4) {
		__debug_frame->pc = __debug_frame->lr;
		type = __debug_frame->gr8;
	}
#ifdef CONFIG_GDBSTUB
	gdbstub(type);
#endif
	/* restore trace-mode if it was enabled on entry */
	if (hsr0 & HSR0_ETMD)
		__set_HSR(0, __get_HSR(0) | HSR0_ETMD);
} /* end debug_stub() */
/*****************************************************************************/
/*
 * debug stub initialisation
 * - runs once from debug_stub() on the first debug-mode entry: routes
 *   error interrupts, clears all hardware breakpoint/watchpoint
 *   registers, and arms the debug events we care about
 */
static void __init debug_stub_init(void)
{
	__set_IRR(6, 0xff000000);		/* map ERRs to NMI */
	__set_IITMR(1, 0x20000000);		/* ERR0/1, UART0/1 IRQ detect levels */
	/* zero all instruction/data breakpoint address, mask and data
	 * registers so no stale hardware breakpoints fire */
	asm volatile("	movgs	gr0,ibar0	\n"
		     "	movgs	gr0,ibar1	\n"
		     "	movgs	gr0,ibar2	\n"
		     "	movgs	gr0,ibar3	\n"
		     "	movgs	gr0,dbar0	\n"
		     "	movgs	gr0,dbmr00	\n"
		     "	movgs	gr0,dbmr01	\n"
		     "	movgs	gr0,dbdr00	\n"
		     "	movgs	gr0,dbdr01	\n"
		     "	movgs	gr0,dbar1	\n"
		     "	movgs	gr0,dbmr10	\n"
		     "	movgs	gr0,dbmr11	\n"
		     "	movgs	gr0,dbdr10	\n"
		     "	movgs	gr0,dbdr11	\n"
		     );
	/* deal with debugging stub initialisation and initial pause */
	if (__debug_frame->pc == (unsigned long) __debug_stub_init_break)
		__debug_frame->pc = (unsigned long) start_kernel;
	/* enable the debug events we want to trap */
	__debug_status.dcr = DCR_EBE;
#ifdef CONFIG_GDBSTUB
	gdbstub_init();
#endif
	/* unmask interrupt level 15 and set it to raw (non-IRQ-controller)
	 * routing for the debug channel */
	__clr_MASK_all();
	__clr_MASK(15);
	__clr_RC(15);
} /* end debug_stub_init() */
/*****************************************************************************/
/*
 * kernel "exit" trap for gdb stub
 * - forwards the exit @status to the remote debugger when the gdb stub
 *   is compiled in; otherwise a no-op
 */
void debug_stub_exit(int status)
{
#ifdef CONFIG_GDBSTUB
	gdbstub_exit(status);
#endif
} /* end debug_stub_exit() */
/*****************************************************************************/
/*
 * send string to serial port
 * - writes @n bytes from @p to UART0 by polled MMIO, raising DTR around
 *   each byte and expanding LF to CRLF
 * - busy-waits on the transmit-holding-register-empty flag, so this is
 *   only suitable for debug output
 */
void debug_to_serial(const char *p, int n)
{
	char ch;
	for (; n > 0; n--) {
		ch = *p++;
		FLOWCTL_SET0(DTR);
		LSR_WAIT_FOR0(THRE);
		//	FLOWCTL_WAIT_FOR(CTS);
		/* translate "\n" into "\r\n" for terminal consumption */
		if (ch == 0x0a) {
			__UART0(TX) = 0x0d;
			mb();
			LSR_WAIT_FOR0(THRE);
			//	FLOWCTL_WAIT_FOR(CTS);
		}
		__UART0(TX) = ch;
		mb();
		FLOWCTL_CLEAR0(DTR);
	}
} /* end debug_to_serial() */
/*****************************************************************************/
/*
 * send formatted string to serial port
 * - printf-style front end for debug_to_serial()
 * - fixed: the original used vsprintf() into a fixed 64-byte stack
 *   buffer, so any long format expansion overran the stack; use
 *   vsnprintf() and clamp the length actually written
 */
void debug_to_serial2(const char *fmt, ...)
{
	va_list va;
	char buf[64];
	int n;
	va_start(va, fmt);
	n = vsnprintf(buf, sizeof(buf), fmt, va);
	va_end(va);
	/* vsnprintf() reports the would-be length on truncation */
	if (n >= (int) sizeof(buf))
		n = sizeof(buf) - 1;
	debug_to_serial(buf, n);
} /* end debug_to_serial2() */
/*****************************************************************************/
/*
 * set up the ttyS0 serial port baud rate timers
 * - programs the 16550 divisor latch on UART0 for the requested @baud,
 *   choosing whichever achievable rate is nearest
 */
void __init console_set_baud(unsigned baud)
{
	unsigned value, high, low;
	u8 lcr;
	/* work out the divisor to give us the nearest higher baud rate */
	value = __serial_clock_speed_HZ / 16 / baud;
	/* determine the baud rate range */
	high = __serial_clock_speed_HZ / 16 / value;
	low = __serial_clock_speed_HZ / 16 / (value + 1);
	/* pick the nearest bound */
	if (low + (high - low) / 2 > baud)
		value++;
	/* open the divisor latch (DLAB), write the 16-bit divisor,
	 * then restore the original line-control register */
	lcr = __UART0(LCR);
	__UART0(LCR) |= UART_LCR_DLAB;
	mb();
	__UART0(DLL) = value & 0xff;
	__UART0(DLM) = (value >> 8) & 0xff;
	mb();
	__UART0(LCR) = lcr;
	mb();
} /* end console_set_baud() */
/*****************************************************************************/
/*
 * read back the current UART0 baud-rate divisor
 * - returns the raw 16-bit divisor latch value (not a baud rate)
 */
int __init console_get_baud(void)
{
	unsigned value;
	u8 lcr;
	/* open the divisor latch, read DLM:DLL, restore LCR */
	lcr = __UART0(LCR);
	__UART0(LCR) |= UART_LCR_DLAB;
	mb();
	value = __UART0(DLM) << 8;
	value |= __UART0(DLL);
	__UART0(LCR) = lcr;
	mb();
	return value;
} /* end console_get_baud() */
/*****************************************************************************/
/*
 * display BUG() info
 * - called from the BUG() machinery to report the failing source
 *   location; compiled out when CONFIG_NO_KERNEL_MSG suppresses text
 */
#ifndef CONFIG_NO_KERNEL_MSG
void __debug_bug_printk(const char *file, unsigned line)
{
	printk("kernel BUG at %s:%d!\n", file, line);
} /* end __debug_bug_printk() */
#endif
| gpl-2.0 |
playfulgod/Kernel-p930-Nitro | drivers/i2c/busses/i2c-sibyte.c | 12168 | 5688 | /*
* Copyright (C) 2004 Steven J. Hill
* Copyright (C) 2001,2002,2003 Broadcom Corporation
* Copyright (C) 1995-2000 Simon G. Vogl
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/i2c.h>
#include <linux/io.h>
#include <asm/sibyte/sb1250_regs.h>
#include <asm/sibyte/sb1250_smbus.h>
/* Per-bus state for the SiByte SMBus controller */
struct i2c_algo_sibyte_data {
	void *data;		/* private data */
	int   bus;		/* which bus */
	void *reg_base;		/* CSR base */
};
/* ----- global defines ----------------------------------------------- */
/* Address of CSR register @r for the bus described by @a */
#define SMB_CSR(a,r) ((long)(a->reg_base + r))
/*
 * Perform one SMBus transaction on a SiByte controller.
 *
 * Waits for the controller to go idle, programs the command/data/start
 * CSRs according to @size and @read_write, waits for completion and
 * returns 0, -EIO/-ENXIO on a controller-reported error, or
 * -EOPNOTSUPP for unsupported transaction sizes.
 *
 * NOTE(review): both busy-wait loops spin without a timeout; a wedged
 * controller would hang the caller.  Left as-is here.
 */
static int smbus_xfer(struct i2c_adapter *i2c_adap, u16 addr,
		      unsigned short flags, char read_write,
		      u8 command, int size, union i2c_smbus_data * data)
{
	struct i2c_algo_sibyte_data *adap = i2c_adap->algo_data;
	int data_bytes = 0;	/* how many result bytes to read back */
	int error;
	/* wait for any previous transaction to finish */
	while (csr_in32(SMB_CSR(adap, R_SMB_STATUS)) & M_SMB_BUSY)
		;
	switch (size) {
	case I2C_SMBUS_QUICK:
		csr_out32((V_SMB_ADDR(addr) |
			   (read_write == I2C_SMBUS_READ ? M_SMB_QDATA : 0) |
			   V_SMB_TT_QUICKCMD), SMB_CSR(adap, R_SMB_START));
		break;
	case I2C_SMBUS_BYTE:
		if (read_write == I2C_SMBUS_READ) {
			csr_out32((V_SMB_ADDR(addr) | V_SMB_TT_RD1BYTE),
				  SMB_CSR(adap, R_SMB_START));
			data_bytes = 1;
		} else {
			csr_out32(V_SMB_CMD(command), SMB_CSR(adap, R_SMB_CMD));
			csr_out32((V_SMB_ADDR(addr) | V_SMB_TT_WR1BYTE),
				  SMB_CSR(adap, R_SMB_START));
		}
		break;
	case I2C_SMBUS_BYTE_DATA:
		csr_out32(V_SMB_CMD(command), SMB_CSR(adap, R_SMB_CMD));
		if (read_write == I2C_SMBUS_READ) {
			csr_out32((V_SMB_ADDR(addr) | V_SMB_TT_CMD_RD1BYTE),
				  SMB_CSR(adap, R_SMB_START));
			data_bytes = 1;
		} else {
			csr_out32(V_SMB_LB(data->byte),
				  SMB_CSR(adap, R_SMB_DATA));
			csr_out32((V_SMB_ADDR(addr) | V_SMB_TT_WR2BYTE),
				  SMB_CSR(adap, R_SMB_START));
		}
		break;
	case I2C_SMBUS_WORD_DATA:
		csr_out32(V_SMB_CMD(command), SMB_CSR(adap, R_SMB_CMD));
		if (read_write == I2C_SMBUS_READ) {
			csr_out32((V_SMB_ADDR(addr) | V_SMB_TT_CMD_RD2BYTE),
				  SMB_CSR(adap, R_SMB_START));
			data_bytes = 2;
		} else {
			/* low byte then high byte into the data FIFO */
			csr_out32(V_SMB_LB(data->word & 0xff),
				  SMB_CSR(adap, R_SMB_DATA));
			csr_out32(V_SMB_MB(data->word >> 8),
				  SMB_CSR(adap, R_SMB_DATA));
			csr_out32((V_SMB_ADDR(addr) | V_SMB_TT_WR2BYTE),
				  SMB_CSR(adap, R_SMB_START));
		}
		break;
	default:
		return -EOPNOTSUPP;
	}
	/* wait for this transaction to complete */
	while (csr_in32(SMB_CSR(adap, R_SMB_STATUS)) & M_SMB_BUSY)
		;
	error = csr_in32(SMB_CSR(adap, R_SMB_STATUS));
	if (error & M_SMB_ERROR) {
		/* Clear error bit by writing a 1 */
		csr_out32(M_SMB_ERROR, SMB_CSR(adap, R_SMB_STATUS));
		return (error & M_SMB_ERROR_TYPE) ? -EIO : -ENXIO;
	}
	/* fetch the read result, if any, from the data register */
	if (data_bytes == 1)
		data->byte = csr_in32(SMB_CSR(adap, R_SMB_DATA)) & 0xff;
	if (data_bytes == 2)
		data->word = csr_in32(SMB_CSR(adap, R_SMB_DATA)) & 0xffff;
	return 0;
}
/* Report the SMBus transaction types this adapter implements. */
static u32 bit_func(struct i2c_adapter *adap)
{
	u32 caps = I2C_FUNC_SMBUS_QUICK;

	caps |= I2C_FUNC_SMBUS_BYTE;
	caps |= I2C_FUNC_SMBUS_BYTE_DATA;
	caps |= I2C_FUNC_SMBUS_WORD_DATA;
	return caps;
}
/* -----exported algorithm data: ------------------------------------- */
static const struct i2c_algorithm i2c_sibyte_algo = {
	.smbus_xfer	= smbus_xfer,
	.functionality	= bit_func,
};
/*
 * registering functions to load algorithms at runtime
 */
/*
 * Attach the SiByte SMBus algorithm to @i2c_adap, program the bus
 * @speed (a K_SMB_FREQ_* constant) and register the adapter with the
 * i2c core under its fixed bus number.
 */
static int __init i2c_sibyte_add_bus(struct i2c_adapter *i2c_adap, int speed)
{
	struct i2c_algo_sibyte_data *adap = i2c_adap->algo_data;
	/* Register new adapter to i2c module... */
	i2c_adap->algo = &i2c_sibyte_algo;
	/* Set the requested frequency. */
	csr_out32(speed, SMB_CSR(adap,R_SMB_FREQ));
	csr_out32(0, SMB_CSR(adap,R_SMB_CONTROL));
	return i2c_add_numbered_adapter(i2c_adap);
}
/* Per-bus register bases: the two on-chip SMBus controllers mapped
 * through KSEG1 (uncached) */
static struct i2c_algo_sibyte_data sibyte_board_data[2] = {
	{ NULL, 0, (void *) (CKSEG1+A_SMB_BASE(0)) },
	{ NULL, 1, (void *) (CKSEG1+A_SMB_BASE(1)) }
};
/* Static adapter descriptions for bus 0 and bus 1; .algo is filled in
 * by i2c_sibyte_add_bus() at init time */
static struct i2c_adapter sibyte_board_adapter[2] = {
	{
		.owner		= THIS_MODULE,
		.class		= I2C_CLASS_HWMON | I2C_CLASS_SPD,
		.algo		= NULL,
		.algo_data	= &sibyte_board_data[0],
		.nr		= 0,
		.name		= "SiByte SMBus 0",
	},
	{
		.owner		= THIS_MODULE,
		.class		= I2C_CLASS_HWMON | I2C_CLASS_SPD,
		.algo		= NULL,
		.algo_data	= &sibyte_board_data[1],
		.nr		= 1,
		.name		= "SiByte SMBus 1",
	},
};
/*
 * Module init: bring up both SMBus adapters — bus 0 at 100 kHz and
 * bus 1 at 400 kHz.  If the second bus fails, the first is torn down
 * again so the module loads all-or-nothing.
 */
static int __init i2c_sibyte_init(void)
{
	int rc;

	pr_info("i2c-sibyte: i2c SMBus adapter module for SiByte board\n");
	rc = i2c_sibyte_add_bus(&sibyte_board_adapter[0], K_SMB_FREQ_100KHZ);
	if (rc < 0)
		return -ENODEV;
	rc = i2c_sibyte_add_bus(&sibyte_board_adapter[1], K_SMB_FREQ_400KHZ);
	if (rc < 0) {
		i2c_del_adapter(&sibyte_board_adapter[0]);
		return -ENODEV;
	}
	return 0;
}
/* Module exit: unregister both SMBus adapters. */
static void __exit i2c_sibyte_exit(void)
{
	int i;

	for (i = 0; i < 2; i++)
		i2c_del_adapter(&sibyte_board_adapter[i]);
}
module_init(i2c_sibyte_init);
module_exit(i2c_sibyte_exit);
MODULE_AUTHOR("Kip Walker (Broadcom Corp.), Steven J. Hill <sjhill@realitydiluted.com>");
MODULE_DESCRIPTION("SMBus adapter routines for SiByte boards");
MODULE_LICENSE("GPL");
| gpl-2.0 |
dongsheng365/linux-drm | drivers/gpu/drm/sti/sti_dvo.c | 137 | 14340 | /*
* Copyright (C) STMicroelectronics SA 2014
* Author: Vincent Abriou <vincent.abriou@st.com> for STMicroelectronics.
* License terms: GNU General Public License (GPL), version 2
*/
#include <linux/clk.h>
#include <linux/component.h>
#include <linux/module.h>
#include <linux/of_gpio.h>
#include <linux/platform_device.h>
#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_panel.h>
#include "sti_awg_utils.h"
#include "sti_mixer.h"
/* DVO registers */
#define DVO_AWG_DIGSYNC_CTRL      0x0000
#define DVO_DOF_CFG               0x0004
#define DVO_LUT_PROG_LOW          0x0008
#define DVO_LUT_PROG_MID          0x000C
#define DVO_LUT_PROG_HIGH         0x0010
#define DVO_DIGSYNC_INSTR_I       0x0100
/* AWG (Arbitrary Waveform Generator) control bits */
#define DVO_AWG_CTRL_EN           BIT(0)
#define DVO_AWG_FRAME_BASED_SYNC  BIT(2)
/* DOF (Digital Output Formatter) configuration bits */
#define DVO_DOF_EN_LOWBYTE        BIT(0)
#define DVO_DOF_EN_MIDBYTE        BIT(1)
#define DVO_DOF_EN_HIGHBYTE       BIT(2)
#define DVO_DOF_EN                BIT(6)
#define DVO_DOF_MOD_COUNT_SHIFT   8
/* LUT selector values routing colour components onto output bytes */
#define DVO_LUT_ZERO              0
#define DVO_LUT_Y_G               1
#define DVO_LUT_Y_G_DEL           2
#define DVO_LUT_CB_B              3
#define DVO_LUT_CB_B_DEL          4
#define DVO_LUT_CR_R              5
#define DVO_LUT_CR_R_DEL          6
#define DVO_LUT_HOLD              7
/* Output-format description: LUT routing for each output byte lane plus
 * the AWG firmware generator matching the sync scheme */
struct dvo_config {
	u32 flags;
	u32 lowbyte;
	u32 midbyte;
	u32 highbyte;
	int (*awg_fwgen_fct)(
			struct awg_code_generation_params *fw_gen_params,
			struct awg_timing *timing);
};
/* 24-bit RGB with data-enable (DE) synchronisation — the only mode
 * currently supported (see sti_dvo_set_mode()) */
static struct dvo_config rgb_24bit_de_cfg = {
	.flags         = (0L << DVO_DOF_MOD_COUNT_SHIFT),
	.lowbyte       = DVO_LUT_CR_R,
	.midbyte       = DVO_LUT_Y_G,
	.highbyte      = DVO_LUT_CB_B,
	.awg_fwgen_fct = sti_awg_generate_code_data_enable_mode,
};
/**
 * STI digital video output structure
 *
 * @dev: driver device
 * @drm_dev: pointer to drm device
 * @mode: current display mode selected
 * @regs: dvo registers
 * @clk_pix: pixel clock for dvo
 * @clk: clock for dvo
 * @clk_main_parent: dvo parent clock if main path used
 * @clk_aux_parent: dvo parent clock if aux path used
 * @panel_node: panel node reference from device tree
 * @panel: reference to the panel connected to the dvo
 * @config: selected output format configuration
 * @enabled: true if dvo is enabled else false
 * @encoder: drm_encoder it is bound
 * @bridge: drm bridge registered for this output
 */
struct sti_dvo {
	struct device dev;
	struct drm_device *drm_dev;
	struct drm_display_mode mode;
	void __iomem *regs;
	struct clk *clk_pix;
	struct clk *clk;
	struct clk *clk_main_parent;
	struct clk *clk_aux_parent;
	struct device_node *panel_node;
	struct drm_panel *panel;
	struct dvo_config *config;
	bool enabled;
	struct drm_encoder *encoder;
	struct drm_bridge *bridge;
};
/* Connector wrapper tying a drm_connector back to its dvo and encoder */
struct sti_dvo_connector {
	struct drm_connector drm_connector;
	struct drm_encoder *encoder;
	struct sti_dvo *dvo;
};
#define to_sti_dvo_connector(x) \
	container_of(x, struct sti_dvo_connector, drm_connector)
/* Blanking level fed to the AWG timing generator */
#define BLANKING_LEVEL 16
/*
 * Build the AWG firmware for the current display mode.
 *
 * Derives line/pixel timings from dvo->mode and runs the config's
 * firmware generator into @ram_code.  On success, stores the number of
 * generated instructions in @ram_size and returns 0; returns -EINVAL if
 * generation fails.
 */
int dvo_awg_generate_code(struct sti_dvo *dvo, u8 *ram_size, u32 *ram_code)
{
	struct drm_display_mode *mode = &dvo->mode;
	struct dvo_config *config = dvo->config;
	struct awg_code_generation_params fw_gen_params;
	struct awg_timing timing;
	fw_gen_params.ram_code = ram_code;
	fw_gen_params.instruction_offset = 0;
	/* vertical timing: active, front-porch ("blanking") and the rest */
	timing.total_lines = mode->vtotal;
	timing.active_lines = mode->vdisplay;
	timing.blanking_lines = mode->vsync_start - mode->vdisplay;
	timing.trailing_lines = mode->vtotal - mode->vsync_start;
	/* horizontal timing, same decomposition */
	timing.total_pixels = mode->htotal;
	timing.active_pixels = mode->hdisplay;
	timing.blanking_pixels = mode->hsync_start - mode->hdisplay;
	timing.trailing_pixels = mode->htotal - mode->hsync_start;
	timing.blanking_level = BLANKING_LEVEL;
	if (config->awg_fwgen_fct(&fw_gen_params, &timing)) {
		DRM_ERROR("AWG firmware not properly generated\n");
		return -EINVAL;
	}
	*ram_size = fw_gen_params.instruction_offset;
	return 0;
}
/* Configure AWG, writing instructions
 *
 * @dvo: pointer to DVO structure
 * @awg_ram_code: pointer to AWG instructions table
 * @nb: nb of AWG instructions
 */
static void dvo_awg_configure(struct sti_dvo *dvo, u32 *awg_ram_code, int nb)
{
	int i;
	DRM_DEBUG_DRIVER("\n");
	/* load the firmware words into the AWG instruction RAM */
	for (i = 0; i < nb; i++)
		writel(awg_ram_code[i],
		       dvo->regs + DVO_DIGSYNC_INSTR_I + i * 4);
	/* zero the remaining instruction slots so no stale code runs */
	for (i = nb; i < AWG_MAX_INST; i++)
		writel(0, dvo->regs + DVO_DIGSYNC_INSTR_I + i * 4);
	/* start the waveform generator */
	writel(DVO_AWG_CTRL_EN, dvo->regs + DVO_AWG_DIGSYNC_CTRL);
}
/*
 * Bridge disable: stop the AWG and output formatter, power down the
 * attached panel and gate the DVO clocks.  No-op when already disabled.
 */
static void sti_dvo_disable(struct drm_bridge *bridge)
{
	struct sti_dvo *dvo = bridge->driver_private;
	if (!dvo->enabled)
		return;
	DRM_DEBUG_DRIVER("\n");
	if (dvo->config->awg_fwgen_fct)
		writel(0x00000000, dvo->regs + DVO_AWG_DIGSYNC_CTRL);
	writel(0x00000000, dvo->regs + DVO_DOF_CFG);
	if (dvo->panel)
		dvo->panel->funcs->disable(dvo->panel);
	/* Disable/unprepare dvo clock */
	clk_disable_unprepare(dvo->clk_pix);
	clk_disable_unprepare(dvo->clk);
	dvo->enabled = false;
}
/*
 * Bridge pre-enable: program the AWG firmware for the current mode,
 * ungate the clocks, power up the panel, set up the output-byte LUT and
 * finally enable the digital output formatter.  No-op when already
 * enabled; bails out silently if AWG firmware generation fails.
 */
static void sti_dvo_pre_enable(struct drm_bridge *bridge)
{
	struct sti_dvo *dvo = bridge->driver_private;
	struct dvo_config *config = dvo->config;
	u32 val;
	DRM_DEBUG_DRIVER("\n");
	if (dvo->enabled)
		return;
	/* Make sure DVO is disabled */
	writel(0x00000000, dvo->regs + DVO_DOF_CFG);
	writel(0x00000000, dvo->regs + DVO_AWG_DIGSYNC_CTRL);
	if (config->awg_fwgen_fct) {
		u8 nb_instr;
		u32 awg_ram_code[AWG_MAX_INST];
		/* Configure AWG */
		if (!dvo_awg_generate_code(dvo, &nb_instr, awg_ram_code))
			dvo_awg_configure(dvo, awg_ram_code, nb_instr);
		else
			return;
	}
	/* Prepare/enable clocks */
	if (clk_prepare_enable(dvo->clk_pix))
		DRM_ERROR("Failed to prepare/enable dvo_pix clk\n");
	if (clk_prepare_enable(dvo->clk))
		DRM_ERROR("Failed to prepare/enable dvo clk\n");
	if (dvo->panel)
		dvo->panel->funcs->enable(dvo->panel);
	/* Set LUT */
	writel(config->lowbyte,  dvo->regs + DVO_LUT_PROG_LOW);
	writel(config->midbyte,  dvo->regs + DVO_LUT_PROG_MID);
	writel(config->highbyte, dvo->regs + DVO_LUT_PROG_HIGH);
	/* Digital output formatter config */
	val = (config->flags | DVO_DOF_EN);
	writel(val, dvo->regs + DVO_DOF_CFG);
	dvo->enabled = true;
}
/*
 * Bridge mode-set: remember the mode, reparent the DVO clocks to match
 * the compositor path in use (main vs aux mixer), set both clocks to
 * the pixel rate and select the output format configuration.
 */
static void sti_dvo_set_mode(struct drm_bridge *bridge,
			     struct drm_display_mode *mode,
			     struct drm_display_mode *adjusted_mode)
{
	struct sti_dvo *dvo = bridge->driver_private;
	struct sti_mixer *mixer = to_sti_mixer(dvo->encoder->crtc);
	int rate = mode->clock * 1000;	/* mode->clock is in kHz */
	struct clk *clkp;
	int ret;
	DRM_DEBUG_DRIVER("\n");
	memcpy(&dvo->mode, mode, sizeof(struct drm_display_mode));
	/* According to the path used (main or aux), the dvo clocks should
	 * have a different parent clock. */
	if (mixer->id == STI_MIXER_MAIN)
		clkp = dvo->clk_main_parent;
	else
		clkp = dvo->clk_aux_parent;
	if (clkp) {
		clk_set_parent(dvo->clk_pix, clkp);
		clk_set_parent(dvo->clk, clkp);
	}
	/* DVO clocks = compositor clock */
	ret = clk_set_rate(dvo->clk_pix, rate);
	if (ret < 0) {
		DRM_ERROR("Cannot set rate (%dHz) for dvo_pix clk\n", rate);
		return;
	}
	ret = clk_set_rate(dvo->clk, rate);
	if (ret < 0) {
		DRM_ERROR("Cannot set rate (%dHz) for dvo clk\n", rate);
		return;
	}
	/* For now, we only support 24bit data enable (DE) synchro format */
	dvo->config = &rgb_24bit_de_cfg;
}
/* Placeholder for bridge callbacks that need no work on this hardware */
static void sti_dvo_bridge_nope(struct drm_bridge *bridge)
{
	/* do nothing */
}
/* All real work happens in pre_enable/disable; enable/post_disable are
 * no-ops */
static const struct drm_bridge_funcs sti_dvo_bridge_funcs = {
	.pre_enable = sti_dvo_pre_enable,
	.enable = sti_dvo_bridge_nope,
	.disable = sti_dvo_disable,
	.post_disable = sti_dvo_bridge_nope,
	.mode_set = sti_dvo_set_mode,
};
/*
 * Connector get_modes: delegate to the attached panel; report no modes
 * when no panel is connected.
 */
static int sti_dvo_connector_get_modes(struct drm_connector *connector)
{
	struct sti_dvo_connector *dvo_connector
		= to_sti_dvo_connector(connector);
	struct sti_dvo *dvo = dvo_connector->dvo;

	if (!dvo->panel)
		return 0;

	return dvo->panel->funcs->get_modes(dvo->panel);
}
/* Accept a mode only if the pixel clock can be matched this closely */
#define CLK_TOLERANCE_HZ 50

/*
 * Connector mode_valid: ask the clock framework what rate it would
 * actually deliver for this mode's pixel clock and accept the mode only
 * when the achievable rate is within CLK_TOLERANCE_HZ of the target.
 */
static int sti_dvo_connector_mode_valid(struct drm_connector *connector,
					struct drm_display_mode *mode)
{
	struct sti_dvo_connector *dvo_connector
		= to_sti_dvo_connector(connector);
	struct sti_dvo *dvo = dvo_connector->dvo;
	int target = mode->clock * 1000;
	int rounded;

	rounded = clk_round_rate(dvo->clk_pix, target);
	DRM_DEBUG_DRIVER("target rate = %d => available rate = %d\n",
			 target, rounded);

	if (rounded >= target - CLK_TOLERANCE_HZ &&
	    rounded <= target + CLK_TOLERANCE_HZ)
		return MODE_OK;

	DRM_DEBUG_DRIVER("dvo pixclk=%d not supported\n", target);
	return MODE_BAD;
}
/* Connector best_encoder: the encoder fixed at connector creation */
struct drm_encoder *sti_dvo_best_encoder(struct drm_connector *connector)
{
	struct sti_dvo_connector *dvo_connector
		= to_sti_dvo_connector(connector);
	/* Best encoder is the one associated during connector creation */
	return dvo_connector->encoder;
}
static struct drm_connector_helper_funcs sti_dvo_connector_helper_funcs = {
	.get_modes = sti_dvo_connector_get_modes,
	.mode_valid = sti_dvo_connector_mode_valid,
	.best_encoder = sti_dvo_best_encoder,
};
/*
 * Connector detect: lazily look up the DT-referenced panel and report
 * connected once it is found and successfully attached.
 */
static enum drm_connector_status
sti_dvo_connector_detect(struct drm_connector *connector, bool force)
{
	struct sti_dvo_connector *dvo_connector
		= to_sti_dvo_connector(connector);
	struct sti_dvo *dvo = dvo_connector->dvo;
	DRM_DEBUG_DRIVER("\n");
	/* the panel driver may probe after us; retry the lookup here */
	if (!dvo->panel)
		dvo->panel = of_drm_find_panel(dvo->panel_node);
	if (dvo->panel)
		if (!drm_panel_attach(dvo->panel, connector))
			return connector_status_connected;
	return connector_status_disconnected;
}
/* Connector destroy: unregister, clean up and free the wrapper struct */
static void sti_dvo_connector_destroy(struct drm_connector *connector)
{
	struct sti_dvo_connector *dvo_connector
		= to_sti_dvo_connector(connector);
	drm_connector_unregister(connector);
	drm_connector_cleanup(connector);
	kfree(dvo_connector);
}
static struct drm_connector_funcs sti_dvo_connector_funcs = {
	.dpms = drm_helper_connector_dpms,
	.fill_modes = drm_helper_probe_single_connector_modes,
	.detect = sti_dvo_connector_detect,
	.destroy = sti_dvo_connector_destroy,
};
static struct drm_encoder *sti_dvo_find_encoder(struct drm_device *dev)
{
struct drm_encoder *encoder;
list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
if (encoder->encoder_type == DRM_MODE_ENCODER_LVDS)
return encoder;
}
return NULL;
}
/*
 * Component bind: create the bridge and connector for the DVO output
 * and wire them to the LVDS encoder.
 *
 * Fixes over the previous version:
 *  - a drm_bridge_attach() failure used to return without removing the
 *    just-added bridge, leaking it;
 *  - the tail error path returned a hard-coded -EINVAL, discarding the
 *    real error code.
 */
static int sti_dvo_bind(struct device *dev, struct device *master, void *data)
{
	struct sti_dvo *dvo = dev_get_drvdata(dev);
	struct drm_device *drm_dev = data;
	struct drm_encoder *encoder;
	struct sti_dvo_connector *connector;
	struct drm_connector *drm_connector;
	struct drm_bridge *bridge;
	int err;

	/* Set the drm device handle */
	dvo->drm_dev = drm_dev;
	encoder = sti_dvo_find_encoder(drm_dev);
	if (!encoder)
		return -ENOMEM;
	connector = devm_kzalloc(dev, sizeof(*connector), GFP_KERNEL);
	if (!connector)
		return -ENOMEM;
	connector->dvo = dvo;
	bridge = devm_kzalloc(dev, sizeof(*bridge), GFP_KERNEL);
	if (!bridge)
		return -ENOMEM;
	bridge->driver_private = dvo;
	bridge->funcs = &sti_dvo_bridge_funcs;
	bridge->of_node = dvo->dev.of_node;
	err = drm_bridge_add(bridge);
	if (err) {
		DRM_ERROR("Failed to add bridge\n");
		return err;
	}
	err = drm_bridge_attach(drm_dev, bridge);
	if (err) {
		DRM_ERROR("Failed to attach bridge\n");
		goto err_bridge;
	}
	dvo->bridge = bridge;
	encoder->bridge = bridge;
	connector->encoder = encoder;
	dvo->encoder = encoder;
	drm_connector = (struct drm_connector *)connector;
	drm_connector->polled = DRM_CONNECTOR_POLL_HPD;
	drm_connector_init(drm_dev, drm_connector,
			   &sti_dvo_connector_funcs, DRM_MODE_CONNECTOR_LVDS);
	drm_connector_helper_add(drm_connector,
				 &sti_dvo_connector_helper_funcs);
	err = drm_connector_register(drm_connector);
	if (err)
		goto err_connector;
	err = drm_mode_connector_attach_encoder(drm_connector, encoder);
	if (err) {
		DRM_ERROR("Failed to attach a connector to a encoder\n");
		goto err_sysfs;
	}
	return 0;

err_sysfs:
	drm_connector_unregister(drm_connector);
err_connector:
	drm_connector_cleanup(drm_connector);
err_bridge:
	drm_bridge_remove(bridge);
	return err;
}
/* Component unbind: remove the bridge registered in sti_dvo_bind() */
static void sti_dvo_unbind(struct device *dev,
			   struct device *master, void *data)
{
	struct sti_dvo *dvo = dev_get_drvdata(dev);
	drm_bridge_remove(dvo->bridge);
}
static const struct component_ops sti_dvo_ops = {
	.bind = sti_dvo_bind,
	.unbind = sti_dvo_unbind,
};
/*
 * Platform probe: allocate the sti_dvo state, map the "dvo-reg"
 * register bank, look up the mandatory and optional clocks and the DT
 * panel phandle, then register the component.
 *
 * Fix: devm_ioremap_nocache() returns NULL on failure, not an
 * ERR_PTR, so the previous IS_ERR() check could never fire and a
 * failed mapping would be dereferenced later.
 */
static int sti_dvo_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct sti_dvo *dvo;
	struct resource *res;
	struct device_node *np = dev->of_node;

	DRM_INFO("%s\n", __func__);
	dvo = devm_kzalloc(dev, sizeof(*dvo), GFP_KERNEL);
	if (!dvo) {
		DRM_ERROR("Failed to allocate memory for DVO\n");
		return -ENOMEM;
	}
	dvo->dev = pdev->dev;
	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dvo-reg");
	if (!res) {
		DRM_ERROR("Invalid dvo resource\n");
		return -ENOMEM;
	}
	dvo->regs = devm_ioremap_nocache(dev, res->start,
			resource_size(res));
	if (!dvo->regs)
		return -ENOMEM;
	/* pixel clock is mandatory */
	dvo->clk_pix = devm_clk_get(dev, "dvo_pix");
	if (IS_ERR(dvo->clk_pix)) {
		DRM_ERROR("Cannot get dvo_pix clock\n");
		return PTR_ERR(dvo->clk_pix);
	}
	dvo->clk = devm_clk_get(dev, "dvo");
	if (IS_ERR(dvo->clk)) {
		DRM_ERROR("Cannot get dvo clock\n");
		return PTR_ERR(dvo->clk);
	}
	/* parent clocks are optional: absence just disables reparenting */
	dvo->clk_main_parent = devm_clk_get(dev, "main_parent");
	if (IS_ERR(dvo->clk_main_parent)) {
		DRM_DEBUG_DRIVER("Cannot get main_parent clock\n");
		dvo->clk_main_parent = NULL;
	}
	dvo->clk_aux_parent = devm_clk_get(dev, "aux_parent");
	if (IS_ERR(dvo->clk_aux_parent)) {
		DRM_DEBUG_DRIVER("Cannot get aux_parent clock\n");
		dvo->clk_aux_parent = NULL;
	}
	dvo->panel_node = of_parse_phandle(np, "sti,panel", 0);
	if (!dvo->panel_node)
		DRM_ERROR("No panel associated to the dvo output\n");
	platform_set_drvdata(pdev, dvo);
	return component_add(&pdev->dev, &sti_dvo_ops);
}
/* Platform remove: drop the component registered at probe time */
static int sti_dvo_remove(struct platform_device *pdev)
{
	component_del(&pdev->dev, &sti_dvo_ops);
	return 0;
}
static struct of_device_id dvo_of_match[] = {
	{ .compatible = "st,stih407-dvo", },
	{ /* end node */ }
};
MODULE_DEVICE_TABLE(of, dvo_of_match);
struct platform_driver sti_dvo_driver = {
	.driver = {
		.name = "sti-dvo",
		.owner = THIS_MODULE,
		.of_match_table = dvo_of_match,
	},
	.probe = sti_dvo_probe,
	.remove = sti_dvo_remove,
};
module_platform_driver(sti_dvo_driver);
MODULE_AUTHOR("Benjamin Gaignard <benjamin.gaignard@st.com>");
MODULE_DESCRIPTION("STMicroelectronics SoC DRM driver");
MODULE_LICENSE("GPL");
| gpl-2.0 |
linuxmake/kernel_softwinner_fiber | drivers/net/wireless/iwlwifi/iwl-ucode.c | 137 | 21319 | /******************************************************************************
*
* GPL LICENSE SUMMARY
*
* Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
* USA
*
* The full GNU General Public License is included in this distribution
* in the file called LICENSE.GPL.
*
* Contact Information:
* Intel Linux Wireless <ilw@linux.intel.com>
* Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*
*****************************************************************************/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/dma-mapping.h>
#include "iwl-wifi.h"
#include "iwl-dev.h"
#include "iwl-core.h"
#include "iwl-io.h"
#include "iwl-agn-hw.h"
#include "iwl-agn.h"
#include "iwl-agn-calib.h"
#include "iwl-trans.h"
#include "iwl-fh.h"
/* WiMAX coexistence priority table: one {read-priority, write-priority,
 * reserved, flags} entry per COEX_* event, indexed by event number */
static struct iwl_wimax_coex_event_entry cu_priorities[COEX_NUM_OF_EVENTS] = {
	{COEX_CU_UNASSOC_IDLE_RP, COEX_CU_UNASSOC_IDLE_WP,
	 0, COEX_UNASSOC_IDLE_FLAGS},
	{COEX_CU_UNASSOC_MANUAL_SCAN_RP, COEX_CU_UNASSOC_MANUAL_SCAN_WP,
	 0, COEX_UNASSOC_MANUAL_SCAN_FLAGS},
	{COEX_CU_UNASSOC_AUTO_SCAN_RP, COEX_CU_UNASSOC_AUTO_SCAN_WP,
	 0, COEX_UNASSOC_AUTO_SCAN_FLAGS},
	{COEX_CU_CALIBRATION_RP, COEX_CU_CALIBRATION_WP,
	 0, COEX_CALIBRATION_FLAGS},
	{COEX_CU_PERIODIC_CALIBRATION_RP, COEX_CU_PERIODIC_CALIBRATION_WP,
	 0, COEX_PERIODIC_CALIBRATION_FLAGS},
	{COEX_CU_CONNECTION_ESTAB_RP, COEX_CU_CONNECTION_ESTAB_WP,
	 0, COEX_CONNECTION_ESTAB_FLAGS},
	{COEX_CU_ASSOCIATED_IDLE_RP, COEX_CU_ASSOCIATED_IDLE_WP,
	 0, COEX_ASSOCIATED_IDLE_FLAGS},
	{COEX_CU_ASSOC_MANUAL_SCAN_RP, COEX_CU_ASSOC_MANUAL_SCAN_WP,
	 0, COEX_ASSOC_MANUAL_SCAN_FLAGS},
	{COEX_CU_ASSOC_AUTO_SCAN_RP, COEX_CU_ASSOC_AUTO_SCAN_WP,
	 0, COEX_ASSOC_AUTO_SCAN_FLAGS},
	{COEX_CU_ASSOC_ACTIVE_LEVEL_RP, COEX_CU_ASSOC_ACTIVE_LEVEL_WP,
	 0, COEX_ASSOC_ACTIVE_LEVEL_FLAGS},
	{COEX_CU_RF_ON_RP, COEX_CU_RF_ON_WP, 0, COEX_CU_RF_ON_FLAGS},
	{COEX_CU_RF_OFF_RP, COEX_CU_RF_OFF_WP, 0, COEX_RF_OFF_FLAGS},
	{COEX_CU_STAND_ALONE_DEBUG_RP, COEX_CU_STAND_ALONE_DEBUG_WP,
	 0, COEX_STAND_ALONE_DEBUG_FLAGS},
	{COEX_CU_IPAN_ASSOC_LEVEL_RP, COEX_CU_IPAN_ASSOC_LEVEL_WP,
	 0, COEX_IPAN_ASSOC_LEVEL_FLAGS},
	{COEX_CU_RSRVD1_RP, COEX_CU_RSRVD1_WP, 0, COEX_RSRVD1_FLAGS},
	{COEX_CU_RSRVD2_RP, COEX_CU_RSRVD2_WP, 0, COEX_RSRVD2_FLAGS}
};
/******************************************************************************
 *
 * uCode download functions
 *
 ******************************************************************************/
/* Release one DMA-coherent firmware section and reset its descriptor */
static void iwl_free_fw_desc(struct iwl_bus *bus, struct fw_desc *desc)
{
	if (desc->v_addr)
		dma_free_coherent(bus->dev, desc->len,
				  desc->v_addr, desc->p_addr);
	desc->v_addr = NULL;
	desc->len = 0;
}
/* Release both sections (code + data) of a firmware image */
static void iwl_free_fw_img(struct iwl_bus *bus, struct fw_img *img)
{
	iwl_free_fw_desc(bus, &img->code);
	iwl_free_fw_desc(bus, &img->data);
}
/* Free all three firmware images (runtime, init, wowlan) held by @trans */
void iwl_dealloc_ucode(struct iwl_trans *trans)
{
	iwl_free_fw_img(bus(trans), &trans->ucode_rt);
	iwl_free_fw_img(bus(trans), &trans->ucode_init);
	iwl_free_fw_img(bus(trans), &trans->ucode_wowlan);
}
/*
 * Allocate a DMA-coherent buffer for one firmware section and copy
 * @len bytes of @data into it.  Returns 0 on success, -EINVAL for a
 * zero-length section, -ENOMEM on allocation failure.
 */
int iwl_alloc_fw_desc(struct iwl_bus *bus, struct fw_desc *desc,
		      const void *data, size_t len)
{
	if (!len) {
		desc->v_addr = NULL;
		return -EINVAL;
	}
	desc->v_addr = dma_alloc_coherent(bus->dev, len,
					  &desc->p_addr, GFP_KERNEL);
	if (!desc->v_addr)
		return -ENOMEM;
	desc->len = len;
	memcpy(desc->v_addr, data, len);
	return 0;
}
/*
 * ucode
 */
/*
 * DMA one firmware section into device SRAM via the service channel.
 *
 * Programs the Flow Handler TX DMA registers to copy @image (host DMA
 * address + length) to device address @dst_addr, then sleeps until the
 * ISR signals ucode_write_complete.  Returns 0 on success or
 * -ETIMEDOUT when the transfer does not complete within 5 seconds.
 */
static int iwl_load_section(struct iwl_trans *trans, const char *name,
				struct fw_desc *image, u32 dst_addr)
{
	struct iwl_bus *bus = bus(trans);
	dma_addr_t phy_addr = image->p_addr;
	u32 byte_cnt = image->len;
	int ret;
	trans->ucode_write_complete = 0;
	/* pause the channel while it is being reprogrammed */
	iwl_write_direct32(bus,
		FH_TCSR_CHNL_TX_CONFIG_REG(FH_SRVC_CHNL),
		FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE);
	iwl_write_direct32(bus,
		FH_SRVC_CHNL_SRAM_ADDR_REG(FH_SRVC_CHNL), dst_addr);
	/* source address: low 32 bits, then high bits + byte count */
	iwl_write_direct32(bus,
		FH_TFDIB_CTRL0_REG(FH_SRVC_CHNL),
		phy_addr & FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK);
	iwl_write_direct32(bus,
		FH_TFDIB_CTRL1_REG(FH_SRVC_CHNL),
		(iwl_get_dma_hi_addr(phy_addr)
			<< FH_MEM_TFDIB_REG1_ADDR_BITSHIFT) | byte_cnt);
	iwl_write_direct32(bus,
		FH_TCSR_CHNL_TX_BUF_STS_REG(FH_SRVC_CHNL),
		1 << FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM |
		1 << FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX |
		FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID);
	/* kick off the transfer, interrupt on end-of-TFD */
	iwl_write_direct32(bus,
		FH_TCSR_CHNL_TX_CONFIG_REG(FH_SRVC_CHNL),
		FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
		FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE |
		FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD);
	IWL_DEBUG_FW(bus, "%s uCode section being loaded...\n", name);
	ret = wait_event_timeout(trans->shrd->wait_command_queue,
				 trans->ucode_write_complete, 5 * HZ);
	if (!ret) {
		IWL_ERR(trans, "Could not load the %s uCode section\n",
			name);
		return -ETIMEDOUT;
	}
	return 0;
}
/*
 * Map a ucode type to the corresponding firmware image held by @trans.
 * Returns NULL for IWL_UCODE_NONE or an unknown type.
 */
static inline struct fw_img *iwl_get_ucode_image(struct iwl_trans *trans,
					enum iwl_ucode_type ucode_type)
{
	if (ucode_type == IWL_UCODE_INIT)
		return &trans->ucode_init;
	if (ucode_type == IWL_UCODE_WOWLAN)
		return &trans->ucode_wowlan;
	if (ucode_type == IWL_UCODE_REGULAR)
		return &trans->ucode_rt;
	/* IWL_UCODE_NONE or out-of-range value */
	return NULL;
}
/*
 * Load both sections of the requested ucode image into the device:
 * instructions first, then data.  Returns 0 on success, -EINVAL for an
 * unknown @ucode_type, or the error from iwl_load_section().
 */
static int iwl_load_given_ucode(struct iwl_trans *trans,
				   enum iwl_ucode_type ucode_type)
{
	int ret = 0;
	struct fw_img *image = iwl_get_ucode_image(trans, ucode_type);
	if (!image) {
		IWL_ERR(trans, "Invalid ucode requested (%d)\n",
			ucode_type);
		return -EINVAL;
	}
	ret = iwl_load_section(trans, "INST", &image->code,
			       IWLAGN_RTC_INST_LOWER_BOUND);
	if (ret)
		return ret;
	return iwl_load_section(trans, "DATA", &image->data,
				IWLAGN_RTC_DATA_LOWER_BOUND);
}
/*
 * Calibration
 */
/*
 * Send the crystal-frequency calibration command using the two
 * capacitor trim values stored in EEPROM.
 */
static int iwl_set_Xtal_calib(struct iwl_trans *trans)
{
	struct iwl_calib_xtal_freq_cmd cmd;
	__le16 *xtal_calib =
		(__le16 *)iwl_eeprom_query_addr(trans->shrd, EEPROM_XTAL);
	iwl_set_calib_hdr(&cmd.hdr, IWL_PHY_CALIBRATE_CRYSTAL_FRQ_CMD);
	cmd.cap_pin1 = le16_to_cpu(xtal_calib[0]);
	cmd.cap_pin2 = le16_to_cpu(xtal_calib[1]);
	return iwl_calib_set(trans, (void *)&cmd, sizeof(cmd));
}
/*
 * Send the (v1) temperature-offset calibration command with the raw
 * temperature offset from EEPROM, falling back to the driver default
 * when the EEPROM value is zero/absent.
 */
static int iwl_set_temperature_offset_calib(struct iwl_trans *trans)
{
	struct iwl_calib_temperature_offset_cmd cmd;
	__le16 *offset_calib =
		(__le16 *)iwl_eeprom_query_addr(trans->shrd,
						EEPROM_RAW_TEMPERATURE);
	memset(&cmd, 0, sizeof(cmd));
	iwl_set_calib_hdr(&cmd.hdr, IWL_PHY_CALIBRATE_TEMP_OFFSET_CMD);
	memcpy(&cmd.radio_sensor_offset, offset_calib, sizeof(*offset_calib));
	if (!(cmd.radio_sensor_offset))
		cmd.radio_sensor_offset = DEFAULT_RADIO_SENSOR_OFFSET;
	IWL_DEBUG_CALIB(trans, "Radio sensor offset: %d\n",
			le16_to_cpu(cmd.radio_sensor_offset));
	return iwl_calib_set(trans, (void *)&cmd, sizeof(cmd));
}
/*
 * iwl_set_temperature_offset_calib_v2() - record the v2 temperature
 * offset calibration.
 *
 * Like iwl_set_temperature_offset_calib() but for devices that use the
 * two-point (high/low) temperature offset format plus the burnt voltage
 * reference from the EEPROM calibration header. A zero low offset is
 * taken to mean "no data in EEPROM" and both offsets fall back to the
 * driver default.
 * NOTE(review): hdr from iwl_eeprom_query_addr() is dereferenced
 * without a NULL check — presumed always present for these devices.
 */
static int iwl_set_temperature_offset_calib_v2(struct iwl_trans *trans)
{
	struct iwl_calib_temperature_offset_v2_cmd cmd;
	__le16 *offset_calib_high = (__le16 *)iwl_eeprom_query_addr(trans->shrd,
				     EEPROM_KELVIN_TEMPERATURE);
	__le16 *offset_calib_low =
		(__le16 *)iwl_eeprom_query_addr(trans->shrd,
						EEPROM_RAW_TEMPERATURE);
	struct iwl_eeprom_calib_hdr *hdr;

	memset(&cmd, 0, sizeof(cmd));
	iwl_set_calib_hdr(&cmd.hdr, IWL_PHY_CALIBRATE_TEMP_OFFSET_CMD);
	hdr = (struct iwl_eeprom_calib_hdr *)iwl_eeprom_query_addr(trans->shrd,
							EEPROM_CALIB_ALL);
	/* copy little-endian EEPROM words verbatim into the command */
	memcpy(&cmd.radio_sensor_offset_high, offset_calib_high,
		sizeof(*offset_calib_high));
	memcpy(&cmd.radio_sensor_offset_low, offset_calib_low,
		sizeof(*offset_calib_low));
	if (!(cmd.radio_sensor_offset_low)) {
		IWL_DEBUG_CALIB(trans, "no info in EEPROM, use default\n");
		cmd.radio_sensor_offset_low = DEFAULT_RADIO_SENSOR_OFFSET;
		cmd.radio_sensor_offset_high = DEFAULT_RADIO_SENSOR_OFFSET;
	}
	memcpy(&cmd.burntVoltageRef, &hdr->voltage,
		sizeof(hdr->voltage));

	IWL_DEBUG_CALIB(trans, "Radio sensor offset high: %d\n",
			le16_to_cpu(cmd.radio_sensor_offset_high));
	IWL_DEBUG_CALIB(trans, "Radio sensor offset low: %d\n",
			le16_to_cpu(cmd.radio_sensor_offset_low));
	IWL_DEBUG_CALIB(trans, "Voltage Ref: %d\n",
			le16_to_cpu(cmd.burntVoltageRef));

	return iwl_calib_set(trans, (void *)&cmd, sizeof(cmd));
}
static int iwl_send_calib_cfg(struct iwl_trans *trans)
{
struct iwl_calib_cfg_cmd calib_cfg_cmd;
struct iwl_host_cmd cmd = {
.id = CALIBRATION_CFG_CMD,
.len = { sizeof(struct iwl_calib_cfg_cmd), },
.data = { &calib_cfg_cmd, },
};
memset(&calib_cfg_cmd, 0, sizeof(calib_cfg_cmd));
calib_cfg_cmd.ucd_calib_cfg.once.is_enable = IWL_CALIB_INIT_CFG_ALL;
calib_cfg_cmd.ucd_calib_cfg.once.start = IWL_CALIB_INIT_CFG_ALL;
calib_cfg_cmd.ucd_calib_cfg.once.send_res = IWL_CALIB_INIT_CFG_ALL;
calib_cfg_cmd.ucd_calib_cfg.flags =
IWL_CALIB_CFG_FLAG_SEND_COMPLETE_NTFY_MSK;
return iwl_trans_send_cmd(trans, &cmd);
}
/*
 * iwlagn_rx_calib_result() - RX handler for calibration result packets.
 *
 * Stores the calibration payload received from the init ucode so it
 * can be replayed to the runtime ucode later. Always returns 0 (the
 * packet is consumed either way); a storage failure is only logged.
 */
int iwlagn_rx_calib_result(struct iwl_priv *priv,
			    struct iwl_rx_mem_buffer *rxb,
			    struct iwl_device_cmd *cmd)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl_calib_hdr *hdr = (struct iwl_calib_hdr *)pkt->u.raw;
	int len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;

	/* reduce the size of the length field itself */
	len -= 4;

	if (iwl_calib_set(trans(priv), hdr, len))
		IWL_ERR(priv, "Failed to record calibration data %d\n",
			hdr->op_code);

	return 0;
}
/*
 * iwl_init_alive_start() - post-ALIVE setup for the *init* ucode.
 *
 * Opens the BT coex calibration envelope (when advanced BT coexist is
 * configured), requests all init calibrations, and pre-computes the
 * temperature offset calibration that only the runtime ucode will use.
 * Returns 0 or a negative error from the first failing step.
 */
int iwl_init_alive_start(struct iwl_trans *trans)
{
	int ret;

	if (cfg(trans)->bt_params &&
	    cfg(trans)->bt_params->advanced_bt_coexist) {
		/*
		 * Tell uCode we are ready to perform calibration
		 * need to perform this before any calibration
		 * no need to close the envlope since we are going
		 * to load the runtime uCode later.
		 */
		ret = iwl_send_bt_env(trans, IWL_BT_COEX_ENV_OPEN,
				      BT_COEX_PRIO_TBL_EVT_INIT_CALIB2);
		if (ret)
			return ret;
	}

	ret = iwl_send_calib_cfg(trans);
	if (ret)
		return ret;

	/*
	 * temperature offset calibration is only needed for runtime ucode,
	 * so prepare the value now.
	 */
	if (cfg(trans)->need_temp_offset_calib) {
		if (cfg(trans)->temp_offset_v2)
			return iwl_set_temperature_offset_calib_v2(trans);
		else
			return iwl_set_temperature_offset_calib(trans);
	}

	return 0;
}
/*
 * iwl_send_wimax_coex() - send the WiMAX coexistence configuration.
 *
 * When the device supports WiMAX coexist, fills in the wake-up-source
 * masks, copies the static event priority table (cu_priorities,
 * defined elsewhere in this file), and enables the feature; otherwise
 * a zeroed command disables coexistence entirely.
 * Returns the synchronous send status.
 */
static int iwl_send_wimax_coex(struct iwl_trans *trans)
{
	struct iwl_wimax_coex_cmd coex_cmd;

	if (cfg(trans)->base_params->support_wimax_coexist) {
		/* UnMask wake up src at associated sleep */
		coex_cmd.flags = COEX_FLAGS_ASSOC_WA_UNMASK_MSK;

		/* UnMask wake up src at unassociated sleep */
		coex_cmd.flags |= COEX_FLAGS_UNASSOC_WA_UNMASK_MSK;
		memcpy(coex_cmd.sta_prio, cu_priorities,
			sizeof(struct iwl_wimax_coex_event_entry) *
			 COEX_NUM_OF_EVENTS);

		/* enabling the coexistence feature */
		coex_cmd.flags |= COEX_FLAGS_COEX_ENABLE_MSK;

		/* enabling the priorities tables */
		coex_cmd.flags |= COEX_FLAGS_STA_TABLE_VALID_MSK;
	} else {
		/* coexistence is disabled */
		memset(&coex_cmd, 0, sizeof(coex_cmd));
	}

	return iwl_trans_send_cmd_pdu(trans,
				COEX_PRIORITY_TABLE_CMD, CMD_SYNC,
				sizeof(coex_cmd), &coex_cmd);
}
/*
 * BT coexistence priority table, indexed by BT_COEX_PRIO_TBL_EVT_*.
 * Each byte packs a priority class (bypass/low/high/coex on-off) with
 * a shared-antenna flag; trailing zero entries pad to the table size.
 */
static const u8 iwl_bt_prio_tbl[BT_COEX_PRIO_TBL_EVT_MAX] = {
	((BT_COEX_PRIO_TBL_PRIO_BYPASS << IWL_BT_COEX_PRIO_TBL_PRIO_POS) |
		(0 << IWL_BT_COEX_PRIO_TBL_SHARED_ANTENNA_POS)),
	((BT_COEX_PRIO_TBL_PRIO_BYPASS << IWL_BT_COEX_PRIO_TBL_PRIO_POS) |
		(1 << IWL_BT_COEX_PRIO_TBL_SHARED_ANTENNA_POS)),
	((BT_COEX_PRIO_TBL_PRIO_LOW << IWL_BT_COEX_PRIO_TBL_PRIO_POS) |
		(0 << IWL_BT_COEX_PRIO_TBL_SHARED_ANTENNA_POS)),
	((BT_COEX_PRIO_TBL_PRIO_LOW << IWL_BT_COEX_PRIO_TBL_PRIO_POS) |
		(1 << IWL_BT_COEX_PRIO_TBL_SHARED_ANTENNA_POS)),
	((BT_COEX_PRIO_TBL_PRIO_HIGH << IWL_BT_COEX_PRIO_TBL_PRIO_POS) |
		(0 << IWL_BT_COEX_PRIO_TBL_SHARED_ANTENNA_POS)),
	((BT_COEX_PRIO_TBL_PRIO_HIGH << IWL_BT_COEX_PRIO_TBL_PRIO_POS) |
		(1 << IWL_BT_COEX_PRIO_TBL_SHARED_ANTENNA_POS)),
	((BT_COEX_PRIO_TBL_PRIO_BYPASS << IWL_BT_COEX_PRIO_TBL_PRIO_POS) |
		(0 << IWL_BT_COEX_PRIO_TBL_SHARED_ANTENNA_POS)),
	((BT_COEX_PRIO_TBL_PRIO_COEX_OFF << IWL_BT_COEX_PRIO_TBL_PRIO_POS) |
		(0 << IWL_BT_COEX_PRIO_TBL_SHARED_ANTENNA_POS)),
	((BT_COEX_PRIO_TBL_PRIO_COEX_ON << IWL_BT_COEX_PRIO_TBL_PRIO_POS) |
		(0 << IWL_BT_COEX_PRIO_TBL_SHARED_ANTENNA_POS)),
	0, 0, 0, 0, 0, 0, 0
};
/*
 * iwl_send_prio_tbl() - push the static BT priority table to the ucode.
 *
 * Best-effort: a send failure is logged but not propagated.
 */
void iwl_send_prio_tbl(struct iwl_trans *trans)
{
	struct iwl_bt_coex_prio_table_cmd prio_tbl_cmd;
	int err;

	memcpy(prio_tbl_cmd.prio_tbl, iwl_bt_prio_tbl,
	       sizeof(iwl_bt_prio_tbl));

	err = iwl_trans_send_cmd_pdu(trans,
				     REPLY_BT_COEX_PRIO_TABLE, CMD_SYNC,
				     sizeof(prio_tbl_cmd), &prio_tbl_cmd);
	if (err)
		IWL_ERR(trans, "failed to send BT prio tbl command\n");
}
/*
 * iwl_send_bt_env() - open or close a BT coexistence protection
 * envelope.
 * @action: IWL_BT_COEX_ENV_OPEN / _CLOSE
 * @type:   BT_COEX_PRIO_TBL_EVT_* event the envelope applies to
 *
 * Returns the synchronous send status; failures are also logged.
 */
int iwl_send_bt_env(struct iwl_trans *trans, u8 action, u8 type)
{
	struct iwl_bt_coex_prot_env_cmd env_cmd;
	int err;

	env_cmd.action = action;
	env_cmd.type = type;

	err = iwl_trans_send_cmd_pdu(trans,
				     REPLY_BT_COEX_PROT_ENV, CMD_SYNC,
				     sizeof(env_cmd), &env_cmd);
	if (err)
		IWL_ERR(trans, "failed to send BT env command\n");

	return err;
}
/*
 * iwl_alive_notify() - driver-side setup after the ucode reports ALIVE.
 *
 * Lazily creates the TX command slab cache, starts the transmit path,
 * resets per-context TX-rejected state, sends the WiMAX coex config,
 * the crystal calibration (unless disabled for this device), and
 * finally replays all stored calibration results to the runtime ucode.
 * Returns 0 or the first failing step's error.
 */
static int iwl_alive_notify(struct iwl_trans *trans)
{
	struct iwl_priv *priv = priv(trans);
	struct iwl_rxon_context *ctx;
	int ret;

	/* create the cache only once; it survives restarts */
	if (!priv->tx_cmd_pool)
		priv->tx_cmd_pool =
			kmem_cache_create("iwl_dev_cmd",
					  sizeof(struct iwl_device_cmd),
					  sizeof(void *), 0, NULL);

	if (!priv->tx_cmd_pool)
		return -ENOMEM;

	iwl_trans_tx_start(trans);
	for_each_context(priv, ctx)
		ctx->last_tx_rejected = false;

	ret = iwl_send_wimax_coex(trans);
	if (ret)
		return ret;

	if (!cfg(priv)->no_xtal_calib) {
		ret = iwl_set_Xtal_calib(trans);
		if (ret)
			return ret;
	}

	return iwl_send_calib_results(trans);
}
/**
 * iwl_verify_inst_sparse - verify runtime uCode image in card vs. host,
 *   using sample data 100 bytes apart. If these sample points are good,
 *   it's a pretty good bet that everything between them is good, too.
 *
 * Returns 0 when all sampled words match, -EIO on the first mismatch.
 */
static int iwl_verify_inst_sparse(struct iwl_bus *bus,
				  struct fw_desc *fw_desc)
{
	__le32 *image = (__le32 *)fw_desc->v_addr;
	u32 len = fw_desc->len;
	u32 val;
	u32 i;

	IWL_DEBUG_FW(bus, "ucode inst image size is %u\n", len);

	for (i = 0; i < len; i += 100, image += 100/sizeof(u32)) {
		/* read data comes through single port, auto-incr addr */
		/* NOTE: Use the debugless read so we don't flood kernel log
		 * if IWL_DL_IO is set */
		iwl_write_direct32(bus, HBUS_TARG_MEM_RADDR,
			i + IWLAGN_RTC_INST_LOWER_BOUND);
		val = iwl_read32(bus, HBUS_TARG_MEM_RDAT);
		if (val != le32_to_cpu(*image))
			return -EIO;
	}

	return 0;
}
/*
 * iwl_print_mismatch_inst() - dump INST-section mismatches for
 * debugging.
 *
 * Walks the whole instruction image word by word (relying on the
 * target memory port's address auto-increment) and logs up to 20
 * words that differ between SRAM and the host copy.
 */
static void iwl_print_mismatch_inst(struct iwl_bus *bus,
				    struct fw_desc *fw_desc)
{
	__le32 *image = (__le32 *)fw_desc->v_addr;
	u32 len = fw_desc->len;
	u32 val;
	u32 offs;
	int errors = 0;

	IWL_DEBUG_FW(bus, "ucode inst image size is %u\n", len);

	iwl_write_direct32(bus, HBUS_TARG_MEM_RADDR,
			   IWLAGN_RTC_INST_LOWER_BOUND);

	for (offs = 0;
	     offs < len && errors < 20;
	     offs += sizeof(u32), image++) {
		/* read data comes through single port, auto-incr addr */
		val = iwl_read32(bus, HBUS_TARG_MEM_RDAT);
		if (val != le32_to_cpu(*image)) {
			IWL_ERR(bus, "uCode INST section at "
				"offset 0x%x, is 0x%x, s/b 0x%x\n",
				offs, val, le32_to_cpu(*image));
			errors++;
		}
	}
}
/**
 * iwl_verify_ucode - determine which instruction image is in SRAM,
 *    and verify its contents
 *
 * Sparse-checks the INST section of the requested image against SRAM;
 * on mismatch logs a full word-by-word diff and returns -EIO.
 * Returns 0 when the image verifies, -EINVAL for an unknown type.
 */
static int iwl_verify_ucode(struct iwl_trans *trans,
			    enum iwl_ucode_type ucode_type)
{
	struct fw_img *img = iwl_get_ucode_image(trans, ucode_type);

	if (!img) {
		IWL_ERR(trans, "Invalid ucode requested (%d)\n", ucode_type);
		return -EINVAL;
	}

	if (!iwl_verify_inst_sparse(bus(trans), &img->code)) {
		IWL_DEBUG_FW(trans, "uCode is good in inst SRAM\n");
		return 0;
	}

	IWL_ERR(trans, "UCODE IMAGE IN INSTRUCTION SRAM NOT VALID!!\n");

	iwl_print_mismatch_inst(bus(trans), &img->code);
	return -EIO;
}
/* Result carried from the ALIVE notification handler to the waiter. */
struct iwl_alive_data {
	bool valid;	/* true when the ucode reported UCODE_VALID_OK */
	u8 subtype;	/* ucode version subtype from the ALIVE frame */
};
/*
 * iwl_alive_fn() - notification-wait callback for REPLY_ALIVE.
 *
 * Records the error/log event table pointers reported by the ucode
 * and copies validity/subtype into the caller's iwl_alive_data
 * (passed via @data).
 */
static void iwl_alive_fn(struct iwl_trans *trans,
			    struct iwl_rx_packet *pkt,
			    void *data)
{
	struct iwl_alive_data *alive_data = data;
	struct iwl_alive_resp *palive;

	palive = &pkt->u.alive_frame;

	IWL_DEBUG_FW(trans, "Alive ucode status 0x%08X revision "
		       "0x%01X 0x%01X\n",
		       palive->is_valid, palive->ver_type,
		       palive->ver_subtype);

	trans->shrd->device_pointers.error_event_table =
		le32_to_cpu(palive->error_event_table_ptr);
	trans->shrd->device_pointers.log_event_table =
		le32_to_cpu(palive->log_event_table_ptr);

	alive_data->subtype = palive->ver_subtype;
	alive_data->valid = palive->is_valid == UCODE_VALID_OK;
}
/* notification wait support */

/*
 * iwl_init_notification_wait() - register a wait entry for a
 * notification.
 * @cmd:     the RX command id to wait for
 * @fn:      optional callback run when the notification arrives
 * @fn_data: opaque pointer handed to @fn
 *
 * The entry is linked onto shrd->notif_waits under the notif_wait
 * lock; pair with iwl_wait_notification() or
 * iwl_remove_notification().
 */
void iwl_init_notification_wait(struct iwl_shared *shrd,
				struct iwl_notification_wait *wait_entry,
				u8 cmd,
				void (*fn)(struct iwl_trans *trans,
					   struct iwl_rx_packet *pkt,
					   void *data),
				void *fn_data)
{
	wait_entry->fn = fn;
	wait_entry->fn_data = fn_data;
	wait_entry->cmd = cmd;
	wait_entry->triggered = false;
	wait_entry->aborted = false;

	spin_lock_bh(&shrd->notif_wait_lock);
	list_add(&wait_entry->list, &shrd->notif_waits);
	spin_unlock_bh(&shrd->notif_wait_lock);
}
/*
 * iwl_wait_notification() - block until a registered notification
 * fires.
 *
 * Sleeps up to @timeout jiffies for the entry to be triggered or
 * aborted, then unlinks it. Returns 0 on trigger, -EIO when aborted,
 * -ETIMEDOUT when the wait expired.
 */
int iwl_wait_notification(struct iwl_shared *shrd,
			  struct iwl_notification_wait *wait_entry,
			  unsigned long timeout)
{
	int ret;

	ret = wait_event_timeout(shrd->notif_waitq,
				 wait_entry->triggered || wait_entry->aborted,
				 timeout);

	/* always unlink, whatever the outcome */
	spin_lock_bh(&shrd->notif_wait_lock);
	list_del(&wait_entry->list);
	spin_unlock_bh(&shrd->notif_wait_lock);

	if (wait_entry->aborted)
		return -EIO;

	/* return value is always >= 0 */
	if (ret <= 0)
		return -ETIMEDOUT;
	return 0;
}
/*
 * iwl_remove_notification() - unlink a wait entry without waiting.
 *
 * Used to cancel a wait registered with iwl_init_notification_wait()
 * when the operation it guarded has already failed.
 */
void iwl_remove_notification(struct iwl_shared *shrd,
			     struct iwl_notification_wait *wait_entry)
{
	spin_lock_bh(&shrd->notif_wait_lock);
	list_del(&wait_entry->list);
	spin_unlock_bh(&shrd->notif_wait_lock);
}
/*
 * iwl_abort_notification_waits() - abort every pending wait entry.
 *
 * Marks all entries aborted and wakes their waiters (each then
 * returns -EIO from iwl_wait_notification()). Uses the irqsave lock
 * variant, unlike the _bh users above — presumably because this can
 * be called from hard-irq/error context; confirm against callers.
 */
void iwl_abort_notification_waits(struct iwl_shared *shrd)
{
	unsigned long flags;
	struct iwl_notification_wait *wait_entry;

	spin_lock_irqsave(&shrd->notif_wait_lock, flags);
	list_for_each_entry(wait_entry, &shrd->notif_waits, list)
		wait_entry->aborted = true;
	spin_unlock_irqrestore(&shrd->notif_wait_lock, flags);

	wake_up_all(&shrd->notif_waitq);
}
#define UCODE_ALIVE_TIMEOUT HZ
#define UCODE_CALIB_TIMEOUT (2*HZ)
/*
 * iwl_load_ucode_wait_alive() - load a ucode image and wait for ALIVE.
 *
 * Starts the device, registers an ALIVE wait, downloads the image,
 * kicks the NIC, then waits for and validates the ALIVE notification.
 * Verifies SRAM contents (skipped for WoWLAN to keep suspend fast)
 * and completes driver-side ALIVE processing.
 *
 * On any failure, shrd->ucode_type is rolled back to the previously
 * loaded type before returning the error.
 */
int iwl_load_ucode_wait_alive(struct iwl_trans *trans,
				 enum iwl_ucode_type ucode_type)
{
	struct iwl_notification_wait alive_wait;
	struct iwl_alive_data alive_data;
	int ret;
	enum iwl_ucode_type old_type;

	ret = iwl_trans_start_device(trans);
	if (ret)
		return ret;

	iwl_init_notification_wait(trans->shrd, &alive_wait, REPLY_ALIVE,
				      iwl_alive_fn, &alive_data);

	old_type = trans->shrd->ucode_type;
	trans->shrd->ucode_type = ucode_type;

	ret = iwl_load_given_ucode(trans, ucode_type);
	if (ret) {
		trans->shrd->ucode_type = old_type;
		/* remove the wait explicitly — we never reach the wait call */
		iwl_remove_notification(trans->shrd, &alive_wait);
		return ret;
	}

	iwl_trans_kick_nic(trans);

	/*
	 * Some things may run in the background now, but we
	 * just wait for the ALIVE notification here.
	 */
	ret = iwl_wait_notification(trans->shrd, &alive_wait,
					UCODE_ALIVE_TIMEOUT);
	if (ret) {
		trans->shrd->ucode_type = old_type;
		return ret;
	}

	if (!alive_data.valid) {
		IWL_ERR(trans, "Loaded ucode is not valid!\n");
		trans->shrd->ucode_type = old_type;
		return -EIO;
	}

	/*
	 * This step takes a long time (60-80ms!!) and
	 * WoWLAN image should be loaded quickly, so
	 * skip it for WoWLAN.
	 */
	if (ucode_type != IWL_UCODE_WOWLAN) {
		ret = iwl_verify_ucode(trans, ucode_type);
		if (ret) {
			trans->shrd->ucode_type = old_type;
			return ret;
		}

		/* delay a bit to give rfkill time to run */
		msleep(5);
	}

	ret = iwl_alive_notify(trans);
	if (ret) {
		IWL_WARN(trans,
			"Could not complete ALIVE transition: %d\n", ret);
		trans->shrd->ucode_type = old_type;
		return ret;
	}

	return 0;
}
/*
 * iwl_run_init_ucode() - run the init ucode once to gather
 * calibrations.
 *
 * No-op when there is no init image or some ucode is already loaded.
 * Otherwise loads the init image, starts calibration, and waits for
 * the calibration-complete notification. The device is always stopped
 * on exit — the caller is expected to load the runtime image next.
 * Must be called with shrd->mutex held.
 */
int iwl_run_init_ucode(struct iwl_trans *trans)
{
	struct iwl_notification_wait calib_wait;
	int ret;

	lockdep_assert_held(&trans->shrd->mutex);

	/* No init ucode required? Curious, but maybe ok */
	if (!trans->ucode_init.code.len)
		return 0;

	if (trans->shrd->ucode_type != IWL_UCODE_NONE)
		return 0;

	iwl_init_notification_wait(trans->shrd, &calib_wait,
				      CALIBRATION_COMPLETE_NOTIFICATION,
				      NULL, NULL);

	/* Will also start the device */
	ret = iwl_load_ucode_wait_alive(trans, IWL_UCODE_INIT);
	if (ret)
		goto error;

	ret = iwl_init_alive_start(trans);
	if (ret)
		goto error;

	/*
	 * Some things may run in the background now, but we
	 * just wait for the calibration complete notification.
	 */
	ret = iwl_wait_notification(trans->shrd, &calib_wait,
					UCODE_CALIB_TIMEOUT);

	goto out;

 error:
	iwl_remove_notification(trans->shrd, &calib_wait);
 out:
	/* Whatever happened, stop the device */
	iwl_trans_stop_device(trans);
	return ret;
}
| gpl-2.0 |
ninjablocks/kernel-VAR-SOM-AMxx | drivers/staging/iio/light/tsl2x7x_core.c | 137 | 53474 | /*
* Device driver for monitoring ambient light intensity in (lux)
* and proximity detection (prox) within the TAOS TSL2X7X family of devices.
*
* Copyright (c) 2012, TAOS Corporation.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
#include <linux/kernel.h>
#include <linux/i2c.h>
#include <linux/errno.h>
#include <linux/delay.h>
#include <linux/mutex.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/iio/events.h>
#include <linux/iio/iio.h>
#include <linux/iio/sysfs.h>
#include "tsl2x7x.h"
/* Cal defs*/
#define PROX_STAT_CAL 0
#define PROX_STAT_SAMP 1
#define MAX_SAMPLES_CAL 200
/* TSL2X7X Device ID */
#define TRITON_ID 0x00
#define SWORDFISH_ID 0x30
#define HALIBUT_ID 0x20
/* Lux calculation constants */
#define TSL2X7X_LUX_CALC_OVER_FLOW 65535
/* TAOS Register definitions - note:
* depending on device, some of these register are not used and the
* register address is benign.
*/
/* 2X7X register offsets */
#define TSL2X7X_MAX_CONFIG_REG 16
/* Device Registers and Masks */
#define TSL2X7X_CNTRL 0x00
#define TSL2X7X_ALS_TIME 0X01
#define TSL2X7X_PRX_TIME 0x02
#define TSL2X7X_WAIT_TIME 0x03
#define TSL2X7X_ALS_MINTHRESHLO 0X04
#define TSL2X7X_ALS_MINTHRESHHI 0X05
#define TSL2X7X_ALS_MAXTHRESHLO 0X06
#define TSL2X7X_ALS_MAXTHRESHHI 0X07
#define TSL2X7X_PRX_MINTHRESHLO 0X08
#define TSL2X7X_PRX_MINTHRESHHI 0X09
#define TSL2X7X_PRX_MAXTHRESHLO 0X0A
#define TSL2X7X_PRX_MAXTHRESHHI 0X0B
#define TSL2X7X_PERSISTENCE 0x0C
#define TSL2X7X_PRX_CONFIG 0x0D
#define TSL2X7X_PRX_COUNT 0x0E
#define TSL2X7X_GAIN 0x0F
#define TSL2X7X_NOTUSED 0x10
#define TSL2X7X_REVID 0x11
#define TSL2X7X_CHIPID 0x12
#define TSL2X7X_STATUS 0x13
#define TSL2X7X_ALS_CHAN0LO 0x14
#define TSL2X7X_ALS_CHAN0HI 0x15
#define TSL2X7X_ALS_CHAN1LO 0x16
#define TSL2X7X_ALS_CHAN1HI 0x17
#define TSL2X7X_PRX_LO 0x18
#define TSL2X7X_PRX_HI 0x19
/* tsl2X7X cmd reg masks */
#define TSL2X7X_CMD_REG 0x80
#define TSL2X7X_CMD_SPL_FN 0x60
#define TSL2X7X_CMD_PROX_INT_CLR 0X05
#define TSL2X7X_CMD_ALS_INT_CLR 0x06
#define TSL2X7X_CMD_PROXALS_INT_CLR 0X07
/* tsl2X7X cntrl reg masks */
#define TSL2X7X_CNTL_ADC_ENBL 0x02
#define TSL2X7X_CNTL_PWR_ON 0x01
/* tsl2X7X status reg masks */
#define TSL2X7X_STA_ADC_VALID 0x01
#define TSL2X7X_STA_PRX_VALID 0x02
#define TSL2X7X_STA_ADC_PRX_VALID (TSL2X7X_STA_ADC_VALID |\
TSL2X7X_STA_PRX_VALID)
#define TSL2X7X_STA_ALS_INTR 0x10
#define TSL2X7X_STA_PRX_INTR 0x20
/* tsl2X7X cntrl reg masks */
#define TSL2X7X_CNTL_REG_CLEAR 0x00
#define TSL2X7X_CNTL_PROX_INT_ENBL 0X20
#define TSL2X7X_CNTL_ALS_INT_ENBL 0X10
#define TSL2X7X_CNTL_WAIT_TMR_ENBL 0X08
#define TSL2X7X_CNTL_PROX_DET_ENBL 0X04
#define TSL2X7X_CNTL_PWRON 0x01
#define TSL2X7X_CNTL_ALSPON_ENBL 0x03
#define TSL2X7X_CNTL_INTALSPON_ENBL 0x13
#define TSL2X7X_CNTL_PROXPON_ENBL 0x0F
#define TSL2X7X_CNTL_INTPROXPON_ENBL 0x2F
/*Prox diode to use */
#define TSL2X7X_DIODE0 0x10
#define TSL2X7X_DIODE1 0x20
#define TSL2X7X_DIODE_BOTH 0x30
/* LED Power */
#define TSL2X7X_mA100 0x00
#define TSL2X7X_mA50 0x40
#define TSL2X7X_mA25 0x80
#define TSL2X7X_mA13 0xD0
#define TSL2X7X_MAX_TIMER_CNT (0xFF)
#define TSL2X7X_MIN_ITIME 3
/* TAOS txx2x7x Device family members */
enum {
tsl2571,
tsl2671,
tmd2671,
tsl2771,
tmd2771,
tsl2572,
tsl2672,
tmd2672,
tsl2772,
tmd2772
};
enum {
TSL2X7X_CHIP_UNKNOWN = 0,
TSL2X7X_CHIP_WORKING = 1,
TSL2X7X_CHIP_SUSPENDED = 2
};
/* Decomposed sysfs decimal input: integer and fractional parts. */
struct tsl2x7x_parse_result {
	int integer;
	int fract;
};

/* Per-device data */
struct tsl2x7x_als_info {
	u16 als_ch0;	/* raw channel-0 (visible + IR) count */
	u16 als_ch1;	/* raw channel-1 (IR only) count */
	u16 lux;	/* last computed lux value */
};

/* Statistics gathered during proximity calibration. */
struct tsl2x7x_prox_stat {
	int min;
	int max;
	int mean;
	unsigned long stddev;
};

/* Per-variant channel layout and IIO callbacks. */
struct tsl2x7x_chip_info {
	int chan_table_elements;
	struct iio_chan_spec channel[4];
	const struct iio_info *info;
};

/* Driver state, stored as iio_priv() of the IIO device. */
struct tsl2X7X_chip {
	kernel_ulong_t id;		/* device enum (tsl2571..tmd2772) */
	struct mutex prox_mutex;	/* serializes proximity reads */
	struct mutex als_mutex;		/* serializes ALS reads */
	struct i2c_client *client;
	u16 prox_data;			/* last raw proximity reading */
	struct tsl2x7x_als_info als_cur_info;
	struct tsl2x7x_settings tsl2x7x_settings;
	struct tsl2X7X_platform_data *pdata;
	int als_time_scale;		/* integration-time scale factor */
	int als_saturation;		/* ADC count treated as saturated */
	int tsl2x7x_chip_status;	/* TSL2X7X_CHIP_* state */
	u8 tsl2x7x_config[TSL2X7X_MAX_CONFIG_REG]; /* register shadow */
	const struct tsl2x7x_chip_info *chip_info;
	const struct iio_info *info;
	s64 event_timestamp;
	/* This structure is intentionally large to accommodate
	 * updates via sysfs. */
	/* Sized to 9 = max 8 segments + 1 termination segment */
	struct tsl2x7x_lux tsl2x7x_device_lux[TSL2X7X_MAX_LUX_TABLE_SIZE];
};
/* Different devices require different coefficents */
/* Each row appears to be { ratio, ch0 multiplier, ch1 multiplier } as
 * consumed by tsl2x7x_get_lux(); a zero row terminates the table —
 * field order is declared in tsl2x7x.h, confirm there. */
static const struct tsl2x7x_lux tsl2x71_lux_table[] = {
	{ 14461,   611,   1211 },
	{ 18540,   352,    623 },
	{     0,     0,      0 },
};

static const struct tsl2x7x_lux tmd2x71_lux_table[] = {
	{ 11635,   115,    256 },
	{ 15536,    87,    179 },
	{     0,     0,      0 },
};

static const struct tsl2x7x_lux tsl2x72_lux_table[] = {
	{ 14013,   466,    917 },
	{ 18222,   310,    552 },
	{     0,     0,      0 },
};

static const struct tsl2x7x_lux tmd2x72_lux_table[] = {
	{ 13218,   130,    262 },
	{ 17592,    92,    169 },
	{     0,     0,      0 },
};

/* Default lux table per device id; indexed by the tsl2571..tmd2772 enum. */
static const struct tsl2x7x_lux *tsl2x7x_default_lux_table_group[] = {
	[tsl2571] =	tsl2x71_lux_table,
	[tsl2671] =	tsl2x71_lux_table,
	[tmd2671] =	tmd2x71_lux_table,
	[tsl2771] =	tsl2x71_lux_table,
	[tmd2771] =	tmd2x71_lux_table,
	[tsl2572] =	tsl2x72_lux_table,
	[tsl2672] =	tsl2x72_lux_table,
	[tmd2672] =	tmd2x72_lux_table,
	[tsl2772] =	tsl2x72_lux_table,
	[tmd2772] =	tmd2x72_lux_table,
};
/* Nominal operating parameters used when the platform supplies none;
 * see tsl2x7x_defaults(). */
static const struct tsl2x7x_settings tsl2x7x_default_settings = {
	.als_time = 219, /* 101 ms */
	.als_gain = 0,
	.prx_time = 254, /* 5.4 ms */
	.prox_gain = 1,
	.wait_time = 245,
	.prox_config = 0,
	.als_gain_trim = 1000,
	.als_cal_target = 150,
	.als_thresh_low = 200,
	.als_thresh_high = 256,
	.persistence = 255,
	.interrupts_en = 0,
	.prox_thres_low  = 0,
	.prox_thres_high = 512,
	.prox_max_samples_cal = 30,
	.prox_pulse_count = 8
};
/* ALS gain multipliers, indexed by the 2-bit als_gain setting. */
static const s16 tsl2X7X_als_gainadj[] = {
	1,
	8,
	16,
	120
};

/* Proximity gain multipliers, indexed by the 2-bit prox_gain setting. */
static const s16 tsl2X7X_prx_gainadj[] = {
	1,
	2,
	4,
	8
};

/* Channel variations */
enum {
	ALS,
	PRX,
	ALSPRX,
	PRX2,
	ALSPRX2,
};

/* Channel configuration per device id (same index order as the
 * tsl2571..tmd2772 enum). */
static const u8 device_channel_config[] = {
	ALS,
	PRX,
	PRX,
	ALSPRX,
	ALSPRX,
	ALS,
	PRX2,
	PRX2,
	ALSPRX2,
	ALSPRX2
};
/**
 * tsl2x7x_i2c_read() - Read a byte from a register.
 * @client: i2c client
 * @reg: device register to read from
 * @val: pointer to location to store register contents.
 *
 * Selects @reg through the chip's command register, then reads one
 * byte. Returns a negative errno on failure, otherwise the
 * (non-negative) SMBus read result; *@val is only written on success.
 */
static int
tsl2x7x_i2c_read(struct i2c_client *client, u8 reg, u8 *val)
{
	int ret;

	/* select register to write */
	ret = i2c_smbus_write_byte(client, TSL2X7X_CMD_REG | reg);
	if (ret < 0) {
		dev_err(&client->dev, "%s: failed to write register %x\n"
				, __func__, reg);
		return ret;
	}

	/* read the data */
	ret = i2c_smbus_read_byte(client);
	if (ret < 0) {
		dev_err(&client->dev, "%s: failed to read register %x\n"
				, __func__, reg);
		return ret;
	}

	*val = (u8)ret;
	return ret;
}
/**
 * tsl2x7x_get_lux() - Reads and calculates current lux value.
 * @indio_dev: pointer to IIO device
 *
 * The raw ch0 and ch1 values of the ambient light sensed in the last
 * integration cycle are read from the device.
 * Time scale factor array values are adjusted based on the integration time.
 * The raw values are multiplied by a scale factor, and device gain is obtained
 * using gain index. Limit checks are done next, then the ratio of a multiple
 * of ch1 value, to the ch0 value, is calculated. Array tsl2x7x_device_lux[]
 * is then scanned to find the first ratio value that is just above the ratio
 * we just calculated. The ch0 and ch1 multiplier constants in the array are
 * then used along with the time scale factor array values, to calculate the
 * lux.
 *
 * Returns the computed lux, or the last cached lux when the device is
 * busy or has no fresh data; returns a negative errno on I2C failure.
 */
static int tsl2x7x_get_lux(struct iio_dev *indio_dev)
{
	u16 ch0, ch1; /* separated ch0/ch1 data from device */
	u32 lux; /* raw lux calculated from device data */
	u64 lux64;
	u32 ratio;
	u8 buf[4];
	struct tsl2x7x_lux *p;
	struct tsl2X7X_chip *chip = iio_priv(indio_dev);
	int i, ret;
	u32 ch0lux = 0;
	u32 ch1lux = 0;

	/* opportunistic: never block — fall back to the cached value */
	if (mutex_trylock(&chip->als_mutex) == 0)
		return chip->als_cur_info.lux; /* busy, so return LAST VALUE */

	if (chip->tsl2x7x_chip_status != TSL2X7X_CHIP_WORKING) {
		/* device is not enabled */
		dev_err(&chip->client->dev, "%s: device is not enabled\n",
				__func__);
		ret = -EBUSY;
		goto out_unlock;
	}

	ret = tsl2x7x_i2c_read(chip->client,
		(TSL2X7X_CMD_REG | TSL2X7X_STATUS), &buf[0]);
	if (ret < 0) {
		dev_err(&chip->client->dev,
			"%s: Failed to read STATUS Reg\n", __func__);
		goto out_unlock;
	}
	/* is data new & valid */
	if (!(buf[0] & TSL2X7X_STA_ADC_VALID)) {
		dev_err(&chip->client->dev,
			"%s: data not valid yet\n", __func__);
		ret = chip->als_cur_info.lux; /* return LAST VALUE */
		goto out_unlock;
	}

	/* read the four ADC data bytes: ch0 lo/hi, ch1 lo/hi */
	for (i = 0; i < 4; i++) {
		ret = tsl2x7x_i2c_read(chip->client,
			(TSL2X7X_CMD_REG | (TSL2X7X_ALS_CHAN0LO + i)),
			&buf[i]);
		if (ret < 0) {
			dev_err(&chip->client->dev,
				"%s: failed to read. err=%x\n", __func__, ret);
			goto out_unlock;
		}
	}

	/* clear any existing interrupt status */
	ret = i2c_smbus_write_byte(chip->client,
		(TSL2X7X_CMD_REG |
				TSL2X7X_CMD_SPL_FN |
				TSL2X7X_CMD_ALS_INT_CLR));
	if (ret < 0) {
		dev_err(&chip->client->dev,
			"%s: i2c_write_command failed - err = %d\n",
				__func__, ret);
		goto out_unlock; /* have no data, so return failure */
	}

	/* extract ALS/lux data */
	ch0 = le16_to_cpup((const __le16 *)&buf[0]);
	ch1 = le16_to_cpup((const __le16 *)&buf[2]);

	chip->als_cur_info.als_ch0 = ch0;
	chip->als_cur_info.als_ch1 = ch1;

	if ((ch0 >= chip->als_saturation) || (ch1 >= chip->als_saturation)) {
		lux = TSL2X7X_LUX_CALC_OVER_FLOW;
		goto return_max;
	}

	if (ch0 == 0) {
		/* have no data, so return LAST VALUE */
		ret = chip->als_cur_info.lux;
		goto out_unlock;
	}
	/* calculate ratio (ch1/ch0 scaled by 2^15) */
	ratio = (ch1 << 15) / ch0;
	/* convert to unscaled lux using the pointer to the table */
	p = (struct tsl2x7x_lux *) chip->tsl2x7x_device_lux;
	while (p->ratio != 0 && p->ratio < ratio)
		p++;

	if (p->ratio == 0) {
		lux = 0;
	} else {
		/* scale each channel by its table coefficient, divided
		 * by the active ALS gain */
		ch0lux = DIV_ROUND_UP((ch0 * p->ch0),
			tsl2X7X_als_gainadj[chip->tsl2x7x_settings.als_gain]);
		ch1lux = DIV_ROUND_UP((ch1 * p->ch1),
			tsl2X7X_als_gainadj[chip->tsl2x7x_settings.als_gain]);
		lux = ch0lux - ch1lux;
	}

	/* note: lux is 31 bit max at this point */
	if (ch1lux > ch0lux) {
		dev_dbg(&chip->client->dev, "ch1lux > ch0lux-return last value\n");
		ret = chip->als_cur_info.lux;
		goto out_unlock;
	}

	/* adjust for active time scale */
	if (chip->als_time_scale == 0)
		lux = 0;
	else
		lux = (lux + (chip->als_time_scale >> 1)) /
			chip->als_time_scale;

	/* adjust for active gain scale
	 * The tsl2x7x_device_lux tables have a factor of 256 built-in.
	 * User-specified gain provides a multiplier.
	 * Apply user-specified gain before shifting right to retain precision.
	 * Use 64 bits to avoid overflow on multiplication.
	 * Then go back to 32 bits before division to avoid using div_u64().
	 */
	lux64 = lux;
	lux64 = lux64 * chip->tsl2x7x_settings.als_gain_trim;
	lux64 >>= 8;
	lux = lux64;
	lux = (lux + 500) / 1000;

	if (lux > TSL2X7X_LUX_CALC_OVER_FLOW) /* check for overflow */
		lux = TSL2X7X_LUX_CALC_OVER_FLOW;

	/* Update the structure with the latest lux. */
return_max:
	chip->als_cur_info.lux = lux;
	ret = lux;

out_unlock:
	mutex_unlock(&chip->als_mutex);

	return ret;
}
/**
 * tsl2x7x_get_prox() - Reads proximity data registers and updates
 *                      chip->prox_data.
 *
 * @indio_dev: pointer to IIO device
 *
 * Returns -EBUSY if the prox mutex is contended, otherwise the
 * (possibly stale) chip->prox_data value.
 * NOTE(review): on I2C error or invalid status the error paths jump to
 * prox_poll_err but still return the previous prox_data rather than an
 * error code — callers cannot distinguish a failed poll from a reading.
 */
static int tsl2x7x_get_prox(struct iio_dev *indio_dev)
{
	int i;
	int ret;
	u8 status;
	u8 chdata[2];
	struct tsl2X7X_chip *chip = iio_priv(indio_dev);

	if (mutex_trylock(&chip->prox_mutex) == 0) {
		dev_err(&chip->client->dev,
			"%s: Can't get prox mutex\n", __func__);
		return -EBUSY;
	}

	ret = tsl2x7x_i2c_read(chip->client,
		(TSL2X7X_CMD_REG | TSL2X7X_STATUS), &status);
	if (ret < 0) {
		dev_err(&chip->client->dev,
			"%s: i2c err=%d\n", __func__, ret);
		goto prox_poll_err;
	}

	/* the two device generations report prox validity differently */
	switch (chip->id) {
	case tsl2571:
	case tsl2671:
	case tmd2671:
	case tsl2771:
	case tmd2771:
		if (!(status & TSL2X7X_STA_ADC_VALID))
			goto prox_poll_err;
		break;
	case tsl2572:
	case tsl2672:
	case tmd2672:
	case tsl2772:
	case tmd2772:
		if (!(status & TSL2X7X_STA_PRX_VALID))
			goto prox_poll_err;
		break;
	}

	/* read the 16-bit prox count, low byte first */
	for (i = 0; i < 2; i++) {
		ret = tsl2x7x_i2c_read(chip->client,
			(TSL2X7X_CMD_REG |
					(TSL2X7X_PRX_LO + i)), &chdata[i]);
		if (ret < 0)
			goto prox_poll_err;
	}

	chip->prox_data =
			le16_to_cpup((const __le16 *)&chdata[0]);

prox_poll_err:

	mutex_unlock(&chip->prox_mutex);

	return chip->prox_data;
}
/**
 * tsl2x7x_defaults() - Populates the device nominal operating parameters
 *                      with those provided by a 'platform' data struct or
 *                      with prefined defaults.
 *
 * @chip: pointer to device structure.
 *
 * Settings come from platform data when provided, else from
 * tsl2x7x_default_settings; the lux table likewise comes from platform
 * data (first-entry ratio != 0 marks it as populated) or from the
 * per-device default table indexed by chip->id.
 */
static void tsl2x7x_defaults(struct tsl2X7X_chip *chip)
{
	/* If Operational settings defined elsewhere.. */
	if (chip->pdata && chip->pdata->platform_default_settings)
		memcpy(&(chip->tsl2x7x_settings),
			chip->pdata->platform_default_settings,
			sizeof(tsl2x7x_default_settings));
	else
		memcpy(&(chip->tsl2x7x_settings),
			&tsl2x7x_default_settings,
			sizeof(tsl2x7x_default_settings));

	/* Load up the proper lux table. */
	if (chip->pdata && chip->pdata->platform_lux_table[0].ratio != 0)
		memcpy(chip->tsl2x7x_device_lux,
			chip->pdata->platform_lux_table,
			sizeof(chip->pdata->platform_lux_table));
	else
		memcpy(chip->tsl2x7x_device_lux,
		(struct tsl2x7x_lux *)tsl2x7x_default_lux_table_group[chip->id],
				MAX_DEFAULT_TABLE_BYTES);
}
/**
* tsl2x7x_als_calibrate() - Obtain single reading and calculate
* the als_gain_trim.
*
* @indio_dev: pointer to IIO device
*/
static int tsl2x7x_als_calibrate(struct iio_dev *indio_dev)
{
struct tsl2X7X_chip *chip = iio_priv(indio_dev);
u8 reg_val;
int gain_trim_val;
int ret;
int lux_val;
ret = i2c_smbus_write_byte(chip->client,
(TSL2X7X_CMD_REG | TSL2X7X_CNTRL));
if (ret < 0) {
dev_err(&chip->client->dev,
"%s: failed to write CNTRL register, ret=%d\n",
__func__, ret);
return ret;
}
reg_val = i2c_smbus_read_byte(chip->client);
if ((reg_val & (TSL2X7X_CNTL_ADC_ENBL | TSL2X7X_CNTL_PWR_ON))
!= (TSL2X7X_CNTL_ADC_ENBL | TSL2X7X_CNTL_PWR_ON)) {
dev_err(&chip->client->dev,
"%s: failed: ADC not enabled\n", __func__);
return -1;
}
ret = i2c_smbus_write_byte(chip->client,
(TSL2X7X_CMD_REG | TSL2X7X_CNTRL));
if (ret < 0) {
dev_err(&chip->client->dev,
"%s: failed to write ctrl reg: ret=%d\n",
__func__, ret);
return ret;
}
reg_val = i2c_smbus_read_byte(chip->client);
if ((reg_val & TSL2X7X_STA_ADC_VALID) != TSL2X7X_STA_ADC_VALID) {
dev_err(&chip->client->dev,
"%s: failed: STATUS - ADC not valid.\n", __func__);
return -ENODATA;
}
lux_val = tsl2x7x_get_lux(indio_dev);
if (lux_val < 0) {
dev_err(&chip->client->dev,
"%s: failed to get lux\n", __func__);
return lux_val;
}
gain_trim_val = (((chip->tsl2x7x_settings.als_cal_target)
* chip->tsl2x7x_settings.als_gain_trim) / lux_val);
if ((gain_trim_val < 250) || (gain_trim_val > 4000))
return -ERANGE;
chip->tsl2x7x_settings.als_gain_trim = gain_trim_val;
dev_info(&chip->client->dev,
"%s als_calibrate completed\n", chip->client->name);
return (int) gain_trim_val;
}
/*
 * tsl2x7x_chip_on() - program the shadow registers and power the chip.
 *
 * Builds the register shadow from tsl2x7x_settings, refuses to run if
 * already enabled, computes the ALS integration time / saturation /
 * time-scale values, then performs the required power-on sequence:
 * power on, write all config registers, settle, enable ADC (+prox),
 * and finally configure and clear interrupts when requested.
 * Returns 0 or a negative I2C error.
 */
static int tsl2x7x_chip_on(struct iio_dev *indio_dev)
{
	int i;
	int ret = 0;
	u8 *dev_reg;
	u8 utmp;
	int als_count;
	int als_time;
	struct tsl2X7X_chip *chip = iio_priv(indio_dev);
	u8 reg_val = 0;

	if (chip->pdata && chip->pdata->power_on)
		chip->pdata->power_on(indio_dev);

	/* Non calculated parameters */
	chip->tsl2x7x_config[TSL2X7X_PRX_TIME] =
			chip->tsl2x7x_settings.prx_time;
	chip->tsl2x7x_config[TSL2X7X_WAIT_TIME] =
			chip->tsl2x7x_settings.wait_time;
	chip->tsl2x7x_config[TSL2X7X_PRX_CONFIG] =
			chip->tsl2x7x_settings.prox_config;

	/* split each 16-bit threshold into lo/hi register bytes */
	chip->tsl2x7x_config[TSL2X7X_ALS_MINTHRESHLO] =
		(chip->tsl2x7x_settings.als_thresh_low) & 0xFF;
	chip->tsl2x7x_config[TSL2X7X_ALS_MINTHRESHHI] =
		(chip->tsl2x7x_settings.als_thresh_low >> 8) & 0xFF;
	chip->tsl2x7x_config[TSL2X7X_ALS_MAXTHRESHLO] =
		(chip->tsl2x7x_settings.als_thresh_high) & 0xFF;
	chip->tsl2x7x_config[TSL2X7X_ALS_MAXTHRESHHI] =
		(chip->tsl2x7x_settings.als_thresh_high >> 8) & 0xFF;
	chip->tsl2x7x_config[TSL2X7X_PERSISTENCE] =
		chip->tsl2x7x_settings.persistence;

	chip->tsl2x7x_config[TSL2X7X_PRX_COUNT] =
			chip->tsl2x7x_settings.prox_pulse_count;
	chip->tsl2x7x_config[TSL2X7X_PRX_MINTHRESHLO] =
	(chip->tsl2x7x_settings.prox_thres_low) & 0xFF;
	chip->tsl2x7x_config[TSL2X7X_PRX_MINTHRESHHI] =
	(chip->tsl2x7x_settings.prox_thres_low >> 8) & 0xFF;
	chip->tsl2x7x_config[TSL2X7X_PRX_MAXTHRESHLO] =
	(chip->tsl2x7x_settings.prox_thres_high) & 0xFF;
	chip->tsl2x7x_config[TSL2X7X_PRX_MAXTHRESHHI] =
	(chip->tsl2x7x_settings.prox_thres_high >> 8) & 0xFF;

	/* and make sure we're not already on */
	if (chip->tsl2x7x_chip_status == TSL2X7X_CHIP_WORKING) {
		/* if forcing a register update - turn off, then on */
		dev_info(&chip->client->dev, "device is already enabled\n");
		return -EINVAL;
	}

	/* determine als integration register (2.7 ms per cycle) */
	als_count = (chip->tsl2x7x_settings.als_time * 100 + 135) / 270;
	if (als_count == 0)
		als_count = 1; /* ensure at least one cycle */

	/* convert back to time (encompasses overrides) */
	als_time = (als_count * 27 + 5) / 10;
	chip->tsl2x7x_config[TSL2X7X_ALS_TIME] = 256 - als_count;

	/* Set the gain based on tsl2x7x_settings struct */
	chip->tsl2x7x_config[TSL2X7X_GAIN] =
		(chip->tsl2x7x_settings.als_gain |
			(TSL2X7X_mA100 | TSL2X7X_DIODE1)
			| ((chip->tsl2x7x_settings.prox_gain) << 2));

	/* set chip struct re scaling and saturation */
	chip->als_saturation = als_count * 922; /* 90% of full scale */
	chip->als_time_scale = (als_time + 25) / 50;

	/* TSL2X7X Specific power-on / adc enable sequence
	 * Power on the device 1st. */
	utmp = TSL2X7X_CNTL_PWR_ON;
	ret = i2c_smbus_write_byte_data(chip->client,
		TSL2X7X_CMD_REG | TSL2X7X_CNTRL, utmp);
	if (ret < 0) {
		dev_err(&chip->client->dev,
			"%s: failed on CNTRL reg.\n", __func__);
		return ret;
	}

	/* Use the following shadow copy for our delay before enabling ADC.
	 * Write all the registers. */
	for (i = 0, dev_reg = chip->tsl2x7x_config;
			i < TSL2X7X_MAX_CONFIG_REG; i++) {
		ret = i2c_smbus_write_byte_data(chip->client,
				TSL2X7X_CMD_REG + i, *dev_reg++);
		if (ret < 0) {
			dev_err(&chip->client->dev,
				"%s: failed on write to reg %d.\n", __func__, i);
			return ret;
		}
	}

	mdelay(3);	/* Power-on settling time */

	/* NOW enable the ADC
	 * initialize the desired mode of operation */
	utmp = TSL2X7X_CNTL_PWR_ON |
			TSL2X7X_CNTL_ADC_ENBL |
			TSL2X7X_CNTL_PROX_DET_ENBL;
	ret = i2c_smbus_write_byte_data(chip->client,
			TSL2X7X_CMD_REG | TSL2X7X_CNTRL, utmp);
	if (ret < 0) {
		dev_err(&chip->client->dev,
			"%s: failed on 2nd CTRL reg.\n", __func__);
		return ret;
	}

	chip->tsl2x7x_chip_status = TSL2X7X_CHIP_WORKING;

	if (chip->tsl2x7x_settings.interrupts_en != 0) {
		dev_info(&chip->client->dev, "Setting Up Interrupt(s)\n");

		reg_val = TSL2X7X_CNTL_PWR_ON | TSL2X7X_CNTL_ADC_ENBL;
		/* 0x20/0x30 means proximity interrupts are enabled too */
		if ((chip->tsl2x7x_settings.interrupts_en == 0x20) ||
			(chip->tsl2x7x_settings.interrupts_en == 0x30))
			reg_val |= TSL2X7X_CNTL_PROX_DET_ENBL;

		reg_val |= chip->tsl2x7x_settings.interrupts_en;
		ret = i2c_smbus_write_byte_data(chip->client,
			(TSL2X7X_CMD_REG | TSL2X7X_CNTRL), reg_val);
		if (ret < 0)
			dev_err(&chip->client->dev,
				"%s: failed in tsl2x7x_IOCTL_INT_SET.\n",
				__func__);

		/* Clear out any initial interrupts  */
		ret = i2c_smbus_write_byte(chip->client,
			TSL2X7X_CMD_REG | TSL2X7X_CMD_SPL_FN |
			TSL2X7X_CMD_PROXALS_INT_CLR);
		if (ret < 0) {
			dev_err(&chip->client->dev,
				"%s: Failed to clear Int status\n",
				__func__);
		return ret;
		}
	}

	return ret;
}
/*
 * Power the chip down: mark it suspended, zero the control register and
 * invoke the optional platform power-off hook.  Returns the status of
 * the i2c control-register write.
 */
static int tsl2x7x_chip_off(struct iio_dev *indio_dev)
{
	struct tsl2X7X_chip *chip = iio_priv(indio_dev);
	int status;

	/* Record the new state before touching the hardware. */
	chip->tsl2x7x_chip_status = TSL2X7X_CHIP_SUSPENDED;

	status = i2c_smbus_write_byte_data(chip->client,
					   TSL2X7X_CMD_REG | TSL2X7X_CNTRL,
					   0x00);

	if (chip->pdata && chip->pdata->power_off)
		chip->pdata->power_off(chip->client);

	return status;
}
/**
 * tsl2x7x_invoke_change - cycle the device so updated settings take effect
 * @indio_dev: pointer to IIO device
 *
 * Obtain and lock both ALS and PROX resources, determine and save device
 * state (On/Off), cycle the device to implement the updated parameters,
 * put the device back into its prior state, and unlock the resources.
 *
 * Returns 0 on success or a negative errno from the chip on/off helpers.
 * (Previously those errors were silently discarded and 0 was always
 * returned.)
 */
static
int tsl2x7x_invoke_change(struct iio_dev *indio_dev)
{
	struct tsl2X7X_chip *chip = iio_priv(indio_dev);
	int device_status = chip->tsl2x7x_chip_status;
	int ret = 0;

	mutex_lock(&chip->als_mutex);
	mutex_lock(&chip->prox_mutex);

	if (device_status == TSL2X7X_CHIP_WORKING) {
		ret = tsl2x7x_chip_off(indio_dev);
		if (ret < 0)
			goto unlock;
	}

	ret = tsl2x7x_chip_on(indio_dev);
	if (ret < 0)
		goto unlock;

	/* Restore the off state if the device was not running before. */
	if (device_status != TSL2X7X_CHIP_WORKING)
		ret = tsl2x7x_chip_off(indio_dev);

unlock:
	mutex_unlock(&chip->prox_mutex);
	mutex_unlock(&chip->als_mutex);

	return ret;
}
/*
 * tsl2x7x_prox_calculate - min/max/mean/stddev over a sample buffer
 * @data:   array of proximity samples
 * @length: number of samples (0 is treated as 1 to avoid divide-by-zero)
 * @statP:  output statistics
 *
 * Bug fix: the standard deviation is sqrt(variance), i.e.
 * sqrt(sum_of_squared_deviations / length).  The original code computed
 * int_sqrt(sum) / length, which is not the standard deviation.
 */
static
void tsl2x7x_prox_calculate(int *data, int length,
		struct tsl2x7x_prox_stat *statP)
{
	int i;
	int sample_sum;
	int tmp;

	if (length == 0)
		length = 1;

	sample_sum = 0;
	statP->min = INT_MAX;
	statP->max = INT_MIN;
	for (i = 0; i < length; i++) {
		sample_sum += data[i];
		statP->min = min(statP->min, data[i]);
		statP->max = max(statP->max, data[i]);
	}
	statP->mean = sample_sum / length;

	/* Sum of squared deviations from the mean. */
	sample_sum = 0;
	for (i = 0; i < length; i++) {
		tmp = data[i] - statP->mean;
		sample_sum += tmp * tmp;
	}
	statP->stddev = int_sqrt((long)sample_sum / length);
}
/**
 * tsl2x7x_prox_cal() - Calculates std. and sets thresholds.
 * @indio_dev: pointer to IIO device
 *
 * Calculates a standard deviation based on the samples,
 * and sets the proximity high threshold accordingly.  The chip is
 * cycled off/on around the sampling run and the interrupt settings are
 * restored afterwards.
 */
static void tsl2x7x_prox_cal(struct iio_dev *indio_dev)
{
	int prox_history[MAX_SAMPLES_CAL + 1];
	int i;
	struct tsl2x7x_prox_stat prox_stat_data[2];
	struct tsl2x7x_prox_stat *calP;
	struct tsl2X7X_chip *chip = iio_priv(indio_dev);
	u8 tmp_irq_settings;
	u8 current_state = chip->tsl2x7x_chip_status;

	/* Clamp the sample count so prox_history cannot be overrun. */
	if (chip->tsl2x7x_settings.prox_max_samples_cal > MAX_SAMPLES_CAL) {
		dev_err(&chip->client->dev,
			"%s: max prox samples cal is too big: %d\n",
			__func__, chip->tsl2x7x_settings.prox_max_samples_cal);
		chip->tsl2x7x_settings.prox_max_samples_cal = MAX_SAMPLES_CAL;
	}

	/* have to stop to change settings */
	tsl2x7x_chip_off(indio_dev);

	/* Enable proximity detection save just in case prox not wanted yet*/
	tmp_irq_settings = chip->tsl2x7x_settings.interrupts_en;
	chip->tsl2x7x_settings.interrupts_en |= TSL2X7X_CNTL_PROX_INT_ENBL;

	/*turn on device if not already on*/
	tsl2x7x_chip_on(indio_dev);

	/* Gather the samples, pausing 15 ms between reads so a fresh
	 * proximity integration can complete each time. */
	for (i = 0; i < chip->tsl2x7x_settings.prox_max_samples_cal; i++) {
		mdelay(15);
		tsl2x7x_get_prox(indio_dev);
		prox_history[i] = chip->prox_data;
		dev_info(&chip->client->dev, "2 i=%d prox data= %d\n",
			i, chip->prox_data);
	}

	tsl2x7x_chip_off(indio_dev);
	calP = &prox_stat_data[PROX_STAT_CAL];
	tsl2x7x_prox_calculate(prox_history,
		chip->tsl2x7x_settings.prox_max_samples_cal, calP);
	/* Threshold heuristic: twice the observed max minus the mean. */
	chip->tsl2x7x_settings.prox_thres_high = (calP->max << 1) - calP->mean;

	dev_info(&chip->client->dev, " cal min=%d mean=%d max=%d\n",
		calP->min, calP->mean, calP->max);
	dev_info(&chip->client->dev,
		"%s proximity threshold set to %d\n",
		chip->client->name, chip->tsl2x7x_settings.prox_thres_high);

	/* back to the way they were */
	chip->tsl2x7x_settings.interrupts_en = tmp_irq_settings;
	if (current_state == TSL2X7X_CHIP_WORKING)
		tsl2x7x_chip_on(indio_dev);
}
/* sysfs: report the current chip status (working/suspended/unknown). */
static ssize_t tsl2x7x_power_state_show(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct tsl2X7X_chip *chip = iio_priv(indio_dev);

	return snprintf(buf, PAGE_SIZE, "%d\n", chip->tsl2x7x_chip_status);
}
/* sysfs: turn the chip on or off from a boolean string. */
static ssize_t tsl2x7x_power_state_store(struct device *dev,
	struct device_attribute *attr, const char *buf, size_t len)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	bool turn_on;

	/* Accept the usual boolean spellings (0/1, y/n, ...). */
	if (strtobool(buf, &turn_on) < 0)
		return -EINVAL;

	if (turn_on)
		tsl2x7x_chip_on(indio_dev);
	else
		tsl2x7x_chip_off(indio_dev);

	return len;
}
/*
 * sysfs: list the selectable ALS gains for this part.  The original
 * table advertises a 128x top gain for the x71 parts and 120x for the
 * rest; preserved as-is.
 */
static ssize_t tsl2x7x_gain_available_show(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct tsl2X7X_chip *chip = iio_priv(dev_to_iio_dev(dev));
	const char *gains;

	switch (chip->id) {
	case tsl2571:
	case tsl2671:
	case tmd2671:
	case tsl2771:
	case tmd2771:
		gains = "1 8 16 128";
		break;
	default:
		gains = "1 8 16 120";
		break;
	}

	return snprintf(buf, PAGE_SIZE, "%s\n", gains);
}
/* sysfs: proximity gain choices, identical for all supported parts. */
static ssize_t tsl2x7x_prox_gain_available_show(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	static const char prox_gains[] = "1 2 4 8";

	return snprintf(buf, PAGE_SIZE, "%s\n", prox_gains);
}
/*
 * sysfs: show the ALS integration time as "seconds.milliseconds".
 *
 * Fix: the original divided the cycle count (y) by 1000 instead of the
 * millisecond total (z); the output happened to be identical only
 * because z never reaches 1000 ms with the supported ATIME range.
 */
static ssize_t tsl2x7x_als_time_show(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct tsl2X7X_chip *chip = iio_priv(dev_to_iio_dev(dev));
	int y, z;

	/* Convert the 2's-complement ATIME encoding back to a cycle
	 * count, then to a total in milliseconds. */
	y = (TSL2X7X_MAX_TIMER_CNT - (u8)chip->tsl2x7x_settings.als_time) + 1;
	z = y * TSL2X7X_MIN_ITIME;

	y = z / 1000;
	z %= 1000;

	return snprintf(buf, PAGE_SIZE, "%d.%03d\n", y, z);
}
/*
 * sysfs: set the ALS integration time from a "seconds.fraction" string.
 *
 * Fix: a sysfs store must return the number of bytes consumed or a
 * negative errno.  Returning IIO_VAL_INT_PLUS_MICRO (== 2) made
 * userspace believe only two bytes were written and retry the rest.
 */
static ssize_t tsl2x7x_als_time_store(struct device *dev,
	struct device_attribute *attr, const char *buf, size_t len)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct tsl2X7X_chip *chip = iio_priv(indio_dev);
	struct tsl2x7x_parse_result result;
	int ret;

	ret = iio_str_to_fixpoint(buf, 100, &result.integer, &result.fract);
	if (ret)
		return ret;

	/* Scale the fractional part to integration cycles (~2.72 ms
	 * each, see integration_time_available) -- TODO confirm the /3
	 * scaling against the datasheet. */
	result.fract /= 3;
	chip->tsl2x7x_settings.als_time =
		(TSL2X7X_MAX_TIMER_CNT - (u8)result.fract);

	dev_info(&chip->client->dev, "%s: als time = %d",
		__func__, chip->tsl2x7x_settings.als_time);

	tsl2x7x_invoke_change(indio_dev);

	return len;
}
/* Valid ALS integration-time range in seconds (one cycle .. 256 cycles). */
static IIO_CONST_ATTR(in_illuminance0_integration_time_available,
		".00272 - .696");
/* sysfs: show the lux value the ALS calibration routine aims for. */
static ssize_t tsl2x7x_als_cal_target_show(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct tsl2X7X_chip *chip = iio_priv(indio_dev);

	return snprintf(buf, PAGE_SIZE, "%d\n",
			chip->tsl2x7x_settings.als_cal_target);
}
/* sysfs: set the ALS calibration target (zero is silently ignored). */
static ssize_t tsl2x7x_als_cal_target_store(struct device *dev,
	struct device_attribute *attr, const char *buf, size_t len)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct tsl2X7X_chip *chip = iio_priv(indio_dev);
	unsigned long target;

	if (kstrtoul(buf, 0, &target))
		return -EINVAL;

	if (target != 0)
		chip->tsl2x7x_settings.als_cal_target = target;

	tsl2x7x_invoke_change(indio_dev);

	return len;
}
/* persistence settings */

/*
 * sysfs: show the ALS event filter delay as "seconds.milliseconds"
 * (persistence count * integration time).
 */
static ssize_t tsl2x7x_als_persistence_show(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct tsl2X7X_chip *chip = iio_priv(dev_to_iio_dev(dev));
	int cycles, itime_ms, filter_delay;

	/* Integration time in ms, from the ATIME register encoding. */
	cycles = (TSL2X7X_MAX_TIMER_CNT -
		  (u8)chip->tsl2x7x_settings.als_time) + 1;
	itime_ms = cycles * TSL2X7X_MIN_ITIME;

	/* The low nibble of the persistence register holds the ALS
	 * filter count. */
	filter_delay = itime_ms * (chip->tsl2x7x_settings.persistence & 0x0F);

	return snprintf(buf, PAGE_SIZE, "%d.%03d\n",
			filter_delay / 1000, filter_delay % 1000);
}
/*
 * sysfs: set the ALS event filter delay from "seconds.fraction".
 *
 * Fix: return len instead of IIO_VAL_INT_PLUS_MICRO -- a sysfs store
 * must return bytes consumed or a negative errno.
 */
static ssize_t tsl2x7x_als_persistence_store(struct device *dev,
	struct device_attribute *attr, const char *buf, size_t len)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct tsl2X7X_chip *chip = iio_priv(indio_dev);
	struct tsl2x7x_parse_result result;
	int y, z, filter_delay;
	int ret;

	ret = iio_str_to_fixpoint(buf, 100, &result.integer, &result.fract);
	if (ret)
		return ret;

	/* Current ALS integration time in ms... */
	y = (TSL2X7X_MAX_TIMER_CNT - (u8)chip->tsl2x7x_settings.als_time) + 1;
	z = y * TSL2X7X_MIN_ITIME;

	/* ...convert the requested delay into an integration-cycle
	 * count, kept in the low nibble of the persistence register. */
	filter_delay =
		DIV_ROUND_UP(((result.integer * 1000) + result.fract), z);

	chip->tsl2x7x_settings.persistence &= 0xF0;
	chip->tsl2x7x_settings.persistence |= (filter_delay & 0x0F);

	dev_info(&chip->client->dev, "%s: als persistence = %d",
		__func__, filter_delay);

	tsl2x7x_invoke_change(indio_dev);

	return len;
}
/*
 * sysfs: show the proximity event filter delay as
 * "seconds.milliseconds" (persistence count * integration time).
 */
static ssize_t tsl2x7x_prox_persistence_show(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct tsl2X7X_chip *chip = iio_priv(dev_to_iio_dev(dev));
	int cycles, itime_ms, filter_delay;

	/* Integration time in ms, from the PTIME register encoding. */
	cycles = (TSL2X7X_MAX_TIMER_CNT -
		  (u8)chip->tsl2x7x_settings.prx_time) + 1;
	itime_ms = cycles * TSL2X7X_MIN_ITIME;

	/* The high nibble of the persistence register holds the
	 * proximity filter count. */
	filter_delay = itime_ms *
		((chip->tsl2x7x_settings.persistence & 0xF0) >> 4);

	return snprintf(buf, PAGE_SIZE, "%d.%03d\n",
			filter_delay / 1000, filter_delay % 1000);
}
/*
 * sysfs: set the proximity event filter delay from "seconds.fraction".
 *
 * Fix: return len instead of IIO_VAL_INT_PLUS_MICRO -- a sysfs store
 * must return bytes consumed or a negative errno.
 */
static ssize_t tsl2x7x_prox_persistence_store(struct device *dev,
	struct device_attribute *attr, const char *buf, size_t len)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct tsl2X7X_chip *chip = iio_priv(indio_dev);
	struct tsl2x7x_parse_result result;
	int y, z, filter_delay;
	int ret;

	ret = iio_str_to_fixpoint(buf, 100, &result.integer, &result.fract);
	if (ret)
		return ret;

	/* Current proximity integration time in ms... */
	y = (TSL2X7X_MAX_TIMER_CNT - (u8)chip->tsl2x7x_settings.prx_time) + 1;
	z = y * TSL2X7X_MIN_ITIME;

	/* ...convert the requested delay into a cycle count, kept in the
	 * high nibble of the persistence register. */
	filter_delay =
		DIV_ROUND_UP(((result.integer * 1000) + result.fract), z);

	chip->tsl2x7x_settings.persistence &= 0x0F;
	chip->tsl2x7x_settings.persistence |= ((filter_delay << 4) & 0xF0);

	dev_info(&chip->client->dev, "%s: prox persistence = %d",
		__func__, filter_delay);

	tsl2x7x_invoke_change(indio_dev);

	return len;
}
/* sysfs: any "true" value kicks off an ALS calibration cycle. */
static ssize_t tsl2x7x_do_calibrate(struct device *dev,
	struct device_attribute *attr, const char *buf, size_t len)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	bool run_cal;

	if (strtobool(buf, &run_cal) < 0)
		return -EINVAL;

	if (run_cal)
		tsl2x7x_als_calibrate(indio_dev);

	tsl2x7x_invoke_change(indio_dev);

	return len;
}
/*
 * sysfs: emit the lux table as "ratio,ch0,ch1," triplets up to and
 * including the 0-ratio terminator entry.
 *
 * Fix: the size argument to snprintf() was PAGE_SIZE regardless of how
 * much of the buffer was already used, allowing writes past the end of
 * the sysfs page; use the remaining space instead.
 *
 * NOTE(review): the loop bound of TSL2X7X_MAX_LUX_TABLE_SIZE * 3 looks
 * larger than the tsl2x7x_device_lux array; in practice the store path
 * always leaves a zero terminator so the break fires first -- confirm
 * the array length against the header.
 */
static ssize_t tsl2x7x_luxtable_show(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct tsl2X7X_chip *chip = iio_priv(dev_to_iio_dev(dev));
	int i = 0;
	int offset = 0;

	while (i < (TSL2X7X_MAX_LUX_TABLE_SIZE * 3)) {
		offset += snprintf(buf + offset, PAGE_SIZE - offset,
			"%d,%d,%d,",
			chip->tsl2x7x_device_lux[i].ratio,
			chip->tsl2x7x_device_lux[i].ch0,
			chip->tsl2x7x_device_lux[i].ch1);
		if (chip->tsl2x7x_device_lux[i].ratio == 0) {
			/* We just printed the first "0" entry.
			 * Now get rid of the extra "," and break. */
			offset--;
			break;
		}
		i++;
	}

	offset += snprintf(buf + offset, PAGE_SIZE - offset, "\n");
	return offset;
}
/*
 * sysfs: replace the lux table from a comma-separated list of ints.
 *
 * Fix: the memcpy length was "value[0] * 4", hard-coding sizeof(int);
 * use sizeof(value[1]) so the copy is correct on any ABI.
 */
static ssize_t tsl2x7x_luxtable_store(struct device *dev,
	struct device_attribute *attr, const char *buf, size_t len)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct tsl2X7X_chip *chip = iio_priv(indio_dev);
	int value[ARRAY_SIZE(chip->tsl2x7x_device_lux)*3 + 1];
	int n;

	get_options(buf, ARRAY_SIZE(value), value);

	/* We now have an array of ints starting at value[1], and
	 * enumerated by value[0].
	 * We expect each group of three ints (ratio, ch0, ch1) to be one
	 * table entry, and the last table entry to be all 0.
	 */
	n = value[0];
	if ((n % 3) || n < 6 ||
			n > ((ARRAY_SIZE(chip->tsl2x7x_device_lux) - 1) * 3)) {
		dev_info(dev, "LUX TABLE INPUT ERROR 1 Value[0]=%d\n", n);
		return -EINVAL;
	}

	/* The final triplet must be the all-zero terminator. */
	if ((value[(n - 2)] | value[(n - 1)] | value[n]) != 0) {
		dev_info(dev, "LUX TABLE INPUT ERROR 2 Value[0]=%d\n", n);
		return -EINVAL;
	}

	if (chip->tsl2x7x_chip_status == TSL2X7X_CHIP_WORKING)
		tsl2x7x_chip_off(indio_dev);

	/* Zero out the table */
	memset(chip->tsl2x7x_device_lux, 0, sizeof(chip->tsl2x7x_device_lux));
	memcpy(chip->tsl2x7x_device_lux, &value[1],
	       value[0] * sizeof(value[1]));

	tsl2x7x_invoke_change(indio_dev);

	return len;
}
/* sysfs: any "true" value kicks off a proximity calibration cycle. */
static ssize_t tsl2x7x_do_prox_calibrate(struct device *dev,
	struct device_attribute *attr, const char *buf, size_t len)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	bool run_cal;

	if (strtobool(buf, &run_cal) < 0)
		return -EINVAL;

	if (run_cal)
		tsl2x7x_prox_cal(indio_dev);

	tsl2x7x_invoke_change(indio_dev);

	return len;
}
static int tsl2x7x_read_interrupt_config(struct iio_dev *indio_dev,
const struct iio_chan_spec *chan,
enum iio_event_type type,
enum iio_event_direction dir)
{
struct tsl2X7X_chip *chip = iio_priv(indio_dev);
int ret;
if (chan->type == IIO_INTENSITY)
ret = !!(chip->tsl2x7x_settings.interrupts_en & 0x10);
else
ret = !!(chip->tsl2x7x_settings.interrupts_en & 0x20);
return ret;
}
/*
 * IIO event hook: enable/disable the ALS or proximity threshold
 * interrupt in the cached settings, then cycle the chip so the new mask
 * reaches the hardware.
 *
 * NOTE(review): disabling clears via "&= 0x20" / "&= 0x10", i.e. it
 * keeps only the other interrupt's bit.  That is equivalent to
 * "&= ~0x10" / "&= ~0x20" only if interrupts_en never carries any
 * other bits -- confirm against the settings initialisation.
 */
static int tsl2x7x_write_interrupt_config(struct iio_dev *indio_dev,
	const struct iio_chan_spec *chan,
	enum iio_event_type type,
	enum iio_event_direction dir,
	int val)
{
	struct tsl2X7X_chip *chip = iio_priv(indio_dev);

	if (chan->type == IIO_INTENSITY) {
		if (val)
			chip->tsl2x7x_settings.interrupts_en |= 0x10;
		else
			chip->tsl2x7x_settings.interrupts_en &= 0x20;
	} else {
		if (val)
			chip->tsl2x7x_settings.interrupts_en |= 0x20;
		else
			chip->tsl2x7x_settings.interrupts_en &= 0x10;
	}

	tsl2x7x_invoke_change(indio_dev);

	return 0;
}
/*
 * IIO event hook: store a new ALS/proximity high or low threshold and
 * cycle the chip so it takes effect.
 */
static int tsl2x7x_write_thresh(struct iio_dev *indio_dev,
	const struct iio_chan_spec *chan,
	enum iio_event_type type,
	enum iio_event_direction dir,
	enum iio_event_info info,
	int val, int val2)
{
	struct tsl2X7X_chip *chip = iio_priv(indio_dev);

	if (chan->type == IIO_INTENSITY) {
		if (dir == IIO_EV_DIR_RISING)
			chip->tsl2x7x_settings.als_thresh_high = val;
		else if (dir == IIO_EV_DIR_FALLING)
			chip->tsl2x7x_settings.als_thresh_low = val;
		else
			return -EINVAL;
	} else {
		if (dir == IIO_EV_DIR_RISING)
			chip->tsl2x7x_settings.prox_thres_high = val;
		else if (dir == IIO_EV_DIR_FALLING)
			chip->tsl2x7x_settings.prox_thres_low = val;
		else
			return -EINVAL;
	}

	tsl2x7x_invoke_change(indio_dev);

	return 0;
}
/*
 * IIO event hook: return the stored ALS/proximity high or low
 * threshold.
 */
static int tsl2x7x_read_thresh(struct iio_dev *indio_dev,
	const struct iio_chan_spec *chan,
	enum iio_event_type type,
	enum iio_event_direction dir,
	enum iio_event_info info,
	int *val, int *val2)
{
	struct tsl2X7X_chip *chip = iio_priv(indio_dev);

	if (chan->type == IIO_INTENSITY) {
		if (dir == IIO_EV_DIR_RISING)
			*val = chip->tsl2x7x_settings.als_thresh_high;
		else if (dir == IIO_EV_DIR_FALLING)
			*val = chip->tsl2x7x_settings.als_thresh_low;
		else
			return -EINVAL;
	} else {
		if (dir == IIO_EV_DIR_RISING)
			*val = chip->tsl2x7x_settings.prox_thres_high;
		else if (dir == IIO_EV_DIR_FALLING)
			*val = chip->tsl2x7x_settings.prox_thres_low;
		else
			return -EINVAL;
	}

	return IIO_VAL_INT;
}
/*
 * IIO read hook: services processed lux, raw ALS channel counts, raw
 * proximity counts, the active calibration gain multiplier and the ALS
 * gain trim.
 */
static int tsl2x7x_read_raw(struct iio_dev *indio_dev,
	struct iio_chan_spec const *chan,
	int *val,
	int *val2,
	long mask)
{
	int ret = -EINVAL;
	struct tsl2X7X_chip *chip = iio_priv(indio_dev);

	switch (mask) {
	case IIO_CHAN_INFO_PROCESSED:
		switch (chan->type) {
		case IIO_LIGHT:
			/* Refresh and report the computed lux value. */
			tsl2x7x_get_lux(indio_dev);
			*val = chip->als_cur_info.lux;
			ret = IIO_VAL_INT;
			break;
		default:
			return -EINVAL;
		}
		break;
	case IIO_CHAN_INFO_RAW:
		switch (chan->type) {
		case IIO_INTENSITY:
			/* A lux read refreshes both ALS channel counts;
			 * channel index selects which one to report. */
			tsl2x7x_get_lux(indio_dev);
			if (chan->channel == 0)
				*val = chip->als_cur_info.als_ch0;
			else
				*val = chip->als_cur_info.als_ch1;
			ret = IIO_VAL_INT;
			break;
		case IIO_PROXIMITY:
			tsl2x7x_get_prox(indio_dev);
			*val = chip->prox_data;
			ret = IIO_VAL_INT;
			break;
		default:
			return -EINVAL;
		}
		break;
	case IIO_CHAN_INFO_CALIBSCALE:
		/* Map the stored gain index to its multiplier. */
		if (chan->type == IIO_LIGHT)
			*val =
			tsl2X7X_als_gainadj[chip->tsl2x7x_settings.als_gain];
		else
			*val =
			tsl2X7X_prx_gainadj[chip->tsl2x7x_settings.prox_gain];
		ret = IIO_VAL_INT;
		break;
	case IIO_CHAN_INFO_CALIBBIAS:
		*val = chip->tsl2x7x_settings.als_gain_trim;
		ret = IIO_VAL_INT;
		break;
	default:
		ret = -EINVAL;
	}

	return ret;
}
/*
 * IIO write hook: accepts a calibration gain multiplier (translated to
 * a register index, with per-part validity checks) or the ALS gain
 * trim, then cycles the chip so the new value takes effect.
 *
 * NOTE(review): the per-part rejection lists for the 120/128 gains look
 * inverted relative to tsl2x7x_gain_available_show(), which advertises
 * 128 for the x71 parts and 120 for the x72 parts -- confirm against
 * the AGAIN tables in the datasheets before changing either side.
 */
static int tsl2x7x_write_raw(struct iio_dev *indio_dev,
	struct iio_chan_spec const *chan,
	int val,
	int val2,
	long mask)
{
	struct tsl2X7X_chip *chip = iio_priv(indio_dev);

	switch (mask) {
	case IIO_CHAN_INFO_CALIBSCALE:
		if (chan->type == IIO_INTENSITY) {
			switch (val) {
			case 1:
				chip->tsl2x7x_settings.als_gain = 0;
				break;
			case 8:
				chip->tsl2x7x_settings.als_gain = 1;
				break;
			case 16:
				chip->tsl2x7x_settings.als_gain = 2;
				break;
			case 120:
				switch (chip->id) {
				case tsl2572:
				case tsl2672:
				case tmd2672:
				case tsl2772:
				case tmd2772:
					return -EINVAL;
				}
				chip->tsl2x7x_settings.als_gain = 3;
				break;
			case 128:
				switch (chip->id) {
				case tsl2571:
				case tsl2671:
				case tmd2671:
				case tsl2771:
				case tmd2771:
					return -EINVAL;
				}
				chip->tsl2x7x_settings.als_gain = 3;
				break;
			default:
				return -EINVAL;
			}
		} else {
			switch (val) {
			case 1:
				chip->tsl2x7x_settings.prox_gain = 0;
				break;
			case 2:
				chip->tsl2x7x_settings.prox_gain = 1;
				break;
			case 4:
				chip->tsl2x7x_settings.prox_gain = 2;
				break;
			case 8:
				chip->tsl2x7x_settings.prox_gain = 3;
				break;
			default:
				return -EINVAL;
			}
		}
		break;
	case IIO_CHAN_INFO_CALIBBIAS:
		chip->tsl2x7x_settings.als_gain_trim = val;
		break;
	default:
		return -EINVAL;
	}

	tsl2x7x_invoke_change(indio_dev);

	return 0;
}
/* Custom (non-IIO-core) sysfs attributes, wired into the per-device
 * attribute groups below. */
static DEVICE_ATTR(power_state, S_IRUGO | S_IWUSR,
		tsl2x7x_power_state_show, tsl2x7x_power_state_store);

static DEVICE_ATTR(in_proximity0_calibscale_available, S_IRUGO,
		tsl2x7x_prox_gain_available_show, NULL);

static DEVICE_ATTR(in_illuminance0_calibscale_available, S_IRUGO,
		tsl2x7x_gain_available_show, NULL);

static DEVICE_ATTR(in_illuminance0_integration_time, S_IRUGO | S_IWUSR,
		tsl2x7x_als_time_show, tsl2x7x_als_time_store);

static DEVICE_ATTR(in_illuminance0_target_input, S_IRUGO | S_IWUSR,
		tsl2x7x_als_cal_target_show, tsl2x7x_als_cal_target_store);

static DEVICE_ATTR(in_illuminance0_calibrate, S_IWUSR, NULL,
		tsl2x7x_do_calibrate);

static DEVICE_ATTR(in_proximity0_calibrate, S_IWUSR, NULL,
		tsl2x7x_do_prox_calibrate);

static DEVICE_ATTR(in_illuminance0_lux_table, S_IRUGO | S_IWUSR,
		tsl2x7x_luxtable_show, tsl2x7x_luxtable_store);

/* "thresh_period" = event persistence (filter) delay. */
static DEVICE_ATTR(in_intensity0_thresh_period, S_IRUGO | S_IWUSR,
		tsl2x7x_als_persistence_show, tsl2x7x_als_persistence_store);

static DEVICE_ATTR(in_proximity0_thresh_period, S_IRUGO | S_IWUSR,
		tsl2x7x_prox_persistence_show, tsl2x7x_prox_persistence_store);
/*
 * Use the default register values to identify the Taos device.
 * Returns 1 if the chip-id family nibble matches the expected family
 * for @target, 0 on a mismatch, -EINVAL for an unknown target.
 */
static int tsl2x7x_device_id(unsigned char *id, int target)
{
	unsigned char family = *id & 0xf0;

	switch (target) {
	case tsl2571:
	case tsl2671:
	case tsl2771:
		return family == TRITON_ID;
	case tmd2671:
	case tmd2771:
		return family == HALIBUT_ID;
	case tsl2572:
	case tsl2672:
	case tmd2672:
	case tsl2772:
	case tmd2772:
		return family == SWORDFISH_ID;
	}

	return -EINVAL;
}
/*
 * Threaded IRQ handler: read the status register, push an IIO event for
 * whichever of the proximity/ALS threshold interrupts fired, then clear
 * the interrupt with the special-function command.
 *
 * NOTE(review): the int returned by i2c_smbus_read_byte_data() is
 * stored straight into a u8, so a negative I2C error would be
 * misinterpreted as status bits -- consider checking it first.
 */
static irqreturn_t tsl2x7x_event_handler(int irq, void *private)
{
	struct iio_dev *indio_dev = private;
	struct tsl2X7X_chip *chip = iio_priv(indio_dev);
	s64 timestamp = iio_get_time_ns();
	int ret;
	u8 value;

	value = i2c_smbus_read_byte_data(chip->client,
		TSL2X7X_CMD_REG | TSL2X7X_STATUS);

	/* What type of interrupt do we need to process */
	if (value & TSL2X7X_STA_PRX_INTR) {
		tsl2x7x_get_prox(indio_dev); /* freshen data for ABI */
		iio_push_event(indio_dev,
			IIO_UNMOD_EVENT_CODE(IIO_PROXIMITY,
				0,
				IIO_EV_TYPE_THRESH,
				IIO_EV_DIR_EITHER),
			timestamp);
	}

	if (value & TSL2X7X_STA_ALS_INTR) {
		tsl2x7x_get_lux(indio_dev); /* freshen data for ABI */
		iio_push_event(indio_dev,
			IIO_UNMOD_EVENT_CODE(IIO_LIGHT,
				0,
				IIO_EV_TYPE_THRESH,
				IIO_EV_DIR_EITHER),
			timestamp);
	}

	/* Clear interrupt now that we have handled it. */
	ret = i2c_smbus_write_byte(chip->client,
		TSL2X7X_CMD_REG | TSL2X7X_CMD_SPL_FN |
		TSL2X7X_CMD_PROXALS_INT_CLR);
	if (ret < 0)
		dev_err(&chip->client->dev,
			"%s: Failed to clear irq from event handler. err = %d\n",
			__func__, ret);

	return IRQ_HANDLED;
}
/* sysfs attributes for ALS-only parts. */
static struct attribute *tsl2x7x_ALS_device_attrs[] = {
	&dev_attr_power_state.attr,
	&dev_attr_in_illuminance0_calibscale_available.attr,
	&dev_attr_in_illuminance0_integration_time.attr,
	&iio_const_attr_in_illuminance0_integration_time_available\
	.dev_attr.attr,
	&dev_attr_in_illuminance0_target_input.attr,
	&dev_attr_in_illuminance0_calibrate.attr,
	&dev_attr_in_illuminance0_lux_table.attr,
	NULL
};

/* sysfs attributes for proximity-only parts. */
static struct attribute *tsl2x7x_PRX_device_attrs[] = {
	&dev_attr_power_state.attr,
	&dev_attr_in_proximity0_calibrate.attr,
	NULL
};

/* sysfs attributes for combined ALS + proximity parts. */
static struct attribute *tsl2x7x_ALSPRX_device_attrs[] = {
	&dev_attr_power_state.attr,
	&dev_attr_in_illuminance0_calibscale_available.attr,
	&dev_attr_in_illuminance0_integration_time.attr,
	&iio_const_attr_in_illuminance0_integration_time_available\
	.dev_attr.attr,
	&dev_attr_in_illuminance0_target_input.attr,
	&dev_attr_in_illuminance0_calibrate.attr,
	&dev_attr_in_illuminance0_lux_table.attr,
	&dev_attr_in_proximity0_calibrate.attr,
	NULL
};

/* PRX2: proximity parts that also expose a selectable prox gain. */
static struct attribute *tsl2x7x_PRX2_device_attrs[] = {
	&dev_attr_power_state.attr,
	&dev_attr_in_proximity0_calibrate.attr,
	&dev_attr_in_proximity0_calibscale_available.attr,
	NULL
};

/* ALSPRX2: ALS + proximity parts with selectable prox gain. */
static struct attribute *tsl2x7x_ALSPRX2_device_attrs[] = {
	&dev_attr_power_state.attr,
	&dev_attr_in_illuminance0_calibscale_available.attr,
	&dev_attr_in_illuminance0_integration_time.attr,
	&iio_const_attr_in_illuminance0_integration_time_available\
	.dev_attr.attr,
	&dev_attr_in_illuminance0_target_input.attr,
	&dev_attr_in_illuminance0_calibrate.attr,
	&dev_attr_in_illuminance0_lux_table.attr,
	&dev_attr_in_proximity0_calibrate.attr,
	&dev_attr_in_proximity0_calibscale_available.attr,
	NULL
};

/* Threshold-period ("persistence") event attributes per configuration. */
static struct attribute *tsl2X7X_ALS_event_attrs[] = {
	&dev_attr_in_intensity0_thresh_period.attr,
	NULL,
};

static struct attribute *tsl2X7X_PRX_event_attrs[] = {
	&dev_attr_in_proximity0_thresh_period.attr,
	NULL,
};

static struct attribute *tsl2X7X_ALSPRX_event_attrs[] = {
	&dev_attr_in_intensity0_thresh_period.attr,
	&dev_attr_in_proximity0_thresh_period.attr,
	NULL,
};
/* Per-configuration sysfs attribute groups, indexed by device type. */
static const struct attribute_group tsl2X7X_device_attr_group_tbl[] = {
	[ALS] = {
		.attrs = tsl2x7x_ALS_device_attrs,
	},
	[PRX] = {
		.attrs = tsl2x7x_PRX_device_attrs,
	},
	[ALSPRX] = {
		.attrs = tsl2x7x_ALSPRX_device_attrs,
	},
	[PRX2] = {
		.attrs = tsl2x7x_PRX2_device_attrs,
	},
	[ALSPRX2] = {
		.attrs = tsl2x7x_ALSPRX2_device_attrs,
	},
};

/* Per-configuration event ("events" directory) attribute groups.
 * NOTE(review): unlike the table above this one is not const -- confirm
 * whether iio_info.event_attrs in this kernel permits const before
 * changing it. */
static struct attribute_group tsl2X7X_event_attr_group_tbl[] = {
	[ALS] = {
		.attrs = tsl2X7X_ALS_event_attrs,
		.name = "events",
	},
	[PRX] = {
		.attrs = tsl2X7X_PRX_event_attrs,
		.name = "events",
	},
	[ALSPRX] = {
		.attrs = tsl2X7X_ALSPRX_event_attrs,
		.name = "events",
	},
};
/*
 * Per-configuration iio_info.  All entries share the same read/write
 * hooks; only the attribute and event groups differ.  PRX2 and ALSPRX2
 * reuse the PRX/ALSPRX event groups since the event set is identical.
 */
static const struct iio_info tsl2X7X_device_info[] = {
	[ALS] = {
		.attrs = &tsl2X7X_device_attr_group_tbl[ALS],
		.event_attrs = &tsl2X7X_event_attr_group_tbl[ALS],
		.driver_module = THIS_MODULE,
		.read_raw = &tsl2x7x_read_raw,
		.write_raw = &tsl2x7x_write_raw,
		.read_event_value = &tsl2x7x_read_thresh,
		.write_event_value = &tsl2x7x_write_thresh,
		.read_event_config = &tsl2x7x_read_interrupt_config,
		.write_event_config = &tsl2x7x_write_interrupt_config,
	},
	[PRX] = {
		.attrs = &tsl2X7X_device_attr_group_tbl[PRX],
		.event_attrs = &tsl2X7X_event_attr_group_tbl[PRX],
		.driver_module = THIS_MODULE,
		.read_raw = &tsl2x7x_read_raw,
		.write_raw = &tsl2x7x_write_raw,
		.read_event_value = &tsl2x7x_read_thresh,
		.write_event_value = &tsl2x7x_write_thresh,
		.read_event_config = &tsl2x7x_read_interrupt_config,
		.write_event_config = &tsl2x7x_write_interrupt_config,
	},
	[ALSPRX] = {
		.attrs = &tsl2X7X_device_attr_group_tbl[ALSPRX],
		.event_attrs = &tsl2X7X_event_attr_group_tbl[ALSPRX],
		.driver_module = THIS_MODULE,
		.read_raw = &tsl2x7x_read_raw,
		.write_raw = &tsl2x7x_write_raw,
		.read_event_value = &tsl2x7x_read_thresh,
		.write_event_value = &tsl2x7x_write_thresh,
		.read_event_config = &tsl2x7x_read_interrupt_config,
		.write_event_config = &tsl2x7x_write_interrupt_config,
	},
	[PRX2] = {
		.attrs = &tsl2X7X_device_attr_group_tbl[PRX2],
		.event_attrs = &tsl2X7X_event_attr_group_tbl[PRX],
		.driver_module = THIS_MODULE,
		.read_raw = &tsl2x7x_read_raw,
		.write_raw = &tsl2x7x_write_raw,
		.read_event_value = &tsl2x7x_read_thresh,
		.write_event_value = &tsl2x7x_write_thresh,
		.read_event_config = &tsl2x7x_read_interrupt_config,
		.write_event_config = &tsl2x7x_write_interrupt_config,
	},
	[ALSPRX2] = {
		.attrs = &tsl2X7X_device_attr_group_tbl[ALSPRX2],
		.event_attrs = &tsl2X7X_event_attr_group_tbl[ALSPRX],
		.driver_module = THIS_MODULE,
		.read_raw = &tsl2x7x_read_raw,
		.write_raw = &tsl2x7x_write_raw,
		.read_event_value = &tsl2x7x_read_thresh,
		.write_event_value = &tsl2x7x_write_thresh,
		.read_event_config = &tsl2x7x_read_interrupt_config,
		.write_event_config = &tsl2x7x_write_interrupt_config,
	},
};
/* Rising/falling threshold events with per-channel value and enable
 * controls, shared by the ALS and proximity channels. */
static const struct iio_event_spec tsl2x7x_events[] = {
	{
		.type = IIO_EV_TYPE_THRESH,
		.dir = IIO_EV_DIR_RISING,
		.mask_separate = BIT(IIO_EV_INFO_VALUE) |
			BIT(IIO_EV_INFO_ENABLE),
	}, {
		.type = IIO_EV_TYPE_THRESH,
		.dir = IIO_EV_DIR_FALLING,
		.mask_separate = BIT(IIO_EV_INFO_VALUE) |
			BIT(IIO_EV_INFO_ENABLE),
	},
};
/*
 * Per-configuration channel tables; the index (ALS/PRX/...) is picked
 * in probe via device_channel_config[id->driver_data].
 *
 * NOTE(review): in the ALS entry the second IIO_INTENSITY channel
 * (channel 1) has no info_mask_separate, unlike the ALSPRX/ALSPRX2
 * entries which expose its raw value -- confirm whether that is
 * intentional.
 */
static const struct tsl2x7x_chip_info tsl2x7x_chip_info_tbl[] = {
	[ALS] = {
		.channel = {
			{
			/* Processed lux output. */
			.type = IIO_LIGHT,
			.indexed = 1,
			.channel = 0,
			.info_mask_separate = BIT(IIO_CHAN_INFO_PROCESSED),
			}, {
			.type = IIO_INTENSITY,
			.indexed = 1,
			.channel = 0,
			.info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
				BIT(IIO_CHAN_INFO_CALIBSCALE) |
				BIT(IIO_CHAN_INFO_CALIBBIAS),
			.event_spec = tsl2x7x_events,
			.num_event_specs = ARRAY_SIZE(tsl2x7x_events),
			}, {
			.type = IIO_INTENSITY,
			.indexed = 1,
			.channel = 1,
			},
		},
	.chan_table_elements = 3,
	.info = &tsl2X7X_device_info[ALS],
	},
	[PRX] = {
		.channel = {
			{
			.type = IIO_PROXIMITY,
			.indexed = 1,
			.channel = 0,
			.info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
			.event_spec = tsl2x7x_events,
			.num_event_specs = ARRAY_SIZE(tsl2x7x_events),
			},
		},
	.chan_table_elements = 1,
	.info = &tsl2X7X_device_info[PRX],
	},
	[ALSPRX] = {
		.channel = {
			{
			.type = IIO_LIGHT,
			.indexed = 1,
			.channel = 0,
			.info_mask_separate = BIT(IIO_CHAN_INFO_PROCESSED)
			}, {
			.type = IIO_INTENSITY,
			.indexed = 1,
			.channel = 0,
			.info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
				BIT(IIO_CHAN_INFO_CALIBSCALE) |
				BIT(IIO_CHAN_INFO_CALIBBIAS),
			.event_spec = tsl2x7x_events,
			.num_event_specs = ARRAY_SIZE(tsl2x7x_events),
			}, {
			.type = IIO_INTENSITY,
			.indexed = 1,
			.channel = 1,
			.info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
			}, {
			.type = IIO_PROXIMITY,
			.indexed = 1,
			.channel = 0,
			.info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
			.event_spec = tsl2x7x_events,
			.num_event_specs = ARRAY_SIZE(tsl2x7x_events),
			},
		},
	.chan_table_elements = 4,
	.info = &tsl2X7X_device_info[ALSPRX],
	},
	[PRX2] = {
		.channel = {
			{
			/* PRX2 adds a selectable proximity gain. */
			.type = IIO_PROXIMITY,
			.indexed = 1,
			.channel = 0,
			.info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
				BIT(IIO_CHAN_INFO_CALIBSCALE),
			.event_spec = tsl2x7x_events,
			.num_event_specs = ARRAY_SIZE(tsl2x7x_events),
			},
		},
	.chan_table_elements = 1,
	.info = &tsl2X7X_device_info[PRX2],
	},
	[ALSPRX2] = {
		.channel = {
			{
			.type = IIO_LIGHT,
			.indexed = 1,
			.channel = 0,
			.info_mask_separate = BIT(IIO_CHAN_INFO_PROCESSED),
			}, {
			.type = IIO_INTENSITY,
			.indexed = 1,
			.channel = 0,
			.info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
				BIT(IIO_CHAN_INFO_CALIBSCALE) |
				BIT(IIO_CHAN_INFO_CALIBBIAS),
			.event_spec = tsl2x7x_events,
			.num_event_specs = ARRAY_SIZE(tsl2x7x_events),
			}, {
			.type = IIO_INTENSITY,
			.indexed = 1,
			.channel = 1,
			.info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
			}, {
			.type = IIO_PROXIMITY,
			.indexed = 1,
			.channel = 0,
			.info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
				BIT(IIO_CHAN_INFO_CALIBSCALE),
			.event_spec = tsl2x7x_events,
			.num_event_specs = ARRAY_SIZE(tsl2x7x_events),
			},
		},
	.chan_table_elements = 4,
	.info = &tsl2X7X_device_info[ALSPRX2],
	},
};
/*
 * i2c probe: verify the chip id against the bound device table entry,
 * initialise locks and per-device configuration, optionally hook the
 * threshold interrupt, load defaults, power the chip on and register
 * the IIO device.
 *
 * Cleanup: tsl2x7x_device_id() was evaluated twice in the id check;
 * call it once and test the single result (1 = match, 0 = mismatch,
 * -EINVAL = unknown id).
 */
static int tsl2x7x_probe(struct i2c_client *clientp,
	const struct i2c_device_id *id)
{
	int ret;
	unsigned char device_id;
	struct iio_dev *indio_dev;
	struct tsl2X7X_chip *chip;

	indio_dev = devm_iio_device_alloc(&clientp->dev, sizeof(*chip));
	if (!indio_dev)
		return -ENOMEM;

	chip = iio_priv(indio_dev);
	chip->client = clientp;
	i2c_set_clientdata(clientp, indio_dev);

	ret = tsl2x7x_i2c_read(chip->client,
		TSL2X7X_CHIPID, &device_id);
	if (ret < 0)
		return ret;

	ret = tsl2x7x_device_id(&device_id, id->driver_data);
	if (ret != 1) {
		dev_info(&chip->client->dev,
			"%s: i2c device found does not match expected id\n",
			__func__);
		return -EINVAL;
	}

	ret = i2c_smbus_write_byte(clientp, (TSL2X7X_CMD_REG | TSL2X7X_CNTRL));
	if (ret < 0) {
		dev_err(&clientp->dev, "%s: write to cmd reg failed. err = %d\n",
			__func__, ret);
		return ret;
	}

	/* ALS and PROX functions can be invoked via user space poll
	 * or H/W interrupt. If busy return last sample. */
	mutex_init(&chip->als_mutex);
	mutex_init(&chip->prox_mutex);

	chip->tsl2x7x_chip_status = TSL2X7X_CHIP_UNKNOWN;
	chip->pdata = clientp->dev.platform_data;
	chip->id = id->driver_data;
	chip->chip_info =
		&tsl2x7x_chip_info_tbl[device_channel_config[id->driver_data]];

	indio_dev->info = chip->chip_info->info;
	indio_dev->dev.parent = &clientp->dev;
	indio_dev->modes = INDIO_DIRECT_MODE;
	indio_dev->name = chip->client->name;
	indio_dev->channels = chip->chip_info->channel;
	indio_dev->num_channels = chip->chip_info->chan_table_elements;

	if (clientp->irq) {
		ret = devm_request_threaded_irq(&clientp->dev, clientp->irq,
						NULL,
						&tsl2x7x_event_handler,
						IRQF_TRIGGER_RISING |
						IRQF_ONESHOT,
						"TSL2X7X_event",
						indio_dev);
		if (ret) {
			dev_err(&clientp->dev,
				"%s: irq request failed", __func__);
			return ret;
		}
	}

	/* Load up the defaults */
	tsl2x7x_defaults(chip);

	/* Make sure the chip is on */
	tsl2x7x_chip_on(indio_dev);

	ret = iio_device_register(indio_dev);
	if (ret) {
		dev_err(&clientp->dev,
			"%s: iio registration failed\n", __func__);
		return ret;
	}

	dev_info(&clientp->dev, "%s Light sensor found.\n", id->name);

	return 0;
}
/* PM suspend: power down a running chip, then let the platform cut
 * board-level power as well if it can. */
static int tsl2x7x_suspend(struct device *dev)
{
	struct iio_dev *indio_dev = dev_get_drvdata(dev);
	struct tsl2X7X_chip *chip = iio_priv(indio_dev);
	int ret = 0;

	if (chip->tsl2x7x_chip_status == TSL2X7X_CHIP_WORKING) {
		ret = tsl2x7x_chip_off(indio_dev);
		chip->tsl2x7x_chip_status = TSL2X7X_CHIP_SUSPENDED;
	}

	if (chip->pdata && chip->pdata->platform_power) {
		pm_message_t pmm = {PM_EVENT_SUSPEND};

		chip->pdata->platform_power(dev, pmm);
	}

	return ret;
}
/* PM resume: restore board-level power first, then bring the chip back
 * up if suspend took it down. */
static int tsl2x7x_resume(struct device *dev)
{
	struct iio_dev *indio_dev = dev_get_drvdata(dev);
	struct tsl2X7X_chip *chip = iio_priv(indio_dev);
	int ret = 0;

	if (chip->pdata && chip->pdata->platform_power) {
		pm_message_t pmm = {PM_EVENT_RESUME};

		chip->pdata->platform_power(dev, pmm);
	}

	if (chip->tsl2x7x_chip_status == TSL2X7X_CHIP_SUSPENDED)
		ret = tsl2x7x_chip_on(indio_dev);

	return ret;
}
/*
 * i2c remove: tear down the IIO interface first so userspace can no
 * longer reach the device, then power the chip down.  (The original
 * powered the chip off while the device was still registered, leaving
 * a window where sysfs accesses hit a powered-down chip.)
 */
static int tsl2x7x_remove(struct i2c_client *client)
{
	struct iio_dev *indio_dev = i2c_get_clientdata(client);

	iio_device_unregister(indio_dev);
	tsl2x7x_chip_off(indio_dev);

	return 0;
}
/* Supported part numbers; driver_data carries the device enum used to
 * select the channel configuration. */
static struct i2c_device_id tsl2x7x_idtable[] = {
	{ "tsl2571", tsl2571 },
	{ "tsl2671", tsl2671 },
	{ "tmd2671", tmd2671 },
	{ "tsl2771", tsl2771 },
	{ "tmd2771", tmd2771 },
	{ "tsl2572", tsl2572 },
	{ "tsl2672", tsl2672 },
	{ "tmd2672", tmd2672 },
	{ "tsl2772", tsl2772 },
	{ "tmd2772", tmd2772 },
	{}
};

MODULE_DEVICE_TABLE(i2c, tsl2x7x_idtable);

/* System sleep PM hooks (no runtime PM). */
static const struct dev_pm_ops tsl2x7x_pm_ops = {
	.suspend = tsl2x7x_suspend,
	.resume = tsl2x7x_resume,
};
/* Driver definition */
static struct i2c_driver tsl2x7x_driver = {
	.driver = {
		.name = "tsl2x7x",
		.pm = &tsl2x7x_pm_ops,
	},
	.id_table = tsl2x7x_idtable,
	.probe = tsl2x7x_probe,
	.remove = tsl2x7x_remove,
};

module_i2c_driver(tsl2x7x_driver);

MODULE_AUTHOR("J. August Brenner<jbrenner@taosinc.com>");
MODULE_DESCRIPTION("TAOS tsl2x7x ambient and proximity light sensor driver");
MODULE_LICENSE("GPL");
| gpl-2.0 |
LibiSC/tab10test | arch/arm/mm/mmu.c | 137 | 33993 | /*
* linux/arch/arm/mm/mmu.c
*
* Copyright (C) 1995-2005 Russell King
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/mman.h>
#include <linux/nodemask.h>
#include <linux/memblock.h>
#include <linux/fs.h>
#include <linux/vmalloc.h>
#include <asm/cp15.h>
#include <asm/cputype.h>
#include <asm/sections.h>
#include <asm/cachetype.h>
#include <asm/setup.h>
#include <asm/sizes.h>
#include <asm/smp_plat.h>
#include <asm/tlb.h>
#include <asm/highmem.h>
#include <asm/system_info.h>
#include <asm/traps.h>
#include <asm/mach/arch.h>
#include <asm/mach/map.h>
#include "mm.h"
/*
 * empty_zero_page is a special page that is used for
 * zero-initialized data and COW.
 */
struct page *empty_zero_page;
EXPORT_SYMBOL(empty_zero_page);

/*
 * The pmd table for the upper-most set of pages.
 */
pmd_t *top_pmd;

/* Indices into cache_policies[], ordered weakest to strongest caching. */
#define CPOLICY_UNCACHED	0
#define CPOLICY_BUFFERED	1
#define CPOLICY_WRITETHROUGH	2
#define CPOLICY_WRITEBACK	3
#define CPOLICY_WRITEALLOC	4

/* Boot-time selected cache policy and ECC mask; settable from the
 * command line (see early_cachepolicy() below). */
static unsigned int cachepolicy __initdata = CPOLICY_WRITEBACK;
static unsigned int ecc_mask __initdata = 0;

/* Baseline page protections exported to the rest of the kernel. */
pgprot_t pgprot_user;
pgprot_t pgprot_kernel;

EXPORT_SYMBOL(pgprot_user);
EXPORT_SYMBOL(pgprot_kernel);
/* One selectable cache policy: its command-line name, the control
 * register bits to clear when selected, and the section (pmd) / page
 * (pte) memory-type attributes to use for mappings. */
struct cachepolicy {
	const char	policy[16];
	unsigned int	cr_mask;
	pmdval_t	pmd;
	pteval_t	pte;
};
static struct cachepolicy cache_policies[] __initdata = {
{
.policy = "uncached",
.cr_mask = CR_W|CR_C,
.pmd = PMD_SECT_UNCACHED,
.pte = L_PTE_MT_UNCACHED,
}, {
.policy = "buffered",
.cr_mask = CR_C,
.pmd = PMD_SECT_BUFFERED,
.pte = L_PTE_MT_BUFFERABLE,
}, {
.policy = "writethrough",
.cr_mask = 0,
.pmd = PMD_SECT_WT,
.pte = L_PTE_MT_WRITETHROUGH,
}, {
.policy = "writeback",
.cr_mask = 0,
.pmd = PMD_SECT_WB,
.pte = L_PTE_MT_WRITEBACK,
}, {
.policy = "writealloc",
.cr_mask = 0,
.pmd = PMD_SECT_WBWA,
.pte = L_PTE_MT_WRITEALLOC,
}
};
/*
 * These are useful for identifying cache coherency
 * problems by allowing the cache or the cache and
 * writebuffer to be turned off.  (Note: the write
 * buffer should not be on and the cache off).
 */
static int __init early_cachepolicy(char *p)
{
	int i;

	/* Prefix match against the known policy names. */
	for (i = 0; i < ARRAY_SIZE(cache_policies); i++) {
		int len = strlen(cache_policies[i].policy);

		if (memcmp(p, cache_policies[i].policy, len) == 0) {
			cachepolicy = i;
			cr_alignment &= ~cache_policies[i].cr_mask;
			cr_no_alignment &= ~cache_policies[i].cr_mask;
			break;
		}
	}
	if (i == ARRAY_SIZE(cache_policies))
		printk(KERN_ERR "ERROR: unknown or unsupported cache policy\n");
	/*
	 * This restriction is partly to do with the way we boot; it is
	 * unpredictable to have memory mapped using two different sets of
	 * memory attributes (shared, type, and cache attribs).  We can not
	 * change these attributes once the initial assembly has setup the
	 * page tables.
	 *
	 * Only warn when the user actually asked for something other than
	 * writeback; "cachepolicy=writeback" on ARMv6+ is fine and should
	 * not produce noise.
	 */
	if (cpu_architecture() >= CPU_ARCH_ARMv6 &&
	    cachepolicy != CPOLICY_WRITEBACK) {
		printk(KERN_WARNING "Only cachepolicy=writeback supported on ARMv6 and later\n");
		cachepolicy = CPOLICY_WRITEBACK;
	}
	flush_cache_all();
	set_cr(cr_alignment);
	return 0;
}
early_param("cachepolicy", early_cachepolicy);
/* Legacy "nocache" boot option: alias for cachepolicy=buffered. */
static int __init early_nocache(char *__unused)
{
	char policy[] = "buffered";

	printk(KERN_WARNING "nocache is deprecated; use cachepolicy=%s\n", policy);
	early_cachepolicy(policy);
	return 0;
}
early_param("nocache", early_nocache);
/* Legacy "nowb" boot option: alias for cachepolicy=uncached. */
static int __init early_nowrite(char *__unused)
{
	char policy[] = "uncached";

	printk(KERN_WARNING "nowb is deprecated; use cachepolicy=%s\n", policy);
	early_cachepolicy(policy);
	return 0;
}
early_param("nowb", early_nowrite);
#ifndef CONFIG_ARM_LPAE
/* "ecc=on"/"ecc=off" boot option: enable/disable ECC bits in L1 entries. */
static int __init early_ecc(char *p)
{
	if (!memcmp(p, "on", 2))
		ecc_mask = PMD_PROTECTION;
	else if (!memcmp(p, "off", 3))
		ecc_mask = 0;

	return 0;
}
early_param("ecc", early_ecc);
#endif
/* "noalign" boot option: clear the alignment-fault enable bit (CR_A) in
 * both saved control-register images and the live CP15 control register. */
static int __init noalign_setup(char *__unused)
{
	cr_alignment &= ~CR_A;
	cr_no_alignment &= ~CR_A;
	set_cr(cr_alignment);
	return 1;
}
__setup("noalign", noalign_setup);
#ifndef CONFIG_SMP
/*
 * Adjust bits in the CP15 control register (and the cached copies of it)
 * under IRQ protection.  @mask selects the bits to change, @set gives
 * their new values.  CR_A is deliberately excluded: alignment faulting
 * is managed only via the cachepolicy/noalign machinery above.
 * UP-only; presumably unsafe on SMP since cr_* are not atomic — hence
 * the !CONFIG_SMP guard.
 */
void adjust_cr(unsigned long mask, unsigned long set)
{
	unsigned long flags;

	mask &= ~CR_A;

	set &= mask;

	local_irq_save(flags);

	cr_no_alignment = (cr_no_alignment & ~mask) | set;
	cr_alignment = (cr_alignment & ~mask) | set;

	set_cr((get_cr() & ~mask) | set);

	local_irq_restore(flags);
}
#endif
/* Baseline PTE/PMD bits shared by all device mappings. */
#define PROT_PTE_DEVICE		L_PTE_PRESENT|L_PTE_YOUNG|L_PTE_DIRTY|L_PTE_XN
#define PROT_SECT_DEVICE	PMD_TYPE_SECT|PMD_SECT_AP_WRITE

/*
 * Memory-type table indexed by the MT_* constants used in map_desc.type.
 * The entries here are architecture-neutral defaults; build_mem_type_table()
 * adjusts them at boot for the actual CPU/architecture in use.
 */
static struct mem_type mem_types[] = {
	[MT_DEVICE] = {		  /* Strongly ordered / ARMv6 shared device */
		.prot_pte	= PROT_PTE_DEVICE | L_PTE_MT_DEV_SHARED |
				  L_PTE_SHARED,
		.prot_l1	= PMD_TYPE_TABLE,
		.prot_sect	= PROT_SECT_DEVICE | PMD_SECT_S,
		.domain		= DOMAIN_IO,
	},
	[MT_DEVICE_NONSHARED] = { /* ARMv6 non-shared device */
		.prot_pte	= PROT_PTE_DEVICE | L_PTE_MT_DEV_NONSHARED,
		.prot_l1	= PMD_TYPE_TABLE,
		.prot_sect	= PROT_SECT_DEVICE,
		.domain		= DOMAIN_IO,
	},
	[MT_DEVICE_CACHED] = {	  /* ioremap_cached */
		.prot_pte	= PROT_PTE_DEVICE | L_PTE_MT_DEV_CACHED,
		.prot_l1	= PMD_TYPE_TABLE,
		.prot_sect	= PROT_SECT_DEVICE | PMD_SECT_WB,
		.domain		= DOMAIN_IO,
	},
	[MT_DEVICE_WC] = {	/* ioremap_wc */
		.prot_pte	= PROT_PTE_DEVICE | L_PTE_MT_DEV_WC,
		.prot_l1	= PMD_TYPE_TABLE,
		.prot_sect	= PROT_SECT_DEVICE,
		.domain		= DOMAIN_IO,
	},
	[MT_UNCACHED] = {
		.prot_pte	= PROT_PTE_DEVICE,
		.prot_l1	= PMD_TYPE_TABLE,
		.prot_sect	= PMD_TYPE_SECT | PMD_SECT_XN,
		.domain		= DOMAIN_IO,
	},
	[MT_CACHECLEAN] = {	/* region used to flush the main cache */
		.prot_sect = PMD_TYPE_SECT | PMD_SECT_XN,
		.domain    = DOMAIN_KERNEL,
	},
#ifndef CONFIG_ARM_LPAE
	[MT_MINICLEAN] = {	/* region used to flush the minicache */
		.prot_sect = PMD_TYPE_SECT | PMD_SECT_XN | PMD_SECT_MINICACHE,
		.domain    = DOMAIN_KERNEL,
	},
#endif
	[MT_LOW_VECTORS] = {	/* vector page mapped at address 0 */
		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
				L_PTE_RDONLY,
		.prot_l1   = PMD_TYPE_TABLE,
		.domain    = DOMAIN_USER,
	},
	[MT_HIGH_VECTORS] = {	/* vector page mapped at 0xffff0000 */
		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
				L_PTE_USER | L_PTE_RDONLY,
		.prot_l1   = PMD_TYPE_TABLE,
		.domain    = DOMAIN_USER,
	},
	[MT_MEMORY] = {		/* normal lowmem RAM */
		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY,
		.prot_l1   = PMD_TYPE_TABLE,
		.prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
		.domain    = DOMAIN_KERNEL,
	},
	[MT_ROM] = {
		.prot_sect = PMD_TYPE_SECT,
		.domain    = DOMAIN_KERNEL,
	},
	[MT_MEMORY_NONCACHED] = {
		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
				L_PTE_MT_BUFFERABLE,
		.prot_l1   = PMD_TYPE_TABLE,
		.prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
		.domain    = DOMAIN_KERNEL,
	},
	[MT_MEMORY_DTCM] = {	/* data TCM: non-executable */
		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
				L_PTE_XN,
		.prot_l1   = PMD_TYPE_TABLE,
		.prot_sect = PMD_TYPE_SECT | PMD_SECT_XN,
		.domain    = DOMAIN_KERNEL,
	},
	[MT_MEMORY_ITCM] = {	/* instruction TCM */
		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY,
		.prot_l1   = PMD_TYPE_TABLE,
		.domain    = DOMAIN_KERNEL,
	},
	[MT_MEMORY_SO] = {	/* strongly-ordered RAM */
		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
				L_PTE_MT_UNCACHED,
		.prot_l1   = PMD_TYPE_TABLE,
		.prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE | PMD_SECT_S |
				PMD_SECT_UNCACHED | PMD_SECT_XN,
		.domain    = DOMAIN_KERNEL,
	},
};
/* Look up a memory-type descriptor by MT_* index; NULL if out of range. */
const struct mem_type *get_mem_type(unsigned int type)
{
	if (type >= ARRAY_SIZE(mem_types))
		return NULL;
	return &mem_types[type];
}
EXPORT_SYMBOL(get_mem_type);
/*
 * Adjust the PMD section entries according to the CPU in use.
 *
 * Called once from paging_init().  Rewrites the mem_types[] table,
 * protection_map[], pgprot_user and pgprot_kernel to match the boot
 * CPU's architecture level, SMP-ness, TEX-remap configuration and the
 * selected cache policy.  Order of the fixups below matters: features
 * are first stripped, then device types marked, then memory types
 * derived from the final cache policy.
 */
static void __init build_mem_type_table(void)
{
	struct cachepolicy *cp;
	unsigned int cr = get_cr();
	pteval_t user_pgprot, kern_pgprot, vecs_pgprot;
	int cpu_arch = cpu_architecture();
	int i;

	/* Clamp the cache policy to what the CPU/config can actually do. */
	if (cpu_arch < CPU_ARCH_ARMv6) {
#if defined(CONFIG_CPU_DCACHE_DISABLE)
		if (cachepolicy > CPOLICY_BUFFERED)
			cachepolicy = CPOLICY_BUFFERED;
#elif defined(CONFIG_CPU_DCACHE_WRITETHROUGH)
		if (cachepolicy > CPOLICY_WRITETHROUGH)
			cachepolicy = CPOLICY_WRITETHROUGH;
#endif
	}
	if (cpu_arch < CPU_ARCH_ARMv5) {
		if (cachepolicy >= CPOLICY_WRITEALLOC)
			cachepolicy = CPOLICY_WRITEBACK;
		ecc_mask = 0;
	}
	/* SMP requires write-allocate for coherency. */
	if (is_smp())
		cachepolicy = CPOLICY_WRITEALLOC;

	/*
	 * Strip out features not present on earlier architectures.
	 * Pre-ARMv5 CPUs don't have TEX bits.  Pre-ARMv6 CPUs or those
	 * without extended page tables don't have the 'Shared' bit.
	 */
	if (cpu_arch < CPU_ARCH_ARMv5)
		for (i = 0; i < ARRAY_SIZE(mem_types); i++)
			mem_types[i].prot_sect &= ~PMD_SECT_TEX(7);
	if ((cpu_arch < CPU_ARCH_ARMv6 || !(cr & CR_XP)) && !cpu_is_xsc3())
		for (i = 0; i < ARRAY_SIZE(mem_types); i++)
			mem_types[i].prot_sect &= ~PMD_SECT_S;

	/*
	 * ARMv5 and lower, bit 4 must be set for page tables (was: cache
	 * "update-able on write" bit on ARM610).  However, Xscale and
	 * Xscale3 require this bit to be cleared.
	 */
	if (cpu_is_xscale() || cpu_is_xsc3()) {
		for (i = 0; i < ARRAY_SIZE(mem_types); i++) {
			mem_types[i].prot_sect &= ~PMD_BIT4;
			mem_types[i].prot_l1 &= ~PMD_BIT4;
		}
	} else if (cpu_arch < CPU_ARCH_ARMv6) {
		for (i = 0; i < ARRAY_SIZE(mem_types); i++) {
			if (mem_types[i].prot_l1)
				mem_types[i].prot_l1 |= PMD_BIT4;
			if (mem_types[i].prot_sect)
				mem_types[i].prot_sect |= PMD_BIT4;
		}
	}

	/*
	 * Mark the device areas according to the CPU/architecture.
	 */
	if (cpu_is_xsc3() || (cpu_arch >= CPU_ARCH_ARMv6 && (cr & CR_XP))) {
		if (!cpu_is_xsc3()) {
			/*
			 * Mark device regions on ARMv6+ as execute-never
			 * to prevent speculative instruction fetches.
			 */
			mem_types[MT_DEVICE].prot_sect |= PMD_SECT_XN;
			mem_types[MT_DEVICE_NONSHARED].prot_sect |= PMD_SECT_XN;
			mem_types[MT_DEVICE_CACHED].prot_sect |= PMD_SECT_XN;
			mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_XN;
		}
		if (cpu_arch >= CPU_ARCH_ARMv7 && (cr & CR_TRE)) {
			/*
			 * For ARMv7 with TEX remapping,
			 * - shared device is SXCB=1100
			 * - nonshared device is SXCB=0100
			 * - write combine device mem is SXCB=0001
			 * (Uncached Normal memory)
			 */
			mem_types[MT_DEVICE].prot_sect |= PMD_SECT_TEX(1);
			mem_types[MT_DEVICE_NONSHARED].prot_sect |= PMD_SECT_TEX(1);
			mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_BUFFERABLE;
		} else if (cpu_is_xsc3()) {
			/*
			 * For Xscale3,
			 * - shared device is TEXCB=00101
			 * - nonshared device is TEXCB=01000
			 * - write combine device mem is TEXCB=00100
			 * (Inner/Outer Uncacheable in xsc3 parlance)
			 */
			mem_types[MT_DEVICE].prot_sect |= PMD_SECT_TEX(1) | PMD_SECT_BUFFERED;
			mem_types[MT_DEVICE_NONSHARED].prot_sect |= PMD_SECT_TEX(2);
			mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_TEX(1);
		} else {
			/*
			 * For ARMv6 and ARMv7 without TEX remapping,
			 * - shared device is TEXCB=00001
			 * - nonshared device is TEXCB=01000
			 * - write combine device mem is TEXCB=00100
			 * (Uncached Normal in ARMv6 parlance).
			 */
			mem_types[MT_DEVICE].prot_sect |= PMD_SECT_BUFFERED;
			mem_types[MT_DEVICE_NONSHARED].prot_sect |= PMD_SECT_TEX(2);
			mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_TEX(1);
		}
	} else {
		/*
		 * On others, write combining is "Uncached/Buffered"
		 */
		mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_BUFFERABLE;
	}

	/*
	 * Now deal with the memory-type mappings
	 */
	cp = &cache_policies[cachepolicy];
	vecs_pgprot = kern_pgprot = user_pgprot = cp->pte;

	/*
	 * Only use write-through for non-SMP systems
	 */
	if (!is_smp() && cpu_arch >= CPU_ARCH_ARMv5 && cachepolicy > CPOLICY_WRITETHROUGH)
		vecs_pgprot = cache_policies[CPOLICY_WRITETHROUGH].pte;

	/*
	 * Enable CPU-specific coherency if supported.
	 * (Only available on XSC3 at the moment.)
	 */
	if (arch_is_coherent() && cpu_is_xsc3()) {
		mem_types[MT_MEMORY].prot_sect |= PMD_SECT_S;
		mem_types[MT_MEMORY].prot_pte |= L_PTE_SHARED;
		mem_types[MT_MEMORY_NONCACHED].prot_sect |= PMD_SECT_S;
		mem_types[MT_MEMORY_NONCACHED].prot_pte |= L_PTE_SHARED;
	}
	/*
	 * ARMv6 and above have extended page tables.
	 */
	if (cpu_arch >= CPU_ARCH_ARMv6 && (cr & CR_XP)) {
#ifndef CONFIG_ARM_LPAE
		/*
		 * Mark cache clean areas and XIP ROM read only
		 * from SVC mode and no access from userspace.
		 */
		mem_types[MT_ROM].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
		mem_types[MT_MINICLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
		mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
#endif

		if (is_smp()) {
			/*
			 * Mark memory with the "shared" attribute
			 * for SMP systems
			 */
			user_pgprot |= L_PTE_SHARED;
			kern_pgprot |= L_PTE_SHARED;
			vecs_pgprot |= L_PTE_SHARED;
			mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_S;
			mem_types[MT_DEVICE_WC].prot_pte |= L_PTE_SHARED;
			mem_types[MT_DEVICE_CACHED].prot_sect |= PMD_SECT_S;
			mem_types[MT_DEVICE_CACHED].prot_pte |= L_PTE_SHARED;
			mem_types[MT_MEMORY].prot_sect |= PMD_SECT_S;
			mem_types[MT_MEMORY].prot_pte |= L_PTE_SHARED;
			mem_types[MT_MEMORY_NONCACHED].prot_sect |= PMD_SECT_S;
			mem_types[MT_MEMORY_NONCACHED].prot_pte |= L_PTE_SHARED;
		}
	}

	/*
	 * Non-cacheable Normal - intended for memory areas that must
	 * not cause dirty cache line writebacks when used
	 */
	if (cpu_arch >= CPU_ARCH_ARMv6) {
		if (cpu_arch >= CPU_ARCH_ARMv7 && (cr & CR_TRE)) {
			/* Non-cacheable Normal is XCB = 001 */
			mem_types[MT_MEMORY_NONCACHED].prot_sect |=
				PMD_SECT_BUFFERED;
		} else {
			/* For both ARMv6 and non-TEX-remapping ARMv7 */
			mem_types[MT_MEMORY_NONCACHED].prot_sect |=
				PMD_SECT_TEX(1);
		}
	} else {
		mem_types[MT_MEMORY_NONCACHED].prot_sect |= PMD_SECT_BUFFERABLE;
	}

#ifdef CONFIG_ARM_LPAE
	/*
	 * Do not generate access flag faults for the kernel mappings.
	 */
	for (i = 0; i < ARRAY_SIZE(mem_types); i++) {
		mem_types[i].prot_pte |= PTE_EXT_AF;
		if (mem_types[i].prot_sect)
			mem_types[i].prot_sect |= PMD_SECT_AF;
	}
	kern_pgprot |= PTE_EXT_AF;
	vecs_pgprot |= PTE_EXT_AF;
#endif

	/* Fold the user protection bits into the 16 vm_flags combinations. */
	for (i = 0; i < 16; i++) {
		pteval_t v = pgprot_val(protection_map[i]);
		protection_map[i] = __pgprot(v | user_pgprot);
	}

	mem_types[MT_LOW_VECTORS].prot_pte |= vecs_pgprot;
	mem_types[MT_HIGH_VECTORS].prot_pte |= vecs_pgprot;

	pgprot_user   = __pgprot(L_PTE_PRESENT | L_PTE_YOUNG | user_pgprot);
	pgprot_kernel = __pgprot(L_PTE_PRESENT | L_PTE_YOUNG |
				 L_PTE_DIRTY | kern_pgprot);

	mem_types[MT_LOW_VECTORS].prot_l1 |= ecc_mask;
	mem_types[MT_HIGH_VECTORS].prot_l1 |= ecc_mask;
	mem_types[MT_MEMORY].prot_sect |= ecc_mask | cp->pmd;
	mem_types[MT_MEMORY].prot_pte |= kern_pgprot;
	mem_types[MT_MEMORY_NONCACHED].prot_sect |= ecc_mask;
	mem_types[MT_ROM].prot_sect |= cp->pmd;

	/* The cache-flush window inherits the write policy (WT vs WB). */
	switch (cp->pmd) {
	case PMD_SECT_WT:
		mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_WT;
		break;
	case PMD_SECT_WB:
	case PMD_SECT_WBWA:
		mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_WB;
		break;
	}
	printk("Memory policy: ECC %sabled, Data cache %s\n",
		ecc_mask ? "en" : "dis", cp->policy);

	/* Finally stamp each type with its domain bits. */
	for (i = 0; i < ARRAY_SIZE(mem_types); i++) {
		struct mem_type *t = &mem_types[i];
		if (t->prot_l1)
			t->prot_l1 |= PMD_DOMAIN(t->domain);
		if (t->prot_sect)
			t->prot_sect |= PMD_DOMAIN(t->domain);
	}
}
#ifdef CONFIG_ARM_DMA_MEM_BUFFERABLE
/*
 * Select the pgprot for /dev/mem-style physical mappings: non-RAM pfns
 * are mapped uncached; O_SYNC requests get write-combining; everything
 * else keeps the caller's protections.
 */
pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
			      unsigned long size, pgprot_t vma_prot)
{
	if (!pfn_valid(pfn))
		return pgprot_noncached(vma_prot);
	else if (file->f_flags & O_SYNC)
		return pgprot_writecombine(vma_prot);
	return vma_prot;
}
EXPORT_SYMBOL(phys_mem_access_prot);
#endif
/* Base address of the exception vectors: high (0xffff0000) or low (0). */
#define vectors_base()	(vectors_high() ? 0xffff0000 : 0)

/* Boot-time allocator: grab @sz zeroed bytes from memblock at @align. */
static void __init *early_alloc_aligned(unsigned long sz, unsigned long align)
{
	void *ptr;

	ptr = __va(memblock_alloc(sz, align));
	memset(ptr, 0, sz);

	return ptr;
}
/* Convenience wrapper: naturally-aligned (align == size) early allocation. */
static void __init *early_alloc(unsigned long sz)
{
	return early_alloc_aligned(sz, sz);
}
/*
 * Return a PTE table for @pmd: allocate a fresh one if the pmd is empty
 * (or maps a section, which pmd_bad() reports), otherwise reuse the
 * table it already points to.
 */
static pte_t * __init early_pte_alloc(pmd_t *pmd)
{
	if (pmd_none(*pmd) || pmd_bad(*pmd))
		return early_alloc(PTE_HWTABLE_OFF + PTE_HWTABLE_SIZE);
	return pmd_page_vaddr(*pmd);
}
/* Point @pmd at the PTE table @pte with L1 protection bits @prot. */
static void __init early_pte_install(pmd_t *pmd, pte_t *pte, unsigned long prot)
{
	__pmd_populate(pmd, __pa(pte), prot);
	BUG_ON(pmd_bad(*pmd));
}
/*
 * Ensure @pmd has a PTE table installed (allocating one if needed) and
 * return the PTE slot for @addr within it.
 */
static pte_t * __init early_pte_alloc_and_install(pmd_t *pmd,
	unsigned long addr, unsigned long prot)
{
	if (pmd_none(*pmd)) {
		pte_t *pte = early_pte_alloc(pmd);
		early_pte_install(pmd, pte, prot);
	}
	BUG_ON(pmd_bad(*pmd));
	return pte_offset_kernel(pmd, addr);
}
/*
 * Fill PTEs for [addr, end) starting at physical frame @pfn with the
 * protections of @type, then install the table into @pmd.  The PTEs are
 * written before the pmd is populated so a partially-built table is
 * never visible through the page tables.
 */
static void __init alloc_init_pte(pmd_t *pmd, unsigned long addr,
				  unsigned long end, unsigned long pfn,
				  const struct mem_type *type)
{
	pte_t *start_pte = early_pte_alloc(pmd);
	pte_t *pte = start_pte + pte_index(addr);

	/* If replacing a section mapping, the whole section must be replaced */
	BUG_ON(pmd_bad(*pmd) && ((addr | end) & ~PMD_MASK));

	do {
		set_pte_ext(pte, pfn_pte(pfn, __pgprot(type->prot_pte)), 0);
		pfn++;
	} while (pte++, addr += PAGE_SIZE, addr != end);
	early_pte_install(pmd, start_pte, type->prot_l1);
}
/*
 * Map [addr, end) at @phys under @pud.  Uses 1MB section entries when
 * everything is section-aligned and @force_pages is not set; otherwise
 * falls back to page-granular PTEs (needed e.g. for RODATA protection).
 */
static void __init alloc_init_section(pud_t *pud, unsigned long addr,
				      unsigned long end, phys_addr_t phys,
				      const struct mem_type *type,
				      bool force_pages)
{
	pmd_t *pmd = pmd_offset(pud, addr);

	/*
	 * Try a section mapping - end, addr and phys must all be aligned
	 * to a section boundary.  Note that PMDs refer to the individual
	 * L1 entries, whereas PGDs refer to a group of L1 entries making
	 * up one logical pointer to an L2 table.
	 */
	if (((addr | end | phys) & ~SECTION_MASK) == 0 && !force_pages) {
		pmd_t *p = pmd;

#ifndef CONFIG_ARM_LPAE
		/* 2-level tables pair sections; start on the odd half if needed. */
		if (addr & SECTION_SIZE)
			pmd++;
#endif

		do {
			*pmd = __pmd(phys | type->prot_sect);
			phys += SECTION_SIZE;
		} while (pmd++, addr += SECTION_SIZE, addr != end);

		flush_pmd_entry(p);
	} else {
		/*
		 * No need to loop; pte's aren't interested in the
		 * individual L1 entries.
		 */
		alloc_init_pte(pmd, addr, end, __phys_to_pfn(phys), type);
	}
}
/* Walk the PUD level for [addr, end), delegating each span to
 * alloc_init_section().  On 2-level ARM the pud folds into the pgd. */
static void __init alloc_init_pud(pgd_t *pgd, unsigned long addr,
	unsigned long end, unsigned long phys, const struct mem_type *type,
	bool force_pages)
{
	pud_t *pud = pud_offset(pgd, addr);
	unsigned long next;

	do {
		next = pud_addr_end(addr, end);
		alloc_init_section(pud, addr, next, phys, type, force_pages);
		phys += next - addr;
	} while (pud++, addr = next, addr != end);
}
#ifndef CONFIG_ARM_LPAE
/*
 * Build a 16MB-supersection mapping for a physical address above 4GB
 * (pfn >= 0x100000).  Requires ARMv6+ or XSC3, domain 0, and 16MB
 * alignment of both ends; otherwise the request is logged and dropped.
 */
static void __init create_36bit_mapping(struct map_desc *md,
					const struct mem_type *type)
{
	unsigned long addr, length, end;
	phys_addr_t phys;
	pgd_t *pgd;

	addr = md->virtual;
	phys = __pfn_to_phys(md->pfn);
	length = PAGE_ALIGN(md->length);

	if (!(cpu_architecture() >= CPU_ARCH_ARMv6 || cpu_is_xsc3())) {
		printk(KERN_ERR "MM: CPU does not support supersection "
		       "mapping for 0x%08llx at 0x%08lx\n",
		       (long long)__pfn_to_phys((u64)md->pfn), addr);
		return;
	}

	/* N.B.	ARMv6 supersections are only defined to work with domain 0.
	 *	Since domain assignments can in fact be arbitrary, the
	 *	'domain == 0' check below is required to insure that ARMv6
	 *	supersections are only allocated for domain 0 regardless
	 *	of the actual domain assignments in use.
	 */
	if (type->domain) {
		printk(KERN_ERR "MM: invalid domain in supersection "
		       "mapping for 0x%08llx at 0x%08lx\n",
		       (long long)__pfn_to_phys((u64)md->pfn), addr);
		return;
	}

	if ((addr | length | __pfn_to_phys(md->pfn)) & ~SUPERSECTION_MASK) {
		printk(KERN_ERR "MM: cannot create mapping for 0x%08llx"
		       " at 0x%08lx invalid alignment\n",
		       (long long)__pfn_to_phys((u64)md->pfn), addr);
		return;
	}

	/*
	 * Shift bits [35:32] of address into bits [23:20] of PMD
	 * (See ARMv6 spec).
	 */
	phys |= (((md->pfn >> (32 - PAGE_SHIFT)) & 0xF) << 20);

	pgd = pgd_offset_k(addr);
	end = addr + length;
	do {
		pud_t *pud = pud_offset(pgd, addr);
		pmd_t *pmd = pmd_offset(pud, addr);
		int i;

		/* A supersection is replicated across all 16 L1 slots. */
		for (i = 0; i < 16; i++)
			*pmd++ = __pmd(phys | type->prot_sect | PMD_SECT_SUPER);

		addr += SUPERSECTION_SIZE;
		phys += SUPERSECTION_SIZE;
		pgd += SUPERSECTION_SIZE >> PGDIR_SHIFT;
	} while (addr != end);
}
#endif	/* !CONFIG_ARM_LPAE */
/*
 * Create the page directory entries and any necessary
 * page tables for the mapping specified by `md'.  We
 * are able to cope here with varying sizes and address
 * offsets, and we take full advantage of sections and
 * supersections.
 *
 * @force_pages: when true, never use section entries even if aligned
 * (used by map_lowmem() for the RODATA region so it can later be
 * write-protected at page granularity).
 */
static void __init create_mapping(struct map_desc *md, bool force_pages)
{
	unsigned long addr, length, end;
	phys_addr_t phys;
	const struct mem_type *type;
	pgd_t *pgd;

	/* Refuse user-space targets, except the low-vectors page. */
	if (md->virtual != vectors_base() && md->virtual < TASK_SIZE) {
		printk(KERN_WARNING "BUG: not creating mapping for 0x%08llx"
		       " at 0x%08lx in user region\n",
		       (long long)__pfn_to_phys((u64)md->pfn), md->virtual);
		return;
	}

	/* Device/ROM maps belong in the vmalloc window; warn but proceed. */
	if ((md->type == MT_DEVICE || md->type == MT_ROM) &&
	    md->virtual >= PAGE_OFFSET &&
	    (md->virtual < VMALLOC_START || md->virtual >= VMALLOC_END)) {
		printk(KERN_WARNING "BUG: mapping for 0x%08llx"
		       " at 0x%08lx out of vmalloc space\n",
		       (long long)__pfn_to_phys((u64)md->pfn), md->virtual);
	}

	type = &mem_types[md->type];

#ifndef CONFIG_ARM_LPAE
	/*
	 * Catch 36-bit addresses
	 */
	if (md->pfn >= 0x100000) {
		create_36bit_mapping(md, type);
		return;
	}
#endif

	addr = md->virtual & PAGE_MASK;
	phys = __pfn_to_phys(md->pfn);
	length = PAGE_ALIGN(md->length + (md->virtual & ~PAGE_MASK));

	/* Types without an L1 table can only be mapped by whole sections. */
	if (type->prot_l1 == 0 && ((addr | phys | length) & ~SECTION_MASK)) {
		printk(KERN_WARNING "BUG: map for 0x%08llx at 0x%08lx can not "
		       "be mapped using pages, ignoring.\n",
		       (long long)__pfn_to_phys(md->pfn), addr);
		return;
	}

	pgd = pgd_offset_k(addr);
	end = addr + length;
	do {
		unsigned long next = pgd_addr_end(addr, end);

		alloc_init_pud(pgd, addr, next, phys, type, force_pages);

		phys += next - addr;
		addr = next;
	} while (pgd++, addr != end);
}
/*
 * Create the architecture specific mappings
 *
 * For each descriptor: build the page-table mapping, then register a
 * static vm_struct for it so ioremap() can find/reuse these regions
 * and so they are visible in /proc/vmallocinfo.
 */
void __init iotable_init(struct map_desc *io_desc, int nr)
{
	struct map_desc *md;
	struct vm_struct *vm;

	if (!nr)
		return;

	/* One contiguous array of vm_structs for all nr entries. */
	vm = early_alloc_aligned(sizeof(*vm) * nr, __alignof__(*vm));

	for (md = io_desc; nr; md++, nr--) {
		create_mapping(md, false);
		vm->addr = (void *)(md->virtual & PAGE_MASK);
		vm->size = PAGE_ALIGN(md->length + (md->virtual & ~PAGE_MASK));
		vm->phys_addr = __pfn_to_phys(md->pfn);
		vm->flags = VM_IOREMAP | VM_ARM_STATIC_MAPPING;
		vm->flags |= VM_ARM_MTYPE(md->type);
		vm->caller = iotable_init;
		vm_area_add_early(vm++);
	}
}
#ifndef CONFIG_ARM_LPAE

/*
 * The Linux PMD is made of two consecutive section entries covering 2MB
 * (see definition in include/asm/pgtable-2level.h).  However a call to
 * create_mapping() may optimize static mappings by using individual
 * 1MB section mappings.  This leaves the actual PMD potentially half
 * initialized if the top or bottom section entry isn't used, leaving it
 * open to problems if a subsequent ioremap() or vmalloc() tries to use
 * the virtual space left free by that unused section entry.
 *
 * Let's avoid the issue by inserting dummy vm entries covering the unused
 * PMD halves once the static mappings are in place.
 */

/* Register a dummy SECTION_SIZE vm entry at @addr to block it from reuse. */
static void __init pmd_empty_section_gap(unsigned long addr)
{
	struct vm_struct *vm;

	vm = early_alloc_aligned(sizeof(*vm), __alignof__(*vm));
	vm->addr = (void *)addr;
	vm->size = SECTION_SIZE;
	vm->flags = VM_IOREMAP | VM_ARM_EMPTY_MAPPING;
	vm->caller = pmd_empty_section_gap;
	vm_area_add_early(vm);
}
/*
 * Scan the early vmlist for static mappings that start or end on an odd
 * 1MB section boundary and plug the unused half of each such PMD with a
 * dummy vm entry (see comment above pmd_empty_section_gap()).
 */
static void __init fill_pmd_gaps(void)
{
	struct vm_struct *vm;
	unsigned long addr, next = 0;
	pmd_t *pmd;

	/* we're still single threaded hence no lock needed here */
	for (vm = vmlist; vm; vm = vm->next) {
		if (!(vm->flags & (VM_ARM_STATIC_MAPPING | VM_ARM_EMPTY_MAPPING)))
			continue;
		addr = (unsigned long)vm->addr;
		if (addr < next)
			continue;

		/*
		 * Check if this vm starts on an odd section boundary.
		 * If so and the first section entry for this PMD is free
		 * then we block the corresponding virtual address.
		 */
		if ((addr & ~PMD_MASK) == SECTION_SIZE) {
			pmd = pmd_off_k(addr);
			if (pmd_none(*pmd))
				pmd_empty_section_gap(addr & PMD_MASK);
		}

		/*
		 * Then check if this vm ends on an odd section boundary.
		 * If so and the second section entry for this PMD is empty
		 * then we block the corresponding virtual address.
		 */
		addr += vm->size;
		if ((addr & ~PMD_MASK) == SECTION_SIZE) {
			pmd = pmd_off_k(addr) + 1;
			if (pmd_none(*pmd))
				pmd_empty_section_gap(addr);
		}

		/* no need to look at any vm entry until we hit the next PMD */
		next = (addr + PMD_SIZE - 1) & PMD_MASK;
	}
}
#else
#define fill_pmd_gaps() do { } while (0)
#endif
/* Lowest virtual address the vmalloc area may extend down to; lowmem
 * must end below this.  Default reserves 240MB below VMALLOC_END. */
static void * __initdata vmalloc_min =
	(void *)(VMALLOC_END - (240 << 20) - VMALLOC_OFFSET);

/*
 * vmalloc=size forces the vmalloc area to be exactly 'size'
 * bytes. This can be used to increase (or decrease) the vmalloc
 * area - the default is 240m.
 */
/* Parse the "vmalloc=" boot argument and clamp it to a sane range. */
static int __init early_vmalloc(char *arg)
{
	unsigned long reserve = memparse(arg, NULL);
	const unsigned long max_reserve =
		VMALLOC_END - (PAGE_OFFSET + SZ_32M);

	/* Lower bound: at least 16MB of vmalloc space. */
	if (reserve < SZ_16M) {
		reserve = SZ_16M;
		printk(KERN_WARNING
			"vmalloc area too small, limiting to %luMB\n",
			reserve >> 20);
	}

	/* Upper bound: leave at least 32MB of lowmem above PAGE_OFFSET. */
	if (reserve > max_reserve) {
		reserve = max_reserve;
		printk(KERN_WARNING
			"vmalloc area is too big, limiting to %luMB\n",
			reserve >> 20);
	}

	vmalloc_min = (void *)(VMALLOC_END - reserve);
	return 0;
}
early_param("vmalloc", early_vmalloc);
/* Highest physical address covered by lowmem; computed below. */
static phys_addr_t lowmem_limit __initdata = 0;

/*
 * Validate and rewrite the meminfo bank table in place: mark/split/trim
 * banks relative to the lowmem/vmalloc boundary, drop banks that cannot
 * be used, and compute lowmem_limit.  @i walks the original banks, @j
 * tracks how many banks survive (compaction happens via the copy at the
 * top of the loop).
 */
void __init sanity_check_meminfo(void)
{
	int i, j, highmem = 0;

	for (i = 0, j = 0; i < meminfo.nr_banks; i++) {
		struct membank *bank = &meminfo.bank[j];
		*bank = meminfo.bank[i];

		/* Banks above 4GB can never be lowmem on 32-bit ARM. */
		if (bank->start > ULONG_MAX)
			highmem = 1;

#ifdef CONFIG_HIGHMEM
		if (__va(bank->start) >= vmalloc_min ||
		    __va(bank->start) < (void *)PAGE_OFFSET)
			highmem = 1;

		bank->highmem = highmem;

		/*
		 * Split those memory banks which are partially overlapping
		 * the vmalloc area greatly simplifying things later.
		 */
		if (!highmem && __va(bank->start) < vmalloc_min &&
		    bank->size > vmalloc_min - __va(bank->start)) {
			if (meminfo.nr_banks >= NR_BANKS) {
				printk(KERN_CRIT "NR_BANKS too low, "
						 "ignoring high memory\n");
			} else {
				/* Shift the remaining banks up and carve the
				 * portion above vmalloc_min into a new
				 * highmem bank directly after this one. */
				memmove(bank + 1, bank,
					(meminfo.nr_banks - i) * sizeof(*bank));
				meminfo.nr_banks++;
				i++;
				bank[1].size -= vmalloc_min - __va(bank->start);
				bank[1].start = __pa(vmalloc_min - 1) + 1;
				bank[1].highmem = highmem = 1;
				j++;
			}
			bank->size = vmalloc_min - __va(bank->start);
		}
#else
		bank->highmem = highmem;

		/*
		 * Highmem banks not allowed with !CONFIG_HIGHMEM.
		 */
		if (highmem) {
			printk(KERN_NOTICE "Ignoring RAM at %.8llx-%.8llx "
			       "(!CONFIG_HIGHMEM).\n",
			       (unsigned long long)bank->start,
			       (unsigned long long)bank->start + bank->size - 1);
			continue;
		}

		/*
		 * Check whether this memory bank would entirely overlap
		 * the vmalloc area.
		 */
		if (__va(bank->start) >= vmalloc_min ||
		    __va(bank->start) < (void *)PAGE_OFFSET) {
			printk(KERN_NOTICE "Ignoring RAM at %.8llx-%.8llx "
			       "(vmalloc region overlap).\n",
			       (unsigned long long)bank->start,
			       (unsigned long long)bank->start + bank->size - 1);
			continue;
		}

		/*
		 * Check whether this memory bank would partially overlap
		 * the vmalloc area.
		 */
		if (__va(bank->start + bank->size) > vmalloc_min ||
		    __va(bank->start + bank->size) < __va(bank->start)) {
			unsigned long newsize = vmalloc_min - __va(bank->start);
			printk(KERN_NOTICE "Truncating RAM at %.8llx-%.8llx "
			       "to -%.8llx (vmalloc region overlap).\n",
			       (unsigned long long)bank->start,
			       (unsigned long long)bank->start + bank->size - 1,
			       (unsigned long long)bank->start + newsize - 1);
			bank->size = newsize;
		}
#endif
		if (!bank->highmem && bank->start + bank->size > lowmem_limit)
			lowmem_limit = bank->start + bank->size;

		j++;
	}
#ifdef CONFIG_HIGHMEM
	if (highmem) {
		const char *reason = NULL;

		if (cache_is_vipt_aliasing()) {
			/*
			 * Interactions between kmap and other mappings
			 * make highmem support with aliasing VIPT caches
			 * rather difficult.
			 */
			reason = "with VIPT aliasing cache";
		}
		if (reason) {
			printk(KERN_CRIT "HIGHMEM is not supported %s, ignoring high memory\n",
				reason);
			/* Drop trailing highmem banks from the surviving set. */
			while (j > 0 && meminfo.bank[j - 1].highmem)
				j--;
		}
	}
#endif
	meminfo.nr_banks = j;
	high_memory = __va(lowmem_limit - 1) + 1;
	memblock_set_current_limit(lowmem_limit);
}
/*
 * Wipe the initial (assembly-built) kernel page tables everywhere
 * except the first lowmem block, so paging_init() can rebuild the
 * mappings from a known-clean state.
 */
static inline void prepare_page_table(void)
{
	unsigned long addr;
	phys_addr_t end;

	/*
	 * Clear out all the mappings below the kernel image.
	 */
	for (addr = 0; addr < MODULES_VADDR; addr += PMD_SIZE)
		pmd_clear(pmd_off_k(addr));

#ifdef CONFIG_XIP_KERNEL
	/* The XIP kernel is mapped in the module area -- skip over it */
	addr = ((unsigned long)_etext + PMD_SIZE - 1) & PMD_MASK;
#endif
	for ( ; addr < PAGE_OFFSET; addr += PMD_SIZE)
		pmd_clear(pmd_off_k(addr));

	/*
	 * Find the end of the first block of lowmem.
	 */
	end = memblock.memory.regions[0].base + memblock.memory.regions[0].size;
	if (end >= lowmem_limit)
		end = lowmem_limit;

	/*
	 * Clear out all the kernel space mappings, except for the first
	 * memory bank, up to the vmalloc region.
	 */
	for (addr = __phys_to_virt(end);
	     addr < VMALLOC_START; addr += PMD_SIZE)
		pmd_clear(pmd_off_k(addr));
}
#ifdef CONFIG_ARM_LPAE
/* the first page is reserved for pgd */
#define SWAPPER_PG_DIR_SIZE	(PAGE_SIZE + \
				 PTRS_PER_PGD * PTRS_PER_PMD * sizeof(pmd_t))
#else
#define SWAPPER_PG_DIR_SIZE	(PTRS_PER_PGD * sizeof(pgd_t))
#endif

/*
 * Reserve the special regions of memory
 */
void __init arm_mm_memblock_reserve(void)
{
	/*
	 * Reserve the page tables.  These are already in use,
	 * and can only be in node 0.
	 */
	memblock_reserve(__pa(swapper_pg_dir), SWAPPER_PG_DIR_SIZE);

#ifdef CONFIG_SA1111
	/*
	 * Because of the SA1111 DMA bug, we want to preserve our
	 * precious DMA-able memory...
	 */
	memblock_reserve(PHYS_OFFSET, __pa(swapper_pg_dir) - PHYS_OFFSET);
#endif
}
/*
 * Set up the device mappings.  Since we clear out the page tables for all
 * mappings above VMALLOC_START, we will remove any debug device mappings.
 * This means you have to be careful how you debug this function, or any
 * called function.  This means you can't use any function or debugging
 * method which may touch any device, otherwise the kernel _will_ crash.
 */
static void __init devicemaps_init(struct machine_desc *mdesc)
{
	struct map_desc map;
	unsigned long addr;
	void *vectors;

	/*
	 * Allocate the vector page early.
	 */
	vectors = early_alloc(PAGE_SIZE);

	early_trap_init(vectors);

	/* Clear everything from VMALLOC_START upward; the loop stops when
	 * addr wraps around to 0. */
	for (addr = VMALLOC_START; addr; addr += PMD_SIZE)
		pmd_clear(pmd_off_k(addr));

	/*
	 * Map the kernel if it is XIP.
	 * It is always first in the modulearea.
	 *
	 * Note: create_mapping() in this tree takes a second force_pages
	 * argument; the calls below must pass it or the build breaks when
	 * these config options are enabled.
	 */
#ifdef CONFIG_XIP_KERNEL
	map.pfn = __phys_to_pfn(CONFIG_XIP_PHYS_ADDR & SECTION_MASK);
	map.virtual = MODULES_VADDR;
	map.length = ((unsigned long)_etext - map.virtual + ~SECTION_MASK) & SECTION_MASK;
	map.type = MT_ROM;
	create_mapping(&map, false);
#endif

	/*
	 * Map the cache flushing regions.
	 */
#ifdef FLUSH_BASE
	map.pfn = __phys_to_pfn(FLUSH_BASE_PHYS);
	map.virtual = FLUSH_BASE;
	map.length = SZ_1M;
	map.type = MT_CACHECLEAN;
	create_mapping(&map, false);
#endif
#ifdef FLUSH_BASE_MINICACHE
	map.pfn = __phys_to_pfn(FLUSH_BASE_PHYS + SZ_1M);
	map.virtual = FLUSH_BASE_MINICACHE;
	map.length = SZ_1M;
	map.type = MT_MINICLEAN;
	create_mapping(&map, false);
#endif

	/*
	 * Create a mapping for the machine vectors at the high-vectors
	 * location (0xffff0000).  If we aren't using high-vectors, also
	 * create a mapping at the low-vectors virtual address.
	 */
	map.pfn = __phys_to_pfn(virt_to_phys(vectors));
	map.virtual = 0xffff0000;
	map.length = PAGE_SIZE;
	map.type = MT_HIGH_VECTORS;
	create_mapping(&map, false);

	if (!vectors_high()) {
		map.virtual = 0;
		map.type = MT_LOW_VECTORS;
		create_mapping(&map, false);
	}

	/*
	 * Ask the machine support to map in the statically mapped devices.
	 */
	if (mdesc->map_io)
		mdesc->map_io();
	fill_pmd_gaps();

	/*
	 * Finally flush the caches and tlb to ensure that we're in a
	 * consistent state wrt the writebuffer.  This also ensures that
	 * any write-allocated cache lines in the vector page are written
	 * back.  After this point, we can start to touch devices again.
	 */
	local_flush_tlb_all();
	flush_cache_all();
}
/* Pre-allocate the PTE table backing the persistent-kmap window so
 * kmap() never has to allocate at use time.  No-op without HIGHMEM. */
static void __init kmap_init(void)
{
#ifdef CONFIG_HIGHMEM
	pkmap_page_table = early_pte_alloc_and_install(pmd_off_k(PKMAP_BASE),
		PKMAP_BASE, _PAGE_KERNEL_TABLE);
#endif
}
/*
 * Map every lowmem memblock region 1:1 (virt = phys + offset) as
 * MT_MEMORY.  With DEBUG_RODATA, additionally remap the kernel
 * text/rodata span with page-granular entries (force_pages=true) so it
 * can later be write-protected per page.
 */
static void __init map_lowmem(void)
{
	struct memblock_region *reg;
	phys_addr_t start;
	phys_addr_t end;
	struct map_desc map;

	/* Map all the lowmem memory banks. */
	for_each_memblock(memory, reg) {
		start = reg->base;
		end = start + reg->size;

		if (end > lowmem_limit)
			end = lowmem_limit;
		if (start >= end)
			break;

		map.pfn = __phys_to_pfn(start);
		map.virtual = __phys_to_virt(start);
		map.length = end - start;
		map.type = MT_MEMORY;

		create_mapping(&map, false);
	}

#ifdef CONFIG_DEBUG_RODATA
	start = __pa(_stext) & PMD_MASK;
	end = ALIGN(__pa(__end_rodata), PMD_SIZE);

	map.pfn = __phys_to_pfn(start);
	map.virtual = __phys_to_virt(start);
	map.length = end - start;
	map.type = MT_MEMORY;

	create_mapping(&map, true);
#endif
}
/*
 * paging_init() sets up the page tables, initialises the zone memory
 * maps, and sets up the zero page, bad page and bad page tables.
 *
 * Ordering is significant: mem_types must be finalized before any
 * mapping is created, lowmem must be mapped before device maps wipe
 * the vmalloc region, and the zero page is allocated before
 * bootmem_init() but converted to a struct page only afterwards.
 */
void __init paging_init(struct machine_desc *mdesc)
{
	void *zero_page;

	memblock_set_current_limit(lowmem_limit);

	build_mem_type_table();
	prepare_page_table();
	map_lowmem();
	devicemaps_init(mdesc);
	kmap_init();

	top_pmd = pmd_off_k(0xffff0000);

	/* allocate the zero page. */
	zero_page = early_alloc(PAGE_SIZE);

	bootmem_init();

	empty_zero_page = virt_to_page(zero_page);
	__flush_dcache_page(NULL, empty_zero_page);
}
| gpl-2.0 |
zhmz90/linux | drivers/staging/wlan-ng/prism2sta.c | 393 | 58217 | /* src/prism2/driver/prism2sta.c
*
* Implements the station functionality for prism2
*
* Copyright (C) 1999 AbsoluteValue Systems, Inc. All Rights Reserved.
* --------------------------------------------------------------------
*
* linux-wlan
*
* The contents of this file are subject to the Mozilla Public
* License Version 1.1 (the "License"); you may not use this file
* except in compliance with the License. You may obtain a copy of
* the License at http://www.mozilla.org/MPL/
*
* Software distributed under the License is distributed on an "AS
* IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or
* implied. See the License for the specific language governing
* rights and limitations under the License.
*
* Alternatively, the contents of this file may be used under the
* terms of the GNU Public License version 2 (the "GPL"), in which
* case the provisions of the GPL are applicable instead of the
* above. If you wish to allow the use of your version of this file
* only under the terms of the GPL and not to allow others to use
* your version of this file under the MPL, indicate your decision
* by deleting the provisions above and replace them with the notice
* and other provisions required by the GPL. If you do not delete
* the provisions above, a recipient may use your version of this
* file under either the MPL or the GPL.
*
* --------------------------------------------------------------------
*
* Inquiries regarding the linux-wlan Open Source project can be
* made directly to:
*
* AbsoluteValue Systems Inc.
* info@linux-wlan.com
* http://www.linux-wlan.com
*
* --------------------------------------------------------------------
*
* Portions of the development of this software were funded by
* Intersil Corporation as part of PRISM(R) chipset product development.
*
* --------------------------------------------------------------------
*
* This file implements the module and linux pcmcia routines for the
* prism2 driver.
*
* --------------------------------------------------------------------
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/wireless.h>
#include <linux/netdevice.h>
#include <linux/workqueue.h>
#include <linux/byteorder/generic.h>
#include <linux/etherdevice.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <linux/if_arp.h>
#include <linux/if_ether.h>
#include <linux/bitops.h>
#include "p80211types.h"
#include "p80211hdr.h"
#include "p80211mgmt.h"
#include "p80211conv.h"
#include "p80211msg.h"
#include "p80211netdev.h"
#include "p80211req.h"
#include "p80211metadef.h"
#include "p80211metastruct.h"
#include "hfa384x.h"
#include "prism2mgmt.h"
/* Driver identification string used in log messages. */
static char *dev_info = "prism2_usb";

static wlandevice_t *create_wlan(void);

/* Reset timing knobs, tunable at module load or via sysfs (0644). */
int prism2_reset_holdtime = 30;	/* Reset hold time in ms */
int prism2_reset_settletime = 100;	/* Reset settle time in ms */

/* When non-zero, issue a hardware reset during initialization. */
static int prism2_doreset;	/* Do a reset at init? */

module_param(prism2_doreset, int, 0644);
MODULE_PARM_DESC(prism2_doreset, "Issue a reset on initialization");
module_param(prism2_reset_holdtime, int, 0644);
MODULE_PARM_DESC(prism2_reset_holdtime, "reset hold time in ms");
module_param(prism2_reset_settletime, int, 0644);
MODULE_PARM_DESC(prism2_reset_settletime, "reset settle time in ms");
MODULE_LICENSE("Dual MPL/GPL");
/* p80211netdev method implementations (wired up in create_wlan()). */
static int prism2sta_open(wlandevice_t *wlandev);
static int prism2sta_close(wlandevice_t *wlandev);
static void prism2sta_reset(wlandevice_t *wlandev);
static int prism2sta_txframe(wlandevice_t *wlandev, struct sk_buff *skb,
			     union p80211_hdr *p80211_hdr,
			     struct p80211_metawep *p80211_wep);
static int prism2sta_mlmerequest(wlandevice_t *wlandev, struct p80211msg *msg);

/* Device bring-up helpers. */
static int prism2sta_getcardinfo(wlandevice_t *wlandev);
static int prism2sta_globalsetup(wlandevice_t *wlandev);
static int prism2sta_setmulticast(wlandevice_t *wlandev, netdevice_t *dev);

/* Handlers for the individual HFA384x info-frame types. */
static void prism2sta_inf_handover(wlandevice_t *wlandev,
				   hfa384x_InfFrame_t *inf);
static void prism2sta_inf_tallies(wlandevice_t *wlandev,
				  hfa384x_InfFrame_t *inf);
static void prism2sta_inf_hostscanresults(wlandevice_t *wlandev,
					  hfa384x_InfFrame_t *inf);
static void prism2sta_inf_scanresults(wlandevice_t *wlandev,
				      hfa384x_InfFrame_t *inf);
static void prism2sta_inf_chinforesults(wlandevice_t *wlandev,
					hfa384x_InfFrame_t *inf);
static void prism2sta_inf_linkstatus(wlandevice_t *wlandev,
				     hfa384x_InfFrame_t *inf);
static void prism2sta_inf_assocstatus(wlandevice_t *wlandev,
				      hfa384x_InfFrame_t *inf);
static void prism2sta_inf_authreq(wlandevice_t *wlandev,
				  hfa384x_InfFrame_t *inf);
static void prism2sta_inf_authreq_defer(wlandevice_t *wlandev,
					hfa384x_InfFrame_t *inf);
static void prism2sta_inf_psusercnt(wlandevice_t *wlandev,
				    hfa384x_InfFrame_t *inf);
/*----------------------------------------------------------------
* prism2sta_open
*
* WLAN device open method. Called from p80211netdev when kernel
* device open (start) method is called in response to the
* SIOCSIIFFLAGS ioctl changing the flags bit IFF_UP
* from clear to set.
*
* Arguments:
* wlandev wlan device structure
*
* Returns:
* 0 success
* >0 f/w reported error
* <0 driver reported error
*
* Side effects:
*
* Call context:
* process thread
----------------------------------------------------------------*/
static int prism2sta_open(wlandevice_t *wlandev)
{
	/*
	 * Nothing device-specific is required at open time: the MAC is
	 * configured later through the MLME command path.  The upper
	 * layers already learn readiness from dev->start/dev->tbusy,
	 * and the rx path gates on dev->flags & IFF_UP.
	 */
	return 0;
}
/*----------------------------------------------------------------
* prism2sta_close
*
* WLAN device close method. Called from p80211netdev when kernel
* device close method is called in response to the
* SIOCSIIFFLAGS ioctl changing the flags bit IFF_UP
* from set to clear.
*
* Arguments:
* wlandev wlan device structure
*
* Returns:
* 0 success
* >0 f/w reported error
* <0 driver reported error
*
* Side effects:
*
* Call context:
* process thread
----------------------------------------------------------------*/
static int prism2sta_close(wlandevice_t *wlandev)
{
	/*
	 * No device-specific teardown is needed here.  The upper layers
	 * see dev->start == 0 / dev->tbusy == 1, and the rx path stops
	 * passing frames up once dev->flags & IFF_UP is clear.
	 */
	return 0;
}
/*----------------------------------------------------------------
* prism2sta_reset
*
* Currently not implemented.
*
* Arguments:
* wlandev wlan device structure
* none
*
* Returns:
* nothing
*
* Side effects:
*
* Call context:
* process thread
----------------------------------------------------------------*/
static void prism2sta_reset(wlandevice_t *wlandev)
{
	/* Intentionally empty: reset is not implemented for this device. */
}
/*----------------------------------------------------------------
* prism2sta_txframe
*
* Takes a frame from p80211 and queues it for transmission.
*
* Arguments:
* wlandev wlan device structure
* pb packet buffer struct. Contains an 802.11
* data frame.
* p80211_hdr points to the 802.11 header for the packet.
* Returns:
* 0 Success and more buffs available
* 1 Success but no more buffs
* 2 Allocation failure
* 4 Buffer full or queue busy
*
* Side effects:
*
* Call context:
* process thread
----------------------------------------------------------------*/
static int prism2sta_txframe(wlandevice_t *wlandev, struct sk_buff *skb,
			     union p80211_hdr *p80211_hdr,
			     struct p80211_metawep *p80211_wep)
{
	hfa384x_t *hw = wlandev->priv;

	/*
	 * Privacy is invoked but HOSTWEP_ENCRYPT is not set, so the
	 * firmware won't mark the frame itself: set the 802.11 WEP bit
	 * in the frame control field before handing it down.
	 */
	if ((wlandev->hostwep & (HOSTWEP_PRIVACYINVOKED | HOSTWEP_ENCRYPT)) ==
	    HOSTWEP_PRIVACYINVOKED)
		p80211_hdr->a3.fc |= cpu_to_le16(WLAN_SET_FC_ISWEP(1));

	return hfa384x_drvr_txframe(hw, skb, p80211_hdr, p80211_wep);
}
/*----------------------------------------------------------------
* prism2sta_mlmerequest
*
* wlan command message handler. All we do here is pass the message
* over to the prism2sta_mgmt_handler.
*
* Arguments:
* wlandev wlan device structure
* msg wlan command message
* Returns:
* 0 success
* <0 successful acceptance of message, but we're
* waiting for an async process to finish before
* we're done with the msg. When the asynch
* process is done, we'll call the p80211
* function p80211req_confirm() .
* >0 An error occurred while we were handling
* the message.
*
* Side effects:
*
* Call context:
* process thread
----------------------------------------------------------------*/
static int prism2sta_mlmerequest(wlandevice_t *wlandev, struct p80211msg *msg)
{
	hfa384x_t *hw = (hfa384x_t *) wlandev->priv;

	int result = 0;

	/* Dispatch on the p80211 message code; most cases just forward
	 * to the matching prism2mgmt_* handler. */
	switch (msg->msgcode) {
	case DIDmsg_dot11req_mibget:
		pr_debug("Received mibget request\n");
		result = prism2mgmt_mibset_mibget(wlandev, msg);
		break;
	case DIDmsg_dot11req_mibset:
		pr_debug("Received mibset request\n");
		result = prism2mgmt_mibset_mibget(wlandev, msg);
		break;
	case DIDmsg_dot11req_scan:
		pr_debug("Received scan request\n");
		result = prism2mgmt_scan(wlandev, msg);
		break;
	case DIDmsg_dot11req_scan_results:
		pr_debug("Received scan_results request\n");
		result = prism2mgmt_scan_results(wlandev, msg);
		break;
	case DIDmsg_dot11req_start:
		pr_debug("Received mlme start request\n");
		result = prism2mgmt_start(wlandev, msg);
		break;
		/*
		 * Prism2 specific messages
		 */
	case DIDmsg_p2req_readpda:
		pr_debug("Received mlme readpda request\n");
		result = prism2mgmt_readpda(wlandev, msg);
		break;
	case DIDmsg_p2req_ramdl_state:
		pr_debug("Received mlme ramdl_state request\n");
		result = prism2mgmt_ramdl_state(wlandev, msg);
		break;
	case DIDmsg_p2req_ramdl_write:
		pr_debug("Received mlme ramdl_write request\n");
		result = prism2mgmt_ramdl_write(wlandev, msg);
		break;
	case DIDmsg_p2req_flashdl_state:
		pr_debug("Received mlme flashdl_state request\n");
		result = prism2mgmt_flashdl_state(wlandev, msg);
		break;
	case DIDmsg_p2req_flashdl_write:
		pr_debug("Received mlme flashdl_write request\n");
		result = prism2mgmt_flashdl_write(wlandev, msg);
		break;
		/*
		 * Linux specific messages
		 */
	case DIDmsg_lnxreq_hostwep:
		break;		/* ignore me. */
	case DIDmsg_lnxreq_ifstate:
		{
			struct p80211msg_lnxreq_ifstate *ifstatemsg;
			pr_debug("Received mlme ifstate request\n");
			ifstatemsg = (struct p80211msg_lnxreq_ifstate *) msg;
			/* The ifstate result is reported inside the message
			 * itself; the function's own return is cleared. */
			result =
			    prism2sta_ifstate(wlandev,
					      ifstatemsg->ifstate.data);
			ifstatemsg->resultcode.status =
			    P80211ENUM_msgitem_status_data_ok;
			ifstatemsg->resultcode.data = result;
			result = 0;
		}
		break;
	case DIDmsg_lnxreq_wlansniff:
		pr_debug("Received mlme wlansniff request\n");
		result = prism2mgmt_wlansniff(wlandev, msg);
		break;
	case DIDmsg_lnxreq_autojoin:
		pr_debug("Received mlme autojoin request\n");
		result = prism2mgmt_autojoin(wlandev, msg);
		break;
	case DIDmsg_lnxreq_commsquality:{
			struct p80211msg_lnxreq_commsquality *qualmsg;
			pr_debug("Received commsquality request\n");
			/* Answered locally from the cached hw->qual values;
			 * no firmware round-trip. */
			qualmsg = (struct p80211msg_lnxreq_commsquality *) msg;
			qualmsg->link.status =
			    P80211ENUM_msgitem_status_data_ok;
			qualmsg->level.status =
			    P80211ENUM_msgitem_status_data_ok;
			qualmsg->noise.status =
			    P80211ENUM_msgitem_status_data_ok;
			qualmsg->link.data = le16_to_cpu(hw->qual.CQ_currBSS);
			qualmsg->level.data = le16_to_cpu(hw->qual.ASL_currBSS);
			qualmsg->noise.data = le16_to_cpu(hw->qual.ANL_currFC);
			qualmsg->txrate.data = hw->txrate;
			break;
		}
	default:
		netdev_warn(wlandev->netdev,
			    "Unknown mgmt request message 0x%08x",
			    msg->msgcode);
		break;
	}

	return result;
}
/*----------------------------------------------------------------
* prism2sta_ifstate
*
* Interface state. This is the primary WLAN interface enable/disable
* handler. Following the driver/load/deviceprobe sequence, this
* function must be called with a state of "enable" before any other
* commands will be accepted.
*
* Arguments:
* wlandev wlan device structure
* msgp ptr to msg buffer
*
* Returns:
* A p80211 message resultcode value.
*
* Side effects:
*
* Call context:
* process thread (usually)
* interrupt
----------------------------------------------------------------*/
u32 prism2sta_ifstate(wlandevice_t *wlandev, u32 ifstate)
{
	hfa384x_t *hw = (hfa384x_t *) wlandev->priv;
	u32 result;

	result = P80211ENUM_resultcode_implementation_failure;

	pr_debug("Current MSD state(%d), requesting(%d)\n",
		 wlandev->msdstate, ifstate);

	/* Outer switch: the requested interface state.
	 * Inner switch: the current MSD state; only certain transitions
	 * are legal, everything else reports a failure resultcode. */
	switch (ifstate) {
	case P80211ENUM_ifstate_fwload:
		switch (wlandev->msdstate) {
		case WLAN_MSD_HWPRESENT:
			wlandev->msdstate = WLAN_MSD_FWLOAD_PENDING;
			/*
			 * Initialize the device+driver sufficiently
			 * for firmware loading.
			 */
			result = hfa384x_drvr_start(hw);
			if (result) {
				netdev_err(wlandev->netdev,
					   "hfa384x_drvr_start() failed,result=%d\n",
					   (int)result);
				result =
				    P80211ENUM_resultcode_implementation_failure;
				/* On failure, roll the MSD state back. */
				wlandev->msdstate = WLAN_MSD_HWPRESENT;
				break;
			}
			wlandev->msdstate = WLAN_MSD_FWLOAD;
			result = P80211ENUM_resultcode_success;
			break;
		case WLAN_MSD_FWLOAD:
			/* Already in fwload: just reinitialize the MAC. */
			hfa384x_cmd_initialize(hw);
			result = P80211ENUM_resultcode_success;
			break;
		case WLAN_MSD_RUNNING:
			netdev_warn(wlandev->netdev,
				    "Cannot enter fwload state from enable state, you must disable first.\n");
			result = P80211ENUM_resultcode_invalid_parameters;
			break;
		case WLAN_MSD_HWFAIL:
		default:
			/* probe() had a problem or the msdstate contains
			 * an unrecognized value, there's nothing we can do.
			 */
			result = P80211ENUM_resultcode_implementation_failure;
			break;
		}
		break;
	case P80211ENUM_ifstate_enable:
		switch (wlandev->msdstate) {
		case WLAN_MSD_HWPRESENT:
		case WLAN_MSD_FWLOAD:
			wlandev->msdstate = WLAN_MSD_RUNNING_PENDING;
			/* Initialize the device+driver for full
			 * operation. Note that this might me an FWLOAD to
			 * to RUNNING transition so we must not do a chip
			 * or board level reset. Note that on failure,
			 * the MSD state is set to HWPRESENT because we
			 * can't make any assumptions about the state
			 * of the hardware or a previous firmware load.
			 */
			result = hfa384x_drvr_start(hw);
			if (result) {
				netdev_err(wlandev->netdev,
					   "hfa384x_drvr_start() failed,result=%d\n",
					   (int)result);
				result =
				    P80211ENUM_resultcode_implementation_failure;
				wlandev->msdstate = WLAN_MSD_HWPRESENT;
				break;
			}

			result = prism2sta_getcardinfo(wlandev);
			if (result) {
				netdev_err(wlandev->netdev,
					   "prism2sta_getcardinfo() failed,result=%d\n",
					   (int)result);
				result =
				    P80211ENUM_resultcode_implementation_failure;
				hfa384x_drvr_stop(hw);
				wlandev->msdstate = WLAN_MSD_HWPRESENT;
				break;
			}
			result = prism2sta_globalsetup(wlandev);
			if (result) {
				netdev_err(wlandev->netdev,
					   "prism2sta_globalsetup() failed,result=%d\n",
					   (int)result);
				result =
				    P80211ENUM_resultcode_implementation_failure;
				hfa384x_drvr_stop(hw);
				wlandev->msdstate = WLAN_MSD_HWPRESENT;
				break;
			}
			wlandev->msdstate = WLAN_MSD_RUNNING;
			/* Reset the AP-join bookkeeping for a fresh run. */
			hw->join_ap = 0;
			hw->join_retries = 60;
			result = P80211ENUM_resultcode_success;
			break;
		case WLAN_MSD_RUNNING:
			/* Do nothing, we're already in this state. */
			result = P80211ENUM_resultcode_success;
			break;
		case WLAN_MSD_HWFAIL:
		default:
			/* probe() had a problem or the msdstate contains
			 * an unrecognized value, there's nothing we can do.
			 */
			result = P80211ENUM_resultcode_implementation_failure;
			break;
		}
		break;
	case P80211ENUM_ifstate_disable:
		switch (wlandev->msdstate) {
		case WLAN_MSD_HWPRESENT:
			/* Do nothing, we're already in this state. */
			result = P80211ENUM_resultcode_success;
			break;
		case WLAN_MSD_FWLOAD:
		case WLAN_MSD_RUNNING:
			wlandev->msdstate = WLAN_MSD_HWPRESENT_PENDING;
			/*
			 * TODO: Shut down the MAC completely. Here a chip
			 * or board level reset is probably called for.
			 * After a "disable" _all_ results are lost, even
			 * those from a fwload.
			 */
			if (!wlandev->hwremoved)
				netif_carrier_off(wlandev->netdev);
			hfa384x_drvr_stop(hw);
			wlandev->macmode = WLAN_MACMODE_NONE;
			wlandev->msdstate = WLAN_MSD_HWPRESENT;
			result = P80211ENUM_resultcode_success;
			break;
		case WLAN_MSD_HWFAIL:
		default:
			/* probe() had a problem or the msdstate contains
			 * an unrecognized value, there's nothing we can do.
			 */
			result = P80211ENUM_resultcode_implementation_failure;
			break;
		}
		break;
	default:
		result = P80211ENUM_resultcode_invalid_parameters;
		break;
	}

	return result;
}
/*----------------------------------------------------------------
* prism2sta_getcardinfo
*
* Collect the NICID, firmware version and any other identifiers
* we'd like to have in host-side data structures.
*
* Arguments:
* wlandev wlan device structure
*
* Returns:
* 0 success
* >0 f/w reported error
* <0 driver reported error
*
* Side effects:
*
* Call context:
* Either.
----------------------------------------------------------------*/
static int prism2sta_getcardinfo(wlandevice_t *wlandev)
{
	int result = 0;
	hfa384x_t *hw = (hfa384x_t *) wlandev->priv;
	u16 temp;
	u8 snum[HFA384x_RID_NICSERIALNUMBER_LEN];

	/* Collect version and compatibility info */
	/* Some are critical, some are not */

	/* NIC identity */
	result = hfa384x_drvr_getconfig(hw, HFA384x_RID_NICIDENTITY,
					&hw->ident_nic,
					sizeof(hfa384x_compident_t));
	if (result) {
		netdev_err(wlandev->netdev, "Failed to retrieve NICIDENTITY\n");
		goto failed;
	}

	/* get all the nic id fields in host byte order */
	hw->ident_nic.id = le16_to_cpu(hw->ident_nic.id);
	hw->ident_nic.variant = le16_to_cpu(hw->ident_nic.variant);
	hw->ident_nic.major = le16_to_cpu(hw->ident_nic.major);
	hw->ident_nic.minor = le16_to_cpu(hw->ident_nic.minor);

	netdev_info(wlandev->netdev, "ident: nic h/w: id=0x%02x %d.%d.%d\n",
		    hw->ident_nic.id, hw->ident_nic.major,
		    hw->ident_nic.minor, hw->ident_nic.variant);

	/* Primary f/w identity */
	result = hfa384x_drvr_getconfig(hw, HFA384x_RID_PRIIDENTITY,
					&hw->ident_pri_fw,
					sizeof(hfa384x_compident_t));
	if (result) {
		netdev_err(wlandev->netdev, "Failed to retrieve PRIIDENTITY\n");
		goto failed;
	}

	/* get all the private fw id fields in host byte order */
	hw->ident_pri_fw.id = le16_to_cpu(hw->ident_pri_fw.id);
	hw->ident_pri_fw.variant = le16_to_cpu(hw->ident_pri_fw.variant);
	hw->ident_pri_fw.major = le16_to_cpu(hw->ident_pri_fw.major);
	hw->ident_pri_fw.minor = le16_to_cpu(hw->ident_pri_fw.minor);

	netdev_info(wlandev->netdev, "ident: pri f/w: id=0x%02x %d.%d.%d\n",
		    hw->ident_pri_fw.id, hw->ident_pri_fw.major,
		    hw->ident_pri_fw.minor, hw->ident_pri_fw.variant);

	/* Station (Secondary?) f/w identity */
	result = hfa384x_drvr_getconfig(hw, HFA384x_RID_STAIDENTITY,
					&hw->ident_sta_fw,
					sizeof(hfa384x_compident_t));
	if (result) {
		netdev_err(wlandev->netdev, "Failed to retrieve STAIDENTITY\n");
		goto failed;
	}

	/* NIC ids below 0x8000 are not Intersil Prism2/2.5/3 parts. */
	if (hw->ident_nic.id < 0x8000) {
		netdev_err(wlandev->netdev,
			   "FATAL: Card is not an Intersil Prism2/2.5/3\n");
		result = -1;
		goto failed;
	}

	/* get all the station fw id fields in host byte order */
	hw->ident_sta_fw.id = le16_to_cpu(hw->ident_sta_fw.id);
	hw->ident_sta_fw.variant = le16_to_cpu(hw->ident_sta_fw.variant);
	hw->ident_sta_fw.major = le16_to_cpu(hw->ident_sta_fw.major);
	hw->ident_sta_fw.minor = le16_to_cpu(hw->ident_sta_fw.minor);

	/* strip out the 'special' variant bits */
	hw->mm_mods = hw->ident_sta_fw.variant & (BIT(14) | BIT(15));
	hw->ident_sta_fw.variant &= ~((u16) (BIT(14) | BIT(15)));

	if (hw->ident_sta_fw.id == 0x1f) {
		netdev_info(wlandev->netdev,
			    "ident: sta f/w: id=0x%02x %d.%d.%d\n",
			    hw->ident_sta_fw.id, hw->ident_sta_fw.major,
			    hw->ident_sta_fw.minor, hw->ident_sta_fw.variant);
	} else {
		netdev_info(wlandev->netdev,
			    "ident: ap f/w: id=0x%02x %d.%d.%d\n",
			    hw->ident_sta_fw.id, hw->ident_sta_fw.major,
			    hw->ident_sta_fw.minor, hw->ident_sta_fw.variant);
		netdev_err(wlandev->netdev, "Unsupported Tertiary AP firmware loaded!\n");
		/* BUGFIX: previously we jumped to 'failed' with result
		 * still 0 (the last getconfig succeeded), so the caller
		 * saw success despite the unsupported firmware.  Report
		 * a driver error instead. */
		result = -1;
		goto failed;
	}

	/* Compatibility range, Modem supplier */
	result = hfa384x_drvr_getconfig(hw, HFA384x_RID_MFISUPRANGE,
					&hw->cap_sup_mfi,
					sizeof(hfa384x_caplevel_t));
	if (result) {
		netdev_err(wlandev->netdev, "Failed to retrieve MFISUPRANGE\n");
		goto failed;
	}

	/* get all the Compatibility range, modem interface supplier
	   fields in byte order */
	hw->cap_sup_mfi.role = le16_to_cpu(hw->cap_sup_mfi.role);
	hw->cap_sup_mfi.id = le16_to_cpu(hw->cap_sup_mfi.id);
	hw->cap_sup_mfi.variant = le16_to_cpu(hw->cap_sup_mfi.variant);
	hw->cap_sup_mfi.bottom = le16_to_cpu(hw->cap_sup_mfi.bottom);
	hw->cap_sup_mfi.top = le16_to_cpu(hw->cap_sup_mfi.top);

	netdev_info(wlandev->netdev,
		    "MFI:SUP:role=0x%02x:id=0x%02x:var=0x%02x:b/t=%d/%d\n",
		    hw->cap_sup_mfi.role, hw->cap_sup_mfi.id,
		    hw->cap_sup_mfi.variant, hw->cap_sup_mfi.bottom,
		    hw->cap_sup_mfi.top);

	/* Compatibility range, Controller supplier */
	result = hfa384x_drvr_getconfig(hw, HFA384x_RID_CFISUPRANGE,
					&hw->cap_sup_cfi,
					sizeof(hfa384x_caplevel_t));
	if (result) {
		netdev_err(wlandev->netdev, "Failed to retrieve CFISUPRANGE\n");
		goto failed;
	}

	/* get all the Compatibility range, controller interface supplier
	   fields in byte order */
	hw->cap_sup_cfi.role = le16_to_cpu(hw->cap_sup_cfi.role);
	hw->cap_sup_cfi.id = le16_to_cpu(hw->cap_sup_cfi.id);
	hw->cap_sup_cfi.variant = le16_to_cpu(hw->cap_sup_cfi.variant);
	hw->cap_sup_cfi.bottom = le16_to_cpu(hw->cap_sup_cfi.bottom);
	hw->cap_sup_cfi.top = le16_to_cpu(hw->cap_sup_cfi.top);

	netdev_info(wlandev->netdev,
		    "CFI:SUP:role=0x%02x:id=0x%02x:var=0x%02x:b/t=%d/%d\n",
		    hw->cap_sup_cfi.role, hw->cap_sup_cfi.id,
		    hw->cap_sup_cfi.variant, hw->cap_sup_cfi.bottom,
		    hw->cap_sup_cfi.top);

	/* Compatibility range, Primary f/w supplier */
	result = hfa384x_drvr_getconfig(hw, HFA384x_RID_PRISUPRANGE,
					&hw->cap_sup_pri,
					sizeof(hfa384x_caplevel_t));
	if (result) {
		netdev_err(wlandev->netdev, "Failed to retrieve PRISUPRANGE\n");
		goto failed;
	}

	/* get all the Compatibility range, primary firmware supplier
	   fields in byte order */
	hw->cap_sup_pri.role = le16_to_cpu(hw->cap_sup_pri.role);
	hw->cap_sup_pri.id = le16_to_cpu(hw->cap_sup_pri.id);
	hw->cap_sup_pri.variant = le16_to_cpu(hw->cap_sup_pri.variant);
	hw->cap_sup_pri.bottom = le16_to_cpu(hw->cap_sup_pri.bottom);
	hw->cap_sup_pri.top = le16_to_cpu(hw->cap_sup_pri.top);

	netdev_info(wlandev->netdev,
		    "PRI:SUP:role=0x%02x:id=0x%02x:var=0x%02x:b/t=%d/%d\n",
		    hw->cap_sup_pri.role, hw->cap_sup_pri.id,
		    hw->cap_sup_pri.variant, hw->cap_sup_pri.bottom,
		    hw->cap_sup_pri.top);

	/* Compatibility range, Station f/w supplier */
	result = hfa384x_drvr_getconfig(hw, HFA384x_RID_STASUPRANGE,
					&hw->cap_sup_sta,
					sizeof(hfa384x_caplevel_t));
	if (result) {
		netdev_err(wlandev->netdev, "Failed to retrieve STASUPRANGE\n");
		goto failed;
	}

	/* get all the Compatibility range, station firmware supplier
	   fields in byte order */
	hw->cap_sup_sta.role = le16_to_cpu(hw->cap_sup_sta.role);
	hw->cap_sup_sta.id = le16_to_cpu(hw->cap_sup_sta.id);
	hw->cap_sup_sta.variant = le16_to_cpu(hw->cap_sup_sta.variant);
	hw->cap_sup_sta.bottom = le16_to_cpu(hw->cap_sup_sta.bottom);
	hw->cap_sup_sta.top = le16_to_cpu(hw->cap_sup_sta.top);

	if (hw->cap_sup_sta.id == 0x04) {
		netdev_info(wlandev->netdev,
			    "STA:SUP:role=0x%02x:id=0x%02x:var=0x%02x:b/t=%d/%d\n",
			    hw->cap_sup_sta.role, hw->cap_sup_sta.id,
			    hw->cap_sup_sta.variant, hw->cap_sup_sta.bottom,
			    hw->cap_sup_sta.top);
	} else {
		netdev_info(wlandev->netdev,
			    "AP:SUP:role=0x%02x:id=0x%02x:var=0x%02x:b/t=%d/%d\n",
			    hw->cap_sup_sta.role, hw->cap_sup_sta.id,
			    hw->cap_sup_sta.variant, hw->cap_sup_sta.bottom,
			    hw->cap_sup_sta.top);
	}

	/* Compatibility range, primary f/w actor, CFI supplier */
	result = hfa384x_drvr_getconfig(hw, HFA384x_RID_PRI_CFIACTRANGES,
					&hw->cap_act_pri_cfi,
					sizeof(hfa384x_caplevel_t));
	if (result) {
		netdev_err(wlandev->netdev, "Failed to retrieve PRI_CFIACTRANGES\n");
		goto failed;
	}

	/* get all the Compatibility range, primary f/w actor, CFI supplier
	   fields in byte order */
	hw->cap_act_pri_cfi.role = le16_to_cpu(hw->cap_act_pri_cfi.role);
	hw->cap_act_pri_cfi.id = le16_to_cpu(hw->cap_act_pri_cfi.id);
	hw->cap_act_pri_cfi.variant = le16_to_cpu(hw->cap_act_pri_cfi.variant);
	hw->cap_act_pri_cfi.bottom = le16_to_cpu(hw->cap_act_pri_cfi.bottom);
	hw->cap_act_pri_cfi.top = le16_to_cpu(hw->cap_act_pri_cfi.top);

	netdev_info(wlandev->netdev,
		    "PRI-CFI:ACT:role=0x%02x:id=0x%02x:var=0x%02x:b/t=%d/%d\n",
		    hw->cap_act_pri_cfi.role, hw->cap_act_pri_cfi.id,
		    hw->cap_act_pri_cfi.variant, hw->cap_act_pri_cfi.bottom,
		    hw->cap_act_pri_cfi.top);

	/* Compatibility range, sta f/w actor, CFI supplier */
	result = hfa384x_drvr_getconfig(hw, HFA384x_RID_STA_CFIACTRANGES,
					&hw->cap_act_sta_cfi,
					sizeof(hfa384x_caplevel_t));
	if (result) {
		netdev_err(wlandev->netdev, "Failed to retrieve STA_CFIACTRANGES\n");
		goto failed;
	}

	/* get all the Compatibility range, station f/w actor, CFI supplier
	   fields in byte order */
	hw->cap_act_sta_cfi.role = le16_to_cpu(hw->cap_act_sta_cfi.role);
	hw->cap_act_sta_cfi.id = le16_to_cpu(hw->cap_act_sta_cfi.id);
	hw->cap_act_sta_cfi.variant = le16_to_cpu(hw->cap_act_sta_cfi.variant);
	hw->cap_act_sta_cfi.bottom = le16_to_cpu(hw->cap_act_sta_cfi.bottom);
	hw->cap_act_sta_cfi.top = le16_to_cpu(hw->cap_act_sta_cfi.top);

	netdev_info(wlandev->netdev,
		    "STA-CFI:ACT:role=0x%02x:id=0x%02x:var=0x%02x:b/t=%d/%d\n",
		    hw->cap_act_sta_cfi.role, hw->cap_act_sta_cfi.id,
		    hw->cap_act_sta_cfi.variant, hw->cap_act_sta_cfi.bottom,
		    hw->cap_act_sta_cfi.top);

	/* Compatibility range, sta f/w actor, MFI supplier */
	result = hfa384x_drvr_getconfig(hw, HFA384x_RID_STA_MFIACTRANGES,
					&hw->cap_act_sta_mfi,
					sizeof(hfa384x_caplevel_t));
	if (result) {
		netdev_err(wlandev->netdev, "Failed to retrieve STA_MFIACTRANGES\n");
		goto failed;
	}

	/* get all the Compatibility range, station f/w actor, MFI supplier
	   fields in byte order */
	hw->cap_act_sta_mfi.role = le16_to_cpu(hw->cap_act_sta_mfi.role);
	hw->cap_act_sta_mfi.id = le16_to_cpu(hw->cap_act_sta_mfi.id);
	hw->cap_act_sta_mfi.variant = le16_to_cpu(hw->cap_act_sta_mfi.variant);
	hw->cap_act_sta_mfi.bottom = le16_to_cpu(hw->cap_act_sta_mfi.bottom);
	hw->cap_act_sta_mfi.top = le16_to_cpu(hw->cap_act_sta_mfi.top);

	netdev_info(wlandev->netdev,
		    "STA-MFI:ACT:role=0x%02x:id=0x%02x:var=0x%02x:b/t=%d/%d\n",
		    hw->cap_act_sta_mfi.role, hw->cap_act_sta_mfi.id,
		    hw->cap_act_sta_mfi.variant, hw->cap_act_sta_mfi.bottom,
		    hw->cap_act_sta_mfi.top);

	/* Serial Number */
	result = hfa384x_drvr_getconfig(hw, HFA384x_RID_NICSERIALNUMBER,
					snum, HFA384x_RID_NICSERIALNUMBER_LEN);
	if (!result) {
		netdev_info(wlandev->netdev, "Prism2 card SN: %*pEhp\n",
			    HFA384x_RID_NICSERIALNUMBER_LEN, snum);
	} else {
		netdev_err(wlandev->netdev, "Failed to retrieve Prism2 Card SN\n");
		goto failed;
	}

	/* Collect the MAC address */
	result = hfa384x_drvr_getconfig(hw, HFA384x_RID_CNFOWNMACADDR,
					wlandev->netdev->dev_addr, ETH_ALEN);
	if (result != 0) {
		netdev_err(wlandev->netdev, "Failed to retrieve mac address\n");
		goto failed;
	}

	/* short preamble is always implemented */
	wlandev->nsdcaps |= P80211_NSDCAP_SHORT_PREAMBLE;

	/* find out if hardware wep is implemented */
	hfa384x_drvr_getconfig16(hw, HFA384x_RID_PRIVACYOPTIMP, &temp);
	if (temp)
		wlandev->nsdcaps |= P80211_NSDCAP_HARDWAREWEP;

	/* get the dBm Scaling constant */
	hfa384x_drvr_getconfig16(hw, HFA384x_RID_CNFDBMADJUST, &temp);
	hw->dbmadjust = temp;

	/* Only enable scan by default on newer firmware */
	if (HFA384x_FIRMWARE_VERSION(hw->ident_sta_fw.major,
				     hw->ident_sta_fw.minor,
				     hw->ident_sta_fw.variant) <
	    HFA384x_FIRMWARE_VERSION(1, 5, 5)) {
		wlandev->nsdcaps |= P80211_NSDCAP_NOSCAN;
	}

	/* TODO: Set any internally managed config items */

	goto done;

failed:
	netdev_err(wlandev->netdev, "Failed, result=%d\n", result);
done:
	return result;
}
/*----------------------------------------------------------------
* prism2sta_globalsetup
*
* Set any global RIDs that we want to set at device activation.
*
* Arguments:
* wlandev wlan device structure
*
* Returns:
* 0 success
* >0 f/w reported error
* <0 driver reported error
*
* Side effects:
*
* Call context:
* process thread
----------------------------------------------------------------*/
static int prism2sta_globalsetup(wlandevice_t *wlandev)
{
	hfa384x_t *hw = wlandev->priv;

	/* The only global RID programmed at activation time is the
	 * maximum data frame length. */
	return hfa384x_drvr_setconfig16(hw, HFA384x_RID_CNFMAXDATALEN,
					WLAN_DATA_MAXLEN);
}
static int prism2sta_setmulticast(wlandevice_t *wlandev, netdevice_t *dev)
{
	hfa384x_t *hw = wlandev->priv;
	u16 promisc;

	/* Nothing to program until the MAC is up and running. */
	if (hw->state != HFA384x_STATE_RUNNING)
		return 0;

	promisc = (dev->flags & (IFF_PROMISC | IFF_ALLMULTI)) ?
	    P80211ENUM_truth_true : P80211ENUM_truth_false;

	return hfa384x_drvr_setconfig16_async(hw, HFA384x_RID_PROMISCMODE,
					      promisc);
}
/*----------------------------------------------------------------
* prism2sta_inf_handover
*
* Handles the receipt of a Handover info frame. Should only be present
* in APs only.
*
* Arguments:
* wlandev wlan device structure
* inf ptr to info frame (contents in hfa384x order)
*
* Returns:
* nothing
*
* Side effects:
*
* Call context:
* interrupt
----------------------------------------------------------------*/
static void prism2sta_inf_handover(wlandevice_t *wlandev,
				   hfa384x_InfFrame_t *inf)
{
	/* Handover frames are AP-side only; just log and drop. */
	pr_debug("received infoframe:HANDOVER (unhandled)\n");
}
/*----------------------------------------------------------------
* prism2sta_inf_tallies
*
* Handles the receipt of a CommTallies info frame.
*
* Arguments:
* wlandev wlan device structure
* inf ptr to info frame (contents in hfa384x order)
*
* Returns:
* nothing
*
* Side effects:
*
* Call context:
* interrupt
----------------------------------------------------------------*/
static void prism2sta_inf_tallies(wlandevice_t *wlandev,
				  hfa384x_InfFrame_t *inf)
{
	hfa384x_t *hw = wlandev->priv;
	u32 *dst = (u32 *) &hw->tallies;
	int cnt = sizeof(hfa384x_CommTallies32_t) / sizeof(u32);
	int i;

	/*
	 * A record longer than 22 words carries 32-bit counters,
	 * otherwise the firmware sent the 16-bit variant.  Either way
	 * each little-endian counter is accumulated into the 32-bit
	 * host-side tallies.
	 */
	if (inf->framelen > 22) {
		u32 *src32 = (u32 *) &inf->info.commtallies32;

		for (i = 0; i < cnt; i++)
			*dst++ += le32_to_cpu(*src32++);
	} else {
		u16 *src16 = (u16 *) &inf->info.commtallies16;

		for (i = 0; i < cnt; i++)
			*dst++ += le16_to_cpu(*src16++);
	}
}
/*----------------------------------------------------------------
* prism2sta_inf_scanresults
*
* Handles the receipt of a Scan Results info frame.
*
* Arguments:
* wlandev wlan device structure
* inf ptr to info frame (contents in hfa384x order)
*
* Returns:
* nothing
*
* Side effects:
*
* Call context:
* interrupt
----------------------------------------------------------------*/
static void prism2sta_inf_scanresults(wlandevice_t *wlandev,
				      hfa384x_InfFrame_t *inf)
{
	hfa384x_t *hw = (hfa384x_t *) wlandev->priv;
	int nbss;
	hfa384x_ScanResult_t *sr = &(inf->info.scanresult);
	int i;
	hfa384x_JoinRequest_data_t joinreq;
	int result;

	/* Get the number of results, first in bytes, then in results */
	nbss = (inf->framelen * sizeof(u16)) -
	    sizeof(inf->infotype) - sizeof(inf->info.scanresult.scanreason);
	nbss /= sizeof(hfa384x_ScanResultSub_t);

	/* Print em */
	pr_debug("rx scanresults, reason=%d, nbss=%d:\n",
		 inf->info.scanresult.scanreason, nbss);
	for (i = 0; i < nbss; i++) {
		pr_debug("chid=%d anl=%d sl=%d bcnint=%d\n",
			 sr->result[i].chid,
			 sr->result[i].anl,
			 sr->result[i].sl, sr->result[i].bcnint);
		pr_debug("  capinfo=0x%04x proberesp_rate=%d\n",
			 sr->result[i].capinfo, sr->result[i].proberesp_rate);
	}

	/* BUGFIX: with an empty result set the old code still read
	 * result[0] (past the received frame data) and issued a join
	 * request built from garbage.  Bail out instead. */
	if (nbss <= 0)
		return;

	/* issue a join request for the first (best) BSS found */
	joinreq.channel = sr->result[0].chid;
	memcpy(joinreq.bssid, sr->result[0].bssid, WLAN_BSSID_LEN);
	result = hfa384x_drvr_setconfig(hw,
					HFA384x_RID_JOINREQUEST,
					&joinreq, HFA384x_RID_JOINREQUEST_LEN);
	if (result) {
		netdev_err(wlandev->netdev, "setconfig(joinreq) failed, result=%d\n",
			   result);
	}
}
/*----------------------------------------------------------------
* prism2sta_inf_hostscanresults
*
* Handles the receipt of a Scan Results info frame.
*
* Arguments:
* wlandev wlan device structure
* inf ptr to info frame (contents in hfa384x order)
*
* Returns:
* nothing
*
* Side effects:
*
* Call context:
* interrupt
----------------------------------------------------------------*/
static void prism2sta_inf_hostscanresults(wlandevice_t *wlandev,
					  hfa384x_InfFrame_t *inf)
{
	hfa384x_t *hw = (hfa384x_t *) wlandev->priv;
	int nbss;

	/* 3 header words precede the 32-byte result records. */
	nbss = (inf->framelen - 3) / 32;
	pr_debug("Received %d hostscan results\n", nbss);

	/* Cap at the 32 records the cached frame can describe. */
	if (nbss > 32)
		nbss = 32;

	kfree(hw->scanresults);
	/* NOTE(review): kmemdup() can fail and leave scanresults NULL;
	 * consumers of hw->scanresults should tolerate that — verify. */
	hw->scanresults = kmemdup(inf, sizeof(hfa384x_InfFrame_t), GFP_ATOMIC);

	/* An empty scan is signalled to the waiter as -1. */
	if (nbss == 0)
		nbss = -1;

	/* Notify/wake the sleeping caller. */
	hw->scanflag = nbss;
	wake_up_interruptible(&hw->cmdq);
}
/*----------------------------------------------------------------
* prism2sta_inf_chinforesults
*
* Handles the receipt of a Channel Info Results info frame.
*
* Arguments:
* wlandev wlan device structure
* inf ptr to info frame (contents in hfa384x order)
*
* Returns:
* nothing
*
* Side effects:
*
* Call context:
* interrupt
----------------------------------------------------------------*/
static void prism2sta_inf_chinforesults(wlandevice_t *wlandev,
					hfa384x_InfFrame_t *inf)
{
	hfa384x_t *hw = (hfa384x_t *) wlandev->priv;
	unsigned int i, n;

	/* Bitmask of channels that were actually scanned. */
	hw->channel_info.results.scanchannels =
	    le16_to_cpu(inf->info.chinforesult.scanchannels);

	/* Walk the scanned-channel bitmask; 'n' indexes the packed
	 * records in the info frame, 'chan' the per-channel slot in
	 * the host-side results table. */
	for (i = 0, n = 0; i < HFA384x_CHINFORESULT_MAX; i++) {
		hfa384x_ChInfoResultSub_t *result;
		hfa384x_ChInfoResultSub_t *chinforesult;
		int chan;

		if (!(hw->channel_info.results.scanchannels & (1 << i)))
			continue;

		result = &inf->info.chinforesult.result[n];
		chan = le16_to_cpu(result->chid) - 1;

		/* Skip records whose channel id is out of table range. */
		if (chan < 0 || chan >= HFA384x_CHINFORESULT_MAX)
			continue;

		chinforesult = &hw->channel_info.results.result[chan];
		chinforesult->chid = chan;
		chinforesult->anl = le16_to_cpu(result->anl);
		chinforesult->pnl = le16_to_cpu(result->pnl);
		chinforesult->active = le16_to_cpu(result->active);

		pr_debug("chinfo: channel %d, %s level (avg/peak)=%d/%d dB, pcf %d\n",
			 chan + 1,
			 (chinforesult->active & HFA384x_CHINFORESULT_BSSACTIVE)
				? "signal" : "noise",
			 chinforesult->anl, chinforesult->pnl,
			 (chinforesult->active & HFA384x_CHINFORESULT_PCFACTIVE)
				? 1 : 0);
		n++;
	}
	/* Mark the channel-info request complete and record the count. */
	atomic_set(&hw->channel_info.done, 2);

	hw->channel_info.count = n;
}
/*
 * prism2sta_processing_defer
 *
 * Workqueue bottom half (hw->link_bh): drains the queued authentication
 * request frames, then acts on a pending link status change reported by
 * the MAC.  Runs in process context, so the synchronous getconfig/
 * setconfig helpers may be used.
 *
 * Fix: the sk_buffs queued by prism2sta_inf_authreq() were dequeued and
 * processed here but never freed, leaking one skb per auth request.
 *
 * data - the work_struct embedded in the hfa384x state (hw->link_bh)
 */
void prism2sta_processing_defer(struct work_struct *data)
{
	hfa384x_t *hw = container_of(data, struct hfa384x, link_bh);
	wlandevice_t *wlandev = hw->wlandev;
	hfa384x_bytestr32_t ssid;
	int result;

	/* First let's process the auth frames */
	{
		struct sk_buff *skb;
		hfa384x_InfFrame_t *inf;

		while ((skb = skb_dequeue(&hw->authq))) {
			inf = (hfa384x_InfFrame_t *) skb->data;
			prism2sta_inf_authreq_defer(wlandev, inf);
			/* The handler copies what it needs out of the frame;
			 * release the skb allocated in prism2sta_inf_authreq()
			 * (previously this was leaked).
			 */
			dev_kfree_skb(skb);
		}
	}

	/* Now let's handle the linkstatus stuff */
	if (hw->link_status == hw->link_status_new)
		return;

	hw->link_status = hw->link_status_new;

	switch (hw->link_status) {
	case HFA384x_LINK_NOTCONNECTED:
		/* I'm currently assuming that this is the initial link
		 * state.  It should only be possible immediately
		 * following an Enable command.
		 * Response:
		 * Block Transmits, Ignore receives of data frames
		 */
		netif_carrier_off(wlandev->netdev);
		netdev_info(wlandev->netdev, "linkstatus=NOTCONNECTED (unhandled)\n");
		break;

	case HFA384x_LINK_CONNECTED:
		/* This one indicates a successful scan/join/auth/assoc.
		 * When we have the full MLME complement, this event will
		 * signify successful completion of both mlme_authenticate
		 * and mlme_associate.  State management will get a little
		 * ugly here.
		 * Response:
		 * Indicate authentication and/or association
		 * Enable Transmits, Receives and pass up data frames
		 */
		netif_carrier_on(wlandev->netdev);

		/* If we are joining a specific AP, set our
		 * state and reset retries
		 */
		if (hw->join_ap == 1)
			hw->join_ap = 2;
		hw->join_retries = 60;

		/* Don't call this in monitor mode */
		if (wlandev->netdev->type == ARPHRD_ETHER) {
			u16 portstatus;

			netdev_info(wlandev->netdev, "linkstatus=CONNECTED\n");

			/* For non-usb devices, we can use the sync versions */
			/* Collect the BSSID, and set state to allow tx */
			result = hfa384x_drvr_getconfig(hw,
							HFA384x_RID_CURRENTBSSID,
							wlandev->bssid,
							WLAN_BSSID_LEN);
			if (result) {
				pr_debug("getconfig(0x%02x) failed, result = %d\n",
					 HFA384x_RID_CURRENTBSSID, result);
				return;
			}

			result = hfa384x_drvr_getconfig(hw,
							HFA384x_RID_CURRENTSSID,
							&ssid, sizeof(ssid));
			if (result) {
				pr_debug("getconfig(0x%02x) failed, result = %d\n",
					 HFA384x_RID_CURRENTSSID, result);
				return;
			}
			prism2mgmt_bytestr2pstr(
					(struct hfa384x_bytestr *) &ssid,
					(p80211pstrd_t *) &wlandev->ssid);

			/* Collect the port status */
			result = hfa384x_drvr_getconfig16(hw,
							  HFA384x_RID_PORTSTATUS,
							  &portstatus);
			if (result) {
				pr_debug("getconfig(0x%02x) failed, result = %d\n",
					 HFA384x_RID_PORTSTATUS, result);
				return;
			}
			wlandev->macmode =
				(portstatus == HFA384x_PSTATUS_CONN_IBSS) ?
				WLAN_MACMODE_IBSS_STA : WLAN_MACMODE_ESS_STA;

			/* signal back up to cfg80211 layer */
			prism2_connect_result(wlandev, P80211ENUM_truth_false);

			/* Get the ball rolling on the comms quality stuff */
			prism2sta_commsqual_defer(&hw->commsqual_bh);
		}
		break;

	case HFA384x_LINK_DISCONNECTED:
		/* This one indicates that our association is gone.  We've
		 * lost connection with the AP and/or been disassociated.
		 * This indicates that the MAC has completely cleared its
		 * associated state.  We should send a deauth indication
		 * (implying disassoc) up to the MLME.
		 * Response:
		 * Indicate Deauthentication
		 * Block Transmits, Ignore receives of data frames
		 */
		if (wlandev->netdev->type == ARPHRD_ETHER)
			netdev_info(wlandev->netdev,
				    "linkstatus=DISCONNECTED (unhandled)\n");
		wlandev->macmode = WLAN_MACMODE_NONE;

		netif_carrier_off(wlandev->netdev);

		/* signal back up to cfg80211 layer */
		prism2_disconnected(wlandev);
		break;

	case HFA384x_LINK_AP_CHANGE:
		/* This one indicates that the MAC has decided to and
		 * successfully completed a change to another AP.  We
		 * should probably implement a reassociation indication
		 * in response to this one.  I'm thinking that the
		 * p80211 layer needs to be notified in case of
		 * buffering/queueing issues.  User mode also needs to be
		 * notified so that any BSS dependent elements can be
		 * updated.
		 * Response:
		 * Indicate Reassociation
		 * Enable Transmits, Receives and pass up data frames
		 */
		netdev_info(wlandev->netdev, "linkstatus=AP_CHANGE\n");

		result = hfa384x_drvr_getconfig(hw,
						HFA384x_RID_CURRENTBSSID,
						wlandev->bssid, WLAN_BSSID_LEN);
		if (result) {
			pr_debug("getconfig(0x%02x) failed, result = %d\n",
				 HFA384x_RID_CURRENTBSSID, result);
			return;
		}

		result = hfa384x_drvr_getconfig(hw,
						HFA384x_RID_CURRENTSSID,
						&ssid, sizeof(ssid));
		if (result) {
			pr_debug("getconfig(0x%02x) failed, result = %d\n",
				 HFA384x_RID_CURRENTSSID, result);
			return;
		}
		prism2mgmt_bytestr2pstr((struct hfa384x_bytestr *) &ssid,
					(p80211pstrd_t *) &wlandev->ssid);

		hw->link_status = HFA384x_LINK_CONNECTED;
		netif_carrier_on(wlandev->netdev);

		/* signal back up to cfg80211 layer */
		prism2_roamed(wlandev);
		break;

	case HFA384x_LINK_AP_OUTOFRANGE:
		/* This one indicates that the MAC has decided that the
		 * AP is out of range, but hasn't found a better candidate
		 * so the MAC maintains its "associated" state in case
		 * we get back in range.  We should block transmits and
		 * receives in this state.  Do we need an indication here?
		 * Probably not since a polling user-mode element would
		 * get this status from p2PortStatus(FD40).  What about
		 * p80211?
		 * Response:
		 * Block Transmits, Ignore receives of data frames
		 */
		netdev_info(wlandev->netdev, "linkstatus=AP_OUTOFRANGE (unhandled)\n");

		netif_carrier_off(wlandev->netdev);
		break;

	case HFA384x_LINK_AP_INRANGE:
		/* This one indicates that the MAC has decided that the
		 * AP is back in range.  We continue working with our
		 * existing association.
		 * Response:
		 * Enable Transmits, Receives and pass up data frames
		 */
		netdev_info(wlandev->netdev, "linkstatus=AP_INRANGE\n");

		hw->link_status = HFA384x_LINK_CONNECTED;
		netif_carrier_on(wlandev->netdev);
		break;

	case HFA384x_LINK_ASSOCFAIL:
		/* This one is actually a peer to CONNECTED.  We've
		 * requested a join for a given SSID and optionally BSSID.
		 * We can use this one to indicate authentication and
		 * association failures.  The trick is going to be
		 * 1) identifying the failure, and 2) state management.
		 * Response:
		 * Disable Transmits, Ignore receives of data frames
		 */
		if (hw->join_ap && --hw->join_retries > 0) {
			hfa384x_JoinRequest_data_t joinreq;

			joinreq = hw->joinreq;
			/* Send the join request */
			hfa384x_drvr_setconfig(hw,
					       HFA384x_RID_JOINREQUEST,
					       &joinreq,
					       HFA384x_RID_JOINREQUEST_LEN);
			netdev_info(wlandev->netdev,
				    "linkstatus=ASSOCFAIL (re-submitting join)\n");
		} else {
			netdev_info(wlandev->netdev, "linkstatus=ASSOCFAIL (unhandled)\n");
		}

		netif_carrier_off(wlandev->netdev);

		/* signal back up to cfg80211 layer */
		prism2_connect_result(wlandev, P80211ENUM_truth_true);
		break;

	default:
		/* This is bad, IO port problems? */
		netdev_warn(wlandev->netdev,
			    "unknown linkstatus=0x%02x\n", hw->link_status);
		return;
	}

	wlandev->linkstatus = (hw->link_status == HFA384x_LINK_CONNECTED);
}
/*----------------------------------------------------------------
* prism2sta_inf_linkstatus
*
* Handles the receipt of a Link Status info frame.
*
* Arguments:
* wlandev wlan device structure
* inf ptr to info frame (contents in hfa384x order)
*
* Returns:
* nothing
*
* Side effects:
*
* Call context:
* interrupt
----------------------------------------------------------------*/
static void prism2sta_inf_linkstatus(wlandevice_t *wlandev,
				     hfa384x_InfFrame_t *inf)
{
	hfa384x_t *hw = (hfa384x_t *) wlandev->priv;
	u16 new_state = le16_to_cpu(inf->info.linkstatus.linkstatus);

	/* Just latch the reported state; the transition itself is handled
	 * later in process context by the link_bh work.
	 */
	hw->link_status_new = new_state;
	schedule_work(&hw->link_bh);
}
/*----------------------------------------------------------------
* prism2sta_inf_assocstatus
*
* Handles the receipt of an Association Status info frame. Should
* be present in APs only.
*
* Arguments:
* wlandev wlan device structure
* inf ptr to info frame (contents in hfa384x order)
*
* Returns:
* nothing
*
* Side effects:
*
* Call context:
* interrupt
----------------------------------------------------------------*/
static void prism2sta_inf_assocstatus(wlandevice_t *wlandev,
				      hfa384x_InfFrame_t *inf)
{
	hfa384x_t *hw = (hfa384x_t *) wlandev->priv;
	hfa384x_AssocStatus_t rec;
	int idx;

	/* Work on a local copy with the multi-byte fields in CPU order. */
	memcpy(&rec, &inf->info.assocstatus, sizeof(rec));
	rec.assocstatus = le16_to_cpu(rec.assocstatus);
	rec.reason = le16_to_cpu(rec.reason);

	/*
	 * Look the station up in the list of authenticated stations.  A miss
	 * is only expected for an "authentication failed" status; a hit with
	 * that same status is equally suspicious.  Otherwise record whether
	 * the station is gaining or losing its association.
	 */
	idx = 0;
	while (idx < hw->authlist.cnt &&
	       memcmp(rec.sta_addr, hw->authlist.addr[idx], ETH_ALEN) != 0)
		idx++;

	if (idx >= hw->authlist.cnt) {
		if (rec.assocstatus != HFA384x_ASSOCSTATUS_AUTHFAIL)
			netdev_warn(wlandev->netdev,
				    "assocstatus info frame received for non-authenticated station.\n");
		return;
	}

	hw->authlist.assoc[idx] =
		(rec.assocstatus == HFA384x_ASSOCSTATUS_STAASSOC ||
		 rec.assocstatus == HFA384x_ASSOCSTATUS_REASSOC);

	if (rec.assocstatus == HFA384x_ASSOCSTATUS_AUTHFAIL)
		netdev_warn(wlandev->netdev,
			    "authfail assocstatus info frame received for authenticated station.\n");
}
/*----------------------------------------------------------------
* prism2sta_inf_authreq
*
* Handles the receipt of an Authentication Request info frame. Should
* be present in APs only.
*
* Arguments:
* wlandev wlan device structure
* inf ptr to info frame (contents in hfa384x order)
*
* Returns:
* nothing
*
* Side effects:
*
* Call context:
* interrupt
*
----------------------------------------------------------------*/
static void prism2sta_inf_authreq(wlandevice_t *wlandev,
				  hfa384x_InfFrame_t *inf)
{
	hfa384x_t *hw = (hfa384x_t *) wlandev->priv;
	struct sk_buff *skb = dev_alloc_skb(sizeof(*inf));

	/* Can't process in interrupt context: snapshot the frame into an
	 * skb, queue it, and let the link_bh work handle it.  On allocation
	 * failure the request is silently dropped, as before.
	 */
	if (!skb)
		return;

	memcpy(skb_put(skb, sizeof(*inf)), inf, sizeof(*inf));
	skb_queue_tail(&hw->authq, skb);
	schedule_work(&hw->link_bh);
}
/*
 * prism2sta_inf_authreq_defer
 *
 * Deferred (process context) handling of an Authentication Request info
 * frame.  Builds an AuthenticateStation record, decides whether to accept
 * the station based on hw->accessmode and the allow/deny lists, maintains
 * the list of authenticated stations, and writes the verdict back to the
 * MAC via the AUTHENTICATESTA RID.
 *
 * wlandev - wlan device structure
 * inf     - authreq info frame (contents in hfa384x order)
 */
static void prism2sta_inf_authreq_defer(wlandevice_t *wlandev,
					hfa384x_InfFrame_t *inf)
{
	hfa384x_t *hw = (hfa384x_t *) wlandev->priv;
	hfa384x_authenticateStation_data_t rec;

	int i, added, result, cnt;
	u8 *addr;

	/*
	 * Build the AuthenticateStation record.  Initialize it for denying
	 * authentication.
	 */
	ether_addr_copy(rec.address, inf->info.authreq.sta_addr);
	rec.status = P80211ENUM_status_unspec_failure;

	/*
	 * Authenticate based on the access mode.
	 */
	switch (hw->accessmode) {
	case WLAN_ACCESS_NONE:

		/*
		 * Deny all new authentications.  However, if a station
		 * is ALREADY authenticated, then accept it.
		 */
		for (i = 0; i < hw->authlist.cnt; i++)
			if (memcmp(rec.address, hw->authlist.addr[i],
				   ETH_ALEN) == 0) {
				rec.status = P80211ENUM_status_successful;
				break;
			}

		break;

	case WLAN_ACCESS_ALL:

		/*
		 * Allow all authentications.
		 */
		rec.status = P80211ENUM_status_successful;
		break;

	case WLAN_ACCESS_ALLOW:

		/*
		 * Only allow the authentication if the MAC address
		 * is in the list of allowed addresses.
		 *
		 * Since this is the interrupt handler, we may be here
		 * while the access list is in the middle of being
		 * updated.  Choose the list which is currently okay.
		 * See "prism2mib_priv_accessallow()" for details.
		 */
		if (hw->allow.modify == 0) {
			cnt = hw->allow.cnt;
			addr = hw->allow.addr[0];
		} else {
			cnt = hw->allow.cnt1;
			addr = hw->allow.addr1[0];
		}

		/* addr walks a flat array of 6-byte MAC addresses. */
		for (i = 0; i < cnt; i++, addr += ETH_ALEN)
			if (memcmp(rec.address, addr, ETH_ALEN) == 0) {
				rec.status = P80211ENUM_status_successful;
				break;
			}

		break;

	case WLAN_ACCESS_DENY:

		/*
		 * Allow the authentication UNLESS the MAC address is
		 * in the list of denied addresses.
		 *
		 * Since this is the interrupt handler, we may be here
		 * while the access list is in the middle of being
		 * updated.  Choose the list which is currently okay.
		 * See "prism2mib_priv_accessdeny()" for details.
		 */
		if (hw->deny.modify == 0) {
			cnt = hw->deny.cnt;
			addr = hw->deny.addr[0];
		} else {
			cnt = hw->deny.cnt1;
			addr = hw->deny.addr1[0];
		}

		/* Default to accept, then flip to failure on a deny hit. */
		rec.status = P80211ENUM_status_successful;

		for (i = 0; i < cnt; i++, addr += ETH_ALEN)
			if (memcmp(rec.address, addr, ETH_ALEN) == 0) {
				rec.status = P80211ENUM_status_unspec_failure;
				break;
			}

		break;
	}

	/*
	 * If the authentication is okay, then add the MAC address to the
	 * list of authenticated stations.  Don't add the address if it
	 * is already in the list.  (802.11b does not seem to disallow
	 * a station from issuing an authentication request when the
	 * station is already authenticated.  Does this sort of thing
	 * ever happen?  We might as well do the check just in case.)
	 */
	added = 0;

	if (rec.status == P80211ENUM_status_successful) {
		for (i = 0; i < hw->authlist.cnt; i++)
			if (memcmp(rec.address, hw->authlist.addr[i], ETH_ALEN)
			    == 0)
				break;

		if (i >= hw->authlist.cnt) {
			if (hw->authlist.cnt >= WLAN_AUTH_MAX) {
				rec.status = P80211ENUM_status_ap_full;
			} else {
				ether_addr_copy(
					hw->authlist.addr[hw->authlist.cnt],
					rec.address);
				hw->authlist.cnt++;
				added = 1;
			}
		}
	}

	/*
	 * Send back the results of the authentication.  If this doesn't work,
	 * then make sure to remove the address from the authenticated list if
	 * it was added.
	 *
	 * Note: rec.status is converted to wire (LE) order only now; every
	 * comparison above was done on the CPU-order value.
	 */
	rec.status = cpu_to_le16(rec.status);
	rec.algorithm = inf->info.authreq.algorithm;

	result = hfa384x_drvr_setconfig(hw, HFA384x_RID_AUTHENTICATESTA,
					&rec, sizeof(rec));
	if (result) {
		if (added)
			hw->authlist.cnt--;
		netdev_err(wlandev->netdev,
			   "setconfig(authenticatestation) failed, result=%d\n",
			   result);
	}
}
/*----------------------------------------------------------------
* prism2sta_inf_psusercnt
*
* Handles the receipt of a PowerSaveUserCount info frame. Should
* be present in APs only.
*
* Arguments:
* wlandev wlan device structure
* inf ptr to info frame (contents in hfa384x order)
*
* Returns:
* nothing
*
* Side effects:
*
* Call context:
* interrupt
----------------------------------------------------------------*/
static void prism2sta_inf_psusercnt(wlandevice_t *wlandev,
				    hfa384x_InfFrame_t *inf)
{
	hfa384x_t *hw = (hfa384x_t *) wlandev->priv;
	u16 count = le16_to_cpu(inf->info.psusercnt.usercnt);

	/* Cache the power-save user count reported by the MAC. */
	hw->psusercount = count;
}
/*----------------------------------------------------------------
* prism2sta_ev_info
*
* Handles the Info event.
*
* Arguments:
* wlandev wlan device structure
* inf ptr to a generic info frame
*
* Returns:
* nothing
*
* Side effects:
*
* Call context:
* interrupt
----------------------------------------------------------------*/
/*
 * prism2sta_ev_info
 *
 * Handle an Info event by dispatching the frame to the handler matching
 * its infotype.  Note that inf->infotype is converted to CPU byte order
 * in place before the dispatch, so later readers see host order.
 *
 * wlandev - wlan device structure
 * inf     - ptr to a generic info frame
 */
void prism2sta_ev_info(wlandevice_t *wlandev, hfa384x_InfFrame_t *inf)
{
	inf->infotype = le16_to_cpu(inf->infotype);
	/* Dispatch */
	switch (inf->infotype) {
	case HFA384x_IT_HANDOVERADDR:
		prism2sta_inf_handover(wlandev, inf);
		break;
	case HFA384x_IT_COMMTALLIES:
		prism2sta_inf_tallies(wlandev, inf);
		break;
	case HFA384x_IT_HOSTSCANRESULTS:
		prism2sta_inf_hostscanresults(wlandev, inf);
		break;
	case HFA384x_IT_SCANRESULTS:
		prism2sta_inf_scanresults(wlandev, inf);
		break;
	case HFA384x_IT_CHINFORESULTS:
		prism2sta_inf_chinforesults(wlandev, inf);
		break;
	case HFA384x_IT_LINKSTATUS:
		prism2sta_inf_linkstatus(wlandev, inf);
		break;
	case HFA384x_IT_ASSOCSTATUS:
		prism2sta_inf_assocstatus(wlandev, inf);
		break;
	case HFA384x_IT_AUTHREQ:
		prism2sta_inf_authreq(wlandev, inf);
		break;
	case HFA384x_IT_PSUSERCNT:
		prism2sta_inf_psusercnt(wlandev, inf);
		break;
	/* The following types are recognized but not acted upon. */
	case HFA384x_IT_KEYIDCHANGED:
		netdev_warn(wlandev->netdev, "Unhandled IT_KEYIDCHANGED\n");
		break;
	case HFA384x_IT_ASSOCREQ:
		netdev_warn(wlandev->netdev, "Unhandled IT_ASSOCREQ\n");
		break;
	case HFA384x_IT_MICFAILURE:
		netdev_warn(wlandev->netdev, "Unhandled IT_MICFAILURE\n");
		break;
	default:
		netdev_warn(wlandev->netdev,
			    "Unknown info type=0x%02x\n", inf->infotype);
		break;
	}
}
/*----------------------------------------------------------------
* prism2sta_ev_txexc
*
* Handles the TxExc event. A Transmit Exception event indicates
* that the MAC's TX process was unsuccessful - so the packet did
* not get transmitted.
*
* Arguments:
* wlandev wlan device structure
* status tx frame status word
*
* Returns:
* nothing
*
* Side effects:
*
* Call context:
* interrupt
----------------------------------------------------------------*/
/* Handle a Transmit Exception event (frame not transmitted by the MAC);
 * currently only logs the status word.
 */
void prism2sta_ev_txexc(wlandevice_t *wlandev, u16 status)
{
	pr_debug("TxExc status=0x%x.\n", status);
}
/*----------------------------------------------------------------
* prism2sta_ev_tx
*
* Handles the Tx event.
*
* Arguments:
* wlandev wlan device structure
* status tx frame status word
* Returns:
* nothing
*
* Side effects:
*
* Call context:
* interrupt
----------------------------------------------------------------*/
/* Handle a Tx-complete event: log the status word and bump the netdev
 * transmitted-packet counter.
 */
void prism2sta_ev_tx(wlandevice_t *wlandev, u16 status)
{
	pr_debug("Tx Complete, status=0x%04x\n", status);
	/* update linux network stats */
	wlandev->netdev->stats.tx_packets++;
}
/*----------------------------------------------------------------
* prism2sta_ev_rx
*
* Handles the Rx event.
*
* Arguments:
* wlandev wlan device structure
*
* Returns:
* nothing
*
* Side effects:
*
* Call context:
* interrupt
----------------------------------------------------------------*/
/* Handle an Rx event: hand the received frame straight to the p80211
 * layer, which takes ownership of the skb.
 */
void prism2sta_ev_rx(wlandevice_t *wlandev, struct sk_buff *skb)
{
	p80211netdev_rx(wlandev, skb);
}
/*----------------------------------------------------------------
* prism2sta_ev_alloc
*
* Handles the Alloc event.
*
* Arguments:
* wlandev wlan device structure
*
* Returns:
* nothing
*
* Side effects:
*
* Call context:
* interrupt
----------------------------------------------------------------*/
/* Handle an Alloc event by re-enabling the netdev transmit queue
 * (presumably the MAC has tx buffer space available again -- see the
 * hfa384x datasheet for the event's exact meaning).
 */
void prism2sta_ev_alloc(wlandevice_t *wlandev)
{
	netif_wake_queue(wlandev->netdev);
}
/*----------------------------------------------------------------
* create_wlan
*
* Called at module init time. This creates the wlandevice_t structure
* and initializes it with relevant bits.
*
* Arguments:
* none
*
* Returns:
* the created wlandevice_t structure.
*
* Side effects:
* also allocates the priv/hw structures.
*
* Call context:
* process thread
*
----------------------------------------------------------------*/
/*
 * create_wlan
 *
 * Allocate and initialize the wlandevice_t and its private hfa384x
 * state.  Returns the wlandevice (with ->priv pointing at the hw
 * structure) or NULL on allocation failure.  Caller owns both
 * allocations via the returned wlandev.
 */
static wlandevice_t *create_wlan(void)
{
	wlandevice_t *wlandev;
	hfa384x_t *hw;

	/* Alloc our structures */
	wlandev = kzalloc(sizeof(*wlandev), GFP_KERNEL);
	hw = kzalloc(sizeof(*hw), GFP_KERNEL);
	if (!wlandev || !hw)
		goto fail;

	/* Initialize the network device object. */
	wlandev->nsdname = dev_info;
	wlandev->msdstate = WLAN_MSD_HWPRESENT_PENDING;
	wlandev->priv = hw;
	wlandev->open = prism2sta_open;
	wlandev->close = prism2sta_close;
	wlandev->reset = prism2sta_reset;
	wlandev->txframe = prism2sta_txframe;
	wlandev->mlmerequest = prism2sta_mlmerequest;
	wlandev->set_multicast_list = prism2sta_setmulticast;
	wlandev->tx_timeout = hfa384x_tx_timeout;
	wlandev->nsdcaps = P80211_NSDCAP_HWFRAGMENT | P80211_NSDCAP_AUTOJOIN;

	/* Initialize the device private data structure. */
	hw->dot11_desired_bss_type = 1;

	return wlandev;

fail:
	pr_err("%s: Memory allocation failure.\n", dev_info);
	kfree(hw);
	kfree(wlandev);
	return NULL;
}
/*
 * prism2sta_commsqual_defer
 *
 * Workqueue bottom half (hw->commsqual_bh): polls link quality, current
 * tx rate, BSSID and SSID from the MAC while associated as an ESS
 * station, then re-arms the one-second commsqual timer.  Any failure
 * returns early WITHOUT rescheduling the timer, which stops the polling.
 *
 * data - work_struct embedded in the hfa384x state (hw->commsqual_bh)
 */
void prism2sta_commsqual_defer(struct work_struct *data)
{
	hfa384x_t *hw = container_of(data, struct hfa384x, commsqual_bh);
	wlandevice_t *wlandev = hw->wlandev;
	hfa384x_bytestr32_t ssid;
	struct p80211msg_dot11req_mibget msg;
	p80211item_uint32_t *mibitem = (p80211item_uint32_t *)
						&msg.mibattribute.data;
	int result = 0;

	if (hw->wlandev->hwremoved)
		return;

	/* we don't care if we're in AP mode */
	if ((wlandev->macmode == WLAN_MACMODE_NONE) ||
	    (wlandev->macmode == WLAN_MACMODE_ESS_AP)) {
		return;
	}

	/* It only makes sense to poll these in non-IBSS */
	if (wlandev->macmode != WLAN_MACMODE_IBSS_STA) {
		result = hfa384x_drvr_getconfig(
				hw, HFA384x_RID_DBMCOMMSQUALITY,
				&hw->qual, HFA384x_RID_DBMCOMMSQUALITY_LEN);

		if (result) {
			netdev_err(wlandev->netdev, "error fetching commsqual\n");
			return;
		}

		pr_debug("commsqual %d %d %d\n",
			 le16_to_cpu(hw->qual.CQ_currBSS),
			 le16_to_cpu(hw->qual.ASL_currBSS),
			 le16_to_cpu(hw->qual.ANL_currFC));
	}

	/* Get the signal rate */
	/* NOTE(review): only msgcode and mibitem->did are initialized in
	 * 'msg' before the request; presumably p80211req_dorequest() fills
	 * or ignores the remaining fields -- confirm against p80211req.
	 */
	msg.msgcode = DIDmsg_dot11req_mibget;
	mibitem->did = DIDmib_p2_p2MAC_p2CurrentTxRate;
	result = p80211req_dorequest(wlandev, (u8 *) &msg);

	if (result) {
		pr_debug("get signal rate failed, result = %d\n",
			 result);
		return;
	}

	/* Translate the returned rate bit into units of 100kb/s. */
	switch (mibitem->data) {
	case HFA384x_RATEBIT_1:
		hw->txrate = 10;
		break;
	case HFA384x_RATEBIT_2:
		hw->txrate = 20;
		break;
	case HFA384x_RATEBIT_5dot5:
		hw->txrate = 55;
		break;
	case HFA384x_RATEBIT_11:
		hw->txrate = 110;
		break;
	default:
		pr_debug("Bad ratebit (%d)\n", mibitem->data);
	}

	/* Lastly, we need to make sure the BSSID didn't change on us */
	result = hfa384x_drvr_getconfig(hw,
					HFA384x_RID_CURRENTBSSID,
					wlandev->bssid, WLAN_BSSID_LEN);
	if (result) {
		pr_debug("getconfig(0x%02x) failed, result = %d\n",
			 HFA384x_RID_CURRENTBSSID, result);
		return;
	}

	result = hfa384x_drvr_getconfig(hw,
					HFA384x_RID_CURRENTSSID,
					&ssid, sizeof(ssid));
	if (result) {
		pr_debug("getconfig(0x%02x) failed, result = %d\n",
			 HFA384x_RID_CURRENTSSID, result);
		return;
	}
	prism2mgmt_bytestr2pstr((struct hfa384x_bytestr *) &ssid,
				(p80211pstrd_t *) &wlandev->ssid);

	/* Reschedule timer */
	mod_timer(&hw->commsqual_timer, jiffies + HZ);
}
/* Timer callback: punt the RID polling to process context. */
void prism2sta_commsqual_timer(unsigned long data)
{
	schedule_work(&((hfa384x_t *) data)->commsqual_bh);
}
| gpl-2.0 |
CyanogenMod/android_kernel_bn_acclaim | arch/blackfin/mach-bf537/boards/minotaur.c | 2441 | 13444 | /*
* Copyright 2004-2009 Analog Devices Inc.
* 2008-2009 Cambridge Signal Processing
* 2005 National ICT Australia (NICTA)
* Aidan Williams <aidan@nicta.com.au>
*
* Licensed under the GPL-2 or later.
*/
#include <linux/device.h>
#include <linux/platform_device.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>
#include <linux/spi/spi.h>
#include <linux/spi/flash.h>
#if defined(CONFIG_USB_ISP1362_HCD) || defined(CONFIG_USB_ISP1362_HCD_MODULE)
#include <linux/usb/isp1362.h>
#endif
#include <linux/ata_platform.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/usb/sl811.h>
#include <asm/dma.h>
#include <asm/bfin5xx_spi.h>
#include <asm/reboot.h>
#include <asm/portmux.h>
#include <linux/spi/ad7877.h>
/*
* Name the Board for the /proc/cpuinfo
*/
const char bfin_board_name[] = "CamSig Minotaur BF537";
#if defined(CONFIG_BFIN_CFPCMCIA) || defined(CONFIG_BFIN_CFPCMCIA_MODULE)
static struct resource bfin_pcmcia_cf_resources[] = {
{
.start = 0x20310000, /* IO PORT */
.end = 0x20312000,
.flags = IORESOURCE_MEM,
}, {
.start = 0x20311000, /* Attribute Memory */
.end = 0x20311FFF,
.flags = IORESOURCE_MEM,
}, {
.start = IRQ_PF4,
.end = IRQ_PF4,
.flags = IORESOURCE_IRQ | IORESOURCE_IRQ_LOWLEVEL,
}, {
.start = IRQ_PF6, /* Card Detect PF6 */
.end = IRQ_PF6,
.flags = IORESOURCE_IRQ,
},
};
static struct platform_device bfin_pcmcia_cf_device = {
.name = "bfin_cf_pcmcia",
.id = -1,
.num_resources = ARRAY_SIZE(bfin_pcmcia_cf_resources),
.resource = bfin_pcmcia_cf_resources,
};
#endif
#if defined(CONFIG_RTC_DRV_BFIN) || defined(CONFIG_RTC_DRV_BFIN_MODULE)
static struct platform_device rtc_device = {
.name = "rtc-bfin",
.id = -1,
};
#endif
#if defined(CONFIG_BFIN_MAC) || defined(CONFIG_BFIN_MAC_MODULE)
#include <linux/bfin_mac.h>
static const unsigned short bfin_mac_peripherals[] = P_MII0;
static struct bfin_phydev_platform_data bfin_phydev_data[] = {
{
.addr = 1,
.irq = IRQ_MAC_PHYINT,
},
};
static struct bfin_mii_bus_platform_data bfin_mii_bus_data = {
.phydev_number = 1,
.phydev_data = bfin_phydev_data,
.phy_mode = PHY_INTERFACE_MODE_MII,
.mac_peripherals = bfin_mac_peripherals,
};
static struct platform_device bfin_mii_bus = {
.name = "bfin_mii_bus",
.dev = {
.platform_data = &bfin_mii_bus_data,
}
};
static struct platform_device bfin_mac_device = {
.name = "bfin_mac",
.dev = {
.platform_data = &bfin_mii_bus,
}
};
#endif
#if defined(CONFIG_USB_NET2272) || defined(CONFIG_USB_NET2272_MODULE)
static struct resource net2272_bfin_resources[] = {
{
.start = 0x20300000,
.end = 0x20300000 + 0x100,
.flags = IORESOURCE_MEM,
}, {
.start = IRQ_PF7,
.end = IRQ_PF7,
.flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHLEVEL,
},
};
static struct platform_device net2272_bfin_device = {
.name = "net2272",
.id = -1,
.num_resources = ARRAY_SIZE(net2272_bfin_resources),
.resource = net2272_bfin_resources,
};
#endif
#if defined(CONFIG_SPI_BFIN) || defined(CONFIG_SPI_BFIN_MODULE)
/* all SPI peripherals info goes here */
#if defined(CONFIG_MTD_M25P80) \
|| defined(CONFIG_MTD_M25P80_MODULE)
/* Partition sizes */
#define FLASH_SIZE 0x00400000
#define PSIZE_UBOOT 0x00030000
#define PSIZE_INITRAMFS 0x00240000
static struct mtd_partition bfin_spi_flash_partitions[] = {
{
.name = "bootloader(spi)",
.size = PSIZE_UBOOT,
.offset = 0x000000,
.mask_flags = MTD_CAP_ROM
}, {
.name = "initramfs(spi)",
.size = PSIZE_INITRAMFS,
.offset = PSIZE_UBOOT
}, {
.name = "opt(spi)",
.size = FLASH_SIZE - (PSIZE_UBOOT + PSIZE_INITRAMFS),
.offset = PSIZE_UBOOT + PSIZE_INITRAMFS,
}
};
static struct flash_platform_data bfin_spi_flash_data = {
.name = "m25p80",
.parts = bfin_spi_flash_partitions,
.nr_parts = ARRAY_SIZE(bfin_spi_flash_partitions),
.type = "m25p64",
};
/* SPI flash chip (m25p64) */
static struct bfin5xx_spi_chip spi_flash_chip_info = {
.enable_dma = 0, /* use dma transfer with this chip*/
.bits_per_word = 8,
};
#endif
#if defined(CONFIG_MMC_SPI) || defined(CONFIG_MMC_SPI_MODULE)
static struct bfin5xx_spi_chip mmc_spi_chip_info = {
.enable_dma = 0,
.bits_per_word = 8,
};
#endif
static struct spi_board_info bfin_spi_board_info[] __initdata = {
#if defined(CONFIG_MTD_M25P80) \
|| defined(CONFIG_MTD_M25P80_MODULE)
{
/* the modalias must be the same as spi device driver name */
.modalias = "m25p80", /* Name of spi_driver for this device */
.max_speed_hz = 25000000, /* max spi clock (SCK) speed in HZ */
.bus_num = 0, /* Framework bus number */
.chip_select = 1, /* Framework chip select. On STAMP537 it is SPISSEL1*/
.platform_data = &bfin_spi_flash_data,
.controller_data = &spi_flash_chip_info,
.mode = SPI_MODE_3,
},
#endif
#if defined(CONFIG_MMC_SPI) || defined(CONFIG_MMC_SPI_MODULE)
{
.modalias = "mmc_spi",
.max_speed_hz = 5000000, /* max spi clock (SCK) speed in HZ */
.bus_num = 0,
.chip_select = 5,
.controller_data = &mmc_spi_chip_info,
.mode = SPI_MODE_3,
},
#endif
};
/* SPI controller data */
static struct bfin5xx_spi_master bfin_spi0_info = {
.num_chipselect = 8,
.enable_dma = 1, /* master has the ability to do dma transfer */
};
/* SPI (0) */
static struct resource bfin_spi0_resource[] = {
[0] = {
.start = SPI0_REGBASE,
.end = SPI0_REGBASE + 0xFF,
.flags = IORESOURCE_MEM,
},
[1] = {
.start = CH_SPI,
.end = CH_SPI,
.flags = IORESOURCE_DMA,
},
[2] = {
.start = IRQ_SPI,
.end = IRQ_SPI,
.flags = IORESOURCE_IRQ,
},
};
static struct platform_device bfin_spi0_device = {
.name = "bfin-spi",
.id = 0, /* Bus number */
.num_resources = ARRAY_SIZE(bfin_spi0_resource),
.resource = bfin_spi0_resource,
.dev = {
.platform_data = &bfin_spi0_info, /* Passed to driver */
},
};
#endif /* spi master and devices */
#if defined(CONFIG_SERIAL_BFIN) || defined(CONFIG_SERIAL_BFIN_MODULE)
#ifdef CONFIG_SERIAL_BFIN_UART0
static struct resource bfin_uart0_resources[] = {
{
.start = UART0_THR,
.end = UART0_GCTL+2,
.flags = IORESOURCE_MEM,
},
{
.start = IRQ_UART0_RX,
.end = IRQ_UART0_RX+1,
.flags = IORESOURCE_IRQ,
},
{
.start = IRQ_UART0_ERROR,
.end = IRQ_UART0_ERROR,
.flags = IORESOURCE_IRQ,
},
{
.start = CH_UART0_TX,
.end = CH_UART0_TX,
.flags = IORESOURCE_DMA,
},
{
.start = CH_UART0_RX,
.end = CH_UART0_RX,
.flags = IORESOURCE_DMA,
},
};
static unsigned short bfin_uart0_peripherals[] = {
P_UART0_TX, P_UART0_RX, 0
};
static struct platform_device bfin_uart0_device = {
.name = "bfin-uart",
.id = 0,
.num_resources = ARRAY_SIZE(bfin_uart0_resources),
.resource = bfin_uart0_resources,
.dev = {
.platform_data = &bfin_uart0_peripherals, /* Passed to driver */
},
};
#endif
#ifdef CONFIG_SERIAL_BFIN_UART1
static struct resource bfin_uart1_resources[] = {
{
.start = UART1_THR,
.end = UART1_GCTL+2,
.flags = IORESOURCE_MEM,
},
{
.start = IRQ_UART1_RX,
.end = IRQ_UART1_RX+1,
.flags = IORESOURCE_IRQ,
},
{
.start = IRQ_UART1_ERROR,
.end = IRQ_UART1_ERROR,
.flags = IORESOURCE_IRQ,
},
{
.start = CH_UART1_TX,
.end = CH_UART1_TX,
.flags = IORESOURCE_DMA,
},
{
.start = CH_UART1_RX,
.end = CH_UART1_RX,
.flags = IORESOURCE_DMA,
},
};
static unsigned short bfin_uart1_peripherals[] = {
P_UART1_TX, P_UART1_RX, 0
};
static struct platform_device bfin_uart1_device = {
.name = "bfin-uart",
.id = 1,
.num_resources = ARRAY_SIZE(bfin_uart1_resources),
.resource = bfin_uart1_resources,
.dev = {
.platform_data = &bfin_uart1_peripherals, /* Passed to driver */
},
};
#endif
#endif
#if defined(CONFIG_BFIN_SIR) || defined(CONFIG_BFIN_SIR_MODULE)
#ifdef CONFIG_BFIN_SIR0
static struct resource bfin_sir0_resources[] = {
{
.start = 0xFFC00400,
.end = 0xFFC004FF,
.flags = IORESOURCE_MEM,
},
{
.start = IRQ_UART0_RX,
.end = IRQ_UART0_RX+1,
.flags = IORESOURCE_IRQ,
},
{
.start = CH_UART0_RX,
.end = CH_UART0_RX+1,
.flags = IORESOURCE_DMA,
},
};
static struct platform_device bfin_sir0_device = {
.name = "bfin_sir",
.id = 0,
.num_resources = ARRAY_SIZE(bfin_sir0_resources),
.resource = bfin_sir0_resources,
};
#endif
#ifdef CONFIG_BFIN_SIR1
static struct resource bfin_sir1_resources[] = {
{
.start = 0xFFC02000,
.end = 0xFFC020FF,
.flags = IORESOURCE_MEM,
},
{
.start = IRQ_UART1_RX,
.end = IRQ_UART1_RX+1,
.flags = IORESOURCE_IRQ,
},
{
.start = CH_UART1_RX,
.end = CH_UART1_RX+1,
.flags = IORESOURCE_DMA,
},
};
static struct platform_device bfin_sir1_device = {
.name = "bfin_sir",
.id = 1,
.num_resources = ARRAY_SIZE(bfin_sir1_resources),
.resource = bfin_sir1_resources,
};
#endif
#endif
#if defined(CONFIG_I2C_BLACKFIN_TWI) || defined(CONFIG_I2C_BLACKFIN_TWI_MODULE)
static struct resource bfin_twi0_resource[] = {
[0] = {
.start = TWI0_REGBASE,
.end = TWI0_REGBASE + 0xFF,
.flags = IORESOURCE_MEM,
},
[1] = {
.start = IRQ_TWI,
.end = IRQ_TWI,
.flags = IORESOURCE_IRQ,
},
};
static struct platform_device i2c_bfin_twi_device = {
.name = "i2c-bfin-twi",
.id = 0,
.num_resources = ARRAY_SIZE(bfin_twi0_resource),
.resource = bfin_twi0_resource,
};
#endif
#if defined(CONFIG_SERIAL_BFIN_SPORT) || defined(CONFIG_SERIAL_BFIN_SPORT_MODULE)
#ifdef CONFIG_SERIAL_BFIN_SPORT0_UART
static struct resource bfin_sport0_uart_resources[] = {
{
.start = SPORT0_TCR1,
.end = SPORT0_MRCS3+4,
.flags = IORESOURCE_MEM,
},
{
.start = IRQ_SPORT0_RX,
.end = IRQ_SPORT0_RX+1,
.flags = IORESOURCE_IRQ,
},
{
.start = IRQ_SPORT0_ERROR,
.end = IRQ_SPORT0_ERROR,
.flags = IORESOURCE_IRQ,
},
};
static unsigned short bfin_sport0_peripherals[] = {
P_SPORT0_TFS, P_SPORT0_DTPRI, P_SPORT0_TSCLK, P_SPORT0_RFS,
P_SPORT0_DRPRI, P_SPORT0_RSCLK, 0
};
static struct platform_device bfin_sport0_uart_device = {
.name = "bfin-sport-uart",
.id = 0,
.num_resources = ARRAY_SIZE(bfin_sport0_uart_resources),
.resource = bfin_sport0_uart_resources,
.dev = {
.platform_data = &bfin_sport0_peripherals, /* Passed to driver */
},
};
#endif
#ifdef CONFIG_SERIAL_BFIN_SPORT1_UART
static struct resource bfin_sport1_uart_resources[] = {
{
.start = SPORT1_TCR1,
.end = SPORT1_MRCS3+4,
.flags = IORESOURCE_MEM,
},
{
.start = IRQ_SPORT1_RX,
.end = IRQ_SPORT1_RX+1,
.flags = IORESOURCE_IRQ,
},
{
.start = IRQ_SPORT1_ERROR,
.end = IRQ_SPORT1_ERROR,
.flags = IORESOURCE_IRQ,
},
};
static unsigned short bfin_sport1_peripherals[] = {
P_SPORT1_TFS, P_SPORT1_DTPRI, P_SPORT1_TSCLK, P_SPORT1_RFS,
P_SPORT1_DRPRI, P_SPORT1_RSCLK, 0
};
static struct platform_device bfin_sport1_uart_device = {
.name = "bfin-sport-uart",
.id = 1,
.num_resources = ARRAY_SIZE(bfin_sport1_uart_resources),
.resource = bfin_sport1_uart_resources,
.dev = {
.platform_data = &bfin_sport1_peripherals, /* Passed to driver */
},
};
#endif
#endif
/*
 * All platform devices present on the minotaur board, registered in one
 * shot by minotaur_init().  Each entry is compiled in only when the
 * corresponding driver is enabled (built-in or module).
 */
static struct platform_device *minotaur_devices[] __initdata = {
#if defined(CONFIG_BFIN_CFPCMCIA) || defined(CONFIG_BFIN_CFPCMCIA_MODULE)
&bfin_pcmcia_cf_device,
#endif
#if defined(CONFIG_RTC_DRV_BFIN) || defined(CONFIG_RTC_DRV_BFIN_MODULE)
&rtc_device,
#endif
#if defined(CONFIG_BFIN_MAC) || defined(CONFIG_BFIN_MAC_MODULE)
&bfin_mii_bus,
&bfin_mac_device,
#endif
#if defined(CONFIG_USB_NET2272) || defined(CONFIG_USB_NET2272_MODULE)
&net2272_bfin_device,
#endif
#if defined(CONFIG_SPI_BFIN) || defined(CONFIG_SPI_BFIN_MODULE)
&bfin_spi0_device,
#endif
#if defined(CONFIG_SERIAL_BFIN) || defined(CONFIG_SERIAL_BFIN_MODULE)
#ifdef CONFIG_SERIAL_BFIN_UART0
&bfin_uart0_device,
#endif
#ifdef CONFIG_SERIAL_BFIN_UART1
&bfin_uart1_device,
#endif
#endif
#if defined(CONFIG_BFIN_SIR) || defined(CONFIG_BFIN_SIR_MODULE)
#ifdef CONFIG_BFIN_SIR0
&bfin_sir0_device,
#endif
#ifdef CONFIG_BFIN_SIR1
&bfin_sir1_device,
#endif
#endif
#if defined(CONFIG_I2C_BLACKFIN_TWI) || defined(CONFIG_I2C_BLACKFIN_TWI_MODULE)
&i2c_bfin_twi_device,
#endif
#if defined(CONFIG_SERIAL_BFIN_SPORT) || defined(CONFIG_SERIAL_BFIN_SPORT_MODULE)
#ifdef CONFIG_SERIAL_BFIN_SPORT0_UART
&bfin_sport0_uart_device,
#endif
#ifdef CONFIG_SERIAL_BFIN_SPORT1_UART
&bfin_sport1_uart_device,
#endif
#endif
};
/*
 * minotaur_init - register the board's platform devices and SPI slaves.
 *
 * Runs at arch_initcall time, i.e. before ordinary device initcalls,
 * so the devices exist by the time their drivers probe.
 *
 * Returns 0 on success or the error from platform_add_devices();
 * previously a registration failure was silently ignored.
 */
static int __init minotaur_init(void)
{
	int ret;

	printk(KERN_INFO "%s(): registering device resources\n", __func__);

	ret = platform_add_devices(minotaur_devices,
				   ARRAY_SIZE(minotaur_devices));
	if (ret)
		return ret;

#if defined(CONFIG_SPI_BFIN) || defined(CONFIG_SPI_BFIN_MODULE)
	/* SPI slave descriptors are registered separately from the master. */
	spi_register_board_info(bfin_spi_board_info,
				ARRAY_SIZE(bfin_spi_board_info));
#endif
	return 0;
}
arch_initcall(minotaur_init);
/*
 * Devices that must be available before normal initcalls run: the UARTs /
 * SPORT-UARTs that can host the early serial console or earlyprintk.
 */
static struct platform_device *minotaur_early_devices[] __initdata = {
#if defined(CONFIG_SERIAL_BFIN_CONSOLE) || defined(CONFIG_EARLY_PRINTK)
#ifdef CONFIG_SERIAL_BFIN_UART0
&bfin_uart0_device,
#endif
#ifdef CONFIG_SERIAL_BFIN_UART1
&bfin_uart1_device,
#endif
#endif
#if defined(CONFIG_SERIAL_BFIN_SPORT_CONSOLE)
#ifdef CONFIG_SERIAL_BFIN_SPORT0_UART
&bfin_sport0_uart_device,
#endif
#ifdef CONFIG_SERIAL_BFIN_SPORT1_UART
&bfin_sport1_uart_device,
#endif
#endif
};
/*
 * Register the console-capable devices listed above with the early
 * platform core so the early serial console can attach before the
 * regular initcall sequence runs.
 */
void __init native_machine_early_platform_add_devices(void)
{
printk(KERN_INFO "register early platform devices\n");
early_platform_add_devices(minotaur_early_devices,
ARRAY_SIZE(minotaur_early_devices));
}
/*
 * Board-specific restart hook.
 *
 * SYSCR[2:0] == 0x3 indicates the part was booted from SPI flash; in
 * that mode a soft reboot can hang (see comment below), so the boot
 * SPI chip select is returned to its default state first.  The generic
 * reset code runs after this hook.
 */
void native_machine_restart(char *cmd)
{
/* workaround reboot hang when booting from SPI */
if ((bfin_read_SYSCR() & 0x7) == 0x3)
bfin_reset_boot_spi_cs(P_DEFAULT_BOOT_SPI_CS);
}
| gpl-2.0 |
eugene373/ApexqJB | arch/blackfin/mach-bf548/boards/cm_bf548.c | 2441 | 28377 | /*
* Copyright 2004-2009 Analog Devices Inc.
* 2008-2009 Bluetechnix
* 2005 National ICT Australia (NICTA)
* Aidan Williams <aidan@nicta.com.au>
*
* Licensed under the GPL-2 or later.
*/
#include <linux/device.h>
#include <linux/platform_device.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>
#include <linux/mtd/physmap.h>
#include <linux/spi/spi.h>
#include <linux/spi/flash.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/usb/musb.h>
#include <asm/bfin5xx_spi.h>
#include <asm/dma.h>
#include <asm/gpio.h>
#include <asm/nand.h>
#include <asm/portmux.h>
#include <asm/bfin_sdh.h>
#include <mach/bf54x_keys.h>
#include <asm/dpmc.h>
#include <linux/input.h>
#include <linux/spi/ad7877.h>
/*
* Name the Board for the /proc/cpuinfo
*/
const char bfin_board_name[] = "Bluetechnix CM-BF548"; /* shown in /proc/cpuinfo */
/*
* Driver needs to know address, irq and flag pin.
*/
#if defined(CONFIG_FB_BF54X_LQ043) || defined(CONFIG_FB_BF54X_LQ043_MODULE)
#include <mach/bf54x-lq043.h>
/* Sharp LQ043 480x272 panel, 24 bpp, display-enable on GPIO PE3. */
static struct bfin_bf54xfb_mach_info bf54x_lq043_data = {
.width = 480,
.height = 272,
.xres = {480, 480, 480},
.yres = {272, 272, 272},
.bpp = {24, 24, 24},
.disp = GPIO_PE3,
};
/* Only the EPPI0 error interrupt is needed; the framebuffer driver
 * gets its register addresses from the mach info above. */
static struct resource bf54x_lq043_resources[] = {
{
.start = IRQ_EPPI0_ERR,
.end = IRQ_EPPI0_ERR,
.flags = IORESOURCE_IRQ,
},
};
static struct platform_device bf54x_lq043_device = {
.name = "bf54x-lq043",
.id = -1,
.num_resources = ARRAY_SIZE(bf54x_lq043_resources),
.resource = bf54x_lq043_resources,
.dev = {
.platform_data = &bf54x_lq043_data,
},
};
#endif
#if defined(CONFIG_KEYBOARD_BFIN) || defined(CONFIG_KEYBOARD_BFIN_MODULE)
/* 4x4 matrix keypad layout: KEYVAL(row, col, keycode). */
static unsigned int bf548_keymap[] = {
KEYVAL(0, 0, KEY_ENTER),
KEYVAL(0, 1, KEY_HELP),
KEYVAL(0, 2, KEY_0),
KEYVAL(0, 3, KEY_BACKSPACE),
KEYVAL(1, 0, KEY_TAB),
KEYVAL(1, 1, KEY_9),
KEYVAL(1, 2, KEY_8),
KEYVAL(1, 3, KEY_7),
KEYVAL(2, 0, KEY_DOWN),
KEYVAL(2, 1, KEY_6),
KEYVAL(2, 2, KEY_5),
KEYVAL(2, 3, KEY_4),
KEYVAL(3, 0, KEY_UP),
KEYVAL(3, 1, KEY_3),
KEYVAL(3, 2, KEY_2),
KEYVAL(3, 3, KEY_1),
};
/* Timings for the on-chip keypad controller; see bf54x_keys.h. */
static struct bfin_kpad_platform_data bf54x_kpad_data = {
.rows = 4,
.cols = 4,
.keymap = bf548_keymap,
.keymapsize = ARRAY_SIZE(bf548_keymap),
.repeat = 0,
.debounce_time = 5000, /* ns (5ms) */
.coldrive_time = 1000, /* ns (1ms) */
.keyup_test_interval = 50, /* ms (50ms) */
};
static struct resource bf54x_kpad_resources[] = {
{
.start = IRQ_KEY,
.end = IRQ_KEY,
.flags = IORESOURCE_IRQ,
},
};
static struct platform_device bf54x_kpad_device = {
.name = "bf54x-keys",
.id = -1,
.num_resources = ARRAY_SIZE(bf54x_kpad_resources),
.resource = bf54x_kpad_resources,
.dev = {
.platform_data = &bf54x_kpad_data,
},
};
#endif
#if defined(CONFIG_RTC_DRV_BFIN) || defined(CONFIG_RTC_DRV_BFIN_MODULE)
/* On-chip RTC; the driver knows its fixed registers, so no resources. */
static struct platform_device rtc_device = {
.name = "rtc-bfin",
.id = -1,
};
#endif
#if defined(CONFIG_SERIAL_BFIN) || defined(CONFIG_SERIAL_BFIN_MODULE)
/*
 * On-chip UARTs for the "bfin-uart" driver.  Each instance supplies its
 * MMIO window (DLL..RBR), RX and error interrupts, TX/RX DMA channels,
 * and a zero-terminated portmux pin list as platform data.  UART1 and
 * UART3 optionally add GPIO-based CTS/RTS lines.
 */
#ifdef CONFIG_SERIAL_BFIN_UART0
static struct resource bfin_uart0_resources[] = {
{
.start = UART0_DLL,
.end = UART0_RBR+2,
.flags = IORESOURCE_MEM,
},
{
.start = IRQ_UART0_RX,
.end = IRQ_UART0_RX+1,
.flags = IORESOURCE_IRQ,
},
{
.start = IRQ_UART0_ERROR,
.end = IRQ_UART0_ERROR,
.flags = IORESOURCE_IRQ,
},
{
.start = CH_UART0_TX,
.end = CH_UART0_TX,
.flags = IORESOURCE_DMA,
},
{
.start = CH_UART0_RX,
.end = CH_UART0_RX,
.flags = IORESOURCE_DMA,
},
};
static unsigned short bfin_uart0_peripherals[] = {
P_UART0_TX, P_UART0_RX, 0
};
static struct platform_device bfin_uart0_device = {
.name = "bfin-uart",
.id = 0,
.num_resources = ARRAY_SIZE(bfin_uart0_resources),
.resource = bfin_uart0_resources,
.dev = {
.platform_data = &bfin_uart0_peripherals, /* Passed to driver */
},
};
#endif
#ifdef CONFIG_SERIAL_BFIN_UART1
static struct resource bfin_uart1_resources[] = {
{
.start = UART1_DLL,
.end = UART1_RBR+2,
.flags = IORESOURCE_MEM,
},
{
.start = IRQ_UART1_RX,
.end = IRQ_UART1_RX+1,
.flags = IORESOURCE_IRQ,
},
{
.start = IRQ_UART1_ERROR,
.end = IRQ_UART1_ERROR,
.flags = IORESOURCE_IRQ,
},
{
.start = CH_UART1_TX,
.end = CH_UART1_TX,
.flags = IORESOURCE_DMA,
},
{
.start = CH_UART1_RX,
.end = CH_UART1_RX,
.flags = IORESOURCE_DMA,
},
#ifdef CONFIG_BFIN_UART1_CTSRTS
{ /* CTS pin -- 0 means not supported */
.start = GPIO_PE10,
.end = GPIO_PE10,
.flags = IORESOURCE_IO,
},
{ /* RTS pin -- 0 means not supported */
.start = GPIO_PE9,
.end = GPIO_PE9,
.flags = IORESOURCE_IO,
},
#endif
};
static unsigned short bfin_uart1_peripherals[] = {
P_UART1_TX, P_UART1_RX,
#ifdef CONFIG_BFIN_UART1_CTSRTS
P_UART1_RTS, P_UART1_CTS,
#endif
0
};
static struct platform_device bfin_uart1_device = {
.name = "bfin-uart",
.id = 1,
.num_resources = ARRAY_SIZE(bfin_uart1_resources),
.resource = bfin_uart1_resources,
.dev = {
.platform_data = &bfin_uart1_peripherals, /* Passed to driver */
},
};
#endif
#ifdef CONFIG_SERIAL_BFIN_UART2
static struct resource bfin_uart2_resources[] = {
{
.start = UART2_DLL,
.end = UART2_RBR+2,
.flags = IORESOURCE_MEM,
},
{
.start = IRQ_UART2_RX,
.end = IRQ_UART2_RX+1,
.flags = IORESOURCE_IRQ,
},
{
.start = IRQ_UART2_ERROR,
.end = IRQ_UART2_ERROR,
.flags = IORESOURCE_IRQ,
},
{
.start = CH_UART2_TX,
.end = CH_UART2_TX,
.flags = IORESOURCE_DMA,
},
{
.start = CH_UART2_RX,
.end = CH_UART2_RX,
.flags = IORESOURCE_DMA,
},
};
static unsigned short bfin_uart2_peripherals[] = {
P_UART2_TX, P_UART2_RX, 0
};
static struct platform_device bfin_uart2_device = {
.name = "bfin-uart",
.id = 2,
.num_resources = ARRAY_SIZE(bfin_uart2_resources),
.resource = bfin_uart2_resources,
.dev = {
.platform_data = &bfin_uart2_peripherals, /* Passed to driver */
},
};
#endif
#ifdef CONFIG_SERIAL_BFIN_UART3
static struct resource bfin_uart3_resources[] = {
{
.start = UART3_DLL,
.end = UART3_RBR+2,
.flags = IORESOURCE_MEM,
},
{
.start = IRQ_UART3_RX,
.end = IRQ_UART3_RX+1,
.flags = IORESOURCE_IRQ,
},
{
.start = IRQ_UART3_ERROR,
.end = IRQ_UART3_ERROR,
.flags = IORESOURCE_IRQ,
},
{
.start = CH_UART3_TX,
.end = CH_UART3_TX,
.flags = IORESOURCE_DMA,
},
{
.start = CH_UART3_RX,
.end = CH_UART3_RX,
.flags = IORESOURCE_DMA,
},
#ifdef CONFIG_BFIN_UART3_CTSRTS
{ /* CTS pin -- 0 means not supported */
.start = GPIO_PB3,
.end = GPIO_PB3,
.flags = IORESOURCE_IO,
},
{ /* RTS pin -- 0 means not supported */
.start = GPIO_PB2,
.end = GPIO_PB2,
.flags = IORESOURCE_IO,
},
#endif
};
static unsigned short bfin_uart3_peripherals[] = {
P_UART3_TX, P_UART3_RX,
#ifdef CONFIG_BFIN_UART3_CTSRTS
P_UART3_RTS, P_UART3_CTS,
#endif
0
};
static struct platform_device bfin_uart3_device = {
.name = "bfin-uart",
.id = 3,
.num_resources = ARRAY_SIZE(bfin_uart3_resources),
.resource = bfin_uart3_resources,
.dev = {
.platform_data = &bfin_uart3_peripherals, /* Passed to driver */
},
};
#endif
#endif
#if defined(CONFIG_BFIN_SIR) || defined(CONFIG_BFIN_SIR_MODULE)
/*
 * IrDA (SIR) instances for the "bfin_sir" driver.  Each one reuses a
 * UART's register block (fixed MMIO addresses below), its RX interrupt
 * and its RX DMA channel.
 */
#ifdef CONFIG_BFIN_SIR0
static struct resource bfin_sir0_resources[] = {
{
.start = 0xFFC00400,
.end = 0xFFC004FF,
.flags = IORESOURCE_MEM,
},
{
.start = IRQ_UART0_RX,
.end = IRQ_UART0_RX+1,
.flags = IORESOURCE_IRQ,
},
{
.start = CH_UART0_RX,
.end = CH_UART0_RX+1,
.flags = IORESOURCE_DMA,
},
};
static struct platform_device bfin_sir0_device = {
.name = "bfin_sir",
.id = 0,
.num_resources = ARRAY_SIZE(bfin_sir0_resources),
.resource = bfin_sir0_resources,
};
#endif
#ifdef CONFIG_BFIN_SIR1
static struct resource bfin_sir1_resources[] = {
{
.start = 0xFFC02000,
.end = 0xFFC020FF,
.flags = IORESOURCE_MEM,
},
{
.start = IRQ_UART1_RX,
.end = IRQ_UART1_RX+1,
.flags = IORESOURCE_IRQ,
},
{
.start = CH_UART1_RX,
.end = CH_UART1_RX+1,
.flags = IORESOURCE_DMA,
},
};
static struct platform_device bfin_sir1_device = {
.name = "bfin_sir",
.id = 1,
.num_resources = ARRAY_SIZE(bfin_sir1_resources),
.resource = bfin_sir1_resources,
};
#endif
#ifdef CONFIG_BFIN_SIR2
static struct resource bfin_sir2_resources[] = {
{
.start = 0xFFC02100,
.end = 0xFFC021FF,
.flags = IORESOURCE_MEM,
},
{
.start = IRQ_UART2_RX,
.end = IRQ_UART2_RX+1,
.flags = IORESOURCE_IRQ,
},
{
.start = CH_UART2_RX,
.end = CH_UART2_RX+1,
.flags = IORESOURCE_DMA,
},
};
static struct platform_device bfin_sir2_device = {
.name = "bfin_sir",
.id = 2,
.num_resources = ARRAY_SIZE(bfin_sir2_resources),
.resource = bfin_sir2_resources,
};
#endif
#ifdef CONFIG_BFIN_SIR3
static struct resource bfin_sir3_resources[] = {
{
.start = 0xFFC03100,
.end = 0xFFC031FF,
.flags = IORESOURCE_MEM,
},
{
.start = IRQ_UART3_RX,
.end = IRQ_UART3_RX+1,
.flags = IORESOURCE_IRQ,
},
{
.start = CH_UART3_RX,
.end = CH_UART3_RX+1,
.flags = IORESOURCE_DMA,
},
};
static struct platform_device bfin_sir3_device = {
.name = "bfin_sir",
.id = 3,
.num_resources = ARRAY_SIZE(bfin_sir3_resources),
.resource = bfin_sir3_resources,
};
#endif
#endif
#if defined(CONFIG_SMSC911X) || defined(CONFIG_SMSC911X_MODULE)
#include <linux/smsc911x.h>
/* SMSC LAN911x ethernet controller on the async memory bus at
 * 0x24000000, 16-bit access, active-low open-drain IRQ on PE6. */
static struct resource smsc911x_resources[] = {
{
.name = "smsc911x-memory",
.start = 0x24000000,
.end = 0x24000000 + 0xFF,
.flags = IORESOURCE_MEM,
},
{
.start = IRQ_PE6,
.end = IRQ_PE6,
.flags = IORESOURCE_IRQ | IORESOURCE_IRQ_LOWLEVEL,
},
};
static struct smsc911x_platform_config smsc911x_config = {
.flags = SMSC911X_USE_16BIT,
.irq_polarity = SMSC911X_IRQ_POLARITY_ACTIVE_LOW,
.irq_type = SMSC911X_IRQ_TYPE_OPEN_DRAIN,
.phy_interface = PHY_INTERFACE_MODE_MII,
};
static struct platform_device smsc911x_device = {
.name = "smsc911x",
.id = 0,
.num_resources = ARRAY_SIZE(smsc911x_resources),
.resource = smsc911x_resources,
.dev = {
.platform_data = &smsc911x_config,
},
};
#endif
#if defined(CONFIG_USB_MUSB_HDRC) || defined(CONFIG_USB_MUSB_HDRC_MODULE)
/* On-chip MUSB OTG controller: register window plus named "mc"
 * (core) and "dma" interrupts, both active-high level. */
static struct resource musb_resources[] = {
[0] = {
.start = 0xFFC03C00,
.end = 0xFFC040FF,
.flags = IORESOURCE_MEM,
},
[1] = { /* general IRQ */
.start = IRQ_USB_INT0,
.end = IRQ_USB_INT0,
.flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHLEVEL,
.name = "mc"
},
[2] = { /* DMA IRQ */
.start = IRQ_USB_DMA,
.end = IRQ_USB_DMA,
.flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHLEVEL,
.name = "dma"
},
};
/* Controller capabilities: 8 endpoints, 8 DMA channels, VRSEL on PH6. */
static struct musb_hdrc_config musb_config = {
.multipoint = 0,
.dyn_fifo = 0,
.soft_con = 1,
.dma = 1,
.num_eps = 8,
.dma_channels = 8,
.gpio_vrsel = GPIO_PH6,
/* Some custom boards need to be active low, just set it to "0"
* if it is the case.
*/
.gpio_vrsel_active = 1,
.clkin = 24, /* musb CLKIN in MHZ */
};
/* Role (OTG/host/peripheral) is fixed at build time by Kconfig. */
static struct musb_hdrc_platform_data musb_plat = {
#if defined(CONFIG_USB_MUSB_OTG)
.mode = MUSB_OTG,
#elif defined(CONFIG_USB_MUSB_HDRC_HCD)
.mode = MUSB_HOST,
#elif defined(CONFIG_USB_GADGET_MUSB_HDRC)
.mode = MUSB_PERIPHERAL,
#endif
.config = &musb_config,
};
static u64 musb_dmamask = ~(u32)0;
static struct platform_device musb_device = {
.name = "musb-blackfin",
.id = 0,
.dev = {
.dma_mask = &musb_dmamask,
.coherent_dma_mask = 0xffffffff,
.platform_data = &musb_plat,
},
.num_resources = ARRAY_SIZE(musb_resources),
.resource = musb_resources,
};
#endif
#if defined(CONFIG_SERIAL_BFIN_SPORT) || defined(CONFIG_SERIAL_BFIN_SPORT_MODULE)
/*
 * SPORT peripherals usable as extra UARTs via "bfin-sport-uart".
 * Each supplies its register window, RX and error IRQs, and the
 * zero-terminated portmux pin list as platform data.
 */
#ifdef CONFIG_SERIAL_BFIN_SPORT0_UART
static struct resource bfin_sport0_uart_resources[] = {
{
.start = SPORT0_TCR1,
.end = SPORT0_MRCS3+4,
.flags = IORESOURCE_MEM,
},
{
.start = IRQ_SPORT0_RX,
.end = IRQ_SPORT0_RX+1,
.flags = IORESOURCE_IRQ,
},
{
.start = IRQ_SPORT0_ERROR,
.end = IRQ_SPORT0_ERROR,
.flags = IORESOURCE_IRQ,
},
};
static unsigned short bfin_sport0_peripherals[] = {
P_SPORT0_TFS, P_SPORT0_DTPRI, P_SPORT0_TSCLK, P_SPORT0_RFS,
P_SPORT0_DRPRI, P_SPORT0_RSCLK, 0
};
static struct platform_device bfin_sport0_uart_device = {
.name = "bfin-sport-uart",
.id = 0,
.num_resources = ARRAY_SIZE(bfin_sport0_uart_resources),
.resource = bfin_sport0_uart_resources,
.dev = {
.platform_data = &bfin_sport0_peripherals, /* Passed to driver */
},
};
#endif
#ifdef CONFIG_SERIAL_BFIN_SPORT1_UART
static struct resource bfin_sport1_uart_resources[] = {
{
.start = SPORT1_TCR1,
.end = SPORT1_MRCS3+4,
.flags = IORESOURCE_MEM,
},
{
.start = IRQ_SPORT1_RX,
.end = IRQ_SPORT1_RX+1,
.flags = IORESOURCE_IRQ,
},
{
.start = IRQ_SPORT1_ERROR,
.end = IRQ_SPORT1_ERROR,
.flags = IORESOURCE_IRQ,
},
};
static unsigned short bfin_sport1_peripherals[] = {
P_SPORT1_TFS, P_SPORT1_DTPRI, P_SPORT1_TSCLK, P_SPORT1_RFS,
P_SPORT1_DRPRI, P_SPORT1_RSCLK, 0
};
static struct platform_device bfin_sport1_uart_device = {
.name = "bfin-sport-uart",
.id = 1,
.num_resources = ARRAY_SIZE(bfin_sport1_uart_resources),
.resource = bfin_sport1_uart_resources,
.dev = {
.platform_data = &bfin_sport1_peripherals, /* Passed to driver */
},
};
#endif
#ifdef CONFIG_SERIAL_BFIN_SPORT2_UART
static struct resource bfin_sport2_uart_resources[] = {
{
.start = SPORT2_TCR1,
.end = SPORT2_MRCS3+4,
.flags = IORESOURCE_MEM,
},
{
.start = IRQ_SPORT2_RX,
.end = IRQ_SPORT2_RX+1,
.flags = IORESOURCE_IRQ,
},
{
.start = IRQ_SPORT2_ERROR,
.end = IRQ_SPORT2_ERROR,
.flags = IORESOURCE_IRQ,
},
};
/* SPORT2/3 also route their secondary data pins (DRSEC/DTSEC). */
static unsigned short bfin_sport2_peripherals[] = {
P_SPORT2_TFS, P_SPORT2_DTPRI, P_SPORT2_TSCLK, P_SPORT2_RFS,
P_SPORT2_DRPRI, P_SPORT2_RSCLK, P_SPORT2_DRSEC, P_SPORT2_DTSEC, 0
};
static struct platform_device bfin_sport2_uart_device = {
.name = "bfin-sport-uart",
.id = 2,
.num_resources = ARRAY_SIZE(bfin_sport2_uart_resources),
.resource = bfin_sport2_uart_resources,
.dev = {
.platform_data = &bfin_sport2_peripherals, /* Passed to driver */
},
};
#endif
#ifdef CONFIG_SERIAL_BFIN_SPORT3_UART
static struct resource bfin_sport3_uart_resources[] = {
{
.start = SPORT3_TCR1,
.end = SPORT3_MRCS3+4,
.flags = IORESOURCE_MEM,
},
{
.start = IRQ_SPORT3_RX,
.end = IRQ_SPORT3_RX+1,
.flags = IORESOURCE_IRQ,
},
{
.start = IRQ_SPORT3_ERROR,
.end = IRQ_SPORT3_ERROR,
.flags = IORESOURCE_IRQ,
},
};
static unsigned short bfin_sport3_peripherals[] = {
P_SPORT3_TFS, P_SPORT3_DTPRI, P_SPORT3_TSCLK, P_SPORT3_RFS,
P_SPORT3_DRPRI, P_SPORT3_RSCLK, P_SPORT3_DRSEC, P_SPORT3_DTSEC, 0
};
static struct platform_device bfin_sport3_uart_device = {
.name = "bfin-sport-uart",
.id = 3,
.num_resources = ARRAY_SIZE(bfin_sport3_uart_resources),
.resource = bfin_sport3_uart_resources,
.dev = {
.platform_data = &bfin_sport3_peripherals, /* Passed to driver */
},
};
#endif
#endif
#if defined(CONFIG_PATA_BF54X) || defined(CONFIG_PATA_BF54X_MODULE)
/* On-chip ATAPI controller: register block plus its error interrupt. */
static struct resource bfin_atapi_resources[] = {
{
.start = 0xFFC03800,
.end = 0xFFC0386F,
.flags = IORESOURCE_MEM,
},
{
.start = IRQ_ATAPI_ERR,
.end = IRQ_ATAPI_ERR,
.flags = IORESOURCE_IRQ,
},
};
static struct platform_device bfin_atapi_device = {
.name = "pata-bf54x",
.id = -1,
.num_resources = ARRAY_SIZE(bfin_atapi_resources),
.resource = bfin_atapi_resources,
};
#endif
#if defined(CONFIG_MTD_NAND_BF5XX) || defined(CONFIG_MTD_NAND_BF5XX_MODULE)
/* NAND layout: 4 MiB kernel partition, remainder of the 256 MiB chip
 * for the root filesystem. */
static struct mtd_partition partition_info[] = {
{
.name = "linux kernel(nand)",
.offset = 0,
.size = 4 * 1024 * 1024,
},
{
.name = "file system(nand)",
.offset = 4 * 1024 * 1024,
.size = (256 - 4) * 1024 * 1024,
},
};
static struct bf5xx_nand_platform bf5xx_nand_platform = {
.data_width = NFC_NWIDTH_8,
.partitions = partition_info,
.nr_partitions = ARRAY_SIZE(partition_info),
.rd_dly = 3,
.wr_dly = 3,
};
/* NFC register block; the second entry is the NFC DMA channel (flagged
 * IORESOURCE_IRQ as the driver expects). */
static struct resource bf5xx_nand_resources[] = {
{
.start = 0xFFC03B00,
.end = 0xFFC03B4F,
.flags = IORESOURCE_MEM,
},
{
.start = CH_NFC,
.end = CH_NFC,
.flags = IORESOURCE_IRQ,
},
};
static struct platform_device bf5xx_nand_device = {
.name = "bf5xx-nand",
.id = 0,
.num_resources = ARRAY_SIZE(bf5xx_nand_resources),
.resource = bf5xx_nand_resources,
.dev = {
.platform_data = &bf5xx_nand_platform,
},
};
#endif
#if defined(CONFIG_SDH_BFIN) || defined(CONFIG_SDH_BFIN_MODULE)
/* SD/SDIO host: DMA channel, mask-0 interrupt and the pin list are all
 * conveyed through platform data rather than struct resource. */
static struct bfin_sd_host bfin_sdh_data = {
.dma_chan = CH_SDH,
.irq_int0 = IRQ_SDH_MASK0,
.pin_req = {P_SD_D0, P_SD_D1, P_SD_D2, P_SD_D3, P_SD_CLK, P_SD_CMD, 0},
};
static struct platform_device bf54x_sdh_device = {
.name = "bfin-sdh",
.id = 0,
.dev = {
.platform_data = &bfin_sdh_data,
},
};
#endif
#if defined(CONFIG_CAN_BFIN) || defined(CONFIG_CAN_BFIN_MODULE)
/* CAN0 controller: RX/TX pins via platform data; register window and
 * separate RX, TX and error interrupts as resources. */
static unsigned short bfin_can_peripherals[] = {
P_CAN0_RX, P_CAN0_TX, 0
};
static struct resource bfin_can_resources[] = {
{
.start = 0xFFC02A00,
.end = 0xFFC02FFF,
.flags = IORESOURCE_MEM,
},
{
.start = IRQ_CAN0_RX,
.end = IRQ_CAN0_RX,
.flags = IORESOURCE_IRQ,
},
{
.start = IRQ_CAN0_TX,
.end = IRQ_CAN0_TX,
.flags = IORESOURCE_IRQ,
},
{
.start = IRQ_CAN0_ERROR,
.end = IRQ_CAN0_ERROR,
.flags = IORESOURCE_IRQ,
},
};
static struct platform_device bfin_can_device = {
.name = "bfin_can",
.num_resources = ARRAY_SIZE(bfin_can_resources),
.resource = bfin_can_resources,
.dev = {
.platform_data = &bfin_can_peripherals, /* Passed to driver */
},
};
#endif
#if defined(CONFIG_MTD_PHYSMAP) || defined(CONFIG_MTD_PHYSMAP_MODULE)
/*
 * Parallel NOR flash: 8 MiB at 0x20000000 on a 16-bit bus, handled by
 * the generic "physmap-flash" driver.  Partitions: 256 KiB bootloader,
 * 1 MiB kernel, remainder for the filesystem.
 *
 * Fix: the "&para_*" references below had been mangled into the
 * pilcrow character "¶" (an "&para" HTML-entity round-trip), which
 * does not compile; the address-of expressions are restored.
 */
static struct mtd_partition para_partitions[] = {
	{
		.name = "bootloader(nor)",
		.size = 0x40000,
		.offset = 0,
	}, {
		.name = "linux kernel(nor)",
		.size = 0x100000,
		.offset = MTDPART_OFS_APPEND,
	}, {
		.name = "file system(nor)",
		.size = MTDPART_SIZ_FULL,
		.offset = MTDPART_OFS_APPEND,
	}
};
static struct physmap_flash_data para_flash_data = {
	.width = 2,	/* 16-bit data bus */
	.parts = para_partitions,
	.nr_parts = ARRAY_SIZE(para_partitions),
};
static struct resource para_flash_resource = {
	.start = 0x20000000,
	.end = 0x207fffff,
	.flags = IORESOURCE_MEM,
};
static struct platform_device para_flash_device = {
	.name = "physmap-flash",
	.id = 0,
	.dev = {
		.platform_data = &para_flash_data,	/* was corrupted to "¶_flash_data" */
	},
	.num_resources = 1,
	.resource = &para_flash_resource,	/* was corrupted to "¶_flash_resource" */
};
#endif
#if defined(CONFIG_SPI_BFIN) || defined(CONFIG_SPI_BFIN_MODULE)
/* all SPI peripherals info goes here */
#if defined(CONFIG_MTD_M25P80) \
|| defined(CONFIG_MTD_M25P80_MODULE)
/* SPI flash chip (m25p16) */
static struct mtd_partition bfin_spi_flash_partitions[] = {
{
.name = "bootloader(spi)",
.size = 0x00040000,
.offset = 0,
.mask_flags = MTD_CAP_ROM
}, {
.name = "linux kernel(spi)",
.size = 0x1c0000,
.offset = 0x40000
}
};
static struct flash_platform_data bfin_spi_flash_data = {
.name = "m25p80",
.parts = bfin_spi_flash_partitions,
.nr_parts = ARRAY_SIZE(bfin_spi_flash_partitions),
.type = "m25p16",
};
static struct bfin5xx_spi_chip spi_flash_chip_info = {
.enable_dma = 0, /* use dma transfer with this chip*/
.bits_per_word = 8,
};
#endif
#if defined(CONFIG_TOUCHSCREEN_AD7877) || defined(CONFIG_TOUCHSCREEN_AD7877_MODULE)
/* AD7877 touchscreen controller uses 16-bit SPI words, no DMA. */
static struct bfin5xx_spi_chip spi_ad7877_chip_info = {
.enable_dma = 0,
.bits_per_word = 16,
};
static const struct ad7877_platform_data bfin_ad7877_ts_info = {
.model = 7877,
.vref_delay_usecs = 50, /* internal, no capacitor */
.x_plate_ohms = 419,
.y_plate_ohms = 486,
.pressure_max = 1000,
.pressure_min = 0,
.stopacq_polarity = 1,
.first_conversion_delay = 3,
.acquisition_time = 1,
.averaging = 1,
.pen_down_acc_interval = 1,
};
#endif
#if defined(CONFIG_SPI_SPIDEV) || defined(CONFIG_SPI_SPIDEV_MODULE)
static struct bfin5xx_spi_chip spidev_chip_info = {
.enable_dma = 0,
.bits_per_word = 8,
};
#endif
/* SPI slave devices hanging off bus 0; registered by cm_bf548_init(). */
static struct spi_board_info bf54x_spi_board_info[] __initdata = {
#if defined(CONFIG_MTD_M25P80) \
|| defined(CONFIG_MTD_M25P80_MODULE)
{
/* the modalias must be the same as spi device driver name */
.modalias = "m25p80", /* Name of spi_driver for this device */
.max_speed_hz = 25000000, /* max spi clock (SCK) speed in HZ */
.bus_num = 0, /* Framework bus number */
.chip_select = 1, /* SPI_SSEL1*/
.platform_data = &bfin_spi_flash_data,
.controller_data = &spi_flash_chip_info,
.mode = SPI_MODE_3,
},
#endif
#if defined(CONFIG_TOUCHSCREEN_AD7877) || defined(CONFIG_TOUCHSCREEN_AD7877_MODULE)
{
.modalias = "ad7877",
.platform_data = &bfin_ad7877_ts_info,
.irq = IRQ_PJ11,
.max_speed_hz = 12500000, /* max spi clock (SCK) speed in HZ */
.bus_num = 0,
.chip_select = 2,
.controller_data = &spi_ad7877_chip_info,
},
#endif
#if defined(CONFIG_SPI_SPIDEV) || defined(CONFIG_SPI_SPIDEV_MODULE)
{
.modalias = "spidev",
.max_speed_hz = 3125000, /* max spi clock (SCK) speed in HZ */
.bus_num = 0,
.chip_select = 1,
.controller_data = &spidev_chip_info,
},
#endif
};
/* SPI (0) */
static struct resource bfin_spi0_resource[] = {
[0] = {
.start = SPI0_REGBASE,
.end = SPI0_REGBASE + 0xFF,
.flags = IORESOURCE_MEM,
},
[1] = {
.start = CH_SPI0,
.end = CH_SPI0,
.flags = IORESOURCE_DMA,
},
[2] = {
.start = IRQ_SPI0,
.end = IRQ_SPI0,
.flags = IORESOURCE_IRQ,
}
};
/* SPI (1) */
static struct resource bfin_spi1_resource[] = {
[0] = {
.start = SPI1_REGBASE,
.end = SPI1_REGBASE + 0xFF,
.flags = IORESOURCE_MEM,
},
[1] = {
.start = CH_SPI1,
.end = CH_SPI1,
.flags = IORESOURCE_DMA,
},
[2] = {
.start = IRQ_SPI1,
.end = IRQ_SPI1,
.flags = IORESOURCE_IRQ,
}
};
/* SPI controller data */
static struct bfin5xx_spi_master bf54x_spi_master_info0 = {
.num_chipselect = 4,
.enable_dma = 1, /* master has the ability to do dma transfer */
.pin_req = {P_SPI0_SCK, P_SPI0_MISO, P_SPI0_MOSI, 0},
};
static struct platform_device bf54x_spi_master0 = {
.name = "bfin-spi",
.id = 0, /* Bus number */
.num_resources = ARRAY_SIZE(bfin_spi0_resource),
.resource = bfin_spi0_resource,
.dev = {
.platform_data = &bf54x_spi_master_info0, /* Passed to driver */
},
};
static struct bfin5xx_spi_master bf54x_spi_master_info1 = {
.num_chipselect = 4,
.enable_dma = 1, /* master has the ability to do dma transfer */
.pin_req = {P_SPI1_SCK, P_SPI1_MISO, P_SPI1_MOSI, 0},
};
static struct platform_device bf54x_spi_master1 = {
.name = "bfin-spi",
.id = 1, /* Bus number */
.num_resources = ARRAY_SIZE(bfin_spi1_resource),
.resource = bfin_spi1_resource,
.dev = {
.platform_data = &bf54x_spi_master_info1, /* Passed to driver */
},
};
#endif /* spi master and devices */
#if defined(CONFIG_I2C_BLACKFIN_TWI) || defined(CONFIG_I2C_BLACKFIN_TWI_MODULE)
static struct resource bfin_twi0_resource[] = {
[0] = {
.start = TWI0_REGBASE,
.end = TWI0_REGBASE + 0xFF,
.flags = IORESOURCE_MEM,
},
[1] = {
.start = IRQ_TWI0,
.end = IRQ_TWI0,
.flags = IORESOURCE_IRQ,
},
};
static struct platform_device i2c_bfin_twi0_device = {
.name = "i2c-bfin-twi",
.id = 0,
.num_resources = ARRAY_SIZE(bfin_twi0_resource),
.resource = bfin_twi0_resource,
};
#if !defined(CONFIG_BF542) /* The BF542 only has 1 TWI */
static struct resource bfin_twi1_resource[] = {
[0] = {
.start = TWI1_REGBASE,
.end = TWI1_REGBASE + 0xFF,
.flags = IORESOURCE_MEM,
},
[1] = {
.start = IRQ_TWI1,
.end = IRQ_TWI1,
.flags = IORESOURCE_IRQ,
},
};
static struct platform_device i2c_bfin_twi1_device = {
.name = "i2c-bfin-twi",
.id = 1,
.num_resources = ARRAY_SIZE(bfin_twi1_resource),
.resource = bfin_twi1_resource,
};
#endif
#endif
#if defined(CONFIG_KEYBOARD_GPIO) || defined(CONFIG_KEYBOARD_GPIO_MODULE)
#include <linux/gpio_keys.h>
/* Single push button on PH7, active high, reported as BTN_0. */
static struct gpio_keys_button bfin_gpio_keys_table[] = {
{BTN_0, GPIO_PH7, 1, "gpio-keys: BTN0"},
};
static struct gpio_keys_platform_data bfin_gpio_keys_data = {
.buttons = bfin_gpio_keys_table,
.nbuttons = ARRAY_SIZE(bfin_gpio_keys_table),
};
static struct platform_device bfin_device_gpiokeys = {
.name = "gpio-keys",
.dev = {
.platform_data = &bfin_gpio_keys_data,
},
};
#endif
/* Core-clock frequency -> minimum internal voltage level pairs used by
 * the DPMC driver when scaling; VRPAIR packs (VLEV, max Hz). */
static const unsigned int cclk_vlev_datasheet[] =
{
/*
* Internal VLEV BF54XSBBC1533
****temporarily using these values until data sheet is updated
*/
VRPAIR(VLEV_085, 150000000),
VRPAIR(VLEV_090, 250000000),
VRPAIR(VLEV_110, 276000000),
VRPAIR(VLEV_115, 301000000),
VRPAIR(VLEV_120, 525000000),
VRPAIR(VLEV_125, 550000000),
VRPAIR(VLEV_130, 600000000),
};
/* Dynamic power management controller configuration (voltage table
 * above plus regulator settling time). */
static struct bfin_dpmc_platform_data bfin_dmpc_vreg_data = {
.tuple_tab = cclk_vlev_datasheet,
.tabsize = ARRAY_SIZE(cclk_vlev_datasheet),
.vr_settling_time = 25 /* us */,
};
static struct platform_device bfin_dpmc = {
.name = "bfin dpmc",
.dev = {
.platform_data = &bfin_dmpc_vreg_data,
},
};
/*
 * All platform devices present on the CM-BF548 board, registered in one
 * shot by cm_bf548_init().  Each entry is compiled in only when the
 * corresponding driver is enabled (built-in or module).
 *
 * Fix: the physmap entry had been mangled to "¶_flash_device," by an
 * "&para" HTML-entity round-trip; the address-of expression is restored.
 */
static struct platform_device *cm_bf548_devices[] __initdata = {
	&bfin_dpmc,
#if defined(CONFIG_RTC_DRV_BFIN) || defined(CONFIG_RTC_DRV_BFIN_MODULE)
	&rtc_device,
#endif
#if defined(CONFIG_SERIAL_BFIN) || defined(CONFIG_SERIAL_BFIN_MODULE)
#ifdef CONFIG_SERIAL_BFIN_UART0
	&bfin_uart0_device,
#endif
#ifdef CONFIG_SERIAL_BFIN_UART1
	&bfin_uart1_device,
#endif
#ifdef CONFIG_SERIAL_BFIN_UART2
	&bfin_uart2_device,
#endif
#ifdef CONFIG_SERIAL_BFIN_UART3
	&bfin_uart3_device,
#endif
#endif
#if defined(CONFIG_BFIN_SIR) || defined(CONFIG_BFIN_SIR_MODULE)
#ifdef CONFIG_BFIN_SIR0
	&bfin_sir0_device,
#endif
#ifdef CONFIG_BFIN_SIR1
	&bfin_sir1_device,
#endif
#ifdef CONFIG_BFIN_SIR2
	&bfin_sir2_device,
#endif
#ifdef CONFIG_BFIN_SIR3
	&bfin_sir3_device,
#endif
#endif
#if defined(CONFIG_FB_BF54X_LQ043) || defined(CONFIG_FB_BF54X_LQ043_MODULE)
	&bf54x_lq043_device,
#endif
#if defined(CONFIG_SMSC911X) || defined(CONFIG_SMSC911X_MODULE)
	&smsc911x_device,
#endif
#if defined(CONFIG_USB_MUSB_HDRC) || defined(CONFIG_USB_MUSB_HDRC_MODULE)
	&musb_device,
#endif
#if defined(CONFIG_SERIAL_BFIN_SPORT) || defined(CONFIG_SERIAL_BFIN_SPORT_MODULE)
#ifdef CONFIG_SERIAL_BFIN_SPORT0_UART
	&bfin_sport0_uart_device,
#endif
#ifdef CONFIG_SERIAL_BFIN_SPORT1_UART
	&bfin_sport1_uart_device,
#endif
#ifdef CONFIG_SERIAL_BFIN_SPORT2_UART
	&bfin_sport2_uart_device,
#endif
#ifdef CONFIG_SERIAL_BFIN_SPORT3_UART
	&bfin_sport3_uart_device,
#endif
#endif
#if defined(CONFIG_PATA_BF54X) || defined(CONFIG_PATA_BF54X_MODULE)
	&bfin_atapi_device,
#endif
#if defined(CONFIG_MTD_NAND_BF5XX) || defined(CONFIG_MTD_NAND_BF5XX_MODULE)
	&bf5xx_nand_device,
#endif
#if defined(CONFIG_SDH_BFIN) || defined(CONFIG_SDH_BFIN_MODULE)
	&bf54x_sdh_device,
#endif
#if defined(CONFIG_SPI_BFIN) || defined(CONFIG_SPI_BFIN_MODULE)
	&bf54x_spi_master0,
	&bf54x_spi_master1,
#endif
#if defined(CONFIG_KEYBOARD_BFIN) || defined(CONFIG_KEYBOARD_BFIN_MODULE)
	&bf54x_kpad_device,
#endif
#if defined(CONFIG_I2C_BLACKFIN_TWI) || defined(CONFIG_I2C_BLACKFIN_TWI_MODULE)
	&i2c_bfin_twi0_device,
#if !defined(CONFIG_BF542)
	&i2c_bfin_twi1_device,
#endif
#endif
#if defined(CONFIG_KEYBOARD_GPIO) || defined(CONFIG_KEYBOARD_GPIO_MODULE)
	&bfin_device_gpiokeys,
#endif
#if defined(CONFIG_MTD_PHYSMAP) || defined(CONFIG_MTD_PHYSMAP_MODULE)
	&para_flash_device,	/* was corrupted to "¶_flash_device" */
#endif
#if defined(CONFIG_CAN_BFIN) || defined(CONFIG_CAN_BFIN_MODULE)
	&bfin_can_device,
#endif
};
/*
 * cm_bf548_init - register the board's platform devices and SPI slaves.
 *
 * Runs at arch_initcall time, i.e. before ordinary device initcalls,
 * so the devices exist by the time their drivers probe.
 *
 * Returns 0 on success or the error from platform_add_devices();
 * previously a registration failure was silently ignored.
 */
static int __init cm_bf548_init(void)
{
	int ret;

	printk(KERN_INFO "%s(): registering device resources\n", __func__);

	ret = platform_add_devices(cm_bf548_devices,
				   ARRAY_SIZE(cm_bf548_devices));
	if (ret)
		return ret;

#if defined(CONFIG_SPI_BFIN) || defined(CONFIG_SPI_BFIN_MODULE)
	/* SPI slave descriptors are registered separately from the masters. */
	spi_register_board_info(bf54x_spi_board_info,
				ARRAY_SIZE(bf54x_spi_board_info));
#endif
	return 0;
}
arch_initcall(cm_bf548_init);
/*
 * Devices needed before normal initcalls run: the UARTs / SPORT-UARTs
 * that can host the early serial console or earlyprintk.
 */
static struct platform_device *cm_bf548_early_devices[] __initdata = {
#if defined(CONFIG_SERIAL_BFIN_CONSOLE) || defined(CONFIG_EARLY_PRINTK)
#ifdef CONFIG_SERIAL_BFIN_UART0
&bfin_uart0_device,
#endif
#ifdef CONFIG_SERIAL_BFIN_UART1
&bfin_uart1_device,
#endif
#ifdef CONFIG_SERIAL_BFIN_UART2
&bfin_uart2_device,
#endif
#ifdef CONFIG_SERIAL_BFIN_UART3
&bfin_uart3_device,
#endif
#endif
#if defined(CONFIG_SERIAL_BFIN_SPORT_CONSOLE)
#ifdef CONFIG_SERIAL_BFIN_SPORT0_UART
&bfin_sport0_uart_device,
#endif
#ifdef CONFIG_SERIAL_BFIN_SPORT1_UART
&bfin_sport1_uart_device,
#endif
#ifdef CONFIG_SERIAL_BFIN_SPORT2_UART
&bfin_sport2_uart_device,
#endif
#ifdef CONFIG_SERIAL_BFIN_SPORT3_UART
&bfin_sport3_uart_device,
#endif
#endif
};
/*
 * Register the console-capable devices listed above with the early
 * platform core so the early serial console can attach before the
 * regular initcall sequence runs.
 */
void __init native_machine_early_platform_add_devices(void)
{
printk(KERN_INFO "register early platform devices\n");
early_platform_add_devices(cm_bf548_early_devices,
ARRAY_SIZE(cm_bf548_early_devices));
}
| gpl-2.0 |
Lprigara/KernelLinuxRaspberry | arch/x86/kernel/apic/hw_nmi.c | 2441 | 2171 | /*
* HW NMI watchdog support
*
* started by Don Zickus, Copyright (C) 2010 Red Hat, Inc.
*
* Arch specific calls to support NMI watchdog
*
* Bits copied from original nmi.c file
*
*/
#include <asm/apic.h>
#include <asm/nmi.h>
#include <linux/cpumask.h>
#include <linux/kdebug.h>
#include <linux/notifier.h>
#include <linux/kprobes.h>
#include <linux/nmi.h>
#include <linux/module.h>
#include <linux/delay.h>
#ifdef CONFIG_HARDLOCKUP_DETECTOR
/*
 * Number of CPU cycles between hardlockup-detector NMI samples:
 * watchdog_thresh seconds worth of cycles.  cpu_khz is widened to u64
 * before the multiply so the product cannot overflow 32 bits.
 */
u64 hw_nmi_get_sample_period(int watchdog_thresh)
{
return (u64)(cpu_khz) * 1000 * watchdog_thresh;
}
#endif
#ifdef arch_trigger_all_cpu_backtrace
/* For reliability, we're prepared to waste bits here. */
/* CPUs that still owe a backtrace; each CPU clears its own bit from
 * the NMI handler once it has printed its registers. */
static DECLARE_BITMAP(backtrace_mask, NR_CPUS) __read_mostly;
/* "in progress" flag of arch_trigger_all_cpu_backtrace */
static unsigned long backtrace_flag;
/*
 * Ask every online CPU to dump its registers from NMI context.
 *
 * Marks all online CPUs in backtrace_mask, sends an NMI to all CPUs,
 * then polls (up to 10 seconds) for the mask to drain as each CPU's
 * handler prints its dump and clears its bit.  backtrace_flag
 * serializes concurrent callers; the mask copy must happen before the
 * IPI is sent so the handlers see their bits set.
 */
void arch_trigger_all_cpu_backtrace(void)
{
int i;
if (test_and_set_bit(0, &backtrace_flag))
/*
* If there is already a trigger_all_cpu_backtrace() in progress
* (backtrace_flag == 1), don't output double cpu dump infos.
*/
return;
cpumask_copy(to_cpumask(backtrace_mask), cpu_online_mask);
printk(KERN_INFO "sending NMI to all CPUs:\n");
apic->send_IPI_all(NMI_VECTOR);
/* Wait for up to 10 seconds for all CPUs to do the backtrace */
for (i = 0; i < 10 * 1000; i++) {
if (cpumask_empty(to_cpumask(backtrace_mask)))
break;
mdelay(1);
}
clear_bit(0, &backtrace_flag);
smp_mb__after_clear_bit();
}
/*
 * Per-CPU NMI callback: if this CPU's bit is still set in
 * backtrace_mask, print its registers under a private arch spinlock
 * (so dumps from different CPUs don't interleave), clear the bit and
 * claim the NMI; otherwise let other NMI handlers see it.
 */
static int __kprobes
arch_trigger_all_cpu_backtrace_handler(unsigned int cmd, struct pt_regs *regs)
{
int cpu;
cpu = smp_processor_id();
if (cpumask_test_cpu(cpu, to_cpumask(backtrace_mask))) {
/* arch_spinlock, not a regular spinlock: safe in NMI context. */
static arch_spinlock_t lock = __ARCH_SPIN_LOCK_UNLOCKED;
arch_spin_lock(&lock);
printk(KERN_WARNING "NMI backtrace for cpu %d\n", cpu);
show_regs(regs);
arch_spin_unlock(&lock);
cpumask_clear_cpu(cpu, to_cpumask(backtrace_mask));
return NMI_HANDLED;
}
return NMI_DONE;
}
/*
 * Install the NMI handler that produces the per-CPU backtraces for
 * arch_trigger_all_cpu_backtrace().  Runs at early_initcall time so
 * the facility is available as soon as possible.
 *
 * Returns the result of register_nmi_handler(); previously a
 * registration failure was silently discarded and 0 returned.
 */
static int __init register_trigger_all_cpu_backtrace(void)
{
	return register_nmi_handler(NMI_LOCAL,
				    arch_trigger_all_cpu_backtrace_handler,
				    0, "arch_bt");
}
early_initcall(register_trigger_all_cpu_backtrace);
#endif
| gpl-2.0 |
hiikezoe/android_kernel_huawei_204hw | drivers/usb/serial/ipaq.c | 4489 | 33692 | /*
* USB Compaq iPAQ driver
*
* Copyright (C) 2001 - 2002
* Ganesh Varadarajan <ganesh@veritas.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*/
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/tty.h>
#include <linux/tty_driver.h>
#include <linux/tty_flip.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/uaccess.h>
#include <linux/usb.h>
#include <linux/usb/serial.h>
/* Default number of attempts for the initial handshake with the PDA. */
#define KP_RETRIES 100
/*
* Version Information
*/
#define DRIVER_VERSION "v1.0"
#define DRIVER_AUTHOR "Ganesh Varadarajan <ganesh@veritas.com>"
#define DRIVER_DESC "USB PocketPC PDA driver"
/* User-overridable VID/PID and tuning knobs -- presumably exposed as
 * module parameters later in the file (not visible here); confirm. */
static __u16 product, vendor;
static bool debug;
static int connect_retries = KP_RETRIES;
static int initial_wait;
/* Function prototypes for an ipaq */
static int ipaq_open(struct tty_struct *tty,
struct usb_serial_port *port);
static int ipaq_calc_num_ports(struct usb_serial *serial);
static int ipaq_startup(struct usb_serial *serial);
static struct usb_device_id ipaq_id_table [] = {
/* The first entry is a placeholder for the insmod-specified device */
{ USB_DEVICE(0x049F, 0x0003) },
{ USB_DEVICE(0x0104, 0x00BE) }, /* Socket USB Sync */
{ USB_DEVICE(0x03F0, 0x1016) }, /* HP USB Sync */
{ USB_DEVICE(0x03F0, 0x1116) }, /* HP USB Sync 1611 */
{ USB_DEVICE(0x03F0, 0x1216) }, /* HP USB Sync 1612 */
{ USB_DEVICE(0x03F0, 0x2016) }, /* HP USB Sync 1620 */
{ USB_DEVICE(0x03F0, 0x2116) }, /* HP USB Sync 1621 */
{ USB_DEVICE(0x03F0, 0x2216) }, /* HP USB Sync 1622 */
{ USB_DEVICE(0x03F0, 0x3016) }, /* HP USB Sync 1630 */
{ USB_DEVICE(0x03F0, 0x3116) }, /* HP USB Sync 1631 */
{ USB_DEVICE(0x03F0, 0x3216) }, /* HP USB Sync 1632 */
{ USB_DEVICE(0x03F0, 0x4016) }, /* HP USB Sync 1640 */
{ USB_DEVICE(0x03F0, 0x4116) }, /* HP USB Sync 1641 */
{ USB_DEVICE(0x03F0, 0x4216) }, /* HP USB Sync 1642 */
{ USB_DEVICE(0x03F0, 0x5016) }, /* HP USB Sync 1650 */
{ USB_DEVICE(0x03F0, 0x5116) }, /* HP USB Sync 1651 */
{ USB_DEVICE(0x03F0, 0x5216) }, /* HP USB Sync 1652 */
{ USB_DEVICE(0x0409, 0x00D5) }, /* NEC USB Sync */
{ USB_DEVICE(0x0409, 0x00D6) }, /* NEC USB Sync */
{ USB_DEVICE(0x0409, 0x00D7) }, /* NEC USB Sync */
{ USB_DEVICE(0x0409, 0x8024) }, /* NEC USB Sync */
{ USB_DEVICE(0x0409, 0x8025) }, /* NEC USB Sync */
{ USB_DEVICE(0x043E, 0x9C01) }, /* LGE USB Sync */
{ USB_DEVICE(0x045E, 0x00CE) }, /* Microsoft USB Sync */
{ USB_DEVICE(0x045E, 0x0400) }, /* Windows Powered Pocket PC 2002 */
{ USB_DEVICE(0x045E, 0x0401) }, /* Windows Powered Pocket PC 2002 */
{ USB_DEVICE(0x045E, 0x0402) }, /* Windows Powered Pocket PC 2002 */
{ USB_DEVICE(0x045E, 0x0403) }, /* Windows Powered Pocket PC 2002 */
{ USB_DEVICE(0x045E, 0x0404) }, /* Windows Powered Pocket PC 2002 */
{ USB_DEVICE(0x045E, 0x0405) }, /* Windows Powered Pocket PC 2002 */
{ USB_DEVICE(0x045E, 0x0406) }, /* Windows Powered Pocket PC 2002 */
{ USB_DEVICE(0x045E, 0x0407) }, /* Windows Powered Pocket PC 2002 */
{ USB_DEVICE(0x045E, 0x0408) }, /* Windows Powered Pocket PC 2002 */
{ USB_DEVICE(0x045E, 0x0409) }, /* Windows Powered Pocket PC 2002 */
{ USB_DEVICE(0x045E, 0x040A) }, /* Windows Powered Pocket PC 2002 */
{ USB_DEVICE(0x045E, 0x040B) }, /* Windows Powered Pocket PC 2002 */
{ USB_DEVICE(0x045E, 0x040C) }, /* Windows Powered Pocket PC 2002 */
{ USB_DEVICE(0x045E, 0x040D) }, /* Windows Powered Pocket PC 2002 */
{ USB_DEVICE(0x045E, 0x040E) }, /* Windows Powered Pocket PC 2002 */
{ USB_DEVICE(0x045E, 0x040F) }, /* Windows Powered Pocket PC 2002 */
{ USB_DEVICE(0x045E, 0x0410) }, /* Windows Powered Pocket PC 2002 */
{ USB_DEVICE(0x045E, 0x0411) }, /* Windows Powered Pocket PC 2002 */
{ USB_DEVICE(0x045E, 0x0412) }, /* Windows Powered Pocket PC 2002 */
{ USB_DEVICE(0x045E, 0x0413) }, /* Windows Powered Pocket PC 2002 */
{ USB_DEVICE(0x045E, 0x0414) }, /* Windows Powered Pocket PC 2002 */
{ USB_DEVICE(0x045E, 0x0415) }, /* Windows Powered Pocket PC 2002 */
{ USB_DEVICE(0x045E, 0x0416) }, /* Windows Powered Pocket PC 2002 */
{ USB_DEVICE(0x045E, 0x0417) }, /* Windows Powered Pocket PC 2002 */
{ USB_DEVICE(0x045E, 0x0432) }, /* Windows Powered Pocket PC 2003 */
{ USB_DEVICE(0x045E, 0x0433) }, /* Windows Powered Pocket PC 2003 */
{ USB_DEVICE(0x045E, 0x0434) }, /* Windows Powered Pocket PC 2003 */
{ USB_DEVICE(0x045E, 0x0435) }, /* Windows Powered Pocket PC 2003 */
{ USB_DEVICE(0x045E, 0x0436) }, /* Windows Powered Pocket PC 2003 */
{ USB_DEVICE(0x045E, 0x0437) }, /* Windows Powered Pocket PC 2003 */
{ USB_DEVICE(0x045E, 0x0438) }, /* Windows Powered Pocket PC 2003 */
{ USB_DEVICE(0x045E, 0x0439) }, /* Windows Powered Pocket PC 2003 */
{ USB_DEVICE(0x045E, 0x043A) }, /* Windows Powered Pocket PC 2003 */
{ USB_DEVICE(0x045E, 0x043B) }, /* Windows Powered Pocket PC 2003 */
{ USB_DEVICE(0x045E, 0x043C) }, /* Windows Powered Pocket PC 2003 */
{ USB_DEVICE(0x045E, 0x043D) }, /* Windows Powered Pocket PC 2003 */
{ USB_DEVICE(0x045E, 0x043E) }, /* Windows Powered Pocket PC 2003 */
{ USB_DEVICE(0x045E, 0x043F) }, /* Windows Powered Pocket PC 2003 */
{ USB_DEVICE(0x045E, 0x0440) }, /* Windows Powered Pocket PC 2003 */
{ USB_DEVICE(0x045E, 0x0441) }, /* Windows Powered Pocket PC 2003 */
{ USB_DEVICE(0x045E, 0x0442) }, /* Windows Powered Pocket PC 2003 */
{ USB_DEVICE(0x045E, 0x0443) }, /* Windows Powered Pocket PC 2003 */
{ USB_DEVICE(0x045E, 0x0444) }, /* Windows Powered Pocket PC 2003 */
{ USB_DEVICE(0x045E, 0x0445) }, /* Windows Powered Pocket PC 2003 */
{ USB_DEVICE(0x045E, 0x0446) }, /* Windows Powered Pocket PC 2003 */
{ USB_DEVICE(0x045E, 0x0447) }, /* Windows Powered Pocket PC 2003 */
{ USB_DEVICE(0x045E, 0x0448) }, /* Windows Powered Pocket PC 2003 */
{ USB_DEVICE(0x045E, 0x0449) }, /* Windows Powered Pocket PC 2003 */
{ USB_DEVICE(0x045E, 0x044A) }, /* Windows Powered Pocket PC 2003 */
{ USB_DEVICE(0x045E, 0x044B) }, /* Windows Powered Pocket PC 2003 */
{ USB_DEVICE(0x045E, 0x044C) }, /* Windows Powered Pocket PC 2003 */
{ USB_DEVICE(0x045E, 0x044D) }, /* Windows Powered Pocket PC 2003 */
{ USB_DEVICE(0x045E, 0x044E) }, /* Windows Powered Pocket PC 2003 */
{ USB_DEVICE(0x045E, 0x044F) }, /* Windows Powered Pocket PC 2003 */
{ USB_DEVICE(0x045E, 0x0450) }, /* Windows Powered Pocket PC 2003 */
{ USB_DEVICE(0x045E, 0x0451) }, /* Windows Powered Pocket PC 2003 */
{ USB_DEVICE(0x045E, 0x0452) }, /* Windows Powered Pocket PC 2003 */
{ USB_DEVICE(0x045E, 0x0453) }, /* Windows Powered Pocket PC 2003 */
{ USB_DEVICE(0x045E, 0x0454) }, /* Windows Powered Pocket PC 2003 */
{ USB_DEVICE(0x045E, 0x0455) }, /* Windows Powered Pocket PC 2003 */
{ USB_DEVICE(0x045E, 0x0456) }, /* Windows Powered Pocket PC 2003 */
{ USB_DEVICE(0x045E, 0x0457) }, /* Windows Powered Pocket PC 2003 */
{ USB_DEVICE(0x045E, 0x0458) }, /* Windows Powered Pocket PC 2003 */
{ USB_DEVICE(0x045E, 0x0459) }, /* Windows Powered Pocket PC 2003 */
{ USB_DEVICE(0x045E, 0x045A) }, /* Windows Powered Pocket PC 2003 */
{ USB_DEVICE(0x045E, 0x045B) }, /* Windows Powered Pocket PC 2003 */
{ USB_DEVICE(0x045E, 0x045C) }, /* Windows Powered Pocket PC 2003 */
{ USB_DEVICE(0x045E, 0x045D) }, /* Windows Powered Pocket PC 2003 */
{ USB_DEVICE(0x045E, 0x045E) }, /* Windows Powered Pocket PC 2003 */
{ USB_DEVICE(0x045E, 0x045F) }, /* Windows Powered Pocket PC 2003 */
{ USB_DEVICE(0x045E, 0x0460) }, /* Windows Powered Pocket PC 2003 */
{ USB_DEVICE(0x045E, 0x0461) }, /* Windows Powered Pocket PC 2003 */
{ USB_DEVICE(0x045E, 0x0462) }, /* Windows Powered Pocket PC 2003 */
{ USB_DEVICE(0x045E, 0x0463) }, /* Windows Powered Pocket PC 2003 */
{ USB_DEVICE(0x045E, 0x0464) }, /* Windows Powered Pocket PC 2003 */
{ USB_DEVICE(0x045E, 0x0465) }, /* Windows Powered Pocket PC 2003 */
{ USB_DEVICE(0x045E, 0x0466) }, /* Windows Powered Pocket PC 2003 */
{ USB_DEVICE(0x045E, 0x0467) }, /* Windows Powered Pocket PC 2003 */
{ USB_DEVICE(0x045E, 0x0468) }, /* Windows Powered Pocket PC 2003 */
{ USB_DEVICE(0x045E, 0x0469) }, /* Windows Powered Pocket PC 2003 */
{ USB_DEVICE(0x045E, 0x046A) }, /* Windows Powered Pocket PC 2003 */
{ USB_DEVICE(0x045E, 0x046B) }, /* Windows Powered Pocket PC 2003 */
{ USB_DEVICE(0x045E, 0x046C) }, /* Windows Powered Pocket PC 2003 */
{ USB_DEVICE(0x045E, 0x046D) }, /* Windows Powered Pocket PC 2003 */
{ USB_DEVICE(0x045E, 0x046E) }, /* Windows Powered Pocket PC 2003 */
{ USB_DEVICE(0x045E, 0x046F) }, /* Windows Powered Pocket PC 2003 */
{ USB_DEVICE(0x045E, 0x0470) }, /* Windows Powered Pocket PC 2003 */
{ USB_DEVICE(0x045E, 0x0471) }, /* Windows Powered Pocket PC 2003 */
{ USB_DEVICE(0x045E, 0x0472) }, /* Windows Powered Pocket PC 2003 */
{ USB_DEVICE(0x045E, 0x0473) }, /* Windows Powered Pocket PC 2003 */
{ USB_DEVICE(0x045E, 0x0474) }, /* Windows Powered Pocket PC 2003 */
{ USB_DEVICE(0x045E, 0x0475) }, /* Windows Powered Pocket PC 2003 */
{ USB_DEVICE(0x045E, 0x0476) }, /* Windows Powered Pocket PC 2003 */
{ USB_DEVICE(0x045E, 0x0477) }, /* Windows Powered Pocket PC 2003 */
{ USB_DEVICE(0x045E, 0x0478) }, /* Windows Powered Pocket PC 2003 */
{ USB_DEVICE(0x045E, 0x0479) }, /* Windows Powered Pocket PC 2003 */
{ USB_DEVICE(0x045E, 0x047A) }, /* Windows Powered Pocket PC 2003 */
{ USB_DEVICE(0x045E, 0x047B) }, /* Windows Powered Pocket PC 2003 */
{ USB_DEVICE(0x045E, 0x04C8) }, /* Windows Powered Smartphone 2002 */
{ USB_DEVICE(0x045E, 0x04C9) }, /* Windows Powered Smartphone 2002 */
{ USB_DEVICE(0x045E, 0x04CA) }, /* Windows Powered Smartphone 2002 */
{ USB_DEVICE(0x045E, 0x04CB) }, /* Windows Powered Smartphone 2002 */
{ USB_DEVICE(0x045E, 0x04CC) }, /* Windows Powered Smartphone 2002 */
{ USB_DEVICE(0x045E, 0x04CD) }, /* Windows Powered Smartphone 2002 */
{ USB_DEVICE(0x045E, 0x04CE) }, /* Windows Powered Smartphone 2002 */
{ USB_DEVICE(0x045E, 0x04D7) }, /* Windows Powered Smartphone 2003 */
{ USB_DEVICE(0x045E, 0x04D8) }, /* Windows Powered Smartphone 2003 */
{ USB_DEVICE(0x045E, 0x04D9) }, /* Windows Powered Smartphone 2003 */
{ USB_DEVICE(0x045E, 0x04DA) }, /* Windows Powered Smartphone 2003 */
{ USB_DEVICE(0x045E, 0x04DB) }, /* Windows Powered Smartphone 2003 */
{ USB_DEVICE(0x045E, 0x04DC) }, /* Windows Powered Smartphone 2003 */
{ USB_DEVICE(0x045E, 0x04DD) }, /* Windows Powered Smartphone 2003 */
{ USB_DEVICE(0x045E, 0x04DE) }, /* Windows Powered Smartphone 2003 */
{ USB_DEVICE(0x045E, 0x04DF) }, /* Windows Powered Smartphone 2003 */
{ USB_DEVICE(0x045E, 0x04E0) }, /* Windows Powered Smartphone 2003 */
{ USB_DEVICE(0x045E, 0x04E1) }, /* Windows Powered Smartphone 2003 */
{ USB_DEVICE(0x045E, 0x04E2) }, /* Windows Powered Smartphone 2003 */
{ USB_DEVICE(0x045E, 0x04E3) }, /* Windows Powered Smartphone 2003 */
{ USB_DEVICE(0x045E, 0x04E4) }, /* Windows Powered Smartphone 2003 */
{ USB_DEVICE(0x045E, 0x04E5) }, /* Windows Powered Smartphone 2003 */
{ USB_DEVICE(0x045E, 0x04E6) }, /* Windows Powered Smartphone 2003 */
{ USB_DEVICE(0x045E, 0x04E7) }, /* Windows Powered Smartphone 2003 */
{ USB_DEVICE(0x045E, 0x04E8) }, /* Windows Powered Smartphone 2003 */
{ USB_DEVICE(0x045E, 0x04E9) }, /* Windows Powered Smartphone 2003 */
{ USB_DEVICE(0x045E, 0x04EA) }, /* Windows Powered Smartphone 2003 */
{ USB_DEVICE(0x049F, 0x0003) }, /* Compaq iPAQ USB Sync */
{ USB_DEVICE(0x049F, 0x0032) }, /* Compaq iPAQ USB Sync */
{ USB_DEVICE(0x04A4, 0x0014) }, /* Hitachi USB Sync */
{ USB_DEVICE(0x04AD, 0x0301) }, /* USB Sync 0301 */
{ USB_DEVICE(0x04AD, 0x0302) }, /* USB Sync 0302 */
{ USB_DEVICE(0x04AD, 0x0303) }, /* USB Sync 0303 */
{ USB_DEVICE(0x04AD, 0x0306) }, /* GPS Pocket PC USB Sync */
{ USB_DEVICE(0x04B7, 0x0531) }, /* MyGuide 7000 XL USB Sync */
{ USB_DEVICE(0x04C5, 0x1058) }, /* FUJITSU USB Sync */
{ USB_DEVICE(0x04C5, 0x1079) }, /* FUJITSU USB Sync */
{ USB_DEVICE(0x04DA, 0x2500) }, /* Panasonic USB Sync */
{ USB_DEVICE(0x04DD, 0x9102) }, /* SHARP WS003SH USB Modem */
{ USB_DEVICE(0x04DD, 0x9121) }, /* SHARP WS004SH USB Modem */
{ USB_DEVICE(0x04DD, 0x9123) }, /* SHARP WS007SH USB Modem */
{ USB_DEVICE(0x04DD, 0x9151) }, /* SHARP S01SH USB Modem */
{ USB_DEVICE(0x04DD, 0x91AC) }, /* SHARP WS011SH USB Modem */
{ USB_DEVICE(0x04E8, 0x5F00) }, /* Samsung NEXiO USB Sync */
{ USB_DEVICE(0x04E8, 0x5F01) }, /* Samsung NEXiO USB Sync */
{ USB_DEVICE(0x04E8, 0x5F02) }, /* Samsung NEXiO USB Sync */
{ USB_DEVICE(0x04E8, 0x5F03) }, /* Samsung NEXiO USB Sync */
{ USB_DEVICE(0x04E8, 0x5F04) }, /* Samsung NEXiO USB Sync */
{ USB_DEVICE(0x04E8, 0x6611) }, /* Samsung MITs USB Sync */
{ USB_DEVICE(0x04E8, 0x6613) }, /* Samsung MITs USB Sync */
{ USB_DEVICE(0x04E8, 0x6615) }, /* Samsung MITs USB Sync */
{ USB_DEVICE(0x04E8, 0x6617) }, /* Samsung MITs USB Sync */
{ USB_DEVICE(0x04E8, 0x6619) }, /* Samsung MITs USB Sync */
{ USB_DEVICE(0x04E8, 0x661B) }, /* Samsung MITs USB Sync */
{ USB_DEVICE(0x04E8, 0x662E) }, /* Samsung MITs USB Sync */
{ USB_DEVICE(0x04E8, 0x6630) }, /* Samsung MITs USB Sync */
{ USB_DEVICE(0x04E8, 0x6632) }, /* Samsung MITs USB Sync */
{ USB_DEVICE(0x04f1, 0x3011) }, /* JVC USB Sync */
{ USB_DEVICE(0x04F1, 0x3012) }, /* JVC USB Sync */
{ USB_DEVICE(0x0502, 0x1631) }, /* c10 Series */
{ USB_DEVICE(0x0502, 0x1632) }, /* c20 Series */
{ USB_DEVICE(0x0502, 0x16E1) }, /* Acer n10 Handheld USB Sync */
{ USB_DEVICE(0x0502, 0x16E2) }, /* Acer n20 Handheld USB Sync */
{ USB_DEVICE(0x0502, 0x16E3) }, /* Acer n30 Handheld USB Sync */
{ USB_DEVICE(0x0536, 0x01A0) }, /* HHP PDT */
{ USB_DEVICE(0x0543, 0x0ED9) }, /* ViewSonic Color Pocket PC V35 */
{ USB_DEVICE(0x0543, 0x1527) }, /* ViewSonic Color Pocket PC V36 */
{ USB_DEVICE(0x0543, 0x1529) }, /* ViewSonic Color Pocket PC V37 */
{ USB_DEVICE(0x0543, 0x152B) }, /* ViewSonic Color Pocket PC V38 */
{ USB_DEVICE(0x0543, 0x152E) }, /* ViewSonic Pocket PC */
{ USB_DEVICE(0x0543, 0x1921) }, /* ViewSonic Communicator Pocket PC */
{ USB_DEVICE(0x0543, 0x1922) }, /* ViewSonic Smartphone */
{ USB_DEVICE(0x0543, 0x1923) }, /* ViewSonic Pocket PC V30 */
{ USB_DEVICE(0x05E0, 0x2000) }, /* Symbol USB Sync */
{ USB_DEVICE(0x05E0, 0x2001) }, /* Symbol USB Sync 0x2001 */
{ USB_DEVICE(0x05E0, 0x2002) }, /* Symbol USB Sync 0x2002 */
{ USB_DEVICE(0x05E0, 0x2003) }, /* Symbol USB Sync 0x2003 */
{ USB_DEVICE(0x05E0, 0x2004) }, /* Symbol USB Sync 0x2004 */
{ USB_DEVICE(0x05E0, 0x2005) }, /* Symbol USB Sync 0x2005 */
{ USB_DEVICE(0x05E0, 0x2006) }, /* Symbol USB Sync 0x2006 */
{ USB_DEVICE(0x05E0, 0x2007) }, /* Symbol USB Sync 0x2007 */
{ USB_DEVICE(0x05E0, 0x2008) }, /* Symbol USB Sync 0x2008 */
{ USB_DEVICE(0x05E0, 0x2009) }, /* Symbol USB Sync 0x2009 */
{ USB_DEVICE(0x05E0, 0x200A) }, /* Symbol USB Sync 0x200A */
{ USB_DEVICE(0x067E, 0x1001) }, /* Intermec Mobile Computer */
{ USB_DEVICE(0x07CF, 0x2001) }, /* CASIO USB Sync 2001 */
{ USB_DEVICE(0x07CF, 0x2002) }, /* CASIO USB Sync 2002 */
{ USB_DEVICE(0x07CF, 0x2003) }, /* CASIO USB Sync 2003 */
{ USB_DEVICE(0x0930, 0x0700) }, /* TOSHIBA USB Sync 0700 */
{ USB_DEVICE(0x0930, 0x0705) }, /* TOSHIBA Pocket PC e310 */
{ USB_DEVICE(0x0930, 0x0706) }, /* TOSHIBA Pocket PC e740 */
{ USB_DEVICE(0x0930, 0x0707) }, /* TOSHIBA Pocket PC e330 Series */
{ USB_DEVICE(0x0930, 0x0708) }, /* TOSHIBA Pocket PC e350 Series */
{ USB_DEVICE(0x0930, 0x0709) }, /* TOSHIBA Pocket PC e750 Series */
{ USB_DEVICE(0x0930, 0x070A) }, /* TOSHIBA Pocket PC e400 Series */
{ USB_DEVICE(0x0930, 0x070B) }, /* TOSHIBA Pocket PC e800 Series */
{ USB_DEVICE(0x094B, 0x0001) }, /* Linkup Systems USB Sync */
{ USB_DEVICE(0x0960, 0x0065) }, /* BCOM USB Sync 0065 */
{ USB_DEVICE(0x0960, 0x0066) }, /* BCOM USB Sync 0066 */
{ USB_DEVICE(0x0960, 0x0067) }, /* BCOM USB Sync 0067 */
{ USB_DEVICE(0x0961, 0x0010) }, /* Portatec USB Sync */
{ USB_DEVICE(0x099E, 0x0052) }, /* Trimble GeoExplorer */
{ USB_DEVICE(0x099E, 0x4000) }, /* TDS Data Collector */
{ USB_DEVICE(0x0B05, 0x4200) }, /* ASUS USB Sync */
{ USB_DEVICE(0x0B05, 0x4201) }, /* ASUS USB Sync */
{ USB_DEVICE(0x0B05, 0x4202) }, /* ASUS USB Sync */
{ USB_DEVICE(0x0B05, 0x420F) }, /* ASUS USB Sync */
{ USB_DEVICE(0x0B05, 0x9200) }, /* ASUS USB Sync */
{ USB_DEVICE(0x0B05, 0x9202) }, /* ASUS USB Sync */
{ USB_DEVICE(0x0BB4, 0x00CE) }, /* HTC USB Sync */
{ USB_DEVICE(0x0BB4, 0x00CF) }, /* HTC USB Modem */
{ USB_DEVICE(0x0BB4, 0x0A01) }, /* PocketPC USB Sync */
{ USB_DEVICE(0x0BB4, 0x0A02) }, /* PocketPC USB Sync */
{ USB_DEVICE(0x0BB4, 0x0A03) }, /* PocketPC USB Sync */
{ USB_DEVICE(0x0BB4, 0x0A04) }, /* PocketPC USB Sync */
{ USB_DEVICE(0x0BB4, 0x0A05) }, /* PocketPC USB Sync */
{ USB_DEVICE(0x0BB4, 0x0A06) }, /* PocketPC USB Sync */
{ USB_DEVICE(0x0BB4, 0x0A07) }, /* PocketPC USB Sync */
{ USB_DEVICE(0x0BB4, 0x0A08) }, /* PocketPC USB Sync */
{ USB_DEVICE(0x0BB4, 0x0A09) }, /* PocketPC USB Sync */
{ USB_DEVICE(0x0BB4, 0x0A0A) }, /* PocketPC USB Sync */
{ USB_DEVICE(0x0BB4, 0x0A0B) }, /* PocketPC USB Sync */
{ USB_DEVICE(0x0BB4, 0x0A0C) }, /* PocketPC USB Sync */
{ USB_DEVICE(0x0BB4, 0x0A0D) }, /* PocketPC USB Sync */
{ USB_DEVICE(0x0BB4, 0x0A0E) }, /* PocketPC USB Sync */
{ USB_DEVICE(0x0BB4, 0x0A0F) }, /* PocketPC USB Sync */
{ USB_DEVICE(0x0BB4, 0x0A10) }, /* PocketPC USB Sync */
{ USB_DEVICE(0x0BB4, 0x0A11) }, /* PocketPC USB Sync */
{ USB_DEVICE(0x0BB4, 0x0A12) }, /* PocketPC USB Sync */
{ USB_DEVICE(0x0BB4, 0x0A13) }, /* PocketPC USB Sync */
{ USB_DEVICE(0x0BB4, 0x0A14) }, /* PocketPC USB Sync */
{ USB_DEVICE(0x0BB4, 0x0A15) }, /* PocketPC USB Sync */
{ USB_DEVICE(0x0BB4, 0x0A16) }, /* PocketPC USB Sync */
{ USB_DEVICE(0x0BB4, 0x0A17) }, /* PocketPC USB Sync */
{ USB_DEVICE(0x0BB4, 0x0A18) }, /* PocketPC USB Sync */
{ USB_DEVICE(0x0BB4, 0x0A19) }, /* PocketPC USB Sync */
{ USB_DEVICE(0x0BB4, 0x0A1A) }, /* PocketPC USB Sync */
{ USB_DEVICE(0x0BB4, 0x0A1B) }, /* PocketPC USB Sync */
{ USB_DEVICE(0x0BB4, 0x0A1C) }, /* PocketPC USB Sync */
{ USB_DEVICE(0x0BB4, 0x0A1D) }, /* PocketPC USB Sync */
{ USB_DEVICE(0x0BB4, 0x0A1E) }, /* PocketPC USB Sync */
{ USB_DEVICE(0x0BB4, 0x0A1F) }, /* PocketPC USB Sync */
{ USB_DEVICE(0x0BB4, 0x0A20) }, /* PocketPC USB Sync */
{ USB_DEVICE(0x0BB4, 0x0A21) }, /* PocketPC USB Sync */
{ USB_DEVICE(0x0BB4, 0x0A22) }, /* PocketPC USB Sync */
{ USB_DEVICE(0x0BB4, 0x0A23) }, /* PocketPC USB Sync */
{ USB_DEVICE(0x0BB4, 0x0A24) }, /* PocketPC USB Sync */
{ USB_DEVICE(0x0BB4, 0x0A25) }, /* PocketPC USB Sync */
{ USB_DEVICE(0x0BB4, 0x0A26) }, /* PocketPC USB Sync */
{ USB_DEVICE(0x0BB4, 0x0A27) }, /* PocketPC USB Sync */
{ USB_DEVICE(0x0BB4, 0x0A28) }, /* PocketPC USB Sync */
{ USB_DEVICE(0x0BB4, 0x0A29) }, /* PocketPC USB Sync */
{ USB_DEVICE(0x0BB4, 0x0A2A) }, /* PocketPC USB Sync */
{ USB_DEVICE(0x0BB4, 0x0A2B) }, /* PocketPC USB Sync */
{ USB_DEVICE(0x0BB4, 0x0A2C) }, /* PocketPC USB Sync */
{ USB_DEVICE(0x0BB4, 0x0A2D) }, /* PocketPC USB Sync */
{ USB_DEVICE(0x0BB4, 0x0A2E) }, /* PocketPC USB Sync */
{ USB_DEVICE(0x0BB4, 0x0A2F) }, /* PocketPC USB Sync */
{ USB_DEVICE(0x0BB4, 0x0A30) }, /* PocketPC USB Sync */
{ USB_DEVICE(0x0BB4, 0x0A31) }, /* PocketPC USB Sync */
{ USB_DEVICE(0x0BB4, 0x0A32) }, /* PocketPC USB Sync */
{ USB_DEVICE(0x0BB4, 0x0A33) }, /* PocketPC USB Sync */
{ USB_DEVICE(0x0BB4, 0x0A34) }, /* PocketPC USB Sync */
{ USB_DEVICE(0x0BB4, 0x0A35) }, /* PocketPC USB Sync */
{ USB_DEVICE(0x0BB4, 0x0A36) }, /* PocketPC USB Sync */
{ USB_DEVICE(0x0BB4, 0x0A37) }, /* PocketPC USB Sync */
{ USB_DEVICE(0x0BB4, 0x0A38) }, /* PocketPC USB Sync */
{ USB_DEVICE(0x0BB4, 0x0A39) }, /* PocketPC USB Sync */
{ USB_DEVICE(0x0BB4, 0x0A3A) }, /* PocketPC USB Sync */
{ USB_DEVICE(0x0BB4, 0x0A3B) }, /* PocketPC USB Sync */
{ USB_DEVICE(0x0BB4, 0x0A3C) }, /* PocketPC USB Sync */
{ USB_DEVICE(0x0BB4, 0x0A3D) }, /* PocketPC USB Sync */
{ USB_DEVICE(0x0BB4, 0x0A3E) }, /* PocketPC USB Sync */
{ USB_DEVICE(0x0BB4, 0x0A3F) }, /* PocketPC USB Sync */
{ USB_DEVICE(0x0BB4, 0x0A40) }, /* PocketPC USB Sync */
{ USB_DEVICE(0x0BB4, 0x0A41) }, /* PocketPC USB Sync */
{ USB_DEVICE(0x0BB4, 0x0A42) }, /* PocketPC USB Sync */
{ USB_DEVICE(0x0BB4, 0x0A43) }, /* PocketPC USB Sync */
{ USB_DEVICE(0x0BB4, 0x0A44) }, /* PocketPC USB Sync */
{ USB_DEVICE(0x0BB4, 0x0A45) }, /* PocketPC USB Sync */
{ USB_DEVICE(0x0BB4, 0x0A46) }, /* PocketPC USB Sync */
{ USB_DEVICE(0x0BB4, 0x0A47) }, /* PocketPC USB Sync */
{ USB_DEVICE(0x0BB4, 0x0A48) }, /* PocketPC USB Sync */
{ USB_DEVICE(0x0BB4, 0x0A49) }, /* PocketPC USB Sync */
{ USB_DEVICE(0x0BB4, 0x0A4A) }, /* PocketPC USB Sync */
{ USB_DEVICE(0x0BB4, 0x0A4B) }, /* PocketPC USB Sync */
{ USB_DEVICE(0x0BB4, 0x0A4C) }, /* PocketPC USB Sync */
{ USB_DEVICE(0x0BB4, 0x0A4D) }, /* PocketPC USB Sync */
{ USB_DEVICE(0x0BB4, 0x0A4E) }, /* PocketPC USB Sync */
{ USB_DEVICE(0x0BB4, 0x0A4F) }, /* PocketPC USB Sync */
{ USB_DEVICE(0x0BB4, 0x0A50) }, /* HTC SmartPhone USB Sync */
{ USB_DEVICE(0x0BB4, 0x0A51) }, /* SmartPhone USB Sync */
{ USB_DEVICE(0x0BB4, 0x0A52) }, /* SmartPhone USB Sync */
{ USB_DEVICE(0x0BB4, 0x0A53) }, /* SmartPhone USB Sync */
{ USB_DEVICE(0x0BB4, 0x0A54) }, /* SmartPhone USB Sync */
{ USB_DEVICE(0x0BB4, 0x0A55) }, /* SmartPhone USB Sync */
{ USB_DEVICE(0x0BB4, 0x0A56) }, /* SmartPhone USB Sync */
{ USB_DEVICE(0x0BB4, 0x0A57) }, /* SmartPhone USB Sync */
{ USB_DEVICE(0x0BB4, 0x0A58) }, /* SmartPhone USB Sync */
{ USB_DEVICE(0x0BB4, 0x0A59) }, /* SmartPhone USB Sync */
{ USB_DEVICE(0x0BB4, 0x0A5A) }, /* SmartPhone USB Sync */
{ USB_DEVICE(0x0BB4, 0x0A5B) }, /* SmartPhone USB Sync */
{ USB_DEVICE(0x0BB4, 0x0A5C) }, /* SmartPhone USB Sync */
{ USB_DEVICE(0x0BB4, 0x0A5D) }, /* SmartPhone USB Sync */
{ USB_DEVICE(0x0BB4, 0x0A5E) }, /* SmartPhone USB Sync */
{ USB_DEVICE(0x0BB4, 0x0A5F) }, /* SmartPhone USB Sync */
{ USB_DEVICE(0x0BB4, 0x0A60) }, /* SmartPhone USB Sync */
{ USB_DEVICE(0x0BB4, 0x0A61) }, /* SmartPhone USB Sync */
{ USB_DEVICE(0x0BB4, 0x0A62) }, /* SmartPhone USB Sync */
{ USB_DEVICE(0x0BB4, 0x0A63) }, /* SmartPhone USB Sync */
{ USB_DEVICE(0x0BB4, 0x0A64) }, /* SmartPhone USB Sync */
{ USB_DEVICE(0x0BB4, 0x0A65) }, /* SmartPhone USB Sync */
{ USB_DEVICE(0x0BB4, 0x0A66) }, /* SmartPhone USB Sync */
{ USB_DEVICE(0x0BB4, 0x0A67) }, /* SmartPhone USB Sync */
{ USB_DEVICE(0x0BB4, 0x0A68) }, /* SmartPhone USB Sync */
{ USB_DEVICE(0x0BB4, 0x0A69) }, /* SmartPhone USB Sync */
{ USB_DEVICE(0x0BB4, 0x0A6A) }, /* SmartPhone USB Sync */
{ USB_DEVICE(0x0BB4, 0x0A6B) }, /* SmartPhone USB Sync */
{ USB_DEVICE(0x0BB4, 0x0A6C) }, /* SmartPhone USB Sync */
{ USB_DEVICE(0x0BB4, 0x0A6D) }, /* SmartPhone USB Sync */
{ USB_DEVICE(0x0BB4, 0x0A6E) }, /* SmartPhone USB Sync */
{ USB_DEVICE(0x0BB4, 0x0A6F) }, /* SmartPhone USB Sync */
{ USB_DEVICE(0x0BB4, 0x0A70) }, /* SmartPhone USB Sync */
{ USB_DEVICE(0x0BB4, 0x0A71) }, /* SmartPhone USB Sync */
{ USB_DEVICE(0x0BB4, 0x0A72) }, /* SmartPhone USB Sync */
{ USB_DEVICE(0x0BB4, 0x0A73) }, /* SmartPhone USB Sync */
{ USB_DEVICE(0x0BB4, 0x0A74) }, /* SmartPhone USB Sync */
{ USB_DEVICE(0x0BB4, 0x0A75) }, /* SmartPhone USB Sync */
{ USB_DEVICE(0x0BB4, 0x0A76) }, /* SmartPhone USB Sync */
{ USB_DEVICE(0x0BB4, 0x0A77) }, /* SmartPhone USB Sync */
{ USB_DEVICE(0x0BB4, 0x0A78) }, /* SmartPhone USB Sync */
{ USB_DEVICE(0x0BB4, 0x0A79) }, /* SmartPhone USB Sync */
{ USB_DEVICE(0x0BB4, 0x0A7A) }, /* SmartPhone USB Sync */
{ USB_DEVICE(0x0BB4, 0x0A7B) }, /* SmartPhone USB Sync */
{ USB_DEVICE(0x0BB4, 0x0A7C) }, /* SmartPhone USB Sync */
{ USB_DEVICE(0x0BB4, 0x0A7D) }, /* SmartPhone USB Sync */
{ USB_DEVICE(0x0BB4, 0x0A7E) }, /* SmartPhone USB Sync */
{ USB_DEVICE(0x0BB4, 0x0A7F) }, /* SmartPhone USB Sync */
{ USB_DEVICE(0x0BB4, 0x0A80) }, /* SmartPhone USB Sync */
{ USB_DEVICE(0x0BB4, 0x0A81) }, /* SmartPhone USB Sync */
{ USB_DEVICE(0x0BB4, 0x0A82) }, /* SmartPhone USB Sync */
{ USB_DEVICE(0x0BB4, 0x0A83) }, /* SmartPhone USB Sync */
{ USB_DEVICE(0x0BB4, 0x0A84) }, /* SmartPhone USB Sync */
{ USB_DEVICE(0x0BB4, 0x0A85) }, /* SmartPhone USB Sync */
{ USB_DEVICE(0x0BB4, 0x0A86) }, /* SmartPhone USB Sync */
{ USB_DEVICE(0x0BB4, 0x0A87) }, /* SmartPhone USB Sync */
{ USB_DEVICE(0x0BB4, 0x0A88) }, /* SmartPhone USB Sync */
{ USB_DEVICE(0x0BB4, 0x0A89) }, /* SmartPhone USB Sync */
{ USB_DEVICE(0x0BB4, 0x0A8A) }, /* SmartPhone USB Sync */
{ USB_DEVICE(0x0BB4, 0x0A8B) }, /* SmartPhone USB Sync */
{ USB_DEVICE(0x0BB4, 0x0A8C) }, /* SmartPhone USB Sync */
{ USB_DEVICE(0x0BB4, 0x0A8D) }, /* SmartPhone USB Sync */
{ USB_DEVICE(0x0BB4, 0x0A8E) }, /* SmartPhone USB Sync */
{ USB_DEVICE(0x0BB4, 0x0A8F) }, /* SmartPhone USB Sync */
{ USB_DEVICE(0x0BB4, 0x0A90) }, /* SmartPhone USB Sync */
{ USB_DEVICE(0x0BB4, 0x0A91) }, /* SmartPhone USB Sync */
{ USB_DEVICE(0x0BB4, 0x0A92) }, /* SmartPhone USB Sync */
{ USB_DEVICE(0x0BB4, 0x0A93) }, /* SmartPhone USB Sync */
{ USB_DEVICE(0x0BB4, 0x0A94) }, /* SmartPhone USB Sync */
{ USB_DEVICE(0x0BB4, 0x0A95) }, /* SmartPhone USB Sync */
{ USB_DEVICE(0x0BB4, 0x0A96) }, /* SmartPhone USB Sync */
{ USB_DEVICE(0x0BB4, 0x0A97) }, /* SmartPhone USB Sync */
{ USB_DEVICE(0x0BB4, 0x0A98) }, /* SmartPhone USB Sync */
{ USB_DEVICE(0x0BB4, 0x0A99) }, /* SmartPhone USB Sync */
{ USB_DEVICE(0x0BB4, 0x0A9A) }, /* SmartPhone USB Sync */
{ USB_DEVICE(0x0BB4, 0x0A9B) }, /* SmartPhone USB Sync */
{ USB_DEVICE(0x0BB4, 0x0A9C) }, /* SmartPhone USB Sync */
{ USB_DEVICE(0x0BB4, 0x0A9D) }, /* SmartPhone USB Sync */
{ USB_DEVICE(0x0BB4, 0x0A9E) }, /* SmartPhone USB Sync */
{ USB_DEVICE(0x0BB4, 0x0A9F) }, /* SmartPhone USB Sync */
{ USB_DEVICE(0x0BB4, 0x0BCE) }, /* "High Tech Computer Corp" */
{ USB_DEVICE(0x0BF8, 0x1001) }, /* Fujitsu Siemens Computers USB Sync */
{ USB_DEVICE(0x0C44, 0x03A2) }, /* Motorola iDEN Smartphone */
{ USB_DEVICE(0x0C8E, 0x6000) }, /* Cesscom Luxian Series */
{ USB_DEVICE(0x0CAD, 0x9001) }, /* Motorola PowerPad Pocket PC Device */
{ USB_DEVICE(0x0F4E, 0x0200) }, /* Freedom Scientific USB Sync */
{ USB_DEVICE(0x0F98, 0x0201) }, /* Cyberbank USB Sync */
{ USB_DEVICE(0x0FB8, 0x3001) }, /* Wistron USB Sync */
{ USB_DEVICE(0x0FB8, 0x3002) }, /* Wistron USB Sync */
{ USB_DEVICE(0x0FB8, 0x3003) }, /* Wistron USB Sync */
{ USB_DEVICE(0x0FB8, 0x4001) }, /* Wistron USB Sync */
{ USB_DEVICE(0x1066, 0x00CE) }, /* E-TEN USB Sync */
{ USB_DEVICE(0x1066, 0x0300) }, /* E-TEN P3XX Pocket PC */
{ USB_DEVICE(0x1066, 0x0500) }, /* E-TEN P5XX Pocket PC */
{ USB_DEVICE(0x1066, 0x0600) }, /* E-TEN P6XX Pocket PC */
{ USB_DEVICE(0x1066, 0x0700) }, /* E-TEN P7XX Pocket PC */
{ USB_DEVICE(0x1114, 0x0001) }, /* Psion Teklogix Sync 753x */
{ USB_DEVICE(0x1114, 0x0004) }, /* Psion Teklogix Sync netBookPro */
{ USB_DEVICE(0x1114, 0x0006) }, /* Psion Teklogix Sync 7525 */
{ USB_DEVICE(0x1182, 0x1388) }, /* VES USB Sync */
{ USB_DEVICE(0x11D9, 0x1002) }, /* Rugged Pocket PC 2003 */
{ USB_DEVICE(0x11D9, 0x1003) }, /* Rugged Pocket PC 2003 */
{ USB_DEVICE(0x1231, 0xCE01) }, /* USB Sync 03 */
{ USB_DEVICE(0x1231, 0xCE02) }, /* USB Sync 03 */
{ USB_DEVICE(0x1690, 0x0601) }, /* Askey USB Sync */
{ USB_DEVICE(0x22B8, 0x4204) }, /* Motorola MPx200 Smartphone */
{ USB_DEVICE(0x22B8, 0x4214) }, /* Motorola MPc GSM */
{ USB_DEVICE(0x22B8, 0x4224) }, /* Motorola MPx220 Smartphone */
{ USB_DEVICE(0x22B8, 0x4234) }, /* Motorola MPc CDMA */
{ USB_DEVICE(0x22B8, 0x4244) }, /* Motorola MPx100 Smartphone */
{ USB_DEVICE(0x3340, 0x011C) }, /* Mio DigiWalker PPC StrongARM */
{ USB_DEVICE(0x3340, 0x0326) }, /* Mio DigiWalker 338 */
{ USB_DEVICE(0x3340, 0x0426) }, /* Mio DigiWalker 338 */
{ USB_DEVICE(0x3340, 0x043A) }, /* Mio DigiWalker USB Sync */
{ USB_DEVICE(0x3340, 0x051C) }, /* MiTAC USB Sync 528 */
{ USB_DEVICE(0x3340, 0x053A) }, /* Mio DigiWalker SmartPhone USB Sync */
{ USB_DEVICE(0x3340, 0x071C) }, /* MiTAC USB Sync */
{ USB_DEVICE(0x3340, 0x0B1C) }, /* Generic PPC StrongARM */
{ USB_DEVICE(0x3340, 0x0E3A) }, /* Generic PPC USB Sync */
{ USB_DEVICE(0x3340, 0x0F1C) }, /* Itautec USB Sync */
{ USB_DEVICE(0x3340, 0x0F3A) }, /* Generic SmartPhone USB Sync */
{ USB_DEVICE(0x3340, 0x1326) }, /* Itautec USB Sync */
{ USB_DEVICE(0x3340, 0x191C) }, /* YAKUMO USB Sync */
{ USB_DEVICE(0x3340, 0x2326) }, /* Vobis USB Sync */
{ USB_DEVICE(0x3340, 0x3326) }, /* MEDION Windows Mobile USB Sync */
{ USB_DEVICE(0x3708, 0x20CE) }, /* Legend USB Sync */
{ USB_DEVICE(0x3708, 0x21CE) }, /* Lenovo USB Sync */
{ USB_DEVICE(0x4113, 0x0210) }, /* Mobile Media Technology USB Sync */
{ USB_DEVICE(0x4113, 0x0211) }, /* Mobile Media Technology USB Sync */
{ USB_DEVICE(0x4113, 0x0400) }, /* Mobile Media Technology USB Sync */
{ USB_DEVICE(0x4113, 0x0410) }, /* Mobile Media Technology USB Sync */
{ USB_DEVICE(0x413C, 0x4001) }, /* Dell Axim USB Sync */
{ USB_DEVICE(0x413C, 0x4002) }, /* Dell Axim USB Sync */
{ USB_DEVICE(0x413C, 0x4003) }, /* Dell Axim USB Sync */
{ USB_DEVICE(0x413C, 0x4004) }, /* Dell Axim USB Sync */
{ USB_DEVICE(0x413C, 0x4005) }, /* Dell Axim USB Sync */
{ USB_DEVICE(0x413C, 0x4006) }, /* Dell Axim USB Sync */
{ USB_DEVICE(0x413C, 0x4007) }, /* Dell Axim USB Sync */
{ USB_DEVICE(0x413C, 0x4008) }, /* Dell Axim USB Sync */
{ USB_DEVICE(0x413C, 0x4009) }, /* Dell Axim USB Sync */
{ USB_DEVICE(0x4505, 0x0010) }, /* Smartphone */
{ USB_DEVICE(0x5E04, 0xCE00) }, /* SAGEM Wireless Assistant */
{ } /* Terminating entry */
};
MODULE_DEVICE_TABLE(usb, ipaq_id_table);
/* USB-level driver; generic usb-serial core handles probe/disconnect. */
static struct usb_driver ipaq_driver = {
.name = "ipaq",
.probe = usb_serial_probe,
.disconnect = usb_serial_disconnect,
.id_table = ipaq_id_table,
};
/* All of the device info needed for the Compaq iPAQ */
static struct usb_serial_driver ipaq_device = {
.driver = {
.owner = THIS_MODULE,
.name = "ipaq",
},
.description = "PocketPC PDA",
.id_table = ipaq_id_table,
/* Fixed 256-byte bulk buffers for both directions. */
.bulk_in_size = 256,
.bulk_out_size = 256,
.open = ipaq_open,
.attach = ipaq_startup,
.calc_num_ports = ipaq_calc_num_ports,
};
/* NULL-terminated list passed to usb_serial_register_drivers(). */
static struct usb_serial_driver * const serial_drivers[] = {
&ipaq_device, NULL
};
/*
 * Open a port: kick the device with the magic control request it needs
 * before it will start its chat sequence, then defer to the generic
 * usb-serial open.
 *
 * Returns 0 on success or a negative USB error code.
 */
static int ipaq_open(struct tty_struct *tty,
			struct usb_serial_port *port)
{
	struct usb_serial *serial = port->serial;
	int result = 0;
	int retries = connect_retries;

	dbg("%s - port %d", __func__, port->number);

	/* Optional user-configured settle time (seconds) before first I/O. */
	msleep(1000*initial_wait);

	/*
	 * Send out control message observed in win98 sniffs. Not sure what
	 * it does, but from empirical observations, it seems that the device
	 * will start the chat sequence once one of these messages gets
	 * through. Since this has a reasonably high failure rate, we retry
	 * several times.
	 */
	while (retries--) {
		result = usb_control_msg(serial->dev,
			usb_sndctrlpipe(serial->dev, 0), 0x22, 0x21,
			0x1, 0, NULL, 0, 100);
		if (!result)
			break;
		msleep(1000);
	}

	/*
	 * result is 0 only if one of the attempts succeeded (the break
	 * path). The previous test "if (!retries && result)" could never
	 * fire: when the loop exhausts, the post-decrement leaves
	 * retries == -1, so failures were silently ignored and we went on
	 * to open the port anyway.
	 */
	if (result) {
		dev_err(&port->dev, "%s - failed doing control urb, error %d\n",
			__func__, result);
		return result;
	}

	return usb_serial_generic_open(tty, port);
}
/*
 * Decide how many ports to create for this interface.
 *
 * Returns 1 for the common case; 2 when the interface exposes more than
 * three endpoints (seemingly Yakumo devices, which carry two endpoint
 * pairs and need the second one usable as a port).
 */
static int ipaq_calc_num_ports(struct usb_serial *serial)
{
	/*
	 * some devices have 3 endpoints, the 3rd of which
	 * must be ignored as it would make the core
	 * create a second port which oopses when used
	 */
	int ipaq_num_ports = 1;

	/* Use C99 __func__ like the rest of this file, not __FUNCTION__. */
	dbg("%s - numberofendpoints: %d", __func__,
		(int)serial->interface->cur_altsetting->desc.bNumEndpoints);

	/*
	 * a few devices have 4 endpoints, seemingly Yakumo devices,
	 * and we need the second pair, so let them have 2 ports
	 *
	 * TODO: can we drop port 1 ?
	 */
	if (serial->interface->cur_altsetting->desc.bNumEndpoints > 3)
		ipaq_num_ports = 2;

	return ipaq_num_ports;
}
/*
 * Called by the usb-serial core when binding to an interface.  Rejects
 * interfaces that cannot be a sync port (too few bulk endpoints, or a
 * non-default active configuration), then resets the configuration so
 * the device starts from a known state.
 *
 * Returns 0 on success, -ENODEV for unusable interfaces, or the error
 * from usb_reset_configuration().
 */
static int ipaq_startup(struct usb_serial *serial)
{
	dbg("%s", __func__);

	/* Some of the devices in ipaq_id_table[] are composite, and we
	 * shouldn't bind to all the interfaces. This test will rule out
	 * some obviously invalid possibilities.
	 */
	if (serial->num_bulk_in < serial->num_ports ||
			serial->num_bulk_out < serial->num_ports)
		return -ENODEV;

	if (serial->dev->actconfig->desc.bConfigurationValue != 1) {
		/*
		 * FIXME: HP iPaq rx3715, possibly others, have 1 config that
		 * is labeled as 2
		 */
		dev_err(&serial->dev->dev, "active config #%d != 1 ??\n",
			serial->dev->actconfig->desc.bConfigurationValue);
		return -ENODEV;
	}

	/* Use C99 __func__ like the rest of this file, not __FUNCTION__. */
	dbg("%s - iPAQ module configured for %d ports",
		__func__, serial->num_ports);

	return usb_reset_configuration(serial->dev);
}
/*
 * Module init: optionally patch the placeholder first entry of
 * ipaq_id_table[] with a user-supplied VID/PID, then register with the
 * usb-serial core.  Logs a banner only on successful registration.
 */
static int __init ipaq_init(void)
{
	int ret;

	if (vendor) {
		ipaq_id_table[0].idVendor = vendor;
		ipaq_id_table[0].idProduct = product;
	}

	ret = usb_serial_register_drivers(&ipaq_driver, serial_drivers);
	if (ret)
		return ret;

	printk(KERN_INFO KBUILD_MODNAME ": " DRIVER_VERSION ":"
	       DRIVER_DESC "\n");
	return 0;
}
/* Module unload: unregister the usb-serial driver pair registered in
 * ipaq_init(). */
static void __exit ipaq_exit(void)
{
    usb_serial_deregister_drivers(&ipaq_driver, serial_drivers);
}
module_init(ipaq_init);
module_exit(ipaq_exit);

MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");

/* debug: runtime-toggleable (sysfs-writable) tracing via dbg() */
module_param(debug, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(debug, "Debug enabled or not");

/* vendor/product: optional id override consumed by ipaq_init() */
module_param(vendor, ushort, 0);
MODULE_PARM_DESC(vendor, "User specified USB idVendor");
module_param(product, ushort, 0);
MODULE_PARM_DESC(product, "User specified USB idProduct");

/* open-path retry/wait tuning (presumably used by the open retry loop
 * earlier in this file — TODO confirm against ipaq_open) */
module_param(connect_retries, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(connect_retries,
        "Maximum number of connect retries (one second each)");
module_param(initial_wait, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(initial_wait,
        "Time to wait before attempting a connection (in seconds)");
| gpl-2.0 |
virt2real/v2r_kernel | drivers/media/dvb/frontends/nxt200x.c | 4745 | 30282 | /*
* Support for NXT2002 and NXT2004 - VSB/QAM
*
* Copyright (C) 2005 Kirk Lapray <kirk.lapray@gmail.com>
* Copyright (C) 2006 Michael Krufky <mkrufky@m1k.net>
* based on nxt2002 by Taylor Jacob <rtjacob@earthlink.net>
* and nxt2004 by Jean-Francois Thibert <jeanfrancois@sagetv.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*
*/
/*
* NOTES ABOUT THIS DRIVER
*
* This Linux driver supports:
* B2C2/BBTI Technisat Air2PC - ATSC (NXT2002)
* AverTVHD MCE A180 (NXT2004)
* ATI HDTV Wonder (NXT2004)
*
* This driver needs external firmware. Please use the command
* "<kerneldir>/Documentation/dvb/get_dvb_firmware nxt2002" or
* "<kerneldir>/Documentation/dvb/get_dvb_firmware nxt2004" to
* download/extract the appropriate firmware, and then copy it to
* /usr/lib/hotplug/firmware/ or /lib/firmware/
* (depending on configuration of firmware hotplug).
*/
#define NXT2002_DEFAULT_FIRMWARE "dvb-fe-nxt2002.fw"
#define NXT2004_DEFAULT_FIRMWARE "dvb-fe-nxt2004.fw"
#define CRC_CCIT_MASK 0x1021
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/string.h>
#include "dvb_frontend.h"
#include "nxt200x.h"
/* Per-frontend driver state, allocated in nxt200x_attach(). */
struct nxt200x_state {
    struct i2c_adapter* i2c;             /* host i2c bus the demod sits on */
    const struct nxt200x_config* config; /* board config: demod addr, callbacks */
    struct dvb_frontend frontend;        /* embedded frontend handed to the core */

    /* demodulator private data */
    nxt_chip_type demod_chip;            /* NXT2002 or NXT2004 */
    u8 initialised:1;                    /* set once firmware/init has run */
};

static int debug;    /* module parameter: enables dprintk() output */
#define dprintk(args...) \
    do { \
        if (debug) printk(KERN_DEBUG "nxt200x: " args); \
    } while (0)
/* Raw i2c write: send @len bytes from @buf to bus address @addr.
 * Returns 0 on success, -EREMOTEIO on any transfer failure. */
static int i2c_writebytes (struct nxt200x_state* state, u8 addr, u8 *buf, u8 len)
{
    struct i2c_msg msg = { .addr = addr, .flags = 0, .buf = buf, .len = len };
    int ret;

    ret = i2c_transfer(state->i2c, &msg, 1);
    if (ret != 1) {
        printk (KERN_WARNING "nxt200x: %s: i2c write error (addr 0x%02x, err == %i)\n",
            __func__, addr, ret);
        return -EREMOTEIO;
    }
    return 0;
}
/* Raw i2c read: fetch @len bytes into @buf from bus address @addr.
 * Returns 0 on success, -EREMOTEIO on any transfer failure. */
static int i2c_readbytes(struct nxt200x_state *state, u8 addr, u8 *buf, u8 len)
{
    struct i2c_msg msg = { .addr = addr, .flags = I2C_M_RD, .buf = buf, .len = len };
    int ret;

    ret = i2c_transfer(state->i2c, &msg, 1);
    if (ret != 1) {
        printk (KERN_WARNING "nxt200x: %s: i2c read error (addr 0x%02x, err == %i)\n",
            __func__, addr, ret);
        return -EREMOTEIO;
    }
    return 0;
}
/*
 * Write @len bytes from @buf to demod register @reg: the register index
 * is prepended and the whole thing sent in one i2c message.
 * Returns 0 on success, -EREMOTEIO on transfer failure.
 *
 * Fix: the original used a variable-length array (u8 buf2[len+1]) on
 * the kernel stack; VLAs are disallowed in kernel code. Since @len is
 * a u8, len + 1 can never exceed 256, so a fixed buffer always fits.
 */
static int nxt200x_writebytes (struct nxt200x_state* state, u8 reg,
                   const u8 *buf, u8 len)
{
    u8 buf2[256];   /* len is u8, so len + 1 <= 256 always fits */
    int err;
    struct i2c_msg msg = { .addr = state->config->demod_address, .flags = 0, .buf = buf2, .len = len + 1 };

    buf2[0] = reg;
    memcpy(&buf2[1], buf, len);

    if ((err = i2c_transfer (state->i2c, &msg, 1)) != 1) {
        printk (KERN_WARNING "nxt200x: %s: i2c write error (addr 0x%02x, err == %i)\n",
            __func__, state->config->demod_address, err);
        return -EREMOTEIO;
    }
    return 0;
}
/* Read @len bytes from demod register @reg: write the register index,
 * then read back in a combined two-message transfer. */
static int nxt200x_readbytes(struct nxt200x_state *state, u8 reg, u8 *buf, u8 len)
{
    u8 regbuf[1] = { reg };
    struct i2c_msg msg[] = {
        { .addr = state->config->demod_address, .flags = 0, .buf = regbuf, .len = 1 },
        { .addr = state->config->demod_address, .flags = I2C_M_RD, .buf = buf, .len = len }
    };
    int ret;

    ret = i2c_transfer(state->i2c, msg, 2);
    if (ret != 2) {
        printk (KERN_WARNING "nxt200x: %s: i2c read error (addr 0x%02x, err == %i)\n",
            __func__, state->config->demod_address, ret);
        return -EREMOTEIO;
    }
    return 0;
}
/* Fold one byte into a CRC-16/CCITT accumulator (polynomial 0x1021,
 * MSB-first bit order), as used by the firmware download protocol. */
static u16 nxt200x_crc(u16 crc, u8 c)
{
    u16 input = ((u16) c & 0xFF) << 8;
    u8 bit;

    for (bit = 0; bit < 8; bit++) {
        crc = ((crc ^ input) & 0x8000) ? (u16)((crc << 1) ^ CRC_CCIT_MASK)
                           : (u16)(crc << 1);
        input <<= 1;
    }
    return crc;
}
/*
 * Write @len bytes to "attribute"/multibyte register @reg through the
 * indirect window: 0x35 selects the register, 0x36 holds the data,
 * 0x34 the length/attribute byte, and 0x21 kicks off and reports the
 * transfer.  On NXT2002 completion is signalled by bit 1 of 0x21
 * clearing; on NXT2004 by 0x21 reading back zero.
 */
static int nxt200x_writereg_multibyte (struct nxt200x_state* state, u8 reg, u8* data, u8 len)
{
    u8 attr, len2, buf;
    dprintk("%s\n", __func__);

    /* set multi register register */
    nxt200x_writebytes(state, 0x35, &reg, 1);

    /* send the actual data */
    nxt200x_writebytes(state, 0x36, data, len);

    switch (state->demod_chip) {
    case NXT2002:
        len2 = len;
        buf = 0x02;
        break;
    case NXT2004:
        /* probably not right, but gives correct values */
        attr = 0x02;
        if (reg & 0x80) {
            attr = attr << 1;
            if (reg & 0x04)
                attr = attr >> 1;
        }
        /* set write bit */
        len2 = ((attr << 4) | 0x10) | len;
        buf = 0x80;
        break;
    default:
        return -EINVAL;
        break;
    }

    /* set multi register length */
    nxt200x_writebytes(state, 0x34, &len2, 1);

    /* toggle the multireg write bit */
    nxt200x_writebytes(state, 0x21, &buf, 1);

    nxt200x_readbytes(state, 0x21, &buf, 1);

    switch (state->demod_chip) {
    case NXT2002:
        if ((buf & 0x02) == 0)
            return 0;
        break;
    case NXT2004:
        if (buf == 0)
            return 0;
        break;
    default:
        return -EINVAL;
        break;
    }

    /* NOTE(review): a failed handshake is only logged; callers still
     * see success (0).  Left as-is pending hardware verification. */
    printk(KERN_WARNING "nxt200x: Error writing multireg register 0x%02X\n",reg);
    return 0;
}
/*
 * Read @len bytes from "attribute"/multibyte register @reg.  NXT2002
 * reads straight through the normal register path after arming 0x34;
 * NXT2004 arms the indirect window (0x34/0x21) and reads the data back
 * one byte at a time from 0x36..0x36+len-1.
 */
static int nxt200x_readreg_multibyte (struct nxt200x_state* state, u8 reg, u8* data, u8 len)
{
    int i;
    u8 buf, len2, attr;
    dprintk("%s\n", __func__);

    /* set multi register register */
    nxt200x_writebytes(state, 0x35, &reg, 1);

    switch (state->demod_chip) {
    case NXT2002:
        /* set multi register length */
        /* NOTE(review): 'len & 0x80' zeroes the length byte for any
         * len < 0x80, which looks suspicious ('len' or 'len | 0x80'
         * would be expected) — left untouched pending verification on
         * real hardware. */
        len2 = len & 0x80;
        nxt200x_writebytes(state, 0x34, &len2, 1);

        /* read the actual data */
        nxt200x_readbytes(state, reg, data, len);
        return 0;
        break;
    case NXT2004:
        /* probably not right, but gives correct values */
        attr = 0x02;
        if (reg & 0x80) {
            attr = attr << 1;
            if (reg & 0x04)
                attr = attr >> 1;
        }

        /* set multi register length */
        len2 = (attr << 4) | len;
        nxt200x_writebytes(state, 0x34, &len2, 1);

        /* toggle the multireg bit*/
        buf = 0x80;
        nxt200x_writebytes(state, 0x21, &buf, 1);

        /* read the actual data */
        for(i = 0; i < len; i++) {
            nxt200x_readbytes(state, 0x36 + i, &data[i], 1);
        }
        return 0;
        break;
    default:
        return -EINVAL;
        break;
    }
}
/* Ask the on-chip microcontroller to halt (write 0x80 to reg 0x22) and
 * poll reg 0x31 for the chip-specific "stopped" status bit.  Gives up
 * after 20 x 10ms; a timeout is expected right after firmware upload. */
static void nxt200x_microcontroller_stop (struct nxt200x_state* state)
{
    u8 tmp, stopval;
    int tries;

    dprintk("%s\n", __func__);

    /* select the chip-specific stopped-status bit */
    switch (state->demod_chip) {
    case NXT2002:
        stopval = 0x40;
        break;
    case NXT2004:
        stopval = 0x10;
        break;
    default:
        stopval = 0;
        break;
    }

    tmp = 0x80;
    nxt200x_writebytes(state, 0x22, &tmp, 1);

    for (tries = 0; tries < 20; tries++) {
        nxt200x_readbytes(state, 0x31, &tmp, 1);
        if (tmp & stopval)
            return;
        msleep(10);
    }
    printk(KERN_WARNING "nxt200x: Timeout waiting for nxt200x to stop. This is ok after firmware upload.\n");
}
/* Release the on-chip microcontroller from the halted state by
 * clearing control register 0x22. */
static void nxt200x_microcontroller_start (struct nxt200x_state* state)
{
    u8 run = 0x00;

    dprintk("%s\n", __func__);
    nxt200x_writebytes(state, 0x22, &run, 1);
}
/* NXT2004 micro init: program the indirect window (0x2b/0x34/0x35),
 * push a fixed 9-byte key sequence into 0x36, kick the transfer via
 * 0x21 and poll it (20 x 10ms) until it reads back zero. */
static void nxt2004_microcontroller_init (struct nxt200x_state* state)
{
    static const u8 initkey[9] = { 0x01, 0x23, 0x45, 0x67, 0x89,
                       0xAB, 0xCD, 0xEF, 0xC0 };
    u8 b[9];
    int tries;

    dprintk("%s\n", __func__);

    b[0] = 0x00;
    nxt200x_writebytes(state, 0x2b, b, 1);
    b[0] = 0x70;
    nxt200x_writebytes(state, 0x34, b, 1);
    b[0] = 0x04;
    nxt200x_writebytes(state, 0x35, b, 1);

    memcpy(b, initkey, sizeof(initkey));
    nxt200x_writebytes(state, 0x36, b, 9);

    b[0] = 0x80;
    nxt200x_writebytes(state, 0x21, b, 1);

    for (tries = 0; tries < 20; tries++) {
        nxt200x_readbytes(state, 0x21, b, 1);
        if (b[0] == 0)
            return;
        msleep(10);
    }
    printk(KERN_WARNING "nxt200x: Timeout waiting for nxt2004 to init.\n");
}
/*
 * Program the tuner PLL from the 5-byte buffer produced by
 * tuner_ops.calc_regs(): data[0] is the tuner i2c address, data[1..4]
 * the divider/control bytes.  NXT2004 writes the tuner directly and
 * polls the tuner status byte for the lock flag (bit 6); NXT2002
 * proxies the write through the demod's tuner-i2c engine and polls
 * reg 0x21 until the busy bit (0x80) clears.  Poll timeouts are only
 * logged; the function still returns 0 in that case.
 */
static int nxt200x_writetuner (struct nxt200x_state* state, u8* data)
{
    u8 buf, count = 0;

    dprintk("%s\n", __func__);

    dprintk("Tuner Bytes: %02X %02X %02X %02X\n", data[1], data[2], data[3], data[4]);

    /* if NXT2004, write directly to tuner. if NXT2002, write through NXT chip.
     * direct write is required for Philips TUV1236D and ALPS TDHU2 */
    switch (state->demod_chip) {
    case NXT2004:
        if (i2c_writebytes(state, data[0], data+1, 4))
            printk(KERN_WARNING "nxt200x: error writing to tuner\n");
        /* wait until we have a lock */
        while (count < 20) {
            i2c_readbytes(state, data[0], &buf, 1);
            if (buf & 0x40)
                return 0;
            msleep(100);
            count++;
        }
        printk("nxt2004: timeout waiting for tuner lock\n");
        break;
    case NXT2002:
        /* set the i2c transfer speed to the tuner */
        buf = 0x03;
        nxt200x_writebytes(state, 0x20, &buf, 1);

        /* setup to transfer 4 bytes via i2c */
        buf = 0x04;
        nxt200x_writebytes(state, 0x34, &buf, 1);

        /* write actual tuner bytes */
        nxt200x_writebytes(state, 0x36, data+1, 4);

        /* set tuner i2c address (shifted into 8-bit addressing form) */
        buf = data[0] << 1;
        nxt200x_writebytes(state, 0x35, &buf, 1);

        /* write UC Opmode to begin transfer */
        buf = 0x80;
        nxt200x_writebytes(state, 0x21, &buf, 1);

        while (count < 20) {
            nxt200x_readbytes(state, 0x21, &buf, 1);
            if ((buf & 0x80)== 0x00)
                return 0;
            msleep(100);
            count++;
        }
        printk("nxt2002: timeout error writing tuner\n");
        break;
    default:
        return -EINVAL;
        break;
    }
    return 0;
}
/* Pulse the AGC reset bit (0x08 in register 0x08).  NXT2004 accesses
 * the register through the multibyte path and needs a priming read. */
static void nxt200x_agc_reset(struct nxt200x_state* state)
{
    u8 val;

    dprintk("%s\n", __func__);

    switch (state->demod_chip) {
    case NXT2002:
        val = 0x08;
        nxt200x_writebytes(state, 0x08, &val, 1);
        val = 0x00;
        nxt200x_writebytes(state, 0x08, &val, 1);
        break;
    case NXT2004:
        nxt200x_readreg_multibyte(state, 0x08, &val, 1);
        val = 0x08;
        nxt200x_writereg_multibyte(state, 0x08, &val, 1);
        val = 0x00;
        nxt200x_writereg_multibyte(state, 0x08, &val, 1);
        break;
    default:
        break;
    }
}
/*
 * Upload @fw to the NXT2002 in 255-byte chunks, each followed by its
 * CRC-16/CCITT.  The RAM base is probed from register 0x10 first.
 *
 * Fix: the CRC high byte was written as 'buf[0] = crc << 8', which
 * truncates to 0 when stored into a u8, so the high byte was never
 * sent.  nxt2004_load_firmware() correctly uses 'crc >> 8'; this now
 * matches.
 *
 * NOTE(review): 'position' is u16 while fw->size is size_t; images of
 * 64KiB or more would loop forever.  Real firmware is far smaller —
 * TODO confirm and add a size guard if oversized images are possible.
 */
static int nxt2002_load_firmware (struct dvb_frontend* fe, const struct firmware *fw)
{
    struct nxt200x_state* state = fe->demodulator_priv;
    u8 buf[3], written = 0, chunkpos = 0;
    u16 rambase, position, crc = 0;

    dprintk("%s\n", __func__);
    dprintk("Firmware is %zu bytes\n", fw->size);

    /* Get the RAM base for this nxt2002 */
    nxt200x_readbytes(state, 0x10, buf, 1);

    if (buf[0] & 0x10)
        rambase = 0x1000;
    else
        rambase = 0x0000;

    dprintk("rambase on this nxt2002 is %04X\n", rambase);

    /* Hold the micro in reset while loading firmware */
    buf[0] = 0x80;
    nxt200x_writebytes(state, 0x2B, buf, 1);

    for (position = 0; position < fw->size; position++) {
        if (written == 0) {
            crc = 0;
            chunkpos = 0x28;
            buf[0] = ((rambase + position) >> 8);
            buf[1] = (rambase + position) & 0xFF;
            buf[2] = 0x81;
            /* write starting address */
            nxt200x_writebytes(state, 0x29, buf, 3);
        }
        written++;
        chunkpos++;

        if ((written % 4) == 0)
            nxt200x_writebytes(state, chunkpos, &fw->data[position-3], 4);

        crc = nxt200x_crc(crc, fw->data[position]);

        if ((written == 255) || (position+1 == fw->size)) {
            /* write remaining bytes of firmware */
            nxt200x_writebytes(state, chunkpos+4-(written %4),
                &fw->data[position-(written %4) + 1],
                written %4);
            buf[0] = crc >> 8;    /* was 'crc << 8': always 0 in a u8 */
            buf[1] = crc & 0xFF;

            /* write crc */
            nxt200x_writebytes(state, 0x2C, buf, 2);

            /* do a read to stop things */
            nxt200x_readbytes(state, 0x2A, buf, 1);

            /* set transfer mode to complete */
            buf[0] = 0x80;
            nxt200x_writebytes(state, 0x2B, buf, 1);

            written = 0;
        }
    }

    return 0;
}
/* Upload @fw to the NXT2004: hold the micro in reset, pre-compute the
 * CRC-16/CCITT over the whole image, write the start address, stream
 * the image in <=255-byte chunks through 0x2C, then append the CRC. */
static int nxt2004_load_firmware (struct dvb_frontend* fe, const struct firmware *fw)
{
    struct nxt200x_state* state = fe->demodulator_priv;
    u16 rambase = 0x1000;   /* fixed RAM base on nxt2004 */
    u16 pos, crc = 0;
    u8 hdr[3];

    dprintk("%s\n", __func__);
    dprintk("Firmware is %zu bytes\n", fw->size);

    /* hold the micro in reset while loading firmware */
    hdr[0] = 0x80;
    nxt200x_writebytes(state, 0x2B, hdr, 1);

    /* calculate firmware CRC */
    for (pos = 0; pos < fw->size; pos++)
        crc = nxt200x_crc(crc, fw->data[pos]);

    /* write starting address */
    hdr[0] = rambase >> 8;
    hdr[1] = rambase & 0xFF;
    hdr[2] = 0x81;
    nxt200x_writebytes(state, 0x29, hdr, 3);

    /* stream the image in chunks of at most 255 bytes */
    pos = 0;
    while (pos < fw->size) {
        u16 chunk = (fw->size - pos > 255) ? 255 : fw->size - pos;
        nxt200x_writebytes(state, 0x2C, &fw->data[pos], chunk);
        pos += chunk;
    }

    hdr[0] = crc >> 8;
    hdr[1] = crc & 0xFF;
    dprintk("firmware crc is 0x%02X 0x%02X\n", hdr[0], hdr[1]);

    /* write crc */
    nxt200x_writebytes(state, 0x2C, hdr, 2);

    /* do a read to stop things */
    nxt200x_readbytes(state, 0x2C, hdr, 1);

    /* set transfer mode to complete */
    hdr[0] = 0x80;
    nxt200x_writebytes(state, 0x2B, hdr, 1);

    return 0;
}
/*
 * set_frontend callback: stop the micro, program the tuner via the
 * attached tuner driver, reset the AGC, then walk a strictly ordered,
 * modulation- and chip-dependent register sequence before restarting
 * the micro.  Several writes are undocumented magic inherited from the
 * vendor code (marked "???"); do not reorder.
 */
static int nxt200x_setup_frontend_parameters (struct dvb_frontend* fe,
                         struct dvb_frontend_parameters *p)
{
    struct nxt200x_state* state = fe->demodulator_priv;
    u8 buf[5];

    /* stop the micro first */
    nxt200x_microcontroller_stop(state);

    if (state->demod_chip == NXT2004) {
        /* make sure demod is set to digital */
        buf[0] = 0x04;
        nxt200x_writebytes(state, 0x14, buf, 1);
        buf[0] = 0x00;
        nxt200x_writebytes(state, 0x17, buf, 1);
    }

    /* set additional params */
    switch (p->u.vsb.modulation) {
    case QAM_64:
    case QAM_256:
        /* Set punctured clock for QAM */
        /* This is just a guess since I am unable to test it */
        if (state->config->set_ts_params)
            state->config->set_ts_params(fe, 1);
        break;
    case VSB_8:
        /* Set non-punctured clock for VSB */
        if (state->config->set_ts_params)
            state->config->set_ts_params(fe, 0);
        break;
    default:
        return -EINVAL;
        break;
    }

    if (fe->ops.tuner_ops.calc_regs) {
        /* get tuning information */
        fe->ops.tuner_ops.calc_regs(fe, p, buf, 5);

        /* write frequency information */
        nxt200x_writetuner(state, buf);
    }

    /* reset the agc now that tuning has been completed */
    nxt200x_agc_reset(state);

    /* set target power level */
    switch (p->u.vsb.modulation) {
    case QAM_64:
    case QAM_256:
        buf[0] = 0x74;
        break;
    case VSB_8:
        buf[0] = 0x70;
        break;
    default:
        return -EINVAL;
        break;
    }
    nxt200x_writebytes(state, 0x42, buf, 1);

    /* configure sdm */
    switch (state->demod_chip) {
    case NXT2002:
        buf[0] = 0x87;
        break;
    case NXT2004:
        buf[0] = 0x07;
        break;
    default:
        return -EINVAL;
        break;
    }
    nxt200x_writebytes(state, 0x57, buf, 1);

    /* write sdm1 input — note NXT2002 uses the multibyte path here
     * while NXT2004 uses plain writes (inverse of most helpers) */
    buf[0] = 0x10;
    buf[1] = 0x00;
    switch (state->demod_chip) {
    case NXT2002:
        nxt200x_writereg_multibyte(state, 0x58, buf, 2);
        break;
    case NXT2004:
        nxt200x_writebytes(state, 0x58, buf, 2);
        break;
    default:
        return -EINVAL;
        break;
    }

    /* write sdmx input */
    switch (p->u.vsb.modulation) {
    case QAM_64:
        buf[0] = 0x68;
        break;
    case QAM_256:
        buf[0] = 0x64;
        break;
    case VSB_8:
        buf[0] = 0x60;
        break;
    default:
        return -EINVAL;
        break;
    }
    buf[1] = 0x00;
    switch (state->demod_chip) {
    case NXT2002:
        nxt200x_writereg_multibyte(state, 0x5C, buf, 2);
        break;
    case NXT2004:
        nxt200x_writebytes(state, 0x5C, buf, 2);
        break;
    default:
        return -EINVAL;
        break;
    }

    /* write adc power lpf fc */
    buf[0] = 0x05;
    nxt200x_writebytes(state, 0x43, buf, 1);

    if (state->demod_chip == NXT2004) {
        /* write ??? */
        buf[0] = 0x00;
        buf[1] = 0x00;
        nxt200x_writebytes(state, 0x46, buf, 2);
    }

    /* write accumulator2 input */
    buf[0] = 0x80;
    buf[1] = 0x00;
    switch (state->demod_chip) {
    case NXT2002:
        nxt200x_writereg_multibyte(state, 0x4B, buf, 2);
        break;
    case NXT2004:
        nxt200x_writebytes(state, 0x4B, buf, 2);
        break;
    default:
        return -EINVAL;
        break;
    }

    /* write kg1 */
    buf[0] = 0x00;
    nxt200x_writebytes(state, 0x4D, buf, 1);

    /* write sdm12 lpf fc */
    buf[0] = 0x44;
    nxt200x_writebytes(state, 0x55, buf, 1);

    /* write agc control reg */
    buf[0] = 0x04;
    nxt200x_writebytes(state, 0x41, buf, 1);

    /* NXT2004-only block of undocumented multibyte pokes around the
     * 0x80 control bank, bracketed by a soft reset of reg 0x08 */
    if (state->demod_chip == NXT2004) {
        nxt200x_readreg_multibyte(state, 0x80, buf, 1);
        buf[0] = 0x24;
        nxt200x_writereg_multibyte(state, 0x80, buf, 1);

        /* soft reset? */
        nxt200x_readreg_multibyte(state, 0x08, buf, 1);
        buf[0] = 0x10;
        nxt200x_writereg_multibyte(state, 0x08, buf, 1);
        nxt200x_readreg_multibyte(state, 0x08, buf, 1);
        buf[0] = 0x00;
        nxt200x_writereg_multibyte(state, 0x08, buf, 1);

        nxt200x_readreg_multibyte(state, 0x80, buf, 1);
        buf[0] = 0x04;
        nxt200x_writereg_multibyte(state, 0x80, buf, 1);
        buf[0] = 0x00;
        nxt200x_writereg_multibyte(state, 0x81, buf, 1);
        buf[0] = 0x80; buf[1] = 0x00; buf[2] = 0x00;
        nxt200x_writereg_multibyte(state, 0x82, buf, 3);
        nxt200x_readreg_multibyte(state, 0x88, buf, 1);
        buf[0] = 0x11;
        nxt200x_writereg_multibyte(state, 0x88, buf, 1);
        nxt200x_readreg_multibyte(state, 0x80, buf, 1);
        buf[0] = 0x44;
        nxt200x_writereg_multibyte(state, 0x80, buf, 1);
    }

    /* write agc ucgp0 */
    switch (p->u.vsb.modulation) {
    case QAM_64:
        buf[0] = 0x02;
        break;
    case QAM_256:
        buf[0] = 0x03;
        break;
    case VSB_8:
        buf[0] = 0x00;
        break;
    default:
        return -EINVAL;
        break;
    }
    nxt200x_writebytes(state, 0x30, buf, 1);

    /* write agc control reg */
    buf[0] = 0x00;
    nxt200x_writebytes(state, 0x41, buf, 1);

    /* write accumulator2 input */
    buf[0] = 0x80;
    buf[1] = 0x00;
    switch (state->demod_chip) {
    case NXT2002:
        nxt200x_writereg_multibyte(state, 0x49, buf, 2);
        nxt200x_writereg_multibyte(state, 0x4B, buf, 2);
        break;
    case NXT2004:
        nxt200x_writebytes(state, 0x49, buf, 2);
        nxt200x_writebytes(state, 0x4B, buf, 2);
        break;
    default:
        return -EINVAL;
        break;
    }

    /* write agc control reg */
    buf[0] = 0x04;
    nxt200x_writebytes(state, 0x41, buf, 1);

    nxt200x_microcontroller_start(state);

    if (state->demod_chip == NXT2004) {
        nxt2004_microcontroller_init(state);

        /* ???? */
        buf[0] = 0xF0;
        buf[1] = 0x00;
        nxt200x_writebytes(state, 0x5C, buf, 2);
    }

    /* adjacent channel detection should be done here, but I don't
       have any stations with this need so I cannot test it */

    return 0;
}
/* read_status callback: bit 5 of register 0x31 reports lock.  The chip
 * exposes no finer-grained sync stages, so report all-or-nothing. */
static int nxt200x_read_status(struct dvb_frontend* fe, fe_status_t* status)
{
    struct nxt200x_state* state = fe->demodulator_priv;
    u8 reg31;

    nxt200x_readbytes(state, 0x31, &reg31, 1);

    if (reg31 & 0x20)
        *status = FE_HAS_SIGNAL | FE_HAS_CARRIER | FE_HAS_VITERBI |
              FE_HAS_SYNC | FE_HAS_LOCK;
    else
        *status = 0;
    return 0;
}
/* read_ber callback: the first two bytes of statistics block 0xE6 hold
 * an error count, scaled here by 8 bits per byte. */
static int nxt200x_read_ber(struct dvb_frontend* fe, u32* ber)
{
    struct nxt200x_state* state = fe->demodulator_priv;
    u8 stats[3];
    u32 errors;

    nxt200x_readreg_multibyte(state, 0xE6, stats, 3);
    errors = (stats[0] << 8) | stats[1];
    *ber = errors * 8;
    return 0;
}
/* read_signal_strength callback: arm the cluster-variance readout via
 * 0xA1, read the 16-bit value from 0xA6, then invert and rescale it
 * into the 0..0xFFF0 range. */
static int nxt200x_read_signal_strength(struct dvb_frontend* fe, u16* strength)
{
    struct nxt200x_state* state = fe->demodulator_priv;
    u8 raw[2];
    u16 variance;

    /* setup to read cluster variance */
    raw[0] = 0x00;
    nxt200x_writebytes(state, 0xA1, raw, 1);

    /* get multreg val */
    nxt200x_readreg_multibyte(state, 0xA6, raw, 2);

    variance = (raw[0] << 8) | raw[1];
    *strength = ((0x7FFF - variance) & 0x0FFF) * 16;
    return 0;
}
/*
 * read_snr callback: read the cluster variance (0xA1/0xA6), map the
 * inverted value to millidecibels via a piecewise-linear table, then
 * scale to the DVB convention of 0x0000 = 0 dB .. 0xFFFF = 32 dB.
 *
 * Fix: the original scaled with 'snrdb * (0xFFFF/32000)'.  0xFFFF/32000
 * is INTEGER division (== 2), so the output was simply snrdb*2, capping
 * at 60000 instead of spanning the full 16-bit range.  snrdb <= 30000,
 * so snrdb * 0xFFFF fits comfortably in a u32.
 */
static int nxt200x_read_snr(struct dvb_frontend* fe, u16* snr)
{
    struct nxt200x_state* state = fe->demodulator_priv;
    u8 b[2];
    u16 temp = 0, temp2;
    u32 snrdb = 0;

    /* setup to read cluster variance */
    b[0] = 0x00;
    nxt200x_writebytes(state, 0xA1, b, 1);

    /* get multreg val from 0xA6 */
    nxt200x_readreg_multibyte(state, 0xA6, b, 2);

    temp = (b[0] << 8) | b[1];
    temp2 = 0x7FFF - temp;

    /* piecewise-linear map of inverted variance to millidecibels */
    if (temp2 > 0x7F00)
        snrdb = 1000*24 + ( 1000*(30-24) * ( temp2 - 0x7F00 ) / ( 0x7FFF - 0x7F00 ) );
    else if (temp2 > 0x7EC0)
        snrdb = 1000*18 + ( 1000*(24-18) * ( temp2 - 0x7EC0 ) / ( 0x7F00 - 0x7EC0 ) );
    else if (temp2 > 0x7C00)
        snrdb = 1000*12 + ( 1000*(18-12) * ( temp2 - 0x7C00 ) / ( 0x7EC0 - 0x7C00 ) );
    else
        snrdb = 1000*0 + ( 1000*(12-0) * ( temp2 - 0 ) / ( 0x7C00 - 0 ) );

    /* the value reported back from the frontend will be FFFF=32db 0000=0db */
    *snr = snrdb * 0xFFFF / 32000;
    return 0;
}
/* read_ucblocks callback: the third byte of statistics block 0xE6 is
 * the uncorrected-block count. */
static int nxt200x_read_ucblocks(struct dvb_frontend* fe, u32* ucblocks)
{
    struct nxt200x_state* state = fe->demodulator_priv;
    u8 stats[3];

    nxt200x_readreg_multibyte(state, 0xE6, stats, 3);
    *ucblocks = stats[2];
    return 0;
}
/* sleep callback: no low-power handling is implemented; succeed as a
 * no-op so the DVB core can power-manage without errors. */
static int nxt200x_sleep(struct dvb_frontend* fe)
{
    return 0;
}
/*
 * One-time NXT2002 bring-up: fetch the firmware via request_firmware()
 * (may block on userspace), upload it, then run the post-upload
 * sequence — micro reset, full soft reset, AGC/SDM and MPEG-output
 * configuration.  Returns 0 on success or the firmware error code.
 */
static int nxt2002_init(struct dvb_frontend* fe)
{
    struct nxt200x_state* state = fe->demodulator_priv;
    const struct firmware *fw;
    int ret;
    u8 buf[2];

    /* request the firmware, this will block until someone uploads it */
    printk("nxt2002: Waiting for firmware upload (%s)...\n", NXT2002_DEFAULT_FIRMWARE);
    ret = request_firmware(&fw, NXT2002_DEFAULT_FIRMWARE,
                   state->i2c->dev.parent);
    printk("nxt2002: Waiting for firmware upload(2)...\n");
    if (ret) {
        printk("nxt2002: No firmware uploaded (timeout or file not found?)\n");
        return ret;
    }

    ret = nxt2002_load_firmware(fe, fw);
    release_firmware(fw);
    if (ret) {
        printk("nxt2002: Writing firmware to device failed\n");
        return ret;
    }
    printk("nxt2002: Firmware upload complete\n");

    /* Put the micro into reset */
    nxt200x_microcontroller_stop(state);

    /* ensure transfer is complete */
    buf[0]=0x00;
    nxt200x_writebytes(state, 0x2B, buf, 1);

    /* Put the micro into reset for real this time */
    nxt200x_microcontroller_stop(state);

    /* soft reset everything (agc,frontend,eq,fec)*/
    buf[0] = 0x0F;
    nxt200x_writebytes(state, 0x08, buf, 1);
    buf[0] = 0x00;
    nxt200x_writebytes(state, 0x08, buf, 1);

    /* write agc sdm configure */
    buf[0] = 0xF1;
    nxt200x_writebytes(state, 0x57, buf, 1);

    /* write mod output format */
    buf[0] = 0x20;
    nxt200x_writebytes(state, 0x09, buf, 1);

    /* write fec mpeg mode */
    buf[0] = 0x7E;
    buf[1] = 0x00;
    nxt200x_writebytes(state, 0xE9, buf, 2);

    /* write mux selection */
    buf[0] = 0x00;
    nxt200x_writebytes(state, 0xCC, buf, 1);

    return 0;
}
/*
 * One-time NXT2004 bring-up: fetch and upload the firmware, then walk
 * the vendor-derived initialisation sequence.  Many writes are
 * undocumented magic (marked "???"); the repeated micro init/stop
 * cycles and read-before-write pairs on the multibyte registers are
 * intentional — do not reorder or deduplicate.
 */
static int nxt2004_init(struct dvb_frontend* fe)
{
    struct nxt200x_state* state = fe->demodulator_priv;
    const struct firmware *fw;
    int ret;
    u8 buf[3];

    /* ??? */
    buf[0]=0x00;
    nxt200x_writebytes(state, 0x1E, buf, 1);

    /* request the firmware, this will block until someone uploads it */
    printk("nxt2004: Waiting for firmware upload (%s)...\n", NXT2004_DEFAULT_FIRMWARE);
    ret = request_firmware(&fw, NXT2004_DEFAULT_FIRMWARE,
                   state->i2c->dev.parent);
    printk("nxt2004: Waiting for firmware upload(2)...\n");
    if (ret) {
        printk("nxt2004: No firmware uploaded (timeout or file not found?)\n");
        return ret;
    }

    ret = nxt2004_load_firmware(fe, fw);
    release_firmware(fw);
    if (ret) {
        printk("nxt2004: Writing firmware to device failed\n");
        return ret;
    }
    printk("nxt2004: Firmware upload complete\n");

    /* ensure transfer is complete */
    buf[0] = 0x01;
    nxt200x_writebytes(state, 0x19, buf, 1);

    /* repeated init/stop cycling settles the micro after upload */
    nxt2004_microcontroller_init(state);
    nxt200x_microcontroller_stop(state);
    nxt200x_microcontroller_stop(state);
    nxt2004_microcontroller_init(state);
    nxt200x_microcontroller_stop(state);

    /* soft reset everything (agc,frontend,eq,fec)*/
    buf[0] = 0xFF;
    nxt200x_writereg_multibyte(state, 0x08, buf, 1);
    buf[0] = 0x00;
    nxt200x_writereg_multibyte(state, 0x08, buf, 1);

    /* write agc sdm configure */
    buf[0] = 0xD7;
    nxt200x_writebytes(state, 0x57, buf, 1);

    /* ???*/
    buf[0] = 0x07;
    buf[1] = 0xfe;
    nxt200x_writebytes(state, 0x35, buf, 2);
    buf[0] = 0x12;
    nxt200x_writebytes(state, 0x34, buf, 1);
    buf[0] = 0x80;
    nxt200x_writebytes(state, 0x21, buf, 1);

    /* ???*/
    buf[0] = 0x21;
    nxt200x_writebytes(state, 0x0A, buf, 1);

    /* ???*/
    buf[0] = 0x01;
    nxt200x_writereg_multibyte(state, 0x80, buf, 1);

    /* write fec mpeg mode */
    buf[0] = 0x7E;
    buf[1] = 0x00;
    nxt200x_writebytes(state, 0xE9, buf, 2);

    /* write mux selection */
    buf[0] = 0x00;
    nxt200x_writebytes(state, 0xCC, buf, 1);

    /* ???*/
    nxt200x_readreg_multibyte(state, 0x80, buf, 1);
    buf[0] = 0x00;
    nxt200x_writereg_multibyte(state, 0x80, buf, 1);

    /* soft reset? */
    nxt200x_readreg_multibyte(state, 0x08, buf, 1);
    buf[0] = 0x10;
    nxt200x_writereg_multibyte(state, 0x08, buf, 1);
    nxt200x_readreg_multibyte(state, 0x08, buf, 1);
    buf[0] = 0x00;
    nxt200x_writereg_multibyte(state, 0x08, buf, 1);

    /* ???*/
    nxt200x_readreg_multibyte(state, 0x80, buf, 1);
    buf[0] = 0x01;
    nxt200x_writereg_multibyte(state, 0x80, buf, 1);
    buf[0] = 0x70;
    nxt200x_writereg_multibyte(state, 0x81, buf, 1);
    buf[0] = 0x31; buf[1] = 0x5E; buf[2] = 0x66;
    nxt200x_writereg_multibyte(state, 0x82, buf, 3);

    nxt200x_readreg_multibyte(state, 0x88, buf, 1);
    buf[0] = 0x11;
    nxt200x_writereg_multibyte(state, 0x88, buf, 1);
    nxt200x_readreg_multibyte(state, 0x80, buf, 1);
    buf[0] = 0x40;
    nxt200x_writereg_multibyte(state, 0x80, buf, 1);

    nxt200x_readbytes(state, 0x10, buf, 1);
    buf[0] = 0x10;
    nxt200x_writebytes(state, 0x10, buf, 1);
    nxt200x_readbytes(state, 0x0A, buf, 1);
    buf[0] = 0x21;
    nxt200x_writebytes(state, 0x0A, buf, 1);

    nxt2004_microcontroller_init(state);

    buf[0] = 0x21;
    nxt200x_writebytes(state, 0x0A, buf, 1);
    buf[0] = 0x7E;
    nxt200x_writebytes(state, 0xE9, buf, 1);
    buf[0] = 0x00;
    nxt200x_writebytes(state, 0xEA, buf, 1);

    nxt200x_readreg_multibyte(state, 0x80, buf, 1);
    buf[0] = 0x00;
    nxt200x_writereg_multibyte(state, 0x80, buf, 1);
    nxt200x_readreg_multibyte(state, 0x80, buf, 1);
    buf[0] = 0x00;
    nxt200x_writereg_multibyte(state, 0x80, buf, 1);

    /* soft reset? */
    nxt200x_readreg_multibyte(state, 0x08, buf, 1);
    buf[0] = 0x10;
    nxt200x_writereg_multibyte(state, 0x08, buf, 1);
    nxt200x_readreg_multibyte(state, 0x08, buf, 1);
    buf[0] = 0x00;
    nxt200x_writereg_multibyte(state, 0x08, buf, 1);

    nxt200x_readreg_multibyte(state, 0x80, buf, 1);
    buf[0] = 0x04;
    nxt200x_writereg_multibyte(state, 0x80, buf, 1);
    buf[0] = 0x00;
    nxt200x_writereg_multibyte(state, 0x81, buf, 1);
    buf[0] = 0x80; buf[1] = 0x00; buf[2] = 0x00;
    nxt200x_writereg_multibyte(state, 0x82, buf, 3);

    nxt200x_readreg_multibyte(state, 0x88, buf, 1);
    buf[0] = 0x11;
    nxt200x_writereg_multibyte(state, 0x88, buf, 1);

    nxt200x_readreg_multibyte(state, 0x80, buf, 1);
    buf[0] = 0x44;
    nxt200x_writereg_multibyte(state, 0x80, buf, 1);

    /* initialize tuner */
    nxt200x_readbytes(state, 0x10, buf, 1);
    buf[0] = 0x12;
    nxt200x_writebytes(state, 0x10, buf, 1);
    buf[0] = 0x04;
    nxt200x_writebytes(state, 0x13, buf, 1);
    buf[0] = 0x00;
    nxt200x_writebytes(state, 0x16, buf, 1);
    buf[0] = 0x04;
    nxt200x_writebytes(state, 0x14, buf, 1);
    buf[0] = 0x00;
    nxt200x_writebytes(state, 0x14, buf, 1);
    nxt200x_writebytes(state, 0x17, buf, 1);
    nxt200x_writebytes(state, 0x14, buf, 1);
    nxt200x_writebytes(state, 0x17, buf, 1);

    return 0;
}
/* init callback: dispatch to the chip-specific one-time bring-up; the
 * 'initialised' flag ensures firmware upload happens only once. */
static int nxt200x_init(struct dvb_frontend* fe)
{
    struct nxt200x_state* state = fe->demodulator_priv;
    int ret = 0;

    if (state->initialised)
        return 0;

    switch (state->demod_chip) {
    case NXT2002:
        ret = nxt2002_init(fe);
        break;
    case NXT2004:
        ret = nxt2004_init(fe);
        break;
    default:
        return -EINVAL;
    }
    state->initialised = 1;
    return ret;
}
/* get_tune_settings callback: give the demod 500ms to lock after each
 * tune; no zigzag scanning (step size and drift both zero). */
static int nxt200x_get_tune_settings(struct dvb_frontend* fe, struct dvb_frontend_tune_settings* fesettings)
{
    fesettings->min_delay_ms = 500;
    fesettings->step_size = 0;
    fesettings->max_drift = 0;
    return 0;
}
/* release callback: free the state allocated in nxt200x_attach(). */
static void nxt200x_release(struct dvb_frontend* fe)
{
    kfree(fe->demodulator_priv);
}
static struct dvb_frontend_ops nxt200x_ops;
/*
 * Probe for an NXT2002/NXT2004 on @i2c and allocate the frontend.
 * Reads five id bytes from register 0x00, identifies the chip from the
 * first byte, validates the remaining id bytes, and returns the
 * embedded dvb_frontend (freed later via nxt200x_release()).  Returns
 * NULL on allocation failure or unsupported chip id.
 */
struct dvb_frontend* nxt200x_attach(const struct nxt200x_config* config,
                   struct i2c_adapter* i2c)
{
    struct nxt200x_state* state = NULL;
    u8 buf [] = {0,0,0,0,0};

    /* allocate memory for the internal state */
    state = kzalloc(sizeof(struct nxt200x_state), GFP_KERNEL);
    if (state == NULL)
        goto error;

    /* setup the state */
    state->config = config;
    state->i2c = i2c;
    state->initialised = 0;

    /* read card id (buf stays all-zero if the i2c read fails, which
     * falls through to the error path below) */
    nxt200x_readbytes(state, 0x00, buf, 5);
    dprintk("NXT info: %02X %02X %02X %02X %02X\n",
        buf[0], buf[1], buf[2], buf[3], buf[4]);

    /* set demod chip */
    switch (buf[0]) {
    case 0x04:
        state->demod_chip = NXT2002;
        printk("nxt200x: NXT2002 Detected\n");
        break;
    case 0x05:
        state->demod_chip = NXT2004;
        printk("nxt200x: NXT2004 Detected\n");
        break;
    default:
        goto error;
    }

    /* make sure demod chip is supported */
    switch (state->demod_chip) {
    case NXT2002:
        if (buf[0] != 0x04) goto error; /* device id */
        if (buf[1] != 0x02) goto error; /* fab id */
        if (buf[2] != 0x11) goto error; /* month */
        if (buf[3] != 0x20) goto error; /* year msb */
        if (buf[4] != 0x00) goto error; /* year lsb */
        break;
    case NXT2004:
        if (buf[0] != 0x05) goto error; /* device id */
        break;
    default:
        goto error;
    }

    /* create dvb_frontend */
    memcpy(&state->frontend.ops, &nxt200x_ops, sizeof(struct dvb_frontend_ops));
    state->frontend.demodulator_priv = state;
    return &state->frontend;

error:
    kfree(state);
    printk("Unknown/Unsupported NXT chip: %02X %02X %02X %02X %02X\n",
        buf[0], buf[1], buf[2], buf[3], buf[4]);
    return NULL;
}
/* DVB frontend ops shared by NXT2002 and NXT2004; chip differences are
 * dispatched inside each callback via state->demod_chip. */
static struct dvb_frontend_ops nxt200x_ops = {

    .info = {
        .name = "Nextwave NXT200X VSB/QAM frontend",
        .type = FE_ATSC,
        .frequency_min =  54000000,
        .frequency_max = 860000000,
        .frequency_stepsize = 166666,   /* stepsize is just a guess */
        .caps = FE_CAN_FEC_1_2 | FE_CAN_FEC_2_3 | FE_CAN_FEC_3_4 |
            FE_CAN_FEC_5_6 | FE_CAN_FEC_7_8 | FE_CAN_FEC_AUTO |
            FE_CAN_8VSB | FE_CAN_QAM_64 | FE_CAN_QAM_256
    },

    .release = nxt200x_release,

    .init = nxt200x_init,
    .sleep = nxt200x_sleep,

    .set_frontend = nxt200x_setup_frontend_parameters,
    .get_tune_settings = nxt200x_get_tune_settings,

    .read_status = nxt200x_read_status,
    .read_ber = nxt200x_read_ber,
    .read_signal_strength = nxt200x_read_signal_strength,
    .read_snr = nxt200x_read_snr,
    .read_ucblocks = nxt200x_read_ucblocks,
};
/* debug=1 enables the dprintk() tracing defined at the top of the file */
module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, "Turn on/off frontend debugging (default:off).");

MODULE_DESCRIPTION("NXT200X (ATSC 8VSB & ITU-T J.83 AnnexB 64/256 QAM) Demodulator Driver");
MODULE_AUTHOR("Kirk Lapray, Michael Krufky, Jean-Francois Thibert, and Taylor Jacob");
MODULE_LICENSE("GPL");

EXPORT_SYMBOL(nxt200x_attach);
| gpl-2.0 |
boa19861105/android_443_KitKat_kernel_htc_dlxub1 | drivers/gpu/drm/gma500/mdfld_intel_display.c | 5257 | 32165 | /*
* Copyright © 2006-2007 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
*
* Authors:
* Eric Anholt <eric@anholt.net>
*/
#include <linux/i2c.h>
#include <linux/pm_runtime.h>
#include <drm/drmP.h>
#include "psb_intel_reg.h"
#include "psb_intel_display.h"
#include "framebuffer.h"
#include "mdfld_output.h"
#include "mdfld_dsi_output.h"
/* Hardcoded currently */
static int ksel = KSEL_CRYSTAL_19;  /* clock source select — presumably a
                     * 19.2MHz crystal per the constant
                     * name; TODO confirm */

/* Inclusive min/max bounds for one PLL parameter. */
struct psb_intel_range_t {
    int min, max;
};

/* Dot-clock and divider limits for a platform variant. */
struct mrst_limit_t {
    struct psb_intel_range_t dot, m, p1;
};

/* One candidate clock configuration. */
struct mrst_clock_t {
    /* derived values */
    int dot;
    int m;
    int p1;
};

/* Upper bound on register-poll iterations in the pipe-wait helpers. */
#define COUNT_MAX 0x10000000
/*
 * Wait for pipe @pipe (0=A, 1=B, 2=C) to report disabled.  Currently
 * short-circuited to a single vblank wait by the FIXME early return;
 * the status-poll loop below it is unreachable.
 */
void mdfldWaitForPipeDisable(struct drm_device *dev, int pipe)
{
    int count, temp;
    u32 pipeconf_reg = PIPEACONF;

    switch (pipe) {
    case 0:
        break;
    case 1:
        pipeconf_reg = PIPEBCONF;
        break;
    case 2:
        pipeconf_reg = PIPECCONF;
        break;
    default:
        DRM_ERROR("Illegal Pipe Number.\n");
        return;
    }

    /* FIXME JLIU7_PO */
    psb_intel_wait_for_vblank(dev);
    return;

    /* NOTE(review): dead code below while the early return above
     * stands; kept for when the FIXME is resolved. */
    /* Wait for for the pipe disable to take effect. */
    for (count = 0; count < COUNT_MAX; count++) {
        temp = REG_READ(pipeconf_reg);
        if ((temp & PIPEACONF_PIPE_STATE) == 0)
            break;
    }
}
/*
 * Wait for pipe @pipe (0=A, 1=B, 2=C) to report enabled.  Currently
 * short-circuited to a single vblank wait by the FIXME early return,
 * so the poll loop below it is unreachable today.
 *
 * Fix (in the currently-dead loop): the original tested
 * '(temp & PIPEACONF_PIPE_STATE) == 1', which can only be true if the
 * state flag were bit 0; since it is a higher status bit the loop
 * would always spin to COUNT_MAX.  Test the masked bit instead, the
 * same way the disable-wait helper above does.
 */
void mdfldWaitForPipeEnable(struct drm_device *dev, int pipe)
{
    int count, temp;
    u32 pipeconf_reg = PIPEACONF;

    switch (pipe) {
    case 0:
        break;
    case 1:
        pipeconf_reg = PIPEBCONF;
        break;
    case 2:
        pipeconf_reg = PIPECCONF;
        break;
    default:
        DRM_ERROR("Illegal Pipe Number.\n");
        return;
    }

    /* FIXME JLIU7_PO */
    psb_intel_wait_for_vblank(dev);
    return;

    /* Wait for the pipe enable to take effect. */
    for (count = 0; count < COUNT_MAX; count++) {
        temp = REG_READ(pipeconf_reg);
        if (temp & PIPEACONF_PIPE_STATE)
            break;
    }
}
/* Helper-framework prepare hook: blank the pipe before a mode set. */
static void psb_intel_crtc_prepare(struct drm_crtc *crtc)
{
    struct drm_crtc_helper_funcs *helper = crtc->helper_private;

    helper->dpms(crtc, DRM_MODE_DPMS_OFF);
}
/* Helper-framework commit hook: light the pipe back up after a mode set. */
static void psb_intel_crtc_commit(struct drm_crtc *crtc)
{
    struct drm_crtc_helper_funcs *helper = crtc->helper_private;

    helper->dpms(crtc, DRM_MODE_DPMS_ON);
}
/* Mode-fixup hook: no adjustments are needed here, so accept every
 * mode proposed by the upper layers unchanged. */
static bool psb_intel_crtc_mode_fixup(struct drm_crtc *crtc,
                  struct drm_display_mode *mode,
                  struct drm_display_mode *adjusted_mode)
{
    return true;
}
/**
 * Return the pipe currently connected to the panel fitter,
 * or -1 if the panel fitter is not present or not in use
 */
static int psb_intel_panel_fitter_pipe(struct drm_device *dev)
{
	u32 pfit = REG_READ(PFIT_CONTROL);

	/* Fitter disabled means no pipe owns it */
	if (!(pfit & PFIT_ENABLE))
		return -1;

	/* 965 can place panel fitter on either pipe */
	return (pfit >> 29) & 0x3;
}
/*
 * FIXME: this is a byte-wise *copy* of the drm_device snapshotted in
 * mdfld__intel_pipe_set_base() (see the memcpy there).  Operating on a
 * stale copy of the device structure is fragile; this should hold a
 * pointer to the live device instead.
 */
static struct drm_device globle_dev;

/*
 * Toggle per-pixel alpha on display plane A by flipping its pixel
 * format between 32BPP (with alpha) and 32BPP_NO_ALPHA.
 *
 * NOTE(review): relies on globle_dev, so it only works after
 * mdfld__intel_pipe_set_base() has run at least once.
 */
void mdfld__intel_plane_set_alpha(int enable)
{
	struct drm_device *dev = &globle_dev;
	int dspcntr_reg = DSPACNTR;
	u32 dspcntr;

	dspcntr = REG_READ(dspcntr_reg);

	if (enable) {
		dspcntr &= ~DISPPLANE_32BPP_NO_ALPHA;
		dspcntr |= DISPPLANE_32BPP;
	} else {
		dspcntr &= ~DISPPLANE_32BPP;
		dspcntr |= DISPPLANE_32BPP_NO_ALPHA;
	}

	REG_WRITE(dspcntr_reg, dspcntr);
}
static int check_fb(struct drm_framebuffer *fb)
{
if (!fb)
return 0;
switch (fb->bits_per_pixel) {
case 8:
case 16:
case 24:
case 32:
return 0;
default:
DRM_ERROR("Unknown color depth\n");
return -EINVAL;
}
}
/*
 * Point the display plane of this CRTC at the currently attached
 * framebuffer: program stride, pixel format and scan-out base for the
 * (x, y) panning offset.  Returns 0 on success or a negative errno.
 */
static int mdfld__intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
				      struct drm_framebuffer *old_fb)
{
	struct drm_device *dev = crtc->dev;
	/* struct drm_i915_master_private *master_priv; */
	struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
	struct psb_framebuffer *psbfb = to_psb_fb(crtc->fb);
	int pipe = psb_intel_crtc->pipe;
	unsigned long start, offset;
	int dsplinoff = DSPALINOFF;
	int dspsurf = DSPASURF;
	int dspstride = DSPASTRIDE;
	int dspcntr_reg = DSPACNTR;
	u32 dspcntr;
	int ret;

	/* FIXME: snapshot the device for mdfld__intel_plane_set_alpha() */
	memcpy(&globle_dev, dev, sizeof(struct drm_device));

	dev_dbg(dev->dev, "pipe = 0x%x.\n", pipe);

	/* no fb bound */
	if (!crtc->fb) {
		dev_dbg(dev->dev, "No FB bound\n");
		return 0;
	}

	ret = check_fb(crtc->fb);
	if (ret)
		return ret;

	/* Select the register set for this pipe */
	switch (pipe) {
	case 0:
		dsplinoff = DSPALINOFF;
		break;
	case 1:
		dsplinoff = DSPBLINOFF;
		dspsurf = DSPBSURF;
		dspstride = DSPBSTRIDE;
		dspcntr_reg = DSPBCNTR;
		break;
	case 2:
		dsplinoff = DSPCLINOFF;
		dspsurf = DSPCSURF;
		dspstride = DSPCSTRIDE;
		dspcntr_reg = DSPCCNTR;
		break;
	default:
		DRM_ERROR("Illegal Pipe Number.\n");
		return -EINVAL;
	}

	if (!gma_power_begin(dev, true))
		return 0;

	start = psbfb->gtt->offset;
	/* Byte offset of (x, y) within the framebuffer */
	offset = y * crtc->fb->pitches[0] + x * (crtc->fb->bits_per_pixel / 8);

	REG_WRITE(dspstride, crtc->fb->pitches[0]);
	dspcntr = REG_READ(dspcntr_reg);
	dspcntr &= ~DISPPLANE_PIXFORMAT_MASK;

	switch (crtc->fb->bits_per_pixel) {
	case 8:
		dspcntr |= DISPPLANE_8BPP;
		break;
	case 16:
		if (crtc->fb->depth == 15)
			dspcntr |= DISPPLANE_15_16BPP;
		else
			dspcntr |= DISPPLANE_16BPP;
		break;
	case 24:
	case 32:
		dspcntr |= DISPPLANE_32BPP_NO_ALPHA;
		break;
	}
	REG_WRITE(dspcntr_reg, dspcntr);

	dev_dbg(dev->dev, "Writing base %08lX %08lX %d %d\n",
		start, offset, x, y);

	/* Write linear offset then surface base; the reads flush posting */
	REG_WRITE(dsplinoff, offset);
	REG_READ(dsplinoff);
	REG_WRITE(dspsurf, start);
	REG_READ(dspsurf);

	gma_power_end(dev);
	return 0;
}
/*
 * Disable the pipe, plane and pll.
 *
 * The shutdown order matters: drain the DSI FIFOs (MIPI pipes only),
 * disable the plane, disable the pipe, then stop and power-gate the
 * DPLL once no pipe still needs it.
 */
void mdfld_disable_crtc(struct drm_device *dev, int pipe)
{
	int dpll_reg = MRST_DPLL_A;
	int dspcntr_reg = DSPACNTR;
	int dspbase_reg = MRST_DSPABASE;
	int pipeconf_reg = PIPEACONF;
	u32 temp;

	dev_dbg(dev->dev, "pipe = %d\n", pipe);

	switch (pipe) {
	case 0:
		break;
	case 1:
		dpll_reg = MDFLD_DPLL_B;
		dspcntr_reg = DSPBCNTR;
		dspbase_reg = DSPBSURF;
		pipeconf_reg = PIPEBCONF;
		break;
	case 2:
		dpll_reg = MRST_DPLL_A;
		dspcntr_reg = DSPCCNTR;
		dspbase_reg = MDFLD_DSPCBASE;
		pipeconf_reg = PIPECCONF;
		break;
	default:
		DRM_ERROR("Illegal Pipe Number.\n");
		return;
	}

	/* MIPI pipes (0 and 2) must drain their generic FIFOs first */
	if (pipe != 1)
		mdfld_dsi_gen_fifo_ready(dev, MIPI_GEN_FIFO_STAT_REG(pipe),
			HS_CTRL_FIFO_EMPTY | HS_DATA_FIFO_EMPTY);

	/* Disable display plane */
	temp = REG_READ(dspcntr_reg);
	if ((temp & DISPLAY_PLANE_ENABLE) != 0) {
		REG_WRITE(dspcntr_reg,
			  temp & ~DISPLAY_PLANE_ENABLE);
		/* Flush the plane changes */
		REG_WRITE(dspbase_reg, REG_READ(dspbase_reg));
		REG_READ(dspbase_reg);
	}

	/* FIXME_JLIU7 MDFLD_PO revisit */

	/* Next, disable display pipes */
	temp = REG_READ(pipeconf_reg);
	if ((temp & PIPEACONF_ENABLE) != 0) {
		temp &= ~PIPEACONF_ENABLE;
		temp |= PIPECONF_PLANE_OFF | PIPECONF_CURSOR_OFF;
		REG_WRITE(pipeconf_reg, temp);
		REG_READ(pipeconf_reg);

		/* Wait for for the pipe disable to take effect. */
		mdfldWaitForPipeDisable(dev, pipe);
	}

	temp = REG_READ(dpll_reg);
	if (temp & DPLL_VCO_ENABLE) {
		/*
		 * Pipes 0 and 2 (the MIPI pipes) share one PLL, so only
		 * stop it when both are disabled; pipe 1 owns its own.
		 */
		if ((pipe != 1 &&
		     !((REG_READ(PIPEACONF) | REG_READ(PIPECCONF))
		       & PIPEACONF_ENABLE)) || pipe == 1) {
			temp &= ~(DPLL_VCO_ENABLE);
			REG_WRITE(dpll_reg, temp);
			REG_READ(dpll_reg);
			/* Wait for the clocks to turn off. */
			/* FIXME_MDFLD PO may need more delay */
			udelay(500);

			if (!(temp & MDFLD_PWR_GATE_EN)) {
				/* gating power of DPLL */
				REG_WRITE(dpll_reg, temp | MDFLD_PWR_GATE_EN);
				/* FIXME_MDFLD PO - change 500 to 1 after PO */
				udelay(5000);
			}
		}
	}
}
/**
 * Sets the power management mode of the pipe and plane.
 *
 * This code should probably grow support for turning the cursor off and back
 * on appropriately at the same time as we're turning the pipe off/on.
 *
 * ON/STANDBY/SUSPEND: ungate and start the DPLL, enable the plane, then
 * the pipe, then apply the "stuck pipe" workaround (sighting 3741701).
 * OFF: drain DSI FIFOs, disable plane, pipe and finally the DPLL.
 */
static void mdfld_crtc_dpms(struct drm_crtc *crtc, int mode)
{
	struct drm_device *dev = crtc->dev;
	struct drm_psb_private *dev_priv = dev->dev_private;
	struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
	int pipe = psb_intel_crtc->pipe;
	int dpll_reg = MRST_DPLL_A;
	int dspcntr_reg = DSPACNTR;
	int dspbase_reg = MRST_DSPABASE;
	int pipeconf_reg = PIPEACONF;
	u32 pipestat_reg = PIPEASTAT;
	u32 pipeconf = dev_priv->pipeconf[pipe];
	u32 temp;
	int timeout = 0;

	dev_dbg(dev->dev, "mode = %d, pipe = %d\n", mode, pipe);

	/* FIXME_JLIU7 MDFLD_PO replaced w/ the following function */
	/* mdfld_dbi_dpms (struct drm_device *dev, int pipe, bool enabled) */

	switch (pipe) {
	case 0:
		break;
	case 1:
		dpll_reg = DPLL_B;
		dspcntr_reg = DSPBCNTR;
		dspbase_reg = MRST_DSPBBASE;
		pipeconf_reg = PIPEBCONF;
		dpll_reg = MDFLD_DPLL_B; /* NOTE: overrides DPLL_B above */
		break;
	case 2:
		dpll_reg = MRST_DPLL_A;
		dspcntr_reg = DSPCCNTR;
		dspbase_reg = MDFLD_DSPCBASE;
		pipeconf_reg = PIPECCONF;
		pipestat_reg = PIPECSTAT;
		break;
	default:
		DRM_ERROR("Illegal Pipe Number.\n");
		return;
	}

	if (!gma_power_begin(dev, true))
		return;

	/* XXX: When our outputs are all unaware of DPMS modes other than off
	 * and on, we should map those modes to DRM_MODE_DPMS_OFF in the CRTC.
	 */
	switch (mode) {
	case DRM_MODE_DPMS_ON:
	case DRM_MODE_DPMS_STANDBY:
	case DRM_MODE_DPMS_SUSPEND:
		/* Enable the DPLL */
		temp = REG_READ(dpll_reg);

		if ((temp & DPLL_VCO_ENABLE) == 0) {
			/* When ungating power of DPLL, needs to wait 0.5us
			   before enable the VCO */
			if (temp & MDFLD_PWR_GATE_EN) {
				temp &= ~MDFLD_PWR_GATE_EN;
				REG_WRITE(dpll_reg, temp);
				/* FIXME_MDFLD PO - change 500 to 1 after PO */
				udelay(500);
			}

			REG_WRITE(dpll_reg, temp);
			REG_READ(dpll_reg);
			/* FIXME_MDFLD PO - change 500 to 1 after PO */
			udelay(500);

			REG_WRITE(dpll_reg, temp | DPLL_VCO_ENABLE);
			REG_READ(dpll_reg);

			/**
			 * wait for DSI PLL to lock
			 * NOTE: only need to poll status of pipe 0 and pipe 1,
			 * since both MIPI pipes share the same PLL.
			 */
			while ((pipe != 2) && (timeout < 20000) &&
			  !(REG_READ(pipeconf_reg) & PIPECONF_DSIPLL_LOCK)) {
				udelay(150);
				timeout++;
			}
		}

		/* Enable the plane */
		temp = REG_READ(dspcntr_reg);
		if ((temp & DISPLAY_PLANE_ENABLE) == 0) {
			REG_WRITE(dspcntr_reg,
				temp | DISPLAY_PLANE_ENABLE);
			/* Flush the plane changes */
			REG_WRITE(dspbase_reg, REG_READ(dspbase_reg));
		}

		/* Enable the pipe */
		temp = REG_READ(pipeconf_reg);
		if ((temp & PIPEACONF_ENABLE) == 0) {
			REG_WRITE(pipeconf_reg, pipeconf);

			/* Wait for for the pipe enable to take effect. */
			mdfldWaitForPipeEnable(dev, pipe);
		}

		/*workaround for sighting 3741701 Random X blank display*/
		/*perform w/a in video mode only on pipe A or C*/
		if (pipe == 0 || pipe == 2) {
			/* Ack pending pipe status, then look for a vblank;
			   a missing vblank means the pipe is stuck and the
			   DSI controller must be bounced. */
			REG_WRITE(pipestat_reg, REG_READ(pipestat_reg));
			msleep(100);
			if (PIPE_VBLANK_STATUS & REG_READ(pipestat_reg))
				dev_dbg(dev->dev, "OK");
			else {
				dev_dbg(dev->dev, "STUCK!!!!");
				/*shutdown controller*/
				temp = REG_READ(dspcntr_reg);
				REG_WRITE(dspcntr_reg,
					temp & ~DISPLAY_PLANE_ENABLE);
				REG_WRITE(dspbase_reg, REG_READ(dspbase_reg));
				/*mdfld_dsi_dpi_shut_down(dev, pipe);*/
				REG_WRITE(0xb048, 1);
				msleep(100);
				temp = REG_READ(pipeconf_reg);
				temp &= ~PIPEACONF_ENABLE;
				REG_WRITE(pipeconf_reg, temp);
				msleep(100); /*wait for pipe disable*/
				REG_WRITE(MIPI_DEVICE_READY_REG(pipe), 0);
				msleep(100);
				REG_WRITE(0xb004, REG_READ(0xb004));
				/* try to bring the controller back up again*/
				REG_WRITE(MIPI_DEVICE_READY_REG(pipe), 1);
				temp = REG_READ(dspcntr_reg);
				REG_WRITE(dspcntr_reg,
					temp | DISPLAY_PLANE_ENABLE);
				REG_WRITE(dspbase_reg, REG_READ(dspbase_reg));
				/*mdfld_dsi_dpi_turn_on(dev, pipe);*/
				REG_WRITE(0xb048, 2);
				msleep(100);
				temp = REG_READ(pipeconf_reg);
				temp |= PIPEACONF_ENABLE;
				REG_WRITE(pipeconf_reg, temp);
			}
		}

		psb_intel_crtc_load_lut(crtc);

		/* Give the overlay scaler a chance to enable
		   if it's on this pipe */
		/* psb_intel_crtc_dpms_video(crtc, true); TODO */

		break;
	case DRM_MODE_DPMS_OFF:
		/* Give the overlay scaler a chance to disable
		 * if it's on this pipe */
		/* psb_intel_crtc_dpms_video(crtc, FALSE); TODO */
		if (pipe != 1)
			mdfld_dsi_gen_fifo_ready(dev,
				MIPI_GEN_FIFO_STAT_REG(pipe),
				HS_CTRL_FIFO_EMPTY | HS_DATA_FIFO_EMPTY);

		/* Disable the VGA plane that we never use */
		REG_WRITE(VGACNTRL, VGA_DISP_DISABLE);

		/* Disable display plane */
		temp = REG_READ(dspcntr_reg);
		if ((temp & DISPLAY_PLANE_ENABLE) != 0) {
			REG_WRITE(dspcntr_reg,
				temp & ~DISPLAY_PLANE_ENABLE);
			/* Flush the plane changes */
			REG_WRITE(dspbase_reg, REG_READ(dspbase_reg));
			REG_READ(dspbase_reg);
		}

		/* Next, disable display pipes */
		temp = REG_READ(pipeconf_reg);
		if ((temp & PIPEACONF_ENABLE) != 0) {
			temp &= ~PIPEACONF_ENABLE;
			temp |= PIPECONF_PLANE_OFF | PIPECONF_CURSOR_OFF;
			REG_WRITE(pipeconf_reg, temp);
			REG_READ(pipeconf_reg);

			/* Wait for for the pipe disable to take effect. */
			mdfldWaitForPipeDisable(dev, pipe);
		}

		temp = REG_READ(dpll_reg);
		if (temp & DPLL_VCO_ENABLE) {
			/* Shared PLL for pipes 0/2: only stop when both off */
			if ((pipe != 1 && !((REG_READ(PIPEACONF)
				| REG_READ(PIPECCONF)) & PIPEACONF_ENABLE))
					|| pipe == 1) {
				temp &= ~(DPLL_VCO_ENABLE);
				REG_WRITE(dpll_reg, temp);
				REG_READ(dpll_reg);
				/* Wait for the clocks to turn off. */
				/* FIXME_MDFLD PO may need more delay */
				udelay(500);
			}
		}
		break;
	}
	gma_power_end(dev);
}
/* Indices into mdfld_limits[]: DPLL (HDMI) vs DSIPLL (MIPI), by refclk */
#define MDFLD_LIMT_DPLL_19	0
#define MDFLD_LIMT_DPLL_25	1
#define MDFLD_LIMT_DPLL_83	2
#define MDFLD_LIMT_DPLL_100	3
#define MDFLD_LIMT_DSIPLL_19	4
#define MDFLD_LIMT_DSIPLL_25	5
#define MDFLD_LIMT_DSIPLL_83	6
#define MDFLD_LIMT_DSIPLL_100	7

#define MDFLD_DOT_MIN		19750
#define MDFLD_DOT_MAX		120000

/* M and P1 divisor bounds per reference clock (19.2/25/83/100 MHz) */
#define MDFLD_DPLL_M_MIN_19	113
#define MDFLD_DPLL_M_MAX_19	155
#define MDFLD_DPLL_P1_MIN_19	2
#define MDFLD_DPLL_P1_MAX_19	10
#define MDFLD_DPLL_M_MIN_25	101
#define MDFLD_DPLL_M_MAX_25	130
#define MDFLD_DPLL_P1_MIN_25	2
#define MDFLD_DPLL_P1_MAX_25	10
#define MDFLD_DPLL_M_MIN_83	64
#define MDFLD_DPLL_M_MAX_83	64
#define MDFLD_DPLL_P1_MIN_83	2
#define MDFLD_DPLL_P1_MAX_83	2
#define MDFLD_DPLL_M_MIN_100	64
#define MDFLD_DPLL_M_MAX_100	64
#define MDFLD_DPLL_P1_MIN_100	2
#define MDFLD_DPLL_P1_MAX_100	2
#define MDFLD_DSIPLL_M_MIN_19	131
#define MDFLD_DSIPLL_M_MAX_19	175
#define MDFLD_DSIPLL_P1_MIN_19	3
#define MDFLD_DSIPLL_P1_MAX_19	8
#define MDFLD_DSIPLL_M_MIN_25	97
#define MDFLD_DSIPLL_M_MAX_25	140
#define MDFLD_DSIPLL_P1_MIN_25	3
#define MDFLD_DSIPLL_P1_MAX_25	9
#define MDFLD_DSIPLL_M_MIN_83	33
#define MDFLD_DSIPLL_M_MAX_83	92
#define MDFLD_DSIPLL_P1_MIN_83	2
#define MDFLD_DSIPLL_P1_MAX_83	3
#define MDFLD_DSIPLL_M_MIN_100	97
#define MDFLD_DSIPLL_M_MAX_100	140
#define MDFLD_DSIPLL_P1_MIN_100	3
#define MDFLD_DSIPLL_P1_MAX_100	9

/* PLL parameter limit table, selected by mdfld_limit(). */
static const struct mrst_limit_t mdfld_limits[] = {
	{			/* MDFLD_LIMT_DPLL_19 */
	 .dot = {.min = MDFLD_DOT_MIN, .max = MDFLD_DOT_MAX},
	 .m = {.min = MDFLD_DPLL_M_MIN_19, .max = MDFLD_DPLL_M_MAX_19},
	 .p1 = {.min = MDFLD_DPLL_P1_MIN_19, .max = MDFLD_DPLL_P1_MAX_19},
	 },
	{			/* MDFLD_LIMT_DPLL_25 */
	 .dot = {.min = MDFLD_DOT_MIN, .max = MDFLD_DOT_MAX},
	 .m = {.min = MDFLD_DPLL_M_MIN_25, .max = MDFLD_DPLL_M_MAX_25},
	 .p1 = {.min = MDFLD_DPLL_P1_MIN_25, .max = MDFLD_DPLL_P1_MAX_25},
	 },
	{			/* MDFLD_LIMT_DPLL_83 */
	 .dot = {.min = MDFLD_DOT_MIN, .max = MDFLD_DOT_MAX},
	 .m = {.min = MDFLD_DPLL_M_MIN_83, .max = MDFLD_DPLL_M_MAX_83},
	 .p1 = {.min = MDFLD_DPLL_P1_MIN_83, .max = MDFLD_DPLL_P1_MAX_83},
	 },
	{			/* MDFLD_LIMT_DPLL_100 */
	 .dot = {.min = MDFLD_DOT_MIN, .max = MDFLD_DOT_MAX},
	 .m = {.min = MDFLD_DPLL_M_MIN_100, .max = MDFLD_DPLL_M_MAX_100},
	 .p1 = {.min = MDFLD_DPLL_P1_MIN_100, .max = MDFLD_DPLL_P1_MAX_100},
	 },
	{			/* MDFLD_LIMT_DSIPLL_19 */
	 .dot = {.min = MDFLD_DOT_MIN, .max = MDFLD_DOT_MAX},
	 .m = {.min = MDFLD_DSIPLL_M_MIN_19, .max = MDFLD_DSIPLL_M_MAX_19},
	 .p1 = {.min = MDFLD_DSIPLL_P1_MIN_19, .max = MDFLD_DSIPLL_P1_MAX_19},
	 },
	{			/* MDFLD_LIMT_DSIPLL_25 */
	 .dot = {.min = MDFLD_DOT_MIN, .max = MDFLD_DOT_MAX},
	 .m = {.min = MDFLD_DSIPLL_M_MIN_25, .max = MDFLD_DSIPLL_M_MAX_25},
	 .p1 = {.min = MDFLD_DSIPLL_P1_MIN_25, .max = MDFLD_DSIPLL_P1_MAX_25},
	 },
	{			/* MDFLD_LIMT_DSIPLL_83 */
	 .dot = {.min = MDFLD_DOT_MIN, .max = MDFLD_DOT_MAX},
	 .m = {.min = MDFLD_DSIPLL_M_MIN_83, .max = MDFLD_DSIPLL_M_MAX_83},
	 .p1 = {.min = MDFLD_DSIPLL_P1_MIN_83, .max = MDFLD_DSIPLL_P1_MAX_83},
	 },
	{			/* MDFLD_LIMT_DSIPLL_100 */
	 .dot = {.min = MDFLD_DOT_MIN, .max = MDFLD_DOT_MAX},
	 .m = {.min = MDFLD_DSIPLL_M_MIN_100, .max = MDFLD_DSIPLL_M_MAX_100},
	 .p1 = {.min = MDFLD_DSIPLL_P1_MIN_100, .max = MDFLD_DSIPLL_P1_MAX_100},
	 },
};
#define MDFLD_M_MIN	21
#define MDFLD_M_MAX	180

/*
 * Map an M divisor in [MDFLD_M_MIN, MDFLD_M_MAX] to its hardware
 * encoding; index with (m - MDFLD_M_MIN).
 */
static const u32 mdfld_m_converts[] = {
/* M configuration table from 9-bit LFSR table */
	224, 368, 440, 220, 366, 439, 219, 365, 182, 347, /* 21 - 30 */
	173, 342, 171, 85, 298, 149, 74, 37, 18, 265,	/* 31 - 40 */
	388, 194, 353, 432, 216, 108, 310, 155, 333, 166, /* 41 - 50 */
	83, 41, 276, 138, 325, 162, 337, 168, 340, 170,	/* 51 - 60 */
	341, 426, 469, 234, 373, 442, 221, 110, 311, 411, /* 61 - 70 */
	461, 486, 243, 377, 188, 350, 175, 343, 427, 213, /* 71 - 80 */
	106, 53, 282, 397, 354, 227, 113, 56, 284, 142,	/* 81 - 90 */
	71, 35, 273, 136, 324, 418, 465, 488, 500, 506,	/* 91 - 100 */
	253, 126, 63, 287, 399, 455, 483, 241, 376, 444, /* 101 - 110 */
	478, 495, 503, 251, 381, 446, 479, 239, 375, 443, /* 111 - 120 */
	477, 238, 119, 315, 157, 78, 295, 147, 329, 420, /* 121 - 130 */
	210, 105, 308, 154, 77, 38, 275, 137, 68, 290,	/* 131 - 140 */
	145, 328, 164, 82, 297, 404, 458, 485, 498, 249, /* 141 - 150 */
	380, 190, 351, 431, 471, 235, 117, 314, 413, 206, /* 151 - 160 */
	103, 51, 25, 12, 262, 387, 193, 96, 48, 280,	/* 161 - 170 */
	396, 198, 99, 305, 152, 76, 294, 403, 457, 228,	/* 171 - 180 */
};
/*
 * Pick the PLL parameter limits for this CRTC based on its output type
 * (MIPI uses the DSI PLL tables, HDMI the DPLL tables) and the current
 * reference-clock configuration.  Returns NULL for unknown outputs or
 * unsupported clock configurations.
 */
static const struct mrst_limit_t *mdfld_limit(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_psb_private *dev_priv = dev->dev_private;
	const struct mrst_limit_t *limit = NULL;

	if (psb_intel_pipe_has_type(crtc, INTEL_OUTPUT_MIPI) ||
	    psb_intel_pipe_has_type(crtc, INTEL_OUTPUT_MIPI2)) {
		/* DSI PLL limits, keyed by refclk */
		if (ksel == KSEL_CRYSTAL_19 || ksel == KSEL_BYPASS_19)
			limit = &mdfld_limits[MDFLD_LIMT_DSIPLL_19];
		else if (ksel == KSEL_BYPASS_25)
			limit = &mdfld_limits[MDFLD_LIMT_DSIPLL_25];
		else if (ksel == KSEL_BYPASS_83_100 &&
			 dev_priv->core_freq == 166)
			limit = &mdfld_limits[MDFLD_LIMT_DSIPLL_83];
		else if (ksel == KSEL_BYPASS_83_100 &&
			 (dev_priv->core_freq == 100 ||
			  dev_priv->core_freq == 200))
			limit = &mdfld_limits[MDFLD_LIMT_DSIPLL_100];
	} else if (psb_intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI)) {
		/* HDMI DPLL limits, keyed by refclk */
		if (ksel == KSEL_CRYSTAL_19 || ksel == KSEL_BYPASS_19)
			limit = &mdfld_limits[MDFLD_LIMT_DPLL_19];
		else if (ksel == KSEL_BYPASS_25)
			limit = &mdfld_limits[MDFLD_LIMT_DPLL_25];
		else if (ksel == KSEL_BYPASS_83_100 &&
			 dev_priv->core_freq == 166)
			limit = &mdfld_limits[MDFLD_LIMT_DPLL_83];
		else if (ksel == KSEL_BYPASS_83_100 &&
			 (dev_priv->core_freq == 100 ||
			  dev_priv->core_freq == 200))
			limit = &mdfld_limits[MDFLD_LIMT_DPLL_100];
	} else {
		dev_dbg(dev->dev, "mdfld_limit Wrong display type.\n");
	}

	return limit;
}
/* Derive the resulting dot clock from refclk and the m/p1 divisors. */
static void mdfld_clock(int refclk, struct mrst_clock_t *clock)
{
	int scaled = refclk * clock->m;

	clock->dot = scaled / clock->p1;
}
/**
* Returns a set of divisors for the desired target clock with the given refclk,
* or FALSE. Divisor values are the actual divisors for
*/
static bool
mdfldFindBestPLL(struct drm_crtc *crtc, int target, int refclk,
struct mrst_clock_t *best_clock)
{
struct mrst_clock_t clock;
const struct mrst_limit_t *limit = mdfld_limit(crtc);
int err = target;
memset(best_clock, 0, sizeof(*best_clock));
for (clock.m = limit->m.min; clock.m <= limit->m.max; clock.m++) {
for (clock.p1 = limit->p1.min; clock.p1 <= limit->p1.max;
clock.p1++) {
int this_err;
mdfld_clock(refclk, &clock);
this_err = abs(clock.dot - target);
if (this_err < err) {
*best_clock = clock;
err = this_err;
}
}
}
return err != target;
}
/*
 * Program a full mode on the CRTC: timing registers, pipe source size,
 * display plane, and the pipe's PLL (DSI PLL for MIPI, DPLL for HDMI).
 * Returns 0 on success or a negative errno from framebuffer validation.
 */
static int mdfld_crtc_mode_set(struct drm_crtc *crtc,
			      struct drm_display_mode *mode,
			      struct drm_display_mode *adjusted_mode,
			      int x, int y,
			      struct drm_framebuffer *old_fb)
{
	struct drm_device *dev = crtc->dev;
	struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
	struct drm_psb_private *dev_priv = dev->dev_private;
	int pipe = psb_intel_crtc->pipe;
	int fp_reg = MRST_FPA0;
	int dpll_reg = MRST_DPLL_A;
	int dspcntr_reg = DSPACNTR;
	int pipeconf_reg = PIPEACONF;
	int htot_reg = HTOTAL_A;
	int hblank_reg = HBLANK_A;
	int hsync_reg = HSYNC_A;
	int vtot_reg = VTOTAL_A;
	int vblank_reg = VBLANK_A;
	int vsync_reg = VSYNC_A;
	int dspsize_reg = DSPASIZE;
	int dsppos_reg = DSPAPOS;
	int pipesrc_reg = PIPEASRC;
	u32 *pipeconf = &dev_priv->pipeconf[pipe];
	u32 *dspcntr = &dev_priv->dspcntr[pipe];
	int refclk = 0;
	int clk_n = 0, clk_p2 = 0, clk_byte = 1, clk = 0, m_conv = 0,
							clk_tmp = 0;
	struct mrst_clock_t clock;
	bool ok;
	u32 dpll = 0, fp = 0;
	bool is_mipi = false, is_mipi2 = false, is_hdmi = false;
	struct drm_mode_config *mode_config = &dev->mode_config;
	struct psb_intel_encoder *psb_intel_encoder = NULL;
	uint64_t scalingType = DRM_MODE_SCALE_FULLSCREEN;
	struct drm_encoder *encoder;
	struct drm_connector *connector;
	int timeout = 0;
	int ret;

	dev_dbg(dev->dev, "pipe = 0x%x\n", pipe);

#if 0
	if (pipe == 1) {
		if (!gma_power_begin(dev, true))
			return 0;
		android_hdmi_crtc_mode_set(crtc, mode, adjusted_mode,
			x, y, old_fb);
		goto mrst_crtc_mode_set_exit;
	}
#endif

	/* Select the register set for this pipe */
	switch (pipe) {
	case 0:
		break;
	case 1:
		fp_reg = FPB0;
		dpll_reg = DPLL_B;
		dspcntr_reg = DSPBCNTR;
		pipeconf_reg = PIPEBCONF;
		htot_reg = HTOTAL_B;
		hblank_reg = HBLANK_B;
		hsync_reg = HSYNC_B;
		vtot_reg = VTOTAL_B;
		vblank_reg = VBLANK_B;
		vsync_reg = VSYNC_B;
		dspsize_reg = DSPBSIZE;
		dsppos_reg = DSPBPOS;
		pipesrc_reg = PIPEBSRC;
		fp_reg = MDFLD_DPLL_DIV0; /* NOTE: overrides FPB0 above */
		dpll_reg = MDFLD_DPLL_B; /* NOTE: overrides DPLL_B above */
		break;
	case 2:
		dpll_reg = MRST_DPLL_A;
		dspcntr_reg = DSPCCNTR;
		pipeconf_reg = PIPECCONF;
		htot_reg = HTOTAL_C;
		hblank_reg = HBLANK_C;
		hsync_reg = HSYNC_C;
		vtot_reg = VTOTAL_C;
		vblank_reg = VBLANK_C;
		vsync_reg = VSYNC_C;
		dspsize_reg = DSPCSIZE;
		dsppos_reg = DSPCPOS;
		pipesrc_reg = PIPECSRC;
		break;
	default:
		DRM_ERROR("Illegal Pipe Number.\n");
		return 0;
	}

	ret = check_fb(crtc->fb);
	if (ret)
		return ret;

	dev_dbg(dev->dev, "adjusted_hdisplay = %d\n",
		adjusted_mode->hdisplay);
	dev_dbg(dev->dev, "adjusted_vdisplay = %d\n",
		adjusted_mode->vdisplay);
	dev_dbg(dev->dev, "adjusted_hsync_start = %d\n",
		adjusted_mode->hsync_start);
	dev_dbg(dev->dev, "adjusted_hsync_end = %d\n",
		adjusted_mode->hsync_end);
	dev_dbg(dev->dev, "adjusted_htotal = %d\n",
		adjusted_mode->htotal);
	dev_dbg(dev->dev, "adjusted_vsync_start = %d\n",
		adjusted_mode->vsync_start);
	dev_dbg(dev->dev, "adjusted_vsync_end = %d\n",
		adjusted_mode->vsync_end);
	dev_dbg(dev->dev, "adjusted_vtotal = %d\n",
		adjusted_mode->vtotal);
	dev_dbg(dev->dev, "adjusted_clock = %d\n",
		adjusted_mode->clock);
	dev_dbg(dev->dev, "hdisplay = %d\n",
		mode->hdisplay);
	dev_dbg(dev->dev, "vdisplay = %d\n",
		mode->vdisplay);

	if (!gma_power_begin(dev, true))
		return 0;

	/* Remember both modes for later restore paths */
	memcpy(&psb_intel_crtc->saved_mode, mode,
		sizeof(struct drm_display_mode));
	memcpy(&psb_intel_crtc->saved_adjusted_mode, adjusted_mode,
		sizeof(struct drm_display_mode));

	/* Classify the output type(s) driven by this CRTC */
	list_for_each_entry(connector, &mode_config->connector_list, head) {
		if (!connector)
			continue;

		encoder = connector->encoder;

		if (!encoder)
			continue;

		if (encoder->crtc != crtc)
			continue;

		psb_intel_encoder = psb_intel_attached_encoder(connector);

		switch (psb_intel_encoder->type) {
		case INTEL_OUTPUT_MIPI:
			is_mipi = true;
			break;
		case INTEL_OUTPUT_MIPI2:
			is_mipi2 = true;
			break;
		case INTEL_OUTPUT_HDMI:
			is_hdmi = true;
			break;
		}
	}

	/* Disable the VGA plane that we never use */
	REG_WRITE(VGACNTRL, VGA_DISP_DISABLE);

	/* Disable the panel fitter if it was on our pipe */
	if (psb_intel_panel_fitter_pipe(dev) == pipe)
		REG_WRITE(PFIT_CONTROL, 0);

	/* pipesrc and dspsize control the size that is scaled from,
	 * which should always be the user's requested size.
	 */
	if (pipe == 1) {
		/* FIXME: To make HDMI display with 864x480 (TPO), 480x864
		 * (PYR) or 480x854 (TMD), set the sprite width/height and
		 * souce image size registers with the adjusted mode for
		 * pipe B.
		 */

		/*
		 * The defined sprite rectangle must always be completely
		 * contained within the displayable area of the screen image
		 * (frame buffer).
		 */
		REG_WRITE(dspsize_reg, ((min(mode->crtc_vdisplay, adjusted_mode->crtc_vdisplay) - 1) << 16)
				| (min(mode->crtc_hdisplay, adjusted_mode->crtc_hdisplay) - 1));
		/* Set the CRTC with encoder mode. */
		REG_WRITE(pipesrc_reg, ((mode->crtc_hdisplay - 1) << 16)
				| (mode->crtc_vdisplay - 1));
	} else {
		REG_WRITE(dspsize_reg,
				((mode->crtc_vdisplay - 1) << 16) |
					(mode->crtc_hdisplay - 1));
		REG_WRITE(pipesrc_reg,
				((mode->crtc_hdisplay - 1) << 16) |
					(mode->crtc_vdisplay - 1));
	}

	REG_WRITE(dsppos_reg, 0);

	if (psb_intel_encoder)
		drm_connector_property_get_value(connector,
			dev->mode_config.scaling_mode_property, &scalingType);

	if (scalingType == DRM_MODE_SCALE_NO_SCALE) {
		/* Medfield doesn't have register support for centering so we
		 * need to mess with the h/vblank and h/vsync start and ends
		 * to get centering
		 */
		int offsetX = 0, offsetY = 0;

		offsetX = (adjusted_mode->crtc_hdisplay -
				mode->crtc_hdisplay) / 2;
		offsetY = (adjusted_mode->crtc_vdisplay -
				mode->crtc_vdisplay) / 2;

		REG_WRITE(htot_reg, (mode->crtc_hdisplay - 1) |
			((adjusted_mode->crtc_htotal - 1) << 16));
		REG_WRITE(vtot_reg, (mode->crtc_vdisplay - 1) |
			((adjusted_mode->crtc_vtotal - 1) << 16));
		REG_WRITE(hblank_reg, (adjusted_mode->crtc_hblank_start -
							offsetX - 1) |
			((adjusted_mode->crtc_hblank_end - offsetX - 1) << 16));
		REG_WRITE(hsync_reg, (adjusted_mode->crtc_hsync_start -
							offsetX - 1) |
			((adjusted_mode->crtc_hsync_end - offsetX - 1) << 16));
		REG_WRITE(vblank_reg, (adjusted_mode->crtc_vblank_start -
							offsetY - 1) |
			((adjusted_mode->crtc_vblank_end - offsetY - 1) << 16));
		REG_WRITE(vsync_reg, (adjusted_mode->crtc_vsync_start -
							offsetY - 1) |
			((adjusted_mode->crtc_vsync_end - offsetY - 1) << 16));
	} else {
		REG_WRITE(htot_reg, (adjusted_mode->crtc_hdisplay - 1) |
			((adjusted_mode->crtc_htotal - 1) << 16));
		REG_WRITE(vtot_reg, (adjusted_mode->crtc_vdisplay - 1) |
			((adjusted_mode->crtc_vtotal - 1) << 16));
		REG_WRITE(hblank_reg, (adjusted_mode->crtc_hblank_start - 1) |
			((adjusted_mode->crtc_hblank_end - 1) << 16));
		REG_WRITE(hsync_reg, (adjusted_mode->crtc_hsync_start - 1) |
			((adjusted_mode->crtc_hsync_end - 1) << 16));
		REG_WRITE(vblank_reg, (adjusted_mode->crtc_vblank_start - 1) |
			((adjusted_mode->crtc_vblank_end - 1) << 16));
		REG_WRITE(vsync_reg, (adjusted_mode->crtc_vsync_start - 1) |
			((adjusted_mode->crtc_vsync_end - 1) << 16));
	}

	/* Flush the plane changes */
	{
		struct drm_crtc_helper_funcs *crtc_funcs =
						crtc->helper_private;
		crtc_funcs->mode_set_base(crtc, x, y, old_fb);
	}

	/* setup pipeconf */
	*pipeconf = PIPEACONF_ENABLE; /* FIXME_JLIU7 REG_READ(pipeconf_reg); */

	/* Set up the display plane register */
	*dspcntr = REG_READ(dspcntr_reg);
	*dspcntr |= pipe << DISPPLANE_SEL_PIPE_POS;
	*dspcntr |= DISPLAY_PLANE_ENABLE;

	if (is_mipi2)
		goto mrst_crtc_mode_set_exit;
	clk = adjusted_mode->clock;

	if (is_hdmi) {
		/* Pick refclk and the n/p2 multipliers from ksel/core_freq */
		if ((ksel == KSEL_CRYSTAL_19) || (ksel == KSEL_BYPASS_19)) {
			refclk = 19200;

			if (is_mipi || is_mipi2)
				clk_n = 1, clk_p2 = 8;
			else if (is_hdmi)
				clk_n = 1, clk_p2 = 10;
		} else if (ksel == KSEL_BYPASS_25) {
			refclk = 25000;

			if (is_mipi || is_mipi2)
				clk_n = 1, clk_p2 = 8;
			else if (is_hdmi)
				clk_n = 1, clk_p2 = 10;
		} else if ((ksel == KSEL_BYPASS_83_100) &&
				dev_priv->core_freq == 166) {
			refclk = 83000;

			if (is_mipi || is_mipi2)
				clk_n = 4, clk_p2 = 8;
			else if (is_hdmi)
				clk_n = 4, clk_p2 = 10;
		} else if ((ksel == KSEL_BYPASS_83_100) &&
				(dev_priv->core_freq == 100 ||
				dev_priv->core_freq == 200)) {
			refclk = 100000;
			if (is_mipi || is_mipi2)
				clk_n = 4, clk_p2 = 8;
			else if (is_hdmi)
				clk_n = 4, clk_p2 = 10;
		}

		if (is_mipi)
			clk_byte = dev_priv->bpp / 8;
		else if (is_mipi2)
			clk_byte = dev_priv->bpp2 / 8;

		/* Target clock the PLL must actually produce */
		clk_tmp = clk * clk_n * clk_p2 * clk_byte;

		dev_dbg(dev->dev, "clk = %d, clk_n = %d, clk_p2 = %d.\n",
			clk, clk_n, clk_p2);
		dev_dbg(dev->dev, "adjusted_mode->clock = %d, clk_tmp = %d.\n",
			adjusted_mode->clock, clk_tmp);

		ok = mdfldFindBestPLL(crtc, clk_tmp, refclk, &clock);

		if (!ok) {
			DRM_ERROR
			    ("mdfldFindBestPLL fail in mdfld_crtc_mode_set.\n");
		} else {
			m_conv = mdfld_m_converts[(clock.m - MDFLD_M_MIN)];

			dev_dbg(dev->dev, "dot clock = %d,"
				"m = %d, p1 = %d, m_conv = %d.\n",
				clock.dot, clock.m,
				clock.p1, m_conv);
		}

		dpll = REG_READ(dpll_reg);

		if (dpll & DPLL_VCO_ENABLE) {
			/* PLL is running: stop it before reprogramming */
			dpll &= ~DPLL_VCO_ENABLE;
			REG_WRITE(dpll_reg, dpll);
			REG_READ(dpll_reg);

			/* FIXME jliu7 check the DPLL lock bit PIPEACONF[29] */
			/* FIXME_MDFLD PO - change 500 to 1 after PO */
			udelay(500);

			/* reset M1, N1 & P1 */
			REG_WRITE(fp_reg, 0);
			dpll &= ~MDFLD_P1_MASK;
			REG_WRITE(dpll_reg, dpll);
			/* FIXME_MDFLD PO - change 500 to 1 after PO */
			udelay(500);
		}

		/* When ungating power of DPLL, needs to wait 0.5us before
		 * enable the VCO */
		if (dpll & MDFLD_PWR_GATE_EN) {
			dpll &= ~MDFLD_PWR_GATE_EN;
			REG_WRITE(dpll_reg, dpll);
			/* FIXME_MDFLD PO - change 500 to 1 after PO */
			udelay(500);
		}

		dpll = 0;

#if 0 /* FIXME revisit later */
		if (ksel == KSEL_CRYSTAL_19 || ksel == KSEL_BYPASS_19 ||
					ksel == KSEL_BYPASS_25)
			dpll &= ~MDFLD_INPUT_REF_SEL;
		else if (ksel == KSEL_BYPASS_83_100)
			dpll |= MDFLD_INPUT_REF_SEL;
#endif /* FIXME revisit later */

		if (is_hdmi)
			dpll |= MDFLD_VCO_SEL;

		fp = (clk_n / 2) << 16;
		fp |= m_conv;

		/* compute bitmask from p1 value */
		dpll |= (1 << (clock.p1 - 2)) << 17;

#if 0 /* 1080p30 & 720p */
		dpll = 0x00050000;
		fp = 0x000001be;
#endif
#if 0 /* 480p */
		dpll = 0x02010000;
		fp = 0x000000d2;
#endif
	} else {
		/* MIPI path: fixed divisor values taken from the spec */
#if 0 /*DBI_TPO_480x864*/
		dpll = 0x00020000;
		fp = 0x00000156;
#endif /* DBI_TPO_480x864 */ /* get from spec. */

		dpll = 0x00800000;
		fp = 0x000000c1;
	}

	REG_WRITE(fp_reg, fp);
	REG_WRITE(dpll_reg, dpll);
	/* FIXME_MDFLD PO - change 500 to 1 after PO */
	udelay(500);

	dpll |= DPLL_VCO_ENABLE;
	REG_WRITE(dpll_reg, dpll);
	REG_READ(dpll_reg);

	/* wait for DSI PLL to lock */
	while (timeout < 20000 &&
		!(REG_READ(pipeconf_reg) & PIPECONF_DSIPLL_LOCK)) {
		udelay(150);
		timeout++;
	}

	if (is_mipi)
		goto mrst_crtc_mode_set_exit;

	dev_dbg(dev->dev, "is_mipi = 0x%x\n", is_mipi);

	REG_WRITE(pipeconf_reg, *pipeconf);
	REG_READ(pipeconf_reg);

	/* Wait for for the pipe enable to take effect. */

	REG_WRITE(dspcntr_reg, *dspcntr);
	psb_intel_wait_for_vblank(dev);

mrst_crtc_mode_set_exit:

	gma_power_end(dev);

	return 0;
}
/* CRTC helper vtable for Medfield, invoked by the DRM CRTC helper core. */
const struct drm_crtc_helper_funcs mdfld_helper_funcs = {
	.dpms = mdfld_crtc_dpms,
	.mode_fixup = psb_intel_crtc_mode_fixup,
	.mode_set = mdfld_crtc_mode_set,
	.mode_set_base = mdfld__intel_pipe_set_base,
	.prepare = psb_intel_crtc_prepare,
	.commit = psb_intel_crtc_commit,
};
/*
* File...........: arch/s390/mm/extmem.c
* Author(s)......: Carsten Otte <cotte@de.ibm.com>
* Rob M van der Heij <rvdheij@nl.ibm.com>
* Steven Shultz <shultzss@us.ibm.com>
* Bugreports.to..: <Linux390@de.ibm.com>
* (C) IBM Corporation 2002-2004
*/
#define KMSG_COMPONENT "extmem"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/spinlock.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/bootmem.h>
#include <linux/ctype.h>
#include <linux/ioport.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/ebcdic.h>
#include <asm/errno.h>
#include <asm/extmem.h>
#include <asm/cpcmd.h>
#include <asm/setup.h>
/* Diagnose x'64' subcodes — original 31-bit variants */
#define DCSS_LOADSHR	0x00
#define DCSS_LOADNSR	0x04
#define DCSS_PURGESEG	0x08
#define DCSS_FINDSEG	0x0c
#define DCSS_LOADNOLY	0x10
#define DCSS_SEGEXT	0x18
/* Diagnose x'64' subcodes — extended 64-bit ("X") variants */
#define DCSS_LOADSHRX	0x20
#define DCSS_LOADNSRX	0x24
#define DCSS_FINDSEGX	0x2c
#define DCSS_SEGEXTX	0x38
#define DCSS_FINDSEGA	0x0c

/* One contiguous address range of a segment, as reported by Diag x'64'. */
struct qrange {
	unsigned long  start; /* last byte type */
	unsigned long  end; /* last byte reserved */
};

/* Output area for the Diag x'64' segment query (new format). */
struct qout64 {
	unsigned long segstart;
	unsigned long segend;
	int segcnt;
	int segrcnt;
	struct qrange range[6];
};

#ifdef CONFIG_64BIT
/* 31-bit counterpart of struct qrange, used with the old subcode. */
struct qrange_old {
	unsigned int start; /* last byte type */
	unsigned int end; /* last byte reserved */
};

/* output area format for the Diag x'64' old subcode x'18' */
struct qout64_old {
	int segstart;
	int segend;
	int segcnt;
	int segrcnt;
	struct qrange_old range[6];
};
#endif

/* Input parameter block for the Diag x'64' segment query. */
struct qin64 {
	char qopcode;
	char rsrv1[3];
	char qrcode;
	char rsrv2[3];
	char qname[8];
	unsigned int qoutptr;
	short int qoutlen;
};

/* In-kernel bookkeeping for one loaded DCSS segment. */
struct dcss_segment {
	struct list_head list;		/* entry in dcss_list */
	char dcss_name[8];		/* EBCDIC, blank-padded */
	char res_name[15];
	unsigned long start_addr;
	unsigned long end;
	atomic_t ref_count;
	int do_nonshared;	/* presumably: loaded non-shared — see callers */
	unsigned int vm_segtype;	/* index into segtype_string[] */
	struct qrange range[6];
	int segcnt;
	struct resource *res;
};
static DEFINE_MUTEX(dcss_lock);	/* guards dcss_list (see segment_by_name) */
static LIST_HEAD(dcss_list);	/* all currently known DCSS segments */
/* Human-readable names for vm_segtype values. */
static char *segtype_string[] = { "SW", "EW", "SR", "ER", "SN", "EN", "SC",
					"EW/EN-MIXED" };
/* Subcodes actually used; chosen old vs new by dcss_set_subcodes(). */
static int loadshr_scode, loadnsr_scode, findseg_scode;
static int segext_scode, purgeseg_scode;
/* Nonzero once the subcode set above has been determined. */
static int scode_set;
/* set correct Diag x'64' subcodes. */
/*
 * Probe whether the hypervisor supports the 64-bit ("X") Diag x'64'
 * subcodes by issuing DCSS_FINDSEGX on a dummy name.  If the diagnose
 * itself faults, the exception fixup forces rc = 3, meaning only the
 * old 31-bit subcodes are available.
 */
static int
dcss_set_subcodes(void)
{
#ifdef CONFIG_64BIT
	/* GFP_DMA: the diagnose needs a 31-bit addressable buffer */
	char *name = kmalloc(8 * sizeof(char), GFP_KERNEL | GFP_DMA);
	unsigned long rx, ry;
	int rc;

	if (name == NULL)
		return -ENOMEM;

	rx = (unsigned long) name;
	ry = DCSS_FINDSEGX;

	strcpy(name, "dummy");
	asm volatile(
		" diag %0,%1,0x64\n"
		"0: ipm %2\n"
		" srl %2,28\n"
		" j 2f\n"
		"1: la %2,3\n"
		"2:\n"
		EX_TABLE(0b, 1b)
		: "+d" (rx), "+d" (ry), "=d" (rc) : : "cc");

	kfree(name);

	/* Diag x'64' new subcodes are supported, set to new subcodes */
	if (rc != 3) {
		loadshr_scode = DCSS_LOADSHRX;
		loadnsr_scode = DCSS_LOADNSRX;
		purgeseg_scode = DCSS_PURGESEG;
		findseg_scode = DCSS_FINDSEGX;
		segext_scode = DCSS_SEGEXTX;
		return 0;
	}
#endif
	/* Diag x'64' new subcodes are not supported, set to old subcodes */
	loadshr_scode = DCSS_LOADNOLY;
	loadnsr_scode = DCSS_LOADNSR;
	purgeseg_scode = DCSS_PURGESEG;
	findseg_scode = DCSS_FINDSEG;
	segext_scode = DCSS_SEGEXT;
	return 0;
}
/*
 * Build the 8-byte, blank-padded, EBCDIC VM segment name from an
 * ASCII name: upper-case up to 8 characters, pad with spaces, then
 * convert the buffer to EBCDIC in place.
 */
static void
dcss_mkname(char *name, char *dcss_name)
{
	int i = 0;

	while (i < 8 && name[i] != '\0') {
		dcss_name[i] = toupper(name[i]);
		i++;
	}
	while (i < 8)
		dcss_name[i++] = ' ';
	ASCEBC(dcss_name, 8);
}
/*
* search all segments in dcss_list, and return the one
* namend *name. If not found, return NULL.
*/
static struct dcss_segment *
segment_by_name (char *name)
{
char dcss_name[9];
struct list_head *l;
struct dcss_segment *tmp, *retval = NULL;
BUG_ON(!mutex_is_locked(&dcss_lock));
dcss_mkname (name, dcss_name);
list_for_each (l, &dcss_list) {
tmp = list_entry (l, struct dcss_segment, list);
if (memcmp(tmp->dcss_name, dcss_name, 8) == 0) {
retval = tmp;
break;
}
}
return retval;
}
/*
 * Perform a function on a dcss segment.
 *
 * Issues Diagnose x'64' with subcode *func and the given parameter
 * block; lazily determines the supported subcode set on first use.
 * The two diagnose result registers are returned via ret1/ret2 and
 * the condition code is the return value (negative errno on probe
 * failure).
 */
static inline int
dcss_diag(int *func, void *parameter,
           unsigned long *ret1, unsigned long *ret2)
{
	unsigned long rx, ry;
	int rc;

	if (scode_set == 0) {
		rc = dcss_set_subcodes();
		if (rc < 0)
			return rc;
		scode_set = 1;
	}
	rx = (unsigned long) parameter;
	ry = (unsigned long) *func;

#ifdef CONFIG_64BIT
	/* 64-bit Diag x'64' new subcode, keep in 64-bit addressing mode */
	if (*func > DCSS_SEGEXT)
		asm volatile(
			" diag %0,%1,0x64\n"
			" ipm %2\n"
			" srl %2,28\n"
			: "+d" (rx), "+d" (ry), "=d" (rc) : : "cc");
	/* 31-bit Diag x'64' old subcode, switch to 31-bit addressing mode */
	else
		asm volatile(
			" sam31\n"
			" diag %0,%1,0x64\n"
			" sam64\n"
			" ipm %2\n"
			" srl %2,28\n"
			: "+d" (rx), "+d" (ry), "=d" (rc) : : "cc");
#else
	asm volatile(
		" diag %0,%1,0x64\n"
		" ipm %2\n"
		" srl %2,28\n"
		: "+d" (rx), "+d" (ry), "=d" (rc) : : "cc");
#endif
	*ret1 = rx;
	*ret2 = ry;
	return rc;
}
/* Map a z/VM Diag x'64' return code to a Linux errno. */
static inline int
dcss_diag_translate_rc (int vm_rc) {
	/* 44 means "segment not found"; anything else is a generic I/O error. */
	return (vm_rc == 44) ? -ENOENT : -EIO;
}
/* do a diag to get info about a segment.
* fills start_address, end and vm_segtype fields
*/
static int
query_segment_type (struct dcss_segment *seg)
{
	unsigned long dummy, vmrc;
	int diag_cc, rc, i;
	struct qout64 *qout;
	struct qin64 *qin;

	/* Diag parameter areas must be 31-bit addressable, hence GFP_DMA. */
	qin = kmalloc(sizeof(*qin), GFP_KERNEL | GFP_DMA);
	qout = kmalloc(sizeof(*qout), GFP_KERNEL | GFP_DMA);
	if ((qin == NULL) || (qout == NULL)) {
		rc = -ENOMEM;
		goto out_free;
	}

	/* initialize diag input parameters */
	qin->qopcode = DCSS_FINDSEGA;
	qin->qoutptr = (unsigned long) qout;
	qin->qoutlen = sizeof(struct qout64);
	memcpy (qin->qname, seg->dcss_name, 8);

	diag_cc = dcss_diag(&segext_scode, qin, &dummy, &vmrc);

	if (diag_cc < 0) {
		rc = diag_cc;
		goto out_free;
	}
	if (diag_cc > 1) {
		pr_warning("Querying a DCSS type failed with rc=%ld\n", vmrc);
		rc = dcss_diag_translate_rc (vmrc);
		goto out_free;
	}

#ifdef CONFIG_64BIT
	/* Only old format of output area of Diagnose x'64' is supported,
	copy data for the new format. */
	if (segext_scode == DCSS_SEGEXT) {
		struct qout64_old *qout_old;
		qout_old = kzalloc(sizeof(*qout_old), GFP_KERNEL | GFP_DMA);
		if (qout_old == NULL) {
			rc = -ENOMEM;
			goto out_free;
		}
		memcpy(qout_old, qout, sizeof(struct qout64_old));
		/* Widen the 31-bit fields of the old layout into the
		 * 64-bit fields of struct qout64. */
		qout->segstart = (unsigned long) qout_old->segstart;
		qout->segend = (unsigned long) qout_old->segend;
		qout->segcnt = qout_old->segcnt;
		qout->segrcnt = qout_old->segrcnt;
		/* The old reply carries at most 6 range entries. */
		if (qout->segcnt > 6)
			qout->segrcnt = 6;
		for (i = 0; i < qout->segrcnt; i++) {
			qout->range[i].start =
				(unsigned long) qout_old->range[i].start;
			qout->range[i].end =
				(unsigned long) qout_old->range[i].end;
		}
		kfree(qout_old);
	}
#endif
	/* Segments with more than 6 parts are not supported here. */
	if (qout->segcnt > 6) {
		rc = -EOPNOTSUPP;
		goto out_free;
	}

	if (qout->segcnt == 1) {
		/* Single part: low byte of the range start encodes the type. */
		seg->vm_segtype = qout->range[0].start & 0xff;
	} else {
		/* multi-part segment. only one type supported here:
		    - all parts are contiguous
		    - all parts are either EW or EN type
		    - maximum 6 parts allowed */
		unsigned long start = qout->segstart >> PAGE_SHIFT;
		for (i=0; i<qout->segcnt; i++) {
			if (((qout->range[i].start & 0xff) != SEG_TYPE_EW) &&
			    ((qout->range[i].start & 0xff) != SEG_TYPE_EN)) {
				rc = -EOPNOTSUPP;
				goto out_free;
			}
			/* Each part must begin where the previous one ended. */
			if (start != qout->range[i].start >> PAGE_SHIFT) {
				rc = -EOPNOTSUPP;
				goto out_free;
			}
			start = (qout->range[i].end >> PAGE_SHIFT) + 1;
		}
		seg->vm_segtype = SEG_TYPE_EWEN;
	}

	/* analyze diag output and update seg */
	seg->start_addr = qout->segstart;
	seg->end = qout->segend;

	memcpy (seg->range, qout->range, 6*sizeof(struct qrange));
	seg->segcnt = qout->segcnt;

	rc = 0;

 out_free:
	kfree(qin);
	kfree(qout);
	return rc;
}
/*
* get info about a segment
* possible return values:
* -ENOSYS : we are not running on VM
* -EIO : could not perform query diagnose
* -ENOENT : no such segment
* -EOPNOTSUPP: multi-part segment cannot be used with linux
* -ENOMEM : out of memory
* 0 .. 6 : type of segment as defined in include/asm-s390/extmem.h
*/
int
segment_type (char* name)
{
	struct dcss_segment seg;
	int rc;

	/* DCSS only exists under z/VM. */
	if (!MACHINE_IS_VM)
		return -ENOSYS;

	/* Build the EBCDIC name and ask CP about the segment; on
	 * success the type is reported instead of 0. */
	dcss_mkname(name, seg.dcss_name);
	rc = query_segment_type (&seg);
	return (rc < 0) ? rc : seg.vm_segtype;
}
/*
* check if segment collides with other segments that are currently loaded
* returns 1 if this is the case, 0 if no collision was found
*/
static int
segment_overlaps_others (struct dcss_segment *seg)
{
	struct list_head *pos;
	struct dcss_segment *other;

	/* dcss_list may only be walked with dcss_lock held. */
	BUG_ON(!mutex_is_locked(&dcss_lock));
	list_for_each(pos, &dcss_list) {
		other = list_entry(pos, struct dcss_segment, list);
		/* A segment never collides with itself. */
		if (other == seg)
			continue;
		/* Compare on 1MB (>> 20) granularity; skip entries that
		 * lie entirely before or entirely after seg. */
		if ((other->start_addr >> 20) > (seg->end >> 20) ||
		    (other->end >> 20) < (seg->start_addr >> 20))
			continue;
		return 1;
	}
	return 0;
}
/*
* real segment loading function, called from segment_load
*/
static int
__segment_load (char *name, int do_nonshared, unsigned long *addr, unsigned long *end)
{
	unsigned long start_addr, end_addr, dummy;
	struct dcss_segment *seg;
	int rc, diag_cc;

	start_addr = end_addr = 0;
	seg = kmalloc(sizeof(*seg), GFP_KERNEL | GFP_DMA);
	if (seg == NULL) {
		rc = -ENOMEM;
		goto out;
	}
	dcss_mkname (name, seg->dcss_name);
	/* Ask z/VM for the segment's location, extent and type. */
	rc = query_segment_type (seg);
	if (rc < 0)
		goto out_free;

	/* With the new (X) subcodes the segment may load anywhere, so
	 * collisions with already-loaded segments must be checked here. */
	if (loadshr_scode == DCSS_LOADSHRX) {
		if (segment_overlaps_others(seg)) {
			rc = -EBUSY;
			goto out_free;
		}
	}

	/* Create kernel page tables covering the segment's range. */
	rc = vmem_add_mapping(seg->start_addr, seg->end - seg->start_addr + 1);

	if (rc)
		goto out_free;

	/* Claim the physical range in the iomem resource tree so it
	 * shows up in /proc/iomem and cannot be double-used. */
	seg->res = kzalloc(sizeof(struct resource), GFP_KERNEL);
	if (seg->res == NULL) {
		rc = -ENOMEM;
		goto out_shared;
	}
	seg->res->flags = IORESOURCE_BUSY | IORESOURCE_MEM;
	seg->res->start = seg->start_addr;
	seg->res->end = seg->end;
	memcpy(&seg->res_name, seg->dcss_name, 8);
	EBCASC(seg->res_name, 8);
	seg->res_name[8] = '\0';
	strncat(seg->res_name, " (DCSS)", 7);
	seg->res->name = seg->res_name;
	rc = seg->vm_segtype;
	/* Shared-read segment types are advertised read-only unless the
	 * caller requested an exclusive-writable load. */
	if (rc == SEG_TYPE_SC ||
	    ((rc == SEG_TYPE_SR || rc == SEG_TYPE_ER) && !do_nonshared))
		seg->res->flags |= IORESOURCE_READONLY;
	if (request_resource(&iomem_resource, seg->res)) {
		rc = -EBUSY;
		kfree(seg->res);
		goto out_shared;
	}

	/* Perform the actual load, exclusive (nsr) or shared (shr). */
	if (do_nonshared)
		diag_cc = dcss_diag(&loadnsr_scode, seg->dcss_name,
				&start_addr, &end_addr);
	else
		diag_cc = dcss_diag(&loadshr_scode, seg->dcss_name,
				&start_addr, &end_addr);
	if (diag_cc < 0) {
		/* Load failed before CP saw it; purge to be safe. */
		dcss_diag(&purgeseg_scode, seg->dcss_name,
				&dummy, &dummy);
		rc = diag_cc;
		goto out_resource;
	}
	if (diag_cc > 1) {
		/* On failure the load diag reports the VM rc in end_addr. */
		pr_warning("Loading DCSS %s failed with rc=%ld\n", name,
			   end_addr);
		rc = dcss_diag_translate_rc(end_addr);
		dcss_diag(&purgeseg_scode, seg->dcss_name,
				&dummy, &dummy);
		goto out_resource;
	}
	/* Record the addresses the load actually used and publish the
	 * segment on dcss_list with an initial refcount of one. */
	seg->start_addr = start_addr;
	seg->end = end_addr;
	seg->do_nonshared = do_nonshared;
	atomic_set(&seg->ref_count, 1);
	list_add(&seg->list, &dcss_list);
	*addr = seg->start_addr;
	*end = seg->end;
	if (do_nonshared)
		pr_info("DCSS %s of range %p to %p and type %s loaded as "
			"exclusive-writable\n", name, (void*) seg->start_addr,
			(void*) seg->end, segtype_string[seg->vm_segtype]);
	else {
		pr_info("DCSS %s of range %p to %p and type %s loaded in "
			"shared access mode\n", name, (void*) seg->start_addr,
			(void*) seg->end, segtype_string[seg->vm_segtype]);
	}
	goto out;

	/* Error unwinding, innermost resource first. */
 out_resource:
	release_resource(seg->res);
	kfree(seg->res);
 out_shared:
	vmem_remove_mapping(seg->start_addr, seg->end - seg->start_addr + 1);
 out_free:
	kfree(seg);
 out:
	return rc;
}
/*
* this function loads a DCSS segment
* name : name of the DCSS
* do_nonshared : 0 indicates that the dcss should be shared with other linux images
* 1 indicates that the dcss should be exclusive for this linux image
* addr : will be filled with start address of the segment
* end : will be filled with end address of the segment
* return values:
* -ENOSYS : we are not running on VM
* -EIO : could not perform query or load diagnose
* -ENOENT : no such segment
* -EOPNOTSUPP: multi-part segment cannot be used with linux
* -ENOSPC : segment cannot be used (overlaps with storage)
* -EBUSY : segment can temporarily not be used (overlaps with dcss)
* -ERANGE : segment cannot be used (exceeds kernel mapping range)
* -EPERM : segment is currently loaded with incompatible permissions
* -ENOMEM : out of memory
* 0 .. 6 : type of segment as defined in include/asm-s390/extmem.h
*/
int
segment_load (char *name, int do_nonshared, unsigned long *addr,
		unsigned long *end)
{
	struct dcss_segment *seg;
	int rc;

	/* DCSS only exists under z/VM. */
	if (!MACHINE_IS_VM)
		return -ENOSYS;

	mutex_lock(&dcss_lock);
	seg = segment_by_name (name);
	if (!seg) {
		/* First user of this segment: perform the real load. */
		rc = __segment_load (name, do_nonshared, addr, end);
	} else if (do_nonshared == seg->do_nonshared) {
		/* Already loaded in the requested mode: just share it. */
		atomic_inc(&seg->ref_count);
		*addr = seg->start_addr;
		*end = seg->end;
		rc = seg->vm_segtype;
	} else {
		/* Loaded, but in the other access mode: refuse. */
		*addr = *end = 0;
		rc = -EPERM;
	}
	mutex_unlock(&dcss_lock);
	return rc;
}
/*
* this function modifies the shared state of a DCSS segment. note that
* name : name of the DCSS
* do_nonshared : 0 indicates that the dcss should be shared with other linux images
* 1 indicates that the dcss should be exclusive for this linux image
* return values:
* -EIO : could not perform load diagnose (segment gone!)
* -ENOENT : no such segment (segment gone!)
* -EAGAIN : segment is in use by other exploiters, try later
* -EINVAL : no segment with the given name is currently loaded - name invalid
* -EBUSY : segment can temporarily not be used (overlaps with dcss)
* 0 : operation succeeded
*/
int
segment_modify_shared (char *name, int do_nonshared)
{
	struct dcss_segment *seg;
	unsigned long start_addr, end_addr, dummy;
	int rc, diag_cc;

	start_addr = end_addr = 0;
	mutex_lock(&dcss_lock);
	seg = segment_by_name (name);
	if (seg == NULL) {
		rc = -EINVAL;
		goto out_unlock;
	}
	/* Nothing to do if the segment is already in the requested mode. */
	if (do_nonshared == seg->do_nonshared) {
		pr_info("DCSS %s is already in the requested access "
			"mode\n", name);
		rc = 0;
		goto out_unlock;
	}
	/* Reloading replaces the mapping; refuse while anyone else
	 * still holds a reference. */
	if (atomic_read (&seg->ref_count) != 1) {
		pr_warning("DCSS %s is in use and cannot be reloaded\n",
			   name);
		rc = -EAGAIN;
		goto out_unlock;
	}
	/* Re-register the iomem resource with read-only adjusted for
	 * the new access mode. */
	release_resource(seg->res);
	if (do_nonshared)
		seg->res->flags &= ~IORESOURCE_READONLY;
	else
		if (seg->vm_segtype == SEG_TYPE_SR ||
		    seg->vm_segtype == SEG_TYPE_ER)
			seg->res->flags |= IORESOURCE_READONLY;

	if (request_resource(&iomem_resource, seg->res)) {
		pr_warning("DCSS %s overlaps with used memory resources "
			   "and cannot be reloaded\n", name);
		rc = -EBUSY;
		kfree(seg->res);
		goto out_del_mem;
	}

	/* Purge, then reload in the new mode. Past this point a failure
	 * means the segment is gone and must be fully torn down. */
	dcss_diag(&purgeseg_scode, seg->dcss_name, &dummy, &dummy);
	if (do_nonshared)
		diag_cc = dcss_diag(&loadnsr_scode, seg->dcss_name,
				&start_addr, &end_addr);
	else
		diag_cc = dcss_diag(&loadshr_scode, seg->dcss_name,
				&start_addr, &end_addr);
	if (diag_cc < 0) {
		rc = diag_cc;
		goto out_del_res;
	}
	if (diag_cc > 1) {
		/* On failure the load diag reports the VM rc in end_addr. */
		pr_warning("Reloading DCSS %s failed with rc=%ld\n", name,
			   end_addr);
		rc = dcss_diag_translate_rc(end_addr);
		goto out_del_res;
	}
	seg->start_addr = start_addr;
	seg->end = end_addr;
	seg->do_nonshared = do_nonshared;
	rc = 0;
	goto out_unlock;

	/* Failure unwinding: drop the resource, the mapping and the
	 * list entry, then free the descriptor (segment gone!). */
 out_del_res:
	release_resource(seg->res);
	kfree(seg->res);
 out_del_mem:
	vmem_remove_mapping(seg->start_addr, seg->end - seg->start_addr + 1);
	list_del(&seg->list);
	dcss_diag(&purgeseg_scode, seg->dcss_name, &dummy, &dummy);
	kfree(seg);
 out_unlock:
	mutex_unlock(&dcss_lock);
	return rc;
}
/*
* Decrease the use count of a DCSS segment and remove
* it from the address space if nobody is using it
* any longer.
*/
void
segment_unload(char *name)
{
	unsigned long dummy;
	struct dcss_segment *seg;

	/* DCSS only exists under z/VM. */
	if (!MACHINE_IS_VM)
		return;

	mutex_lock(&dcss_lock);
	seg = segment_by_name (name);
	if (seg == NULL) {
		pr_err("Unloading unknown DCSS %s failed\n", name);
		goto out_unlock;
	}
	/* Drop one reference; only the last user tears the segment down. */
	if (atomic_dec_return(&seg->ref_count) != 0)
		goto out_unlock;
	/* Last user: release the iomem claim, the page tables, the list
	 * entry, purge the segment from CP and free the descriptor. */
	release_resource(seg->res);
	kfree(seg->res);
	vmem_remove_mapping(seg->start_addr, seg->end - seg->start_addr + 1);
	list_del(&seg->list);
	dcss_diag(&purgeseg_scode, seg->dcss_name, &dummy, &dummy);
	kfree(seg);
out_unlock:
	mutex_unlock(&dcss_lock);
}
/*
* save segment content permanently
*/
void
segment_save(char *name)
{
	struct dcss_segment *seg;
	char cmd1[160];
	char cmd2[80];
	int i, response;
	size_t len;

	/* DCSS only exists under z/VM. */
	if (!MACHINE_IS_VM)
		return;

	mutex_lock(&dcss_lock);
	seg = segment_by_name (name);

	if (seg == NULL) {
		pr_err("Saving unknown DCSS %s failed\n", name);
		goto out;
	}

	/* Build the DEFSEG command describing every range of the
	 * segment. Bounded snprintf replaces the original unbounded
	 * sprintf: six ranges of full-width 64-bit page numbers plus
	 * type strings could overflow cmd1[160]. */
	len = snprintf(cmd1, sizeof(cmd1), "DEFSEG %s", name);
	for (i = 0; i < seg->segcnt && len < sizeof(cmd1); i++)
		len += snprintf(cmd1 + len, sizeof(cmd1) - len, " %lX-%lX %s",
			seg->range[i].start >> PAGE_SHIFT,
			seg->range[i].end >> PAGE_SHIFT,
			segtype_string[seg->range[i].start & 0xff]);
	if (len >= sizeof(cmd1)) {
		/* Truncated command would define the wrong segment layout. */
		pr_err("Saving DCSS %s failed: DEFSEG command too long\n",
		       name);
		goto out;
	}
	snprintf(cmd2, sizeof(cmd2), "SAVESEG %s", name);
	response = 0;
	/* Issue both CP commands; a non-zero response code is an error. */
	cpcmd(cmd1, NULL, 0, &response);
	if (response) {
		pr_err("Saving a DCSS failed with DEFSEG response code "
		       "%i\n", response);
		goto out;
	}
	cpcmd(cmd2, NULL, 0, &response);
	if (response) {
		pr_err("Saving a DCSS failed with SAVESEG response code "
		       "%i\n", response);
		goto out;
	}
out:
	mutex_unlock(&dcss_lock);
}
/*
* print appropriate error message for segment_load()/segment_type()
* return code
*/
void segment_warning(int rc, char *seg_name)
{
	/* Translate each documented segment_load()/segment_type() error
	 * into a human-readable message; unknown codes stay silent. */
	if (rc == -ENOENT)
		pr_err("DCSS %s cannot be loaded or queried\n", seg_name);
	else if (rc == -ENOSYS)
		pr_err("DCSS %s cannot be loaded or queried without "
			"z/VM\n", seg_name);
	else if (rc == -EIO)
		pr_err("Loading or querying DCSS %s resulted in a "
			"hardware error\n", seg_name);
	else if (rc == -EOPNOTSUPP)
		pr_err("DCSS %s has multiple page ranges and cannot be "
			"loaded or queried\n", seg_name);
	else if (rc == -ENOSPC)
		pr_err("DCSS %s overlaps with used storage and cannot "
			"be loaded\n", seg_name);
	else if (rc == -EBUSY)
		pr_err("%s needs used memory resources and cannot be "
			"loaded or queried\n", seg_name);
	else if (rc == -EPERM)
		pr_err("DCSS %s is already loaded in a different access "
			"mode\n", seg_name);
	else if (rc == -ENOMEM)
		pr_err("There is not enough memory to load or query "
			"DCSS %s\n", seg_name);
	else if (rc == -ERANGE)
		pr_err("DCSS %s exceeds the kernel mapping range (%lu) "
			"and cannot be loaded\n", seg_name, VMEM_MAX_PHYS);
}
EXPORT_SYMBOL(segment_load);
EXPORT_SYMBOL(segment_unload);
EXPORT_SYMBOL(segment_save);
EXPORT_SYMBOL(segment_type);
EXPORT_SYMBOL(segment_modify_shared);
EXPORT_SYMBOL(segment_warning);
| gpl-2.0 |
blue236/gcc | libquadmath/math/atan2q.c | 138 | 4008 | /* atan2q.c -- __float128 version of e_atan2.c.
* Conversion to long double by Jakub Jelinek, jj@ultra.linux.cz.
*/
/*
* ====================================================
* Copyright (C) 1993 by Sun Microsystems, Inc. All rights reserved.
*
* Developed at SunPro, a Sun Microsystems, Inc. business.
* Permission to use, copy, modify, and distribute this
* software is freely granted, provided that this notice
* is preserved.
* ====================================================
*/
/* atan2q(y,x)
* Method :
* 1. Reduce y to positive by atan2q(y,x)=-atan2q(-y,x).
* 2. Reduce x to positive by (if x and y are unexceptional):
* ARG (x+iy) = arctan(y/x) ... if x > 0,
* ARG (x+iy) = pi - arctan[y/(-x)] ... if x < 0,
*
* Special cases:
*
* ATAN2((anything), NaN ) is NaN;
* ATAN2(NAN , (anything) ) is NaN;
* ATAN2(+-0, +(anything but NaN)) is +-0 ;
* ATAN2(+-0, -(anything but NaN)) is +-pi ;
* ATAN2(+-(anything but 0 and NaN), 0) is +-pi/2;
* ATAN2(+-(anything but INF and NaN), +INF) is +-0 ;
* ATAN2(+-(anything but INF and NaN), -INF) is +-pi;
* ATAN2(+-INF,+INF ) is +-pi/4 ;
* ATAN2(+-INF,-INF ) is +-3pi/4;
* ATAN2(+-INF, (anything but,0,NaN, and INF)) is +-pi/2;
*
* Constants:
* The hexadecimal values are the intended ones for the following
* constants. The decimal values may be used, provided that the
* compiler will convert from decimal to binary accurately enough
* to produce the hexadecimal values shown.
*/
#include "quadmath-imp.h"
static const __float128
tiny = 1.0e-4900Q,
zero = 0.0,
pi_o_4 = 7.85398163397448309615660845819875699e-01Q, /* 3ffe921fb54442d18469898cc51701b8 */
pi_o_2 = 1.57079632679489661923132169163975140e+00Q, /* 3fff921fb54442d18469898cc51701b8 */
pi = 3.14159265358979323846264338327950280e+00Q, /* 4000921fb54442d18469898cc51701b8 */
pi_lo = 8.67181013012378102479704402604335225e-35Q; /* 3f8dcd129024e088a67cc74020bbea64 */
/* Quadrant-correct arc tangent of y/x for IEEE binary128 (__float128). */
__float128
atan2q (__float128 y, __float128 x)
{
	__float128 z;
	int64_t k,m,hx,hy,ix,iy;
	uint64_t lx,ly;

	/* Split each binary128 value into high (h*) and low (l*) 64-bit
	 * words; i* drops the sign bit for magnitude comparisons. */
	GET_FLT128_WORDS64(hx,lx,x);
	ix = hx&0x7fffffffffffffffLL;
	GET_FLT128_WORDS64(hy,ly,y);
	iy = hy&0x7fffffffffffffffLL;
	/* ((l|-l)>>63) is 1 iff the low word is non-zero, so the test
	 * catches NaN (all-ones exponent with any non-zero mantissa). */
	if(((ix|((lx|-lx)>>63))>0x7fff000000000000LL)||
	   ((iy|((ly|-ly)>>63))>0x7fff000000000000LL))	/* x or y is NaN */
	   return x+y;
	if(((hx-0x3fff000000000000LL)|lx)==0) return atanq(y);	/* x=1.0Q */
	/* m encodes the quadrant from both sign bits. */
	m = ((hy>>63)&1)|((hx>>62)&2);	/* 2*sign(x)+sign(y) */

	/* when y = 0 */
	if((iy|ly)==0) {
	    switch(m) {
		case 0:
		case 1: return y; 	/* atan(+-0,+anything)=+-0 */
		case 2: return  pi+tiny;/* atan(+0,-anything) = pi */
		case 3: return -pi-tiny;/* atan(-0,-anything) =-pi */
	    }
	}
	/* when x = 0 */
	if((ix|lx)==0) return (hy<0)?  -pi_o_2-tiny: pi_o_2+tiny;

	/* when x is INF */
	if(ix==0x7fff000000000000LL) {
	    if(iy==0x7fff000000000000LL) {
		switch(m) {
		    case 0: return  pi_o_4+tiny;/* atan(+INF,+INF) */
		    case 1: return -pi_o_4-tiny;/* atan(-INF,+INF) */
		    case 2: return  3.0Q*pi_o_4+tiny;/*atan(+INF,-INF)*/
		    case 3: return -3.0Q*pi_o_4-tiny;/*atan(-INF,-INF)*/
		}
	    } else {
		switch(m) {
		    case 0: return  zero  ;	/* atan(+...,+INF) */
		    case 1: return -zero  ;	/* atan(-...,+INF) */
		    case 2: return  pi+tiny  ;	/* atan(+...,-INF) */
		    case 3: return -pi-tiny  ;	/* atan(-...,-INF) */
		}
	    }
	}
	/* when y is INF */
	if(iy==0x7fff000000000000LL) return (hy<0)? -pi_o_2-tiny: pi_o_2+tiny;

	/* compute y/x */
	/* k is the (biased) exponent difference; >>48 isolates the top
	 * 16 bits where the binary128 exponent lives. */
	k = (iy-ix)>>48;
	if(k > 120) z=pi_o_2+0.5Q*pi_lo; 	/* |y/x| >  2**120 */
	else if(hx<0&&k<-120) z=0.0Q; 	/* |y|/x < -2**120 */
	else z=atanq(fabsq(y/x));	/* safe to do y/x */
	/* Fold the quadrant back in; case 1 flips the sign by toggling
	 * the sign bit of z directly. */
	switch (m) {
	    case 0: return       z  ;	/* atan(+,+) */
	    case 1: {
	    	      uint64_t zh;
		      GET_FLT128_MSW64(zh,z);
		      SET_FLT128_MSW64(z,zh ^ 0x8000000000000000ULL);
		    }
		    return       z  ;	/* atan(-,+) */
	    case 2: return  pi-(z-pi_lo);/* atan(+,-) */
	    default: /* case 3 */
	    	    return  (z-pi_lo)-pi;/* atan(-,-) */
	}
}
| gpl-2.0 |
cleaton/liquid-chocolate | arch/powerpc/platforms/85xx/mpc85xx_mds.c | 138 | 7147 | /*
* Copyright (C) Freescale Semicondutor, Inc. 2006-2007. All rights reserved.
*
* Author: Andy Fleming <afleming@freescale.com>
*
* Based on 83xx/mpc8360e_pb.c by:
* Li Yang <LeoLi@freescale.com>
* Yin Olivia <Hong-hua.Yin@freescale.com>
*
* Description:
* MPC85xx MDS board specific routines.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*/
#include <linux/stddef.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/reboot.h>
#include <linux/pci.h>
#include <linux/kdev_t.h>
#include <linux/major.h>
#include <linux/console.h>
#include <linux/delay.h>
#include <linux/seq_file.h>
#include <linux/initrd.h>
#include <linux/module.h>
#include <linux/fsl_devices.h>
#include <linux/of_platform.h>
#include <linux/of_device.h>
#include <linux/phy.h>
#include <asm/system.h>
#include <asm/atomic.h>
#include <asm/time.h>
#include <asm/io.h>
#include <asm/machdep.h>
#include <asm/pci-bridge.h>
#include <asm/irq.h>
#include <mm/mmu_decl.h>
#include <asm/prom.h>
#include <asm/udbg.h>
#include <sysdev/fsl_soc.h>
#include <sysdev/fsl_pci.h>
#include <asm/qe.h>
#include <asm/qe_ic.h>
#include <asm/mpic.h>
#undef DEBUG
#ifdef DEBUG
#define DBG(fmt...) udbg_printf(fmt)
#else
#define DBG(fmt...)
#endif
#define MV88E1111_SCR 0x10
#define MV88E1111_SCR_125CLK 0x0010
/*
 * PHY fixup for the Marvell 88E1111: disable the 125 MHz clock output,
 * soft-reset the PHY, then re-read and set bit 3 of the specific
 * control register. Returns 0 on success or a negative PHY error.
 */
static int mpc8568_fixup_125_clock(struct phy_device *phydev)
{
	int scr;
	int err;

	/* Workaround for the 125 CLK Toggle */
	scr = phy_read(phydev, MV88E1111_SCR);
	if (scr < 0)
		return scr;

	err = phy_write(phydev, MV88E1111_SCR, scr & ~(MV88E1111_SCR_125CLK));
	if (err)
		return err;

	err = phy_write(phydev, MII_BMCR, BMCR_RESET);
	if (err)
		return err;

	scr = phy_read(phydev, MV88E1111_SCR);
	if (scr < 0)
		return scr;	/* was "return err" (0): masked the read failure */

	err = phy_write(phydev, MV88E1111_SCR, scr | 0x0008);

	return err;
}
/*
 * Apply the vendor errata register sequence to the board PHYs and
 * disable automatic MDI/MDIX selection. Returns 0 or a negative PHY
 * access error. The exact register/value sequence comes from the
 * silicon errata; do not reorder.
 */
static int mpc8568_mds_phy_fixups(struct phy_device *phydev)
{
	int temp;
	int err;

	/* Errata */
	err = phy_write(phydev,29, 0x0006);
	if (err)
		return err;

	temp = phy_read(phydev, 30);
	if (temp < 0)
		return temp;

	temp = (temp & (~0x8000)) | 0x4000;
	err = phy_write(phydev,30, temp);
	if (err)
		return err;

	err = phy_write(phydev,29, 0x000a);
	if (err)
		return err;

	temp = phy_read(phydev, 30);
	if (temp < 0)
		return temp;

	/* NOTE(review): register 30 is read twice back-to-back here,
	 * discarding the first value — presumably required by the
	 * errata sequence; confirm against the vendor errata document
	 * before "simplifying". */
	temp = phy_read(phydev, 30);
	if (temp < 0)
		return temp;

	temp &= ~0x0020;

	err = phy_write(phydev,30,temp);
	if (err)
		return err;

	/* Disable automatic MDI/MDIX selection */
	temp = phy_read(phydev, 16);
	if (temp < 0)
		return temp;

	temp &= ~0x0060;
	err = phy_write(phydev,16,temp);

	return err;
}
/* ************************************************************************
*
* Setup the architecture
*
*/
/*
 * Board setup: map the BCSR control registers, register PCI bridges,
 * and (when configured) reset the QUICC Engine and enable its UCC
 * gigabit Ethernet blocks via the BCSR.
 */
static void __init mpc85xx_mds_setup_arch(void)
{
	struct device_node *np;
	static u8 __iomem *bcsr_regs = NULL;

	if (ppc_md.progress)
		ppc_md.progress("mpc85xx_mds_setup_arch()", 0);

	/* Map BCSR area */
	np = of_find_node_by_name(NULL, "bcsr");
	if (np != NULL) {
		struct resource res;

		of_address_to_resource(np, 0, &res);
		bcsr_regs = ioremap(res.start, res.end - res.start +1);
		of_node_put(np);
	}

#ifdef CONFIG_PCI
	/* Register every PCI/PCIe bridge; the controller whose register
	 * block sits at offset 0x8000 is treated as the primary bus. */
	for_each_node_by_type(np, "pci") {
		if (of_device_is_compatible(np, "fsl,mpc8540-pci") ||
		    of_device_is_compatible(np, "fsl,mpc8548-pcie")) {
			struct resource rsrc;
			of_address_to_resource(np, 0, &rsrc);
			if ((rsrc.start & 0xfffff) == 0x8000)
				fsl_add_bridge(np, 1);
			else
				fsl_add_bridge(np, 0);
		}
	}
#endif

#ifdef CONFIG_QUICC_ENGINE
	np = of_find_compatible_node(NULL, NULL, "fsl,qe");
	if (!np) {
		/* Fall back to the legacy node name. */
		np = of_find_node_by_name(NULL, "qe");
		if (!np)
			return;
	}

	qe_reset();
	of_node_put(np);

	/* Configure the QE parallel I/O pins for each UCC. */
	np = of_find_node_by_name(NULL, "par_io");
	if (np) {
		struct device_node *ucc;

		par_io_init(np);
		of_node_put(np);

		for_each_node_by_name(ucc, "ucc")
			par_io_of_config(ucc);
	}

	if (bcsr_regs) {
#define BCSR_UCC1_GETH_EN	(0x1 << 7)
#define BCSR_UCC2_GETH_EN	(0x1 << 7)
#define BCSR_UCC1_MODE_MSK	(0x3 << 4)
#define BCSR_UCC2_MODE_MSK	(0x3 << 0)

		/* Turn off UCC1 & UCC2 */
		clrbits8(&bcsr_regs[8], BCSR_UCC1_GETH_EN);
		clrbits8(&bcsr_regs[9], BCSR_UCC2_GETH_EN);

		/* Mode is RGMII, all bits clear */
		clrbits8(&bcsr_regs[11], BCSR_UCC1_MODE_MSK |
					 BCSR_UCC2_MODE_MSK);

		/* Turn UCC1 & UCC2 on */
		setbits8(&bcsr_regs[8], BCSR_UCC1_GETH_EN);
		setbits8(&bcsr_regs[9], BCSR_UCC2_GETH_EN);

		iounmap(bcsr_regs);
	}
#endif	/* CONFIG_QUICC_ENGINE */
}
/*
 * Register PHY fixups for the addresses used on this board, keyed by
 * the MDIO bus address string "<bus-reg>:<phy-addr>".
 */
static int __init board_fixups(void)
{
	char phy_id[20];
	char *compstrs[2] = {"fsl,gianfar-mdio", "fsl,ucc-mdio"};
	struct device_node *mdio;
	struct resource res;
	int i;

	for (i = 0; i < ARRAY_SIZE(compstrs); i++) {
		mdio = of_find_compatible_node(NULL, NULL, compstrs[i]);
		/* Original dereferenced a NULL node when the controller
		 * was absent from the device tree; skip it instead. */
		if (!mdio)
			continue;
		if (of_address_to_resource(mdio, 0, &res)) {
			of_node_put(mdio);
			continue;
		}

		snprintf(phy_id, sizeof(phy_id), "%llx:%02x",
			 (unsigned long long)res.start, 1);

		phy_register_fixup_for_id(phy_id, mpc8568_fixup_125_clock);
		phy_register_fixup_for_id(phy_id, mpc8568_mds_phy_fixups);

		/* Register a workaround for errata */
		snprintf(phy_id, sizeof(phy_id), "%llx:%02x",
			 (unsigned long long)res.start, 7);
		phy_register_fixup_for_id(phy_id, mpc8568_mds_phy_fixups);
		of_node_put(mdio);
	}

	return 0;
}

machine_arch_initcall(mpc85xx_mds, board_fixups);
/* Device-tree bus types whose children get registered as platform
 * devices by mpc85xx_publish_devices() below. */
static struct of_device_id mpc85xx_ids[] = {
	{ .type = "soc", },
	{ .compatible = "soc", },
	{ .compatible = "simple-bus", },
	{ .type = "qe", },
	{ .compatible = "fsl,qe", },
	{},
};
/* Walk the device tree and create platform devices for all children of
 * the bus types listed in mpc85xx_ids (SoC, simple-bus, QE). */
static int __init mpc85xx_publish_devices(void)
{
	/* Publish the QE devices */
	of_platform_bus_probe(NULL, mpc85xx_ids, NULL);

	return 0;
}
machine_device_initcall(mpc85xx_mds, mpc85xx_publish_devices);
/*
 * Interrupt controller setup: initialize the OpenPIC (MPIC) and, when
 * the QUICC Engine is configured, cascade the QE interrupt controller
 * off of it.
 */
static void __init mpc85xx_mds_pic_init(void)
{
	struct mpic *mpic;
	struct resource r;
	struct device_node *np = NULL;

	np = of_find_node_by_type(NULL, "open-pic");
	if (!np)
		return;

	if (of_address_to_resource(np, 0, &r)) {
		printk(KERN_ERR "Failed to map mpic register space\n");
		of_node_put(np);
		return;
	}

	mpic = mpic_alloc(np, r.start,
			MPIC_PRIMARY | MPIC_WANTS_RESET | MPIC_BIG_ENDIAN,
			0, 256, " OpenPIC  ");
	/* Without a PIC the board cannot run; allocation failure is fatal. */
	BUG_ON(mpic == NULL);
	of_node_put(np);

	mpic_init(mpic);

#ifdef CONFIG_QUICC_ENGINE
	np = of_find_compatible_node(NULL, NULL, "fsl,qe-ic");
	if (!np) {
		/* Fall back to the legacy node type. */
		np = of_find_node_by_type(NULL, "qeic");
		if (!np)
			return;
	}
	qe_ic_init(np, 0, qe_ic_cascade_muxed_mpic, NULL);
	of_node_put(np);
#endif	/* CONFIG_QUICC_ENGINE */
}
/* Match this machine description against the flattened device tree. */
static int __init mpc85xx_mds_probe(void)
{
	return of_flat_dt_is_compatible(of_get_flat_dt_root(), "MPC85xxMDS");
}
/* Machine description for the MPC85xx MDS board, wiring the hooks
 * defined above into the powerpc platform framework. */
define_machine(mpc85xx_mds) {
	.name		= "MPC85xx MDS",
	.probe		= mpc85xx_mds_probe,
	.setup_arch	= mpc85xx_mds_setup_arch,
	.init_IRQ	= mpc85xx_mds_pic_init,
	.get_irq	= mpic_get_irq,
	.restart	= fsl_rstcr_restart,
	.calibrate_decr	= generic_calibrate_decr,
	.progress	= udbg_progress,
#ifdef CONFIG_PCI
	.pcibios_fixup_bus	= fsl_pcibios_fixup_bus,
#endif
};
| gpl-2.0 |
coherentsolutions/linux-2.6.29-ts4700 | drivers/atm/solos-pci.c | 138 | 19650 | /*
* Driver for the Solos PCI ADSL2+ card, designed to support Linux by
* Traverse Technologies -- http://www.traverse.com.au/
* Xrio Limited -- http://www.xrio.com/
*
*
* Copyright © 2008 Traverse Technologies
* Copyright © 2008 Intel Corporation
*
* Authors: Nathan Williams <nathan@traverse.com.au>
* David Woodhouse <dwmw2@infradead.org>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#define DEBUG
#define VERBOSE_DEBUG
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/atm.h>
#include <linux/atmdev.h>
#include <linux/skbuff.h>
#include <linux/sysfs.h>
#include <linux/device.h>
#include <linux/kobject.h>
#define VERSION "0.04"
#define PTAG "solos-pci"
#define CONFIG_RAM_SIZE 128
#define FLAGS_ADDR 0x7C
#define IRQ_EN_ADDR 0x78
#define FPGA_VER 0x74
#define IRQ_CLEAR 0x70
#define BUG_FLAG 0x6C
#define DATA_RAM_SIZE 32768
#define BUF_SIZE 4096
#define RX_BUF(card, nr) ((card->buffers) + (nr)*BUF_SIZE*2)
#define TX_BUF(card, nr) ((card->buffers) + (nr)*BUF_SIZE*2 + BUF_SIZE)
static int debug = 0;
static int atmdebug = 0;
/* Header prepended to every packet exchanged with the card's FPGA
 * buffers; all fields are little-endian. */
struct pkt_hdr {
	__le16 size;	/* total length, header included */
	__le16 vpi;	/* ATM virtual path identifier */
	__le16 vci;	/* ATM virtual circuit identifier */
	__le16 type;	/* one of the PKT_* values below */
};

/* Values for pkt_hdr.type */
#define PKT_DATA	0	/* AAL payload for an open VCC */
#define PKT_COMMAND	1	/* console/CLI command or response */
#define PKT_POPEN	3	/* open a VPI/VCI on the card */
#define PKT_PCLOSE	4	/* close a VPI/VCI on the card */
/* Per-PCI-device state for one Solos card (up to 4 DSL ports). */
struct solos_card {
	void __iomem *config_regs;	/* BAR with FLAGS/IRQ/version registers */
	void __iomem *buffers;		/* BAR with per-port RX/TX data buffers */
	int nr_ports;
	struct pci_dev *dev;
	struct atm_dev *atmdev[4];	/* one ATM device per port */
	struct tasklet_struct tlet;	/* runs solos_bh() from the IRQ */
	spinlock_t tx_lock;
	spinlock_t tx_queue_lock;
	spinlock_t cli_queue_lock;
	struct sk_buff_head tx_queue[4];	/* pending TX skbs per port */
	struct sk_buff_head cli_queue[4];	/* console responses per port */
};

/* Port number a given atm_dev represents, stashed in phy_data. */
#define SOLOS_CHAN(atmdev) ((int)(unsigned long)(atmdev)->phy_data)
MODULE_AUTHOR("Traverse Technologies <support@traverse.com.au>");
MODULE_DESCRIPTION("Solos PCI driver");
MODULE_VERSION(VERSION);
MODULE_LICENSE("GPL");
MODULE_PARM_DESC(debug, "Enable Loopback");
MODULE_PARM_DESC(atmdebug, "Print ATM data");
module_param(debug, int, 0444);
module_param(atmdebug, int, 0444);
static int opens;
static void fpga_queue(struct solos_card *card, int port, struct sk_buff *skb,
struct atm_vcc *vcc);
static int fpga_tx(struct solos_card *);
static irqreturn_t solos_irq(int irq, void *dev_id);
static struct atm_vcc* find_vcc(struct atm_dev *dev, short vpi, int vci);
static int list_vccs(int vci);
static int atm_init(struct solos_card *);
static void atm_remove(struct solos_card *);
static int send_command(struct solos_card *card, int dev, const char *buf, size_t size);
static void solos_bh(unsigned long);
static int print_buffer(struct sk_buff *buf);
/* Return a transmitted skb to its owner: use the VCC's pop callback
 * when one is registered, otherwise just drop our reference. */
static inline void solos_pop(struct atm_vcc *vcc, struct sk_buff *skb)
{
	if (!vcc->pop)
		dev_kfree_skb_any(skb);
	else
		vcc->pop(vcc, skb);
}
static ssize_t console_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct atm_dev *atmdev = container_of(dev, struct atm_dev, class_dev);
struct solos_card *card = atmdev->dev_data;
struct sk_buff *skb;
spin_lock(&card->cli_queue_lock);
skb = skb_dequeue(&card->cli_queue[SOLOS_CHAN(atmdev)]);
spin_unlock(&card->cli_queue_lock);
if(skb == NULL)
return sprintf(buf, "No data.\n");
memcpy(buf, skb->data, skb->len);
dev_dbg(&card->dev->dev, "len: %d\n", skb->len);
kfree_skb(skb);
return skb->len;
}
/*
 * Queue a console/CLI command of @size bytes for port @dev. Returns 0
 * on success, -EINVAL when the command exceeds the FPGA buffer, or
 * -ENOMEM on allocation failure (the original returned 0 in both
 * failure cases, so the sysfs caller reported success for commands
 * that were silently dropped).
 */
static int send_command(struct solos_card *card, int dev, const char *buf, size_t size)
{
	struct sk_buff *skb;
	struct pkt_hdr *header;

	/* The command plus header must fit in one FPGA buffer. */
	if (size > (BUF_SIZE - sizeof(*header))) {
		dev_dbg(&card->dev->dev, "Command is too big.  Dropping request\n");
		return -EINVAL;
	}
	skb = alloc_skb(size + sizeof(*header), GFP_ATOMIC);
	if (!skb) {
		dev_warn(&card->dev->dev, "Failed to allocate sk_buff in send_command()\n");
		return -ENOMEM;
	}

	header = (void *)skb_put(skb, sizeof(*header));

	/* VPI/VCI are unused for command packets. */
	header->size = cpu_to_le16(size);
	header->vpi = cpu_to_le16(0);
	header->vci = cpu_to_le16(0);
	header->type = cpu_to_le16(PKT_COMMAND);

	memcpy(skb_put(skb, size), buf, size);

	fpga_queue(card, dev, skb, NULL);

	return 0;
}
/*
 * sysfs write for the per-port "console" attribute: forward the
 * written bytes to the card as a CLI command. Returns the byte count
 * on success or send_command()'s error code.
 */
static ssize_t console_store(struct device *dev, struct device_attribute *attr,
			     const char *buf, size_t count)
{
	struct atm_dev *atmdev = container_of(dev, struct atm_dev, class_dev);
	struct solos_card *card = atmdev->dev_data;
	int err;

	err = send_command(card, SOLOS_CHAN(atmdev), buf, count);

	return err?:count;
}
/* Per-port sysfs "console" attribute (rw): read drains one CLI
 * response, write sends a command to the card. */
static DEVICE_ATTR(console, 0644, console_show, console_store);
/*
 * Interrupt handler: acknowledge and mask the FPGA interrupt, defer
 * all real work to the solos_bh() tasklet, then unmask.
 */
static irqreturn_t solos_irq(int irq, void *dev_id)
{
	struct solos_card *card = dev_id;
	int handled = 1;

	//ACK IRQ
	iowrite32(0, card->config_regs + IRQ_CLEAR);
	//Disable IRQs from FPGA
	iowrite32(0, card->config_regs + IRQ_EN_ADDR);

	/* If we only do it when the device is open, we lose console
	   messages */
	if (1 || opens)
		tasklet_schedule(&card->tlet);

	//Enable IRQs from FPGA
	iowrite32(1, card->config_regs + IRQ_EN_ADDR);

	return IRQ_RETVAL(handled);
}
/*
 * Tasklet body: kick the transmitter when any port's TX buffer is
 * free, then drain every port's RX buffer, dispatching data packets
 * to their VCC and command packets to the console queue.
 *
 * NOTE(review): declared static in the forward declaration above but
 * defined without 'static' here — first declaration wins, but the
 * definition should carry it too for consistency.
 */
void solos_bh(unsigned long card_arg)
{
	struct solos_card *card = (void *)card_arg;
	int port;
	uint32_t card_flags;
	uint32_t tx_mask;
	uint32_t rx_done = 0;

	card_flags = ioread32(card->config_regs + FLAGS_ADDR);

	/* The TX bits are set if the channel is busy; clear if not. We want to
	   invoke fpga_tx() unless _all_ the bits for active channels are set */
	tx_mask = (1 << card->nr_ports) - 1;
	if ((card_flags & tx_mask) != tx_mask)
		fpga_tx(card);

	/* RX-pending flags sit at bit 4 upward, one per port. */
	for (port = 0; port < card->nr_ports; port++) {
		if (card_flags & (0x10 << port)) {
			struct pkt_hdr header;
			struct sk_buff *skb;
			struct atm_vcc *vcc;
			int size;

			rx_done |= 0x10 << port;

			memcpy_fromio(&header, RX_BUF(card, port), sizeof(header));

			/* NOTE(review): 'size' comes straight from the
			 * FPGA header and is not checked against
			 * BUF_SIZE before the copy below — confirm the
			 * hardware bounds it. */
			size = le16_to_cpu(header.size);

			skb = alloc_skb(size, GFP_ATOMIC);
			if (!skb) {
				if (net_ratelimit())
					dev_warn(&card->dev->dev, "Failed to allocate sk_buff for RX\n");
				continue;
			}

			memcpy_fromio(skb_put(skb, size),
				      RX_BUF(card, port) + sizeof(header),
				      size);

			if (atmdebug) {
				dev_info(&card->dev->dev, "Received: device %d\n", port);
				dev_info(&card->dev->dev, "size: %d VPI: %d VCI: %d\n",
					 size, le16_to_cpu(header.vpi),
					 le16_to_cpu(header.vci));
				print_buffer(skb);
			}

			switch (le16_to_cpu(header.type)) {
			case PKT_DATA:
				/* Deliver to the matching open VCC, if any. */
				vcc = find_vcc(card->atmdev[port], le16_to_cpu(header.vpi),
					       le16_to_cpu(header.vci));
				if (!vcc) {
					if (net_ratelimit())
						dev_warn(&card->dev->dev, "Received packet for unknown VCI.VPI %d.%d on port %d\n",
							 le16_to_cpu(header.vci), le16_to_cpu(header.vpi),
							 port);
					continue;
				}
				atm_charge(vcc, skb->truesize);
				vcc->push(vcc, skb);
				atomic_inc(&vcc->stats->rx);
				break;

			case PKT_COMMAND:
			default: /* FIXME: Not really, surely? */
				/* Console responses queue for sysfs reads;
				 * cap the backlog at 10 entries. */
				spin_lock(&card->cli_queue_lock);
				if (skb_queue_len(&card->cli_queue[port]) > 10) {
					if (net_ratelimit())
						dev_warn(&card->dev->dev, "Dropping console response on port %d\n",
							 port);
				} else
					skb_queue_tail(&card->cli_queue[port], skb);
				spin_unlock(&card->cli_queue_lock);
				break;
			}
		}
	}
	/* Writing the RX bits back acknowledges the buffers to the FPGA. */
	if (rx_done)
		iowrite32(rx_done, card->config_regs + FLAGS_ADDR);

	return;
}
/*
 * Look up the open VCC matching (dev, vpi, vci) in the global ATM VCC
 * hash. Returns NULL if no receiving VCC matches. Note the pointer is
 * returned after dropping vcc_sklist_lock, so the caller relies on
 * the VCC staying open while it is used.
 */
static struct atm_vcc *find_vcc(struct atm_dev *dev, short vpi, int vci)
{
	struct hlist_head *head;
	struct atm_vcc *vcc = NULL;
	struct hlist_node *node;
	struct sock *s;

	read_lock(&vcc_sklist_lock);
	/* Hash on the low bits of the VCI, as the ATM core does. */
	head = &vcc_hash[vci & (VCC_HTABLE_SIZE -1)];
	sk_for_each(s, node, head) {
		vcc = atm_sk(s);
		/* Only a VCC with an active receive side counts. */
		if (vcc->dev == dev && vcc->vci == vci &&
		    vcc->vpi == vpi && vcc->qos.rxtp.traffic_class != ATM_NONE)
			goto out;
	}
	vcc = NULL;
 out:
	read_unlock(&vcc_sklist_lock);
	return vcc;
}
/*
 * Debug helper: print every open VCC in the hash bucket for @vci, or
 * all of them when @vci is 0. Returns the number printed.
 */
static int list_vccs(int vci)
{
	struct hlist_head *head;
	struct atm_vcc *vcc;
	struct hlist_node *node;
	struct sock *s;
	int num_found = 0;
	int i;

	read_lock(&vcc_sklist_lock);
	if (vci != 0){
		/* Only the bucket this VCI hashes into. */
		head = &vcc_hash[vci & (VCC_HTABLE_SIZE -1)];
		sk_for_each(s, node, head) {
			num_found ++;
			vcc = atm_sk(s);
			printk(KERN_DEBUG "Device: %d Vpi: %d Vci: %d\n",
			       vcc->dev->number,
			       vcc->vpi,
			       vcc->vci);
		}
	} else {
		/* Walk every bucket; use VCC_HTABLE_SIZE rather than the
		 * magic 32 the mask above already derives from it. */
		for(i = 0; i < VCC_HTABLE_SIZE; i++){
			head = &vcc_hash[i];
			sk_for_each(s, node, head) {
				num_found ++;
				vcc = atm_sk(s);
				printk(KERN_DEBUG "Device: %d Vpi: %d Vci: %d\n",
				       vcc->dev->number,
				       vcc->vpi,
				       vcc->vci);
			}
		}
	}
	read_unlock(&vcc_sklist_lock);
	return num_found;
}
/*
 * ATM ->open() hook: send a PKT_POPEN control message to the FPGA for
 * this VPI/VCI, mark the VCC ready, and enable the board interrupt
 * when the first PVC is opened.
 *
 * Bug fixed: the allocation check was `if (!skb && net_ratelimit())`,
 * so when alloc_skb() failed but net_ratelimit() returned false we
 * fell through and dereferenced the NULL skb in skb_put().  The
 * rate-limit must gate only the warning, never the error return.
 */
static int popen(struct atm_vcc *vcc)
{
	struct solos_card *card = vcc->dev->dev_data;
	struct sk_buff *skb;
	struct pkt_hdr *header;

	skb = alloc_skb(sizeof(*header), GFP_ATOMIC);
	if (!skb) {
		if (net_ratelimit())
			dev_warn(&card->dev->dev, "Failed to allocate sk_buff in popen()\n");
		return -ENOMEM;
	}

	header = (void *)skb_put(skb, sizeof(*header));
	header->size = cpu_to_le16(sizeof(*header));
	header->vpi = cpu_to_le16(vcc->vpi);
	header->vci = cpu_to_le16(vcc->vci);
	header->type = cpu_to_le16(PKT_POPEN);

	fpga_queue(card, SOLOS_CHAN(vcc->dev), skb, NULL);

	set_bit(ATM_VF_ADDR, &vcc->flags);	/* accept the vpi/vci */
	set_bit(ATM_VF_READY, &vcc->flags);
	list_vccs(0);

	/* First open PVC: enable the FPGA interrupt */
	if (!opens)
		iowrite32(1, card->config_regs + IRQ_EN_ADDR);

	opens++;	/* count open PVCs */

	return 0;
}
/*
 * ATM ->close() hook: send a PKT_PCLOSE control message for this
 * VPI/VCI and mask the board interrupt once the last PVC is closed.
 */
static void pclose(struct atm_vcc *vcc)
{
	struct solos_card *card = vcc->dev->dev_data;
	struct pkt_hdr *header;
	struct sk_buff *skb;

	skb = alloc_skb(sizeof(*header), GFP_ATOMIC);
	if (!skb) {
		dev_warn(&card->dev->dev, "Failed to allocate sk_buff in pclose()\n");
		return;
	}

	/* Build the close control message */
	header = (void *)skb_put(skb, sizeof(*header));
	header->size = cpu_to_le16(sizeof(*header));
	header->vpi = cpu_to_le16(vcc->vpi);
	header->vci = cpu_to_le16(vcc->vci);
	header->type = cpu_to_le16(PKT_PCLOSE);

	fpga_queue(card, SOLOS_CHAN(vcc->dev), skb, NULL);

	/* Last PVC gone: disable the FPGA interrupt */
	if (!--opens)
		iowrite32(0, card->config_regs + IRQ_EN_ADDR);

	clear_bit(ATM_VF_ADDR, &vcc->flags);
	clear_bit(ATM_VF_READY, &vcc->flags);
}
/*
 * Debug helper: hex-dump an skb's payload to the kernel log, eight
 * bytes per line, each line prefixed with its starting offset.
 * Always returns 0.
 */
static int print_buffer(struct sk_buff *buf)
{
	char line[500];
	char hex[10];
	int i;

	for (i = 0; i < buf->len; i++) {
		/* Start a fresh line every eight bytes */
		if (i % 8 == 0)
			sprintf(line, "%02X: ", i);

		sprintf(hex, "%02X ", *(buf->data + i));
		strcat(line, hex);

		/* Flush a completed line of eight bytes */
		if (i % 8 == 7) {
			strcat(line, "\n");
			printk(KERN_DEBUG "%s", line);
		}
	}

	/* Flush any partial final line */
	if (i % 8 != 0) {
		strcat(line, "\n");
		printk(KERN_DEBUG "%s", line);
	}
	printk(KERN_DEBUG "\n");

	return 0;
}
/*
 * Queue an skb for transmission on 'port'.  The owning vcc (or NULL
 * for control/console messages) is stashed in skb->cb so fpga_tx()
 * can account and free it appropriately.  If the queue was empty
 * the TX engine may be idle, so kick it.
 */
static void fpga_queue(struct solos_card *card, int port, struct sk_buff *skb,
		       struct atm_vcc *vcc)
{
	int kick_tx;

	*(void **)skb->cb = vcc;

	spin_lock(&card->tx_queue_lock);
	kick_tx = skb_queue_empty(&card->tx_queue[port]);
	skb_queue_tail(&card->tx_queue[port], skb);
	spin_unlock(&card->tx_queue_lock);

	if (kick_tx)
		fpga_tx(card);
}
/*
 * Push one queued skb per idle port into the FPGA's TX window.
 *
 * The FLAGS register is read once: a clear bit means that port's TX
 * buffer is free.  For each free port one skb is dequeued, copied via
 * memcpy_toio(), and the port's "TX full" bit is recorded; all such
 * bits are written back in a single iowrite32() at the end.
 *
 * Runs with card->tx_lock held and IRQs disabled, hence
 * dev_kfree_skb_irq() for anonymous (non-VCC) skbs.  Always returns 0.
 */
static int fpga_tx(struct solos_card *card)
{
	uint32_t tx_pending;
	uint32_t tx_started = 0;
	struct sk_buff *skb;
	struct atm_vcc *vcc;
	unsigned char port;
	unsigned long flags;

	spin_lock_irqsave(&card->tx_lock, flags);

	tx_pending = ioread32(card->config_regs + FLAGS_ADDR);
	dev_vdbg(&card->dev->dev, "TX Flags are %X\n", tx_pending);

	for (port = 0; port < card->nr_ports; port++) {
		if (!(tx_pending & (1 << port))) {	/* this port's TX buffer is free */

			spin_lock(&card->tx_queue_lock);
			skb = skb_dequeue(&card->tx_queue[port]);
			spin_unlock(&card->tx_queue_lock);

			if (!skb)
				continue;

			if (atmdebug) {
				dev_info(&card->dev->dev, "Transmitted: port %d\n",
					 port);
				print_buffer(skb);
			}
			memcpy_toio(TX_BUF(card, port), skb->data, skb->len);

			/* Owning vcc (or NULL) was stashed here by fpga_queue() */
			vcc = *(void **)skb->cb;

			if (vcc) {
				atomic_inc(&vcc->stats->tx);
				solos_pop(vcc, skb);
			} else
				dev_kfree_skb_irq(skb);

			tx_started |= 1 << port; /* Set TX full flag */
		}
	}
	if (tx_started)
		iowrite32(tx_started, card->config_regs + FLAGS_ADDR);

	spin_unlock_irqrestore(&card->tx_lock, flags);
	return 0;
}
/*
 * ATM ->send() hook: prepend a pkt_hdr to the PDU and queue it for
 * FPGA transmission on the VCC's port.  In 'debug' mode the PDU is
 * looped straight back up the VCC's own receive path instead.
 */
static int psend(struct atm_vcc *vcc, struct sk_buff *skb)
{
	struct solos_card *card = vcc->dev->dev_data;
	struct sk_buff *skb2 = NULL;
	struct pkt_hdr *header;

	if (debug) {
		/* Loopback: charge, copy and push the data back as RX */
		skb2 = atm_alloc_charge(vcc, skb->len, GFP_ATOMIC);
		if (skb2) {
			memcpy(skb2->data, skb->data, skb->len);
			skb_put(skb2, skb->len);
			vcc->push(vcc, skb2);
			atomic_inc(&vcc->stats->rx);
		}
		atomic_inc(&vcc->stats->tx);
		solos_pop(vcc, skb);
		return 0;
	}

	/* Header plus payload must fit in one FPGA buffer */
	if (skb->len > (BUF_SIZE - sizeof(*header))) {
		dev_warn(&card->dev->dev, "Length of PDU is too large. Dropping PDU.\n");
		solos_pop(vcc, skb);
		return 0;
	}

	/* Make the head writable with room for the header, reallocating
	 * if the skb is shared or short on headroom. */
	if (!skb_clone_writable(skb, sizeof(*header))) {
		int expand_by = 0;
		int ret;

		if (skb_headroom(skb) < sizeof(*header))
			expand_by = sizeof(*header) - skb_headroom(skb);

		ret = pskb_expand_head(skb, expand_by, 0, GFP_ATOMIC);
		if (ret) {
			solos_pop(vcc, skb);
			return ret;
		}
	}

	header = (void *)skb_push(skb, sizeof(*header));

	/* size covers header + payload (skb->len after the skb_push) */
	header->size = cpu_to_le16(skb->len);
	header->vpi = cpu_to_le16(vcc->vpi);
	header->vci = cpu_to_le16(vcc->vci);
	header->type = cpu_to_le16(PKT_DATA);

	fpga_queue(card, SOLOS_CHAN(vcc->dev), skb, vcc);

	return 0;
}
/*
 * ATM device operations.  Only open/close/send are implemented;
 * members not listed are implicitly NULL (designated initializers
 * zero the rest).
 */
static struct atmdev_ops fpga_ops = {
	.open		= popen,
	.close		= pclose,
	.send		= psend,
	.owner		= THIS_MODULE,
};
/*
 * PCI probe: map the config and data-buffer BARs, zero the config
 * memory, register the per-port ATM devices and hook up the IRQ.
 *
 * Bugs fixed:
 *  - when pci_iomap() failed, 'err' was left at 0 so probe reported
 *    success after freeing the card (use-after-free for the core);
 *    it is now set to -ENOMEM on both iomap failure paths.
 *  - the out_unmap_both/out_unmap_config label bodies were swapped:
 *    out_unmap_config unmapped card->buffers (NULL at that point) and
 *    leaked the config mapping.  They now unwind in reverse order of
 *    the mappings.
 */
static int fpga_probe(struct pci_dev *dev, const struct pci_device_id *id)
{
	int err, i;
	uint16_t fpga_ver;
	uint8_t major_ver, minor_ver;
	uint32_t data32;
	struct solos_card *card;

	/* 'debug' mode: loopback only, no hardware is touched */
	if (debug)
		return 0;

	card = kzalloc(sizeof(*card), GFP_KERNEL);
	if (!card)
		return -ENOMEM;

	card->dev = dev;

	err = pci_enable_device(dev);
	if (err) {
		dev_warn(&dev->dev, "Failed to enable PCI device\n");
		goto out;
	}

	err = pci_request_regions(dev, "solos");
	if (err) {
		dev_warn(&dev->dev, "Failed to request regions\n");
		goto out;
	}

	card->config_regs = pci_iomap(dev, 0, CONFIG_RAM_SIZE);
	if (!card->config_regs) {
		dev_warn(&dev->dev, "Failed to ioremap config registers\n");
		err = -ENOMEM;
		goto out_release_regions;
	}
	card->buffers = pci_iomap(dev, 1, DATA_RAM_SIZE);
	if (!card->buffers) {
		dev_warn(&dev->dev, "Failed to ioremap data buffers\n");
		err = -ENOMEM;
		goto out_unmap_config;
	}

	/* Fill Config Mem with zeros */
	for (i = 0; i < 128; i += 4)
		iowrite32(0, card->config_regs + i);

	/* Set RX empty flags */
	iowrite32(0xF0, card->config_regs + FLAGS_ADDR);

	data32 = ioread32(card->config_regs + FPGA_VER);
	fpga_ver = (data32 & 0x0000FFFF);
	major_ver = ((data32 & 0xFF000000) >> 24);
	minor_ver = ((data32 & 0x00FF0000) >> 16);
	dev_info(&dev->dev, "Solos FPGA Version %d.%02d svn-%d\n",
		 major_ver, minor_ver, fpga_ver);

	card->nr_ports = 2; /* FIXME: Detect daughterboard */

	err = atm_init(card);
	if (err)
		goto out_unmap_both;

	pci_set_drvdata(dev, card);
	tasklet_init(&card->tlet, solos_bh, (unsigned long)card);
	spin_lock_init(&card->tx_lock);
	spin_lock_init(&card->tx_queue_lock);
	spin_lock_init(&card->cli_queue_lock);

	err = request_irq(dev->irq, solos_irq, IRQF_DISABLED|IRQF_SHARED,
			  "solos-pci", card);
	if (err)
		/* FIXME: should unwind (atm_remove() etc.) rather than
		 * continue without interrupts */
		dev_dbg(&card->dev->dev, "Failed to request interrupt IRQ: %d\n", dev->irq);

	/* Enable IRQs */
	iowrite32(1, card->config_regs + IRQ_EN_ADDR);

	return 0;

out_unmap_both:
	pci_iounmap(dev, card->buffers);
out_unmap_config:
	pci_iounmap(dev, card->config_regs);
out_release_regions:
	pci_release_regions(dev);
out:
	kfree(card);
	return err;
}
/*
 * Register one ATM device per port and initialize the per-port TX and
 * console (CLI) queues.  On a registration failure, the devices
 * registered so far are torn down via atm_remove() — safe because the
 * card was kzalloc'd, so unregistered atmdev[] slots are NULL.
 */
static int atm_init(struct solos_card *card)
{
	int i;

	opens = 0;	/* global open-PVC count used by popen()/pclose() */

	for (i = 0; i < card->nr_ports; i++) {
		skb_queue_head_init(&card->tx_queue[i]);
		skb_queue_head_init(&card->cli_queue[i]);

		card->atmdev[i] = atm_dev_register("solos-pci", &fpga_ops, -1, NULL);
		if (!card->atmdev[i]) {
			dev_err(&card->dev->dev, "Could not register ATM device %d\n", i);
			atm_remove(card);
			return -ENODEV;
		}
		/* Missing console attribute is non-fatal; just log it */
		if (device_create_file(&card->atmdev[i]->class_dev, &dev_attr_console))
			dev_err(&card->dev->dev, "Could not register console for ATM device %d\n", i);

		dev_info(&card->dev->dev, "Registered ATM device %d\n", card->atmdev[i]->number);

		card->atmdev[i]->ci_range.vpi_bits = 8;
		card->atmdev[i]->ci_range.vci_bits = 16;
		card->atmdev[i]->dev_data = card;
		/* port number rides in phy_data */
		card->atmdev[i]->phy_data = (void *)(unsigned long)i;
	}
	return 0;
}
static void atm_remove(struct solos_card *card)
{
int i;
for (i = 0; i < card->nr_ports; i++) {
if (card->atmdev[i]) {
dev_info(&card->dev->dev, "Unregistering ATM device %d\n", card->atmdev[i]->number);
atm_dev_deregister(card->atmdev[i]);
}
}
}
/*
 * PCI remove: reverse of fpga_probe() — deregister the ATM devices,
 * mask and free the interrupt, kill the tasklet, then unmap and
 * release the PCI resources.
 */
static void fpga_remove(struct pci_dev *dev)
{
	struct solos_card *card = pci_get_drvdata(dev);

	/* In 'debug' mode probe touched no hardware, so neither do we */
	if (debug)
		return;

	atm_remove(card);

	dev_vdbg(&dev->dev, "Freeing IRQ\n");
	/* Disable IRQs from FPGA before releasing the handler */
	iowrite32(0, card->config_regs + IRQ_EN_ADDR);
	free_irq(dev->irq, card);
	tasklet_kill(&card->tlet);

	dev_vdbg(&dev->dev, "Unmapping PCI resource\n");
	pci_iounmap(dev, card->buffers);
	pci_iounmap(dev, card->config_regs);

	dev_vdbg(&dev->dev, "Releasing PCI Region\n");
	pci_release_regions(dev);
	pci_disable_device(dev);
	pci_set_drvdata(dev, NULL);
	kfree(card);
	return;
}
/* Match table: Xilinx vendor ID 0x10ee, Solos device ID 0x0300 */
static struct pci_device_id fpga_pci_tbl[] __devinitdata = {
	{ 0x10ee, 0x0300, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
	{ 0, }
};

MODULE_DEVICE_TABLE(pci,fpga_pci_tbl);

/* PCI driver glue: probe/remove entry points for the Solos FPGA */
static struct pci_driver fpga_driver = {
	.name = "solos",
	.id_table = fpga_pci_tbl,
	.probe = fpga_probe,
	.remove = fpga_remove,
};
/* Module init: announce the driver version, register with the PCI core. */
static int __init solos_pci_init(void)
{
	printk(KERN_INFO "Solos PCI Driver Version %s\n", VERSION);
	return pci_register_driver(&fpga_driver);
}

/* Module exit: unregister (this invokes fpga_remove() per bound card). */
static void __exit solos_pci_exit(void)
{
	pci_unregister_driver(&fpga_driver);
	printk(KERN_INFO "Solos PCI Driver %s Unloaded\n", VERSION);
}

module_init(solos_pci_init);
module_exit(solos_pci_exit);
| gpl-2.0 |
akalongman/linux | arch/um/os-Linux/signal.c | 394 | 6885 | /*
* Copyright (C) 2004 PathScale, Inc
* Copyright (C) 2004 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
* Licensed under the GPL
*/
#include <stdlib.h>
#include <stdarg.h>
#include <errno.h>
#include <signal.h>
#include <strings.h>
#include <as-layout.h>
#include <kern_util.h>
#include <os.h>
#include <sysdep/mcontext.h>
#include "internal.h"
/*
 * Per-signal dispatch table used by sig_handler_common(): maps each
 * host signal to the UML routine that services it.
 */
void (*sig_info[NSIG])(int, struct siginfo *, struct uml_pt_regs *) = {
	[SIGTRAP]	= relay_signal,
	[SIGFPE]	= relay_signal,
	[SIGILL]	= relay_signal,
	[SIGWINCH]	= winch,
	[SIGBUS]	= bus_handler,
	[SIGSEGV]	= segv_handler,
	[SIGIO]		= sigio_handler,
	[SIGVTALRM]	= timer_handler };
/*
 * Common signal dispatch: build a uml_pt_regs, re-enable signals for
 * anything that is not one of the IRQ-type signals, and hand off to
 * the handler registered in sig_info[].  errno is preserved across
 * the handler call.
 */
static void sig_handler_common(int sig, struct siginfo *si, mcontext_t *mc)
{
	int saved_errno = errno;
	struct uml_pt_regs regs;

	regs.is_user = 0;
	if (sig == SIGSEGV) {
		/* For segfaults, we want the data from the sigcontext. */
		get_regs_from_mc(&regs, mc);
		GET_FAULTINFO_FROM_MC(regs.faultinfo, mc);
	}

	/* enable signals if sig isn't IRQ signal */
	if (sig != SIGIO && sig != SIGWINCH && sig != SIGVTALRM)
		unblock_signals();

	(*sig_info[sig])(sig, si, &regs);

	errno = saved_errno;
}
/*
* These are the asynchronous signals. SIGPROF is excluded because we want to
* be able to profile all of UML, not just the non-critical sections. If
* profiling is not thread-safe, then that is not my problem. We can disable
* profiling when SMP is enabled in that case.
*/
#define SIGIO_BIT 0
#define SIGIO_MASK (1 << SIGIO_BIT)
#define SIGVTALRM_BIT 1
#define SIGVTALRM_MASK (1 << SIGVTALRM_BIT)
static int signals_enabled;
static unsigned int signals_pending;
/*
 * Entry for most signals from hard_handler(): if delivery is disabled
 * a SIGIO is only recorded in signals_pending; otherwise the signal is
 * dispatched with delivery blocked and the previous state restored.
 */
void sig_handler(int sig, struct siginfo *si, mcontext_t *mc)
{
	int was_enabled = signals_enabled;

	if (!was_enabled && (sig == SIGIO)) {
		signals_pending |= SIGIO_MASK;
		return;
	}

	block_signals();
	sig_handler_common(sig, si, mc);
	set_signals(was_enabled);
}
/*
 * Deliver a timer tick to the UML timer_handler().
 *
 * Bug fixed: two occurrences of "&regs" had been corrupted into the
 * mojibake "®s" (an HTML "&reg;" entity from a bad round-trip),
 * which does not compile.  Restored the address-of expressions.
 *
 * When called from unblock_signals() to replay a pending tick there
 * is no mcontext, so mc may be NULL; only is_user is initialized then.
 */
static void real_alarm_handler(mcontext_t *mc)
{
	struct uml_pt_regs regs;

	if (mc != NULL)
		get_regs_from_mc(&regs, mc);
	regs.is_user = 0;
	unblock_signals();
	timer_handler(SIGVTALRM, NULL, &regs);
}
/*
 * SIGVTALRM entry from hard_handler(): if delivery is disabled, just
 * record the tick in signals_pending for unblock_signals() to replay.
 *
 * Consistency fix: the disabled-state test re-read the global
 * signals_enabled instead of using the snapshot already taken in
 * 'enabled', unlike sig_handler().  Use the snapshot for both the
 * test and the final set_signals() so the function acts on one
 * coherent reading of the state.
 */
void alarm_handler(int sig, struct siginfo *unused_si, mcontext_t *mc)
{
	int enabled;

	enabled = signals_enabled;
	if (!enabled) {
		signals_pending |= SIGVTALRM_MASK;
		return;
	}

	block_signals();
	real_alarm_handler(mc);
	set_signals(enabled);
}
/* Install the SIGVTALRM handler that serves as UML's timer interrupt. */
void timer_init(void)
{
	set_handler(SIGVTALRM);
}
void set_sigstack(void *sig_stack, int size)
{
stack_t stack = {
.ss_flags = 0,
.ss_sp = sig_stack,
.ss_size = size - sizeof(void *)
};
if (sigaltstack(&stack, NULL) != 0)
panic("enabling signal stack failed, errno = %d\n", errno);
}
/*
 * Second-level dispatch used by hard_handler(): every signal goes
 * through sig_handler() except SIGVTALRM, which takes the dedicated
 * alarm path.
 */
static void (*handlers[_NSIG])(int sig, struct siginfo *si, mcontext_t *mc) = {
	[SIGSEGV] = sig_handler,
	[SIGBUS] = sig_handler,
	[SIGILL] = sig_handler,
	[SIGFPE] = sig_handler,
	[SIGTRAP] = sig_handler,

	[SIGIO] = sig_handler,
	[SIGWINCH] = sig_handler,
	[SIGVTALRM] = alarm_handler
};
/*
 * The actual handler installed by set_handler(): moves execution onto
 * the IRQ stack via to_irq_stack() and dispatches every pending signal
 * through handlers[], looping until no further signals accumulated
 * while the stack was being set up or torn down.
 */
static void hard_handler(int sig, siginfo_t *si, void *p)
{
	struct ucontext *uc = p;
	mcontext_t *mc = &uc->uc_mcontext;
	unsigned long pending = 1UL << sig;

	do {
		int nested, bail;

		/*
		 * pending comes back with one bit set for each
		 * interrupt that arrived while setting up the stack,
		 * plus a bit for this interrupt, plus the zero bit is
		 * set if this is a nested interrupt.
		 * If bail is true, then we interrupted another
		 * handler setting up the stack.  In this case, we
		 * have to return, and the upper handler will deal
		 * with this interrupt.
		 */
		bail = to_irq_stack(&pending);
		if (bail)
			return;

		nested = pending & 1;
		pending &= ~1;

		/* Dispatch each pending signal, lowest bit first */
		while ((sig = ffs(pending)) != 0){
			sig--;
			pending &= ~(1 << sig);
			(*handlers[sig])(sig, (struct siginfo *)si, mc);
		}

		/*
		 * Again, pending comes back with a mask of signals
		 * that arrived while tearing down the stack.  If this
		 * is non-zero, we just go back, set up the stack
		 * again, and handle the new interrupts.
		 */
		if (!nested)
			pending = from_irq_stack(nested);
	} while (pending);
}
/*
 * Install hard_handler() for 'sig' on the alternate stack with the
 * IRQ-type signals (SIGVTALRM, SIGIO, SIGWINCH) masked during
 * delivery, then make sure 'sig' itself is unblocked in the process
 * mask.  Panics on failure — UML cannot operate without its handlers.
 */
void set_handler(int sig)
{
	struct sigaction action;
	int flags = SA_SIGINFO | SA_ONSTACK;
	sigset_t sig_mask;

	action.sa_sigaction = hard_handler;

	/* block irq ones */
	sigemptyset(&action.sa_mask);
	sigaddset(&action.sa_mask, SIGVTALRM);
	sigaddset(&action.sa_mask, SIGIO);
	sigaddset(&action.sa_mask, SIGWINCH);

	/* SIGSEGV must be deliverable while already handling SIGSEGV */
	if (sig == SIGSEGV)
		flags |= SA_NODEFER;

	if (sigismember(&action.sa_mask, sig))
		flags |= SA_RESTART; /* if it's an irq signal */

	action.sa_flags = flags;
	action.sa_restorer = NULL;
	if (sigaction(sig, &action, NULL) < 0)
		panic("sigaction failed - errno = %d\n", errno);

	sigemptyset(&sig_mask);
	sigaddset(&sig_mask, sig);
	if (sigprocmask(SIG_UNBLOCK, &sig_mask, NULL) < 0)
		panic("sigprocmask failed - errno = %d\n", errno);
}
/*
 * Block (on == 0) or unblock (on != 0) 'signal' in the process
 * signal mask.  Returns 0 on success, -errno on sigprocmask failure.
 */
int change_sig(int signal, int on)
{
	sigset_t mask;

	sigemptyset(&mask);
	sigaddset(&mask, signal);
	if (sigprocmask(on ? SIG_UNBLOCK : SIG_BLOCK, &mask, NULL) < 0)
		return -errno;

	return 0;
}
/*
 * Disable delivery of UML IRQ signals.  Handlers that fire while
 * disabled only record themselves in signals_pending (see
 * sig_handler()/alarm_handler()); unblock_signals() replays them.
 */
void block_signals(void)
{
	signals_enabled = 0;
	/*
	 * This must return with signals disabled, so this barrier
	 * ensures that writes are flushed out before the return.
	 * This might matter if gcc figures out how to inline this and
	 * decides to shuffle this code into the caller.
	 */
	barrier();
}
/*
 * Re-enable signal delivery and replay everything recorded in
 * signals_pending while delivery was off.  Coordination with the
 * handlers is lock-free: the ordering of writes/reads between
 * signals_enabled and signals_pending is pinned with barrier().
 */
void unblock_signals(void)
{
	int save_pending;

	if (signals_enabled == 1)
		return;

	/*
	 * We loop because the IRQ handler returns with interrupts off.  So,
	 * interrupts may have arrived and we need to re-enable them and
	 * recheck signals_pending.
	 */
	while (1) {
		/*
		 * Save and reset save_pending after enabling signals.  This
		 * way, signals_pending won't be changed while we're reading it.
		 */
		signals_enabled = 1;

		/*
		 * Setting signals_enabled and reading signals_pending must
		 * happen in this order.
		 */
		barrier();

		save_pending = signals_pending;
		if (save_pending == 0)
			return;

		signals_pending = 0;

		/*
		 * We have pending interrupts, so disable signals, as the
		 * handlers expect them off when they are called.  They will
		 * be enabled again above.
		 */
		signals_enabled = 0;

		/*
		 * Deal with SIGIO first because the alarm handler might
		 * schedule, leaving the pending SIGIO stranded until we come
		 * back here.
		 *
		 * SIGIO's handler doesn't use siginfo or mcontext,
		 * so they can be NULL.
		 */
		if (save_pending & SIGIO_MASK)
			sig_handler_common(SIGIO, NULL, NULL);

		if (save_pending & SIGVTALRM_MASK)
			real_alarm_handler(NULL);
	}
}
/* Report the current delivery state: 1 = enabled, 0 = blocked. */
int get_signals(void)
{
	return signals_enabled;
}
/*
 * Set the signal-delivery state to 'enable' and return the previous
 * state; a no-op (returning 'enable') when the state already matches.
 */
int set_signals(int enable)
{
	int prev;

	if (signals_enabled == enable)
		return enable;

	prev = signals_enabled;
	if (enable)
		unblock_signals();
	else
		block_signals();

	return prev;
}
/*
 * Return non-zero when currently executing on the alternate signal
 * stack (SS_ONSTACK reported by sigaltstack()).
 */
int os_is_signal_stack(void)
{
	stack_t altstack;

	sigaltstack(NULL, &altstack);

	return altstack.ss_flags & SS_ONSTACK;
}
| gpl-2.0 |
LokiWuh/android_kernel_asus_grouper | drivers/mmc/host/sdhci-pxav2.c | 394 | 6296 | /*
* Copyright (C) 2010 Marvell International Ltd.
* Zhangfei Gao <zhangfei.gao@marvell.com>
* Kevin Wang <dwang4@marvell.com>
* Jun Nie <njun@marvell.com>
* Qiming Wu <wuqm@marvell.com>
* Philip Rakity <prakity@marvell.com>
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
* may be copied, distributed, and modified under those terms.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#include <linux/err.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/gpio.h>
#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
#include <linux/platform_data/pxa_sdhci.h>
#include <linux/slab.h>
#include "sdhci.h"
#include "sdhci-pltfm.h"
#define SD_FIFO_PARAM 0xe0
#define DIS_PAD_SD_CLK_GATE 0x0400 /* Turn on/off Dynamic SD Clock Gating */
#define CLK_GATE_ON 0x0200 /* Disable/enable Clock Gate */
#define CLK_GATE_CTL 0x0100 /* Clock Gate Control */
#define CLK_GATE_SETTING_BITS (DIS_PAD_SD_CLK_GATE | \
CLK_GATE_ON | CLK_GATE_CTL)
#define SD_CLOCK_BURST_SIZE_SETUP 0xe6
#define SDCLK_SEL_SHIFT 8
#define SDCLK_SEL_MASK 0x3
#define SDCLK_DELAY_SHIFT 10
#define SDCLK_DELAY_MASK 0x3c
#define SD_CE_ATA_2 0xea
#define MMC_CARD 0x1000
#define MMC_WIDTH 0x0100
/*
 * sdhci ->platform_reset_exit hook: after a full controller reset,
 * reapply the Marvell-specific clock-delay tuning and clock-gating
 * configuration from platform data.
 *
 * Fixes:
 *  - pdata was dereferenced unconditionally, but sdhci_pxav2_probe()
 *    tolerates a NULL platform_data — such boards would oops here on
 *    the first reset.  Bail out early instead.
 *  - the non-gating branch cleared CLK_GATE_SETTING_BITS and then
 *    immediately set the same bits; the redundant clear is dropped.
 */
static void pxav2_set_private_registers(struct sdhci_host *host, u8 mask)
{
	struct platform_device *pdev = to_platform_device(mmc_dev(host->mmc));
	struct sdhci_pxa_platdata *pdata = pdev->dev.platform_data;
	u16 tmp;

	if (!pdata || mask != SDHCI_RESET_ALL)
		return;

	/*
	 * tune timing of read data/command when crc error happen
	 * no performance impact
	 */
	if (pdata->clk_delay_sel == 1) {
		tmp = readw(host->ioaddr + SD_CLOCK_BURST_SIZE_SETUP);

		tmp &= ~(SDCLK_DELAY_MASK << SDCLK_DELAY_SHIFT);
		tmp |= (pdata->clk_delay_cycles & SDCLK_DELAY_MASK)
			<< SDCLK_DELAY_SHIFT;
		tmp &= ~(SDCLK_SEL_MASK << SDCLK_SEL_SHIFT);
		tmp |= (1 & SDCLK_SEL_MASK) << SDCLK_SEL_SHIFT;

		writew(tmp, host->ioaddr + SD_CLOCK_BURST_SIZE_SETUP);
	}

	tmp = readw(host->ioaddr + SD_FIFO_PARAM);
	if (pdata->flags & PXA_FLAG_ENABLE_CLOCK_GATING)
		tmp &= ~CLK_GATE_SETTING_BITS;
	else
		tmp |= CLK_GATE_SETTING_BITS;
	writew(tmp, host->ioaddr + SD_FIFO_PARAM);
}
/*
 * sdhci ->platform_8bit_width hook: select the data bus width.
 * 8-bit mode is engaged through the Marvell SD_CE_ATA_2 register
 * (MMC_CARD | MMC_WIDTH); 4-/1-bit use the standard host-control
 * 4BITBUS flag.  Always returns 0.
 */
static int pxav2_mmc_set_width(struct sdhci_host *host, int width)
{
	u8 hostctl;
	u16 ata2;

	hostctl = readb(host->ioaddr + SDHCI_HOST_CONTROL);
	ata2 = readw(host->ioaddr + SD_CE_ATA_2);

	switch (width) {
	case MMC_BUS_WIDTH_8:
		hostctl &= ~SDHCI_CTRL_4BITBUS;
		ata2 |= MMC_CARD | MMC_WIDTH;
		break;
	case MMC_BUS_WIDTH_4:
		ata2 &= ~(MMC_CARD | MMC_WIDTH);
		hostctl |= SDHCI_CTRL_4BITBUS;
		break;
	default:
		ata2 &= ~(MMC_CARD | MMC_WIDTH);
		hostctl &= ~SDHCI_CTRL_4BITBUS;
		break;
	}

	writew(ata2, host->ioaddr + SD_CE_ATA_2);
	writeb(hostctl, host->ioaddr + SDHCI_HOST_CONTROL);

	return 0;
}
/* Maximum host clock is simply the rate of the platform I/O clock. */
static u32 pxav2_get_max_clock(struct sdhci_host *host)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);

	return clk_get_rate(pltfm_host->clk);
}
/* Controller-specific callbacks hooked into the generic SDHCI core. */
static struct sdhci_ops pxav2_sdhci_ops = {
	.get_max_clock = pxav2_get_max_clock,
	.platform_reset_exit = pxav2_set_private_registers,
	.platform_8bit_width = pxav2_mmc_set_width,
};
/*
 * Platform probe: allocate the sdhci-pltfm host plus the private
 * sdhci_pxa area, acquire and enable the "PXA-SDHCLK" clock, apply
 * quirks/caps from platform data (which may be absent), and register
 * the SDHCI host.  Errors unwind in reverse order of acquisition.
 */
static int __devinit sdhci_pxav2_probe(struct platform_device *pdev)
{
	struct sdhci_pltfm_host *pltfm_host;
	struct sdhci_pxa_platdata *pdata = pdev->dev.platform_data;
	struct device *dev = &pdev->dev;
	struct sdhci_host *host = NULL;
	struct sdhci_pxa *pxa = NULL;
	int ret;
	struct clk *clk;

	pxa = kzalloc(sizeof(struct sdhci_pxa), GFP_KERNEL);
	if (!pxa)
		return -ENOMEM;

	host = sdhci_pltfm_init(pdev, NULL);
	if (IS_ERR(host)) {
		kfree(pxa);
		return PTR_ERR(host);
	}
	pltfm_host = sdhci_priv(host);
	pltfm_host->priv = pxa;

	clk = clk_get(dev, "PXA-SDHCLK");
	if (IS_ERR(clk)) {
		dev_err(dev, "failed to get io clock\n");
		ret = PTR_ERR(clk);
		goto err_clk_get;
	}
	pltfm_host->clk = clk;
	clk_enable(clk);

	/* Baseline controller quirks for this IP revision */
	host->quirks = SDHCI_QUIRK_BROKEN_ADMA
		| SDHCI_QUIRK_BROKEN_TIMEOUT_VAL
		| SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN;

	if (pdata) {
		if (pdata->flags & PXA_FLAG_CARD_PERMANENT) {
			/* on-chip device */
			host->quirks |= SDHCI_QUIRK_BROKEN_CARD_DETECTION;
			host->mmc->caps |= MMC_CAP_NONREMOVABLE;
		}

		/* If slot design supports 8 bit data, indicate this to MMC. */
		if (pdata->flags & PXA_FLAG_SD_8_BIT_CAPABLE_SLOT)
			host->mmc->caps |= MMC_CAP_8_BIT_DATA;

		if (pdata->quirks)
			host->quirks |= pdata->quirks;
		if (pdata->host_caps)
			host->mmc->caps |= pdata->host_caps;
		if (pdata->pm_caps)
			host->mmc->pm_caps |= pdata->pm_caps;
	}

	host->ops = &pxav2_sdhci_ops;

	ret = sdhci_add_host(host);
	if (ret) {
		dev_err(&pdev->dev, "failed to add host\n");
		goto err_add_host;
	}

	platform_set_drvdata(pdev, host);

	return 0;

err_add_host:
	clk_disable(clk);
	clk_put(clk);
err_clk_get:
	sdhci_pltfm_free(pdev);
	kfree(pxa);
	return ret;
}
/*
 * Platform remove: tear down in reverse order of probe — unregister
 * the SDHCI host, release the I/O clock, free the pltfm host and the
 * private area, and clear the drvdata pointer.
 */
static int __devexit sdhci_pxav2_remove(struct platform_device *pdev)
{
	struct sdhci_host *host = platform_get_drvdata(pdev);
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_pxa *pxa = pltfm_host->priv;
	struct clk *io_clk = pltfm_host->clk;

	sdhci_remove_host(host, 1);

	clk_disable(io_clk);
	clk_put(io_clk);

	sdhci_pltfm_free(pdev);
	kfree(pxa);

	platform_set_drvdata(pdev, NULL);

	return 0;
}
/*
 * Platform driver glue; suspend/resume delegate to the generic
 * sdhci-pltfm helpers when CONFIG_PM is enabled.
 */
static struct platform_driver sdhci_pxav2_driver = {
	.driver		= {
		.name	= "sdhci-pxav2",
		.owner	= THIS_MODULE,
	},
	.probe		= sdhci_pxav2_probe,
	.remove		= __devexit_p(sdhci_pxav2_remove),
#ifdef CONFIG_PM
	.suspend	= sdhci_pltfm_suspend,
	.resume		= sdhci_pltfm_resume,
#endif
};
/* Register the platform driver at module load. */
static int __init sdhci_pxav2_init(void)
{
	return platform_driver_register(&sdhci_pxav2_driver);
}

/* Unregister the platform driver at module unload. */
static void __exit sdhci_pxav2_exit(void)
{
	platform_driver_unregister(&sdhci_pxav2_driver);
}

module_init(sdhci_pxav2_init);
module_exit(sdhci_pxav2_exit);
MODULE_DESCRIPTION("SDHCI driver for pxav2");
MODULE_AUTHOR("Marvell International Ltd.");
MODULE_LICENSE("GPL v2");
| gpl-2.0 |
stylemate/YP-GB70_KRKPN_Fog_Kernel | arch/m68knommu/kernel/setup.c | 906 | 9764 | /*
* linux/arch/m68knommu/kernel/setup.c
*
* Copyright (C) 1999-2007 Greg Ungerer (gerg@snapgear.com)
* Copyright (C) 1998,1999 D. Jeff Dionne <jeff@uClinux.org>
* Copyleft ()) 2000 James D. Schettine {james@telos-systems.com}
* Copyright (C) 1998 Kenneth Albanowski <kjahds@kjahds.com>
* Copyright (C) 1995 Hamish Macdonald
* Copyright (C) 2000 Lineo Inc. (www.lineo.com)
* Copyright (C) 2001 Lineo, Inc. <www.lineo.com>
*
* 68VZ328 Fixes/support Evan Stawnyczy <e@lineo.ca>
*/
/*
* This file handles the architecture-dependent parts of system setup
*/
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/fb.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/console.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/bootmem.h>
#include <linux/seq_file.h>
#include <linux/init.h>
#include <linux/initrd.h>
#include <linux/root_dev.h>
#include <asm/setup.h>
#include <asm/irq.h>
#include <asm/machdep.h>
#include <asm/pgtable.h>
unsigned long memory_start;
unsigned long memory_end;
EXPORT_SYMBOL(memory_start);
EXPORT_SYMBOL(memory_end);
char __initdata command_line[COMMAND_LINE_SIZE];
/* machine dependent timer functions */
void (*mach_gettod)(int*, int*, int*, int*, int*, int*);
int (*mach_set_clock_mmss)(unsigned long);
/* machine dependent reboot functions */
void (*mach_reset)(void);
void (*mach_halt)(void);
void (*mach_power_off)(void);
#ifdef CONFIG_M68000
#define CPU "MC68000"
#endif
#ifdef CONFIG_M68328
#define CPU "MC68328"
#endif
#ifdef CONFIG_M68EZ328
#define CPU "MC68EZ328"
#endif
#ifdef CONFIG_M68VZ328
#define CPU "MC68VZ328"
#endif
#ifdef CONFIG_M68360
#define CPU "MC68360"
#endif
#if defined(CONFIG_M5206)
#define CPU "COLDFIRE(m5206)"
#endif
#if defined(CONFIG_M5206e)
#define CPU "COLDFIRE(m5206e)"
#endif
#if defined(CONFIG_M520x)
#define CPU "COLDFIRE(m520x)"
#endif
#if defined(CONFIG_M523x)
#define CPU "COLDFIRE(m523x)"
#endif
#if defined(CONFIG_M5249)
#define CPU "COLDFIRE(m5249)"
#endif
#if defined(CONFIG_M5271)
#define CPU "COLDFIRE(m5270/5271)"
#endif
#if defined(CONFIG_M5272)
#define CPU "COLDFIRE(m5272)"
#endif
#if defined(CONFIG_M5275)
#define CPU "COLDFIRE(m5274/5275)"
#endif
#if defined(CONFIG_M528x)
#define CPU "COLDFIRE(m5280/5282)"
#endif
#if defined(CONFIG_M5307)
#define CPU "COLDFIRE(m5307)"
#endif
#if defined(CONFIG_M532x)
#define CPU "COLDFIRE(m532x)"
#endif
#if defined(CONFIG_M5407)
#define CPU "COLDFIRE(m5407)"
#endif
#ifndef CPU
#define CPU "UNKNOWN"
#endif
extern int _stext, _etext, _sdata, _edata, _sbss, _ebss, _end;
extern int _ramstart, _ramend;
#if defined(CONFIG_UBOOT)
/*
* parse_uboot_commandline
*
* Copies u-boot commandline arguments and store them in the proper linux
* variables.
*
* Assumes:
* _init_sp global contains the address in the stack pointer when the
* kernel starts (see head.S::_start)
*
* U-Boot calling convention:
* (*kernel) (kbd, initrd_start, initrd_end, cmd_start, cmd_end);
*
* _init_sp can be parsed as such
*
* _init_sp+00 = u-boot cmd after jsr into kernel (skip)
* _init_sp+04 = &kernel board_info (residual data)
* _init_sp+08 = &initrd_start
* _init_sp+12 = &initrd_end
* _init_sp+16 = &cmd_start
* _init_sp+20 = &cmd_end
*
* This also assumes that the memory locations pointed to are still
* unmodified. U-boot places them near the end of external SDRAM.
*
* Argument(s):
* commandp = the linux commandline arg container to fill.
* size = the sizeof commandp.
*
* Returns:
*/
/*
 * Copy the u-boot command line and initrd bounds into the kernel's
 * variables (see the calling-convention comment above for the layout
 * found at _init_sp).
 *
 * Fix: strncpy() does not NUL-terminate the destination when the
 * source is longer than 'size' (CERT STR32-C), so a long u-boot
 * command line would leave commandp unterminated here.  Force the
 * terminator after the copy; also skip the copy for size <= 0.
 */
void parse_uboot_commandline(char *commandp, int size)
{
	extern unsigned long _init_sp;
	unsigned long *sp;
	unsigned long uboot_kbd;
	unsigned long uboot_initrd_start, uboot_initrd_end;
	unsigned long uboot_cmd_start, uboot_cmd_end;

	sp = (unsigned long *)_init_sp;
	uboot_kbd = sp[1];		/* residual board info (unused) */
	uboot_initrd_start = sp[2];
	uboot_initrd_end = sp[3];
	uboot_cmd_start = sp[4];
	uboot_cmd_end = sp[5];

	if (uboot_cmd_start && uboot_cmd_end && size > 0) {
		strncpy(commandp, (const char *)uboot_cmd_start, size);
		commandp[size - 1] = 0;	/* strncpy may not terminate */
	}
#if defined(CONFIG_BLK_DEV_INITRD)
	if (uboot_initrd_start && uboot_initrd_end &&
	    (uboot_initrd_end > uboot_initrd_start)) {
		initrd_start = uboot_initrd_start;
		initrd_end = uboot_initrd_end;
		ROOT_DEV = Root_RAM0;
		printk(KERN_INFO "initrd at 0x%lx:0x%lx\n",
		       initrd_start, initrd_end);
	}
#endif /* if defined(CONFIG_BLK_DEV_INITRD) */
}
#endif /* #if defined(CONFIG_UBOOT) */
/*
 * Architecture setup: establish the memory bounds, assemble the boot
 * command line (BSP, CONFIG_BOOTPARAM and/or u-boot sources), print
 * the platform banners, then hand all usable memory to the bootmem
 * allocator and bring up paging.
 */
void __init setup_arch(char **cmdline_p)
{
	int bootmap_size;

	memory_start = PAGE_ALIGN(_ramstart);
	memory_end = _ramend;

	init_mm.start_code = (unsigned long) &_stext;
	init_mm.end_code = (unsigned long) &_etext;
	init_mm.end_data = (unsigned long) &_edata;
	init_mm.brk = (unsigned long) 0;

	/* Board-support code gets first shot at the command line */
	config_BSP(&command_line[0], sizeof(command_line));

#if defined(CONFIG_BOOTPARAM)
	strncpy(&command_line[0], CONFIG_BOOTPARAM_STRING, sizeof(command_line));
	command_line[sizeof(command_line) - 1] = 0;
#endif /* CONFIG_BOOTPARAM */

#if defined(CONFIG_UBOOT)
	/* CONFIG_UBOOT and CONFIG_BOOTPARAM defined, concatenate cmdline */
#if defined(CONFIG_BOOTPARAM)
	/* Add the whitespace separator */
	command_line[strlen(CONFIG_BOOTPARAM_STRING)] = ' ';
	/* Parse uboot command line into the rest of the buffer */
	parse_uboot_commandline(
		&command_line[(strlen(CONFIG_BOOTPARAM_STRING)+1)],
		(sizeof(command_line) -
		(strlen(CONFIG_BOOTPARAM_STRING)+1)));
	/* Only CONFIG_UBOOT defined, create cmdline */
#else
	parse_uboot_commandline(&command_line[0], sizeof(command_line));
#endif /* CONFIG_BOOTPARAM */
	command_line[sizeof(command_line) - 1] = 0;
#endif /* CONFIG_UBOOT */

	printk(KERN_INFO "\x0F\r\n\nuClinux/" CPU "\n");

#ifdef CONFIG_UCDIMM
	printk(KERN_INFO "uCdimm by Lineo, Inc. <www.lineo.com>\n");
#endif
#ifdef CONFIG_M68VZ328
	printk(KERN_INFO "M68VZ328 support by Evan Stawnyczy <e@lineo.ca>\n");
#endif
#ifdef CONFIG_COLDFIRE
	printk(KERN_INFO "COLDFIRE port done by Greg Ungerer, gerg@snapgear.com\n");
#ifdef CONFIG_M5307
	printk(KERN_INFO "Modified for M5307 by Dave Miller, dmiller@intellistor.com\n");
#endif
#ifdef CONFIG_ELITE
	printk(KERN_INFO "Modified for M5206eLITE by Rob Scott, rscott@mtrob.fdns.net\n");
#endif
#endif
	printk(KERN_INFO "Flat model support (C) 1998,1999 Kenneth Albanowski, D. Jeff Dionne\n");

#if defined( CONFIG_PILOT ) && defined( CONFIG_M68328 )
	printk(KERN_INFO "TRG SuperPilot FLASH card support <info@trgnet.com>\n");
#endif
#if defined( CONFIG_PILOT ) && defined( CONFIG_M68EZ328 )
	printk(KERN_INFO "PalmV support by Lineo Inc. <jeff@uclinux.com>\n");
#endif
#if defined (CONFIG_M68360)
	printk(KERN_INFO "QUICC port done by SED Systems <hamilton@sedsystems.ca>,\n");
	printk(KERN_INFO "based on 2.0.38 port by Lineo Inc. <mleslie@lineo.com>.\n");
#endif
#ifdef CONFIG_DRAGEN2
	printk(KERN_INFO "DragonEngine II board support by Georges Menie\n");
#endif
#ifdef CONFIG_M5235EVB
	printk(KERN_INFO "Motorola M5235EVB support (C)2005 Syn-tech Systems, Inc. (Jate Sujjavanich)\n");
#endif

	pr_debug("KERNEL -> TEXT=0x%06x-0x%06x DATA=0x%06x-0x%06x "
		 "BSS=0x%06x-0x%06x\n", (int) &_stext, (int) &_etext,
		 (int) &_sdata, (int) &_edata,
		 (int) &_sbss, (int) &_ebss);
	pr_debug("MEMORY -> ROMFS=0x%06x-0x%06x MEM=0x%06x-0x%06x\n ",
		 (int) &_ebss, (int) memory_start,
		 (int) memory_start, (int) memory_end);

	/* Keep a copy of command line */
	*cmdline_p = &command_line[0];
	memcpy(boot_command_line, command_line, COMMAND_LINE_SIZE);
	boot_command_line[COMMAND_LINE_SIZE-1] = 0;

#ifdef DEBUG
	if (strlen(*cmdline_p))
		printk(KERN_DEBUG "Command line: '%s'\n", *cmdline_p);
#endif

#if defined(CONFIG_FRAMEBUFFER_CONSOLE) && defined(CONFIG_DUMMY_CONSOLE)
	conswitchp = &dummy_con;
#endif

	/*
	 * Give all the memory to the bootmap allocator, tell it to put the
	 * boot mem_map at the start of memory.
	 */
	bootmap_size = init_bootmem_node(
			NODE_DATA(0),
			memory_start >> PAGE_SHIFT, /* map goes here */
			PAGE_OFFSET >> PAGE_SHIFT, /* 0 on coldfire */
			memory_end >> PAGE_SHIFT);
	/*
	 * Free the usable memory, we have to make sure we do not free
	 * the bootmem bitmap so we then reserve it after freeing it :-)
	 */
	free_bootmem(memory_start, memory_end - memory_start);
	reserve_bootmem(memory_start, bootmap_size, BOOTMEM_DEFAULT);

#if defined(CONFIG_UBOOT) && defined(CONFIG_BLK_DEV_INITRD)
	/* Keep the initrd pages out of the bootmem free pool */
	if ((initrd_start > 0) && (initrd_start < initrd_end) &&
	    (initrd_end < memory_end))
		reserve_bootmem(initrd_start, initrd_end - initrd_start,
				BOOTMEM_DEFAULT);
#endif /* if defined(CONFIG_BLK_DEV_INITRD) */

	/*
	 * Get kmalloc into gear.
	 */
	paging_init();
}
/*
* Get CPU information for use by the procfs.
*/
/*
 * /proc/cpuinfo body: report the compile-time CPU name (no MMU or
 * FPU on these parts), an estimated clock derived from the BogoMIPS
 * calibration, and the raw calibration figures.
 */
static int show_cpuinfo(struct seq_file *m, void *v)
{
	const char *cpu = CPU;
	const char *mmu = "none";
	const char *fpu = "none";
	u_long clockfreq;

	/* Estimate from loops_per_jiffy; scale factor differs per family */
#ifdef CONFIG_COLDFIRE
	clockfreq = (loops_per_jiffy * HZ) * 3;
#else
	clockfreq = (loops_per_jiffy * HZ) * 16;
#endif

	seq_printf(m, "CPU:\t\t%s\n"
		   "MMU:\t\t%s\n"
		   "FPU:\t\t%s\n"
		   "Clocking:\t%lu.%1luMHz\n"
		   "BogoMips:\t%lu.%02lu\n"
		   "Calibration:\t%lu loops\n",
		   cpu, mmu, fpu,
		   clockfreq / 1000000,
		   (clockfreq / 100000) % 10,
		   (loops_per_jiffy * HZ) / 500000,
		   ((loops_per_jiffy * HZ) / 5000) % 100,
		   (loops_per_jiffy * HZ));

	return 0;
}
/*
 * seq_file ->start: yield an arbitrary non-NULL cookie while *pos is
 * within range (show_cpuinfo never looks at it), NULL to stop.
 */
static void *c_start(struct seq_file *m, loff_t *pos)
{
	if (*pos >= NR_CPUS)
		return NULL;

	return (void *) 0x12345678;
}
/* seq_file iterator: advance the position and re-validate via c_start() */
static void *c_next(struct seq_file *m, void *v, loff_t *pos)
{
	(*pos)++;
	return c_start(m, pos);
}
/* seq_file iterator stop hook: nothing to release */
static void c_stop(struct seq_file *m, void *v)
{
}

/* /proc/cpuinfo seq_file operations for this port */
const struct seq_operations cpuinfo_op = {
	.start	= c_start,
	.next	= c_next,
	.stop	= c_stop,
	.show	= show_cpuinfo,
};
| gpl-2.0 |
Kali-/htc-kernel-msm7x30-exp | arch/arm/mach-kirkwood/sheevaplug-setup.c | 906 | 3976 | /*
* arch/arm/mach-kirkwood/sheevaplug-setup.c
*
* Marvell SheevaPlug Reference Board Setup
*
* This file is licensed under the terms of the GNU General Public
* License version 2. This program is licensed "as is" without any
* warranty of any kind, whether express or implied.
*/
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/ata_platform.h>
#include <linux/mtd/partitions.h>
#include <linux/mv643xx_eth.h>
#include <linux/gpio.h>
#include <linux/leds.h>
#include <asm/mach-types.h>
#include <asm/mach/arch.h>
#include <mach/kirkwood.h>
#include <plat/mvsdio.h>
#include "common.h"
#include "mpp.h"
/*
 * NAND layout: 1MiB for u-boot, 4MiB for the kernel image, and the rest
 * of the flash for the root filesystem.  The second and third partitions
 * start at the next erase-block boundary after the previous one.
 */
static struct mtd_partition sheevaplug_nand_parts[] = {
	{
		.name = "u-boot",
		.offset = 0,
		.size = SZ_1M
	}, {
		.name = "uImage",
		.offset = MTDPART_OFS_NXTBLK,
		.size = SZ_4M
	}, {
		.name = "root",
		.offset = MTDPART_OFS_NXTBLK,
		.size = MTDPART_SIZ_FULL
	},
};
/* Gigabit Ethernet: single PHY at address 0 */
static struct mv643xx_eth_platform_data sheevaplug_ge00_data = {
	.phy_addr	= MV643XX_ETH_PHY_ADDR(0),
};

/* eSATA SheevaPlug only: two SATA ports */
static struct mv_sata_platform_data sheeva_esata_sata_data = {
	.n_ports	= 2,
};

static struct mvsdio_platform_data sheevaplug_mvsdio_data = {
	/* unfortunately the CD signal has not been connected */
};

/* eSATA variant has WP and CD wired to MPP GPIOs */
static struct mvsdio_platform_data sheeva_esata_mvsdio_data = {
	.gpio_write_protect = 44, /* MPP44 used as SD write protect */
	.gpio_card_detect = 47,	  /* MPP47 used as SD card detect */
};
/* Green "health" LED on GPIO 49, active low, lit by default */
static struct gpio_led sheevaplug_led_pins[] = {
	{
		.name			= "plug:green:health",
		.default_trigger	= "default-on",
		.gpio			= 49,
		.active_low		= 1,
	},
};

static struct gpio_led_platform_data sheevaplug_led_data = {
	.leds = sheevaplug_led_pins,
	.num_leds = ARRAY_SIZE(sheevaplug_led_pins),
};

/* leds-gpio platform device wrapping the LED table above */
static struct platform_device sheevaplug_leds = {
	.name	= "leds-gpio",
	.id	= -1,
	.dev	= {
		.platform_data	= &sheevaplug_led_data,
	}
};
/* Pin multiplexing for the plain SheevaPlug; zero-terminated list */
static unsigned int sheevaplug_mpp_config[] __initdata = {
	MPP29_GPIO,	/* USB Power Enable */
	MPP49_GPIO,	/* LED */
	0
};

/* Pin multiplexing for the eSATA variant (adds SD WP/CD lines) */
static unsigned int sheeva_esata_mpp_config[] __initdata = {
	MPP29_GPIO,	/* USB Power Enable */
	MPP44_GPIO,	/* SD Write Protect */
	MPP47_GPIO,	/* SD Card Detect */
	MPP49_GPIO,	/* LED Green */
	0
};
/*
 * Board init, shared by the SheevaPlug and its eSATA variant.
 * kirkwood_init() and the MPP setup must run before any peripheral
 * is registered.
 */
static void __init sheevaplug_init(void)
{
	int esata = machine_is_sheeva_esata();

	/* Basic SoC setup; needs to be called early. */
	kirkwood_init();

	/* Pin multiplexing differs between the two board flavours. */
	kirkwood_mpp_conf(esata ? sheeva_esata_mpp_config
				: sheevaplug_mpp_config);

	kirkwood_uart0_init();
	kirkwood_nand_init(ARRAY_AND_SIZE(sheevaplug_nand_parts), 25);

	/* MPP29 gates USB VBUS: drive it high before enabling EHCI. */
	if (gpio_request(29, "USB Power Enable") != 0 ||
	    gpio_direction_output(29, 1) != 0)
		printk(KERN_ERR "can't set up GPIO 29 (USB Power Enable)\n");
	kirkwood_ehci_init();

	kirkwood_ge00_init(&sheevaplug_ge00_data);

	if (esata) {
		/* SATA exists only on the eSATA plug; its SD slot also
		 * has write-protect and card-detect wired up. */
		kirkwood_sata_init(&sheeva_esata_sata_data);
		kirkwood_sdio_init(&sheeva_esata_mvsdio_data);
	} else {
		/* plain plug: lower power consumption, no eSATA */
		kirkwood_sdio_init(&sheevaplug_mvsdio_data);
	}

	platform_device_register(&sheevaplug_leds);
}
/* Machine record for the original SheevaPlug */
#ifdef CONFIG_MACH_SHEEVAPLUG
MACHINE_START(SHEEVAPLUG, "Marvell SheevaPlug Reference Board")
	/* Maintainer: shadi Ammouri <shadi@marvell.com> */
	.phys_io	= KIRKWOOD_REGS_PHYS_BASE,
	.io_pg_offst	= ((KIRKWOOD_REGS_VIRT_BASE) >> 18) & 0xfffc,
	.boot_params	= 0x00000100,
	.init_machine	= sheevaplug_init,
	.map_io		= kirkwood_map_io,
	.init_irq	= kirkwood_init_irq,
	.timer		= &kirkwood_timer,
MACHINE_END
#endif

/* Machine record for the eSATA variant; shares all hooks with the
 * original, the differences are handled inside sheevaplug_init() */
#ifdef CONFIG_MACH_ESATA_SHEEVAPLUG
MACHINE_START(ESATA_SHEEVAPLUG, "Marvell eSATA SheevaPlug Reference Board")
	.phys_io	= KIRKWOOD_REGS_PHYS_BASE,
	.io_pg_offst	= ((KIRKWOOD_REGS_VIRT_BASE) >> 18) & 0xfffc,
	.boot_params	= 0x00000100,
	.init_machine	= sheevaplug_init,
	.map_io		= kirkwood_map_io,
	.init_irq	= kirkwood_init_irq,
	.timer		= &kirkwood_timer,
MACHINE_END
#endif
| gpl-2.0 |
Red--Code/android_kernel_sony_msm8974ac | scripts/dtc/libfdt/fdt.c | 1674 | 6414 | /*
* libfdt - Flat Device Tree manipulation
* Copyright (C) 2006 David Gibson, IBM Corporation.
*
* libfdt is dual licensed: you can use it either under the terms of
* the GPL, or the BSD license, at your option.
*
* a) This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation; either version 2 of the
* License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public
* License along with this library; if not, write to the Free
* Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston,
* MA 02110-1301 USA
*
* Alternatively,
*
* b) Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* 1. Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
* 2. Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
* CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
* INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
* EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "libfdt_env.h"
#include <fdt.h>
#include <libfdt.h>
#include "libfdt_internal.h"
/*
 * fdt_check_header - sanity check a device tree blob header
 * @fdt: pointer to the blob
 *
 * Returns 0 if the header looks valid, otherwise a negative -FDT_ERR_*
 * code (-FDT_ERR_BADMAGIC, -FDT_ERR_BADVERSION, -FDT_ERR_BADSTATE, or
 * -FDT_ERR_BADOFFSET when a section overflows or lies outside the blob).
 */
int fdt_check_header(const void *fdt)
{
	if (fdt_magic(fdt) == FDT_MAGIC) {
		/* Complete tree */
		if (fdt_version(fdt) < FDT_FIRST_SUPPORTED_VERSION)
			return -FDT_ERR_BADVERSION;
		if (fdt_last_comp_version(fdt) > FDT_LAST_SUPPORTED_VERSION)
			return -FDT_ERR_BADVERSION;
	} else if (fdt_magic(fdt) == FDT_SW_MAGIC) {
		/* Unfinished sequential-write blob */
		if (fdt_size_dt_struct(fdt) == 0)
			return -FDT_ERR_BADSTATE;
	} else {
		return -FDT_ERR_BADMAGIC;
	}

	/*
	 * Bounds checks: offset + size must neither wrap around UINT_MAX
	 * nor run past the end of the blob.  These checks previously
	 * returned the positive FDT_ERR_BADOFFSET, which breaks the
	 * libfdt convention that errors are negative codes (every other
	 * path here, and callers via FDT_CHECK_HEADER, rely on that).
	 */
	if (fdt_off_dt_struct(fdt) > (UINT_MAX - fdt_size_dt_struct(fdt)))
		return -FDT_ERR_BADOFFSET;

	if (fdt_off_dt_strings(fdt) > (UINT_MAX - fdt_size_dt_strings(fdt)))
		return -FDT_ERR_BADOFFSET;

	if ((fdt_off_dt_struct(fdt) + fdt_size_dt_struct(fdt))
	    > fdt_totalsize(fdt))
		return -FDT_ERR_BADOFFSET;

	if ((fdt_off_dt_strings(fdt) + fdt_size_dt_strings(fdt))
	    > fdt_totalsize(fdt))
		return -FDT_ERR_BADOFFSET;

	return 0;
}
/*
 * fdt_offset_ptr - bounds-checked pointer into the structure block
 * @fdt: device tree blob
 * @offset: byte offset within the structure block
 * @len: number of bytes that must be accessible
 *
 * Returns a pointer to the requested bytes, or NULL if the range wraps
 * around or (for version >= 0x11 blobs) runs past size_dt_struct.
 * NOTE(review): for pre-0x11 blobs only the pointer-wrap check below
 * applies — confirm callers tolerate that weaker bound.
 */
const void *fdt_offset_ptr(const void *fdt, int offset, unsigned int len)
{
	const char *p;

	if (fdt_version(fdt) >= 0x11)
		if (((offset + len) < offset)
		    || ((offset + len) > fdt_size_dt_struct(fdt)))
			return NULL;

	p = _fdt_offset_ptr(fdt, offset);

	/* reject pointer-arithmetic overflow */
	if (p + len < p)
		return NULL;
	return p;
}
/*
 * fdt_next_tag - read the tag at @startoffset, skip its payload, and
 * store the offset of the following tag in *nextoffset.
 *
 * Returns the tag value (FDT_BEGIN_NODE, FDT_END_NODE, FDT_PROP,
 * FDT_NOP or FDT_END).  FDT_END is also returned for a truncated or
 * malformed blob; in that case *nextoffset carries -FDT_ERR_TRUNCATED
 * or -FDT_ERR_BADSTRUCTURE instead of a valid offset.
 */
uint32_t fdt_next_tag(const void *fdt, int startoffset, int *nextoffset)
{
	const uint32_t *tagp, *lenp;
	uint32_t tag;
	int offset = startoffset;
	const char *p;

	*nextoffset = -FDT_ERR_TRUNCATED;
	tagp = fdt_offset_ptr(fdt, offset, FDT_TAGSIZE);
	if (!tagp)
		return FDT_END; /* premature end */
	tag = fdt32_to_cpu(*tagp);
	offset += FDT_TAGSIZE;

	/* from here on failures mean bad structure, not truncation */
	*nextoffset = -FDT_ERR_BADSTRUCTURE;
	switch (tag) {
	case FDT_BEGIN_NODE:
		/* skip name */
		do {
			p = fdt_offset_ptr(fdt, offset++, 1);
		} while (p && (*p != '\0'));
		if (!p)
			return FDT_END; /* premature end */
		break;

	case FDT_PROP:
		lenp = fdt_offset_ptr(fdt, offset, sizeof(*lenp));
		if (!lenp)
			return FDT_END; /* premature end */
		/* skip-name offset, length and value */
		offset += sizeof(struct fdt_property) - FDT_TAGSIZE
			+ fdt32_to_cpu(*lenp);
		break;

	case FDT_END:
	case FDT_END_NODE:
	case FDT_NOP:
		break;

	default:
		return FDT_END;
	}

	/* verify the whole record lies inside the structure block */
	if (!fdt_offset_ptr(fdt, startoffset, offset - startoffset))
		return FDT_END; /* premature end */

	*nextoffset = FDT_TAGALIGN(offset);
	return tag;
}
/*
 * Validate that @offset points at a FDT_BEGIN_NODE tag.
 * Returns the offset of the following tag, or -FDT_ERR_BADOFFSET.
 */
int _fdt_check_node_offset(const void *fdt, int offset)
{
	if (offset < 0)
		return -FDT_ERR_BADOFFSET;
	if (offset % FDT_TAGSIZE)
		return -FDT_ERR_BADOFFSET;
	if (fdt_next_tag(fdt, offset, &offset) != FDT_BEGIN_NODE)
		return -FDT_ERR_BADOFFSET;

	return offset;
}
/*
 * Validate that @offset points at a FDT_PROP tag.
 * Returns the offset of the following tag, or -FDT_ERR_BADOFFSET.
 */
int _fdt_check_prop_offset(const void *fdt, int offset)
{
	if (offset < 0)
		return -FDT_ERR_BADOFFSET;
	if (offset % FDT_TAGSIZE)
		return -FDT_ERR_BADOFFSET;
	if (fdt_next_tag(fdt, offset, &offset) != FDT_PROP)
		return -FDT_ERR_BADOFFSET;

	return offset;
}
/*
 * fdt_next_node - find the next node after @offset in the blob
 * @fdt: device tree blob
 * @offset: offset of a node, or negative to start from the beginning
 * @depth: in/out nesting depth, adjusted as BEGIN/END_NODE tags pass;
 *         may be NULL if the caller does not track depth
 *
 * Returns the offset of the next node, or a negative -FDT_ERR_* code
 * (-FDT_ERR_NOTFOUND once the tree is exhausted).
 */
int fdt_next_node(const void *fdt, int offset, int *depth)
{
	int nextoffset = 0;
	uint32_t tag;

	if (offset >= 0)
		if ((nextoffset = _fdt_check_node_offset(fdt, offset)) < 0)
			return nextoffset;

	do {
		offset = nextoffset;
		tag = fdt_next_tag(fdt, offset, &nextoffset);

		switch (tag) {
		case FDT_PROP:
		case FDT_NOP:
			break;

		case FDT_BEGIN_NODE:
			if (depth)
				(*depth)++;
			break;

		case FDT_END_NODE:
			/* popping above the starting depth ends the walk */
			if (depth && ((--(*depth)) < 0))
				return nextoffset;
			break;

		case FDT_END:
			/* truncation with no depth tracking is treated as a
			 * normal end of tree */
			if ((nextoffset >= 0)
			    || ((nextoffset == -FDT_ERR_TRUNCATED) && !depth))
				return -FDT_ERR_NOTFOUND;
			else
				return nextoffset;
		}
	} while (tag != FDT_BEGIN_NODE);

	return offset;
}
/*
 * Search the strings block for an exact occurrence of @s (including its
 * NUL terminator).  Returns a pointer into @strtab, or NULL.
 */
const char *_fdt_find_string(const char *strtab, int tabsize, const char *s)
{
	int len = strlen(s) + 1;
	const char *end = strtab + tabsize;
	const char *p = strtab;

	while (p + len <= end) {
		if (memcmp(p, s, len) == 0)
			return p;
		p++;
	}
	return NULL;
}
/*
 * fdt_move - copy a device tree blob into @buf
 *
 * Returns 0, a header-validation error (via FDT_CHECK_HEADER), or
 * -FDT_ERR_NOSPACE if @bufsize is smaller than the blob.  memmove()
 * is used, so source and destination may overlap.
 */
int fdt_move(const void *fdt, void *buf, int bufsize)
{
	FDT_CHECK_HEADER(fdt);

	if (fdt_totalsize(fdt) > bufsize)
		return -FDT_ERR_NOSPACE;

	memmove(buf, fdt, fdt_totalsize(fdt));
	return 0;
}
| gpl-2.0 |
peterzhu0503/kernel_rk2926 | net/mac80211/mesh_pathtbl.c | 2442 | 22158 | /*
* Copyright (c) 2008, 2009 open80211s Ltd.
* Author: Luis Carlos Cobo <luisca@cozybit.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/etherdevice.h>
#include <linux/list.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <net/mac80211.h>
#include "ieee80211_i.h"
#include "mesh.h"
/* There will be initially 2^INIT_PATHS_SIZE_ORDER buckets */
#define INIT_PATHS_SIZE_ORDER	2

/* Keep the mean chain length below this constant */
#define MEAN_CHAIN_LEN		2

/* A path is "expired" once it is active, past its expiry time, and not
 * pinned by MESH_PATH_FIXED */
#define MPATH_EXPIRED(mpath) ((mpath->flags & MESH_PATH_ACTIVE) && \
				time_after(jiffies, mpath->exp_time) && \
				!(mpath->flags & MESH_PATH_FIXED))

struct mpath_node {
	struct hlist_node list;
	struct rcu_head rcu;
	/* This indirection allows two different tables to point to the same
	 * mesh_path structure, useful when resizing
	 */
	struct mesh_path *mpath;
};

/* RCU-published hash tables; replaced wholesale when grown */
static struct mesh_table __rcu *mesh_paths;
static struct mesh_table __rcu *mpp_paths; /* Store paths for MPP&MAP */

/* bumped on every add/delete so dump iterators can detect changes */
int mesh_paths_generation;

/* This lock will have the grow table function as writer and add / delete nodes
 * as readers. When reading the table (i.e. doing lookups) we are well protected
 * by RCU
 */
static DEFINE_RWLOCK(pathtbl_resize_lock);

/* dereference mesh_paths; caller must hold pathtbl_resize_lock */
static inline struct mesh_table *resize_dereference_mesh_paths(void)
{
	return rcu_dereference_protected(mesh_paths,
		lockdep_is_held(&pathtbl_resize_lock));
}

/* dereference mpp_paths; caller must hold pathtbl_resize_lock */
static inline struct mesh_table *resize_dereference_mpp_paths(void)
{
	return rcu_dereference_protected(mpp_paths,
		lockdep_is_held(&pathtbl_resize_lock));
}

/*
 * CAREFUL -- "tbl" must not be an expression,
 * in particular not an rcu_dereference(), since
 * it's used twice. So it is illegal to do
 * for_each_mesh_entry(rcu_dereference(...), ...)
 */
#define for_each_mesh_entry(tbl, p, node, i) \
	for (i = 0; i <= tbl->hash_mask; i++) \
		hlist_for_each_entry_rcu(node, p, &tbl->hash_buckets[i], list)
/*
 * Allocate a hash table with 2^size_order buckets plus one spinlock per
 * bucket.  Returns NULL on allocation failure.  GFP_ATOMIC because this
 * can run from non-sleeping contexts.
 */
static struct mesh_table *mesh_table_alloc(int size_order)
{
	struct mesh_table *newtbl;
	int i;

	newtbl = kmalloc(sizeof(struct mesh_table), GFP_ATOMIC);
	if (!newtbl)
		return NULL;

	newtbl->hash_buckets = kzalloc(sizeof(struct hlist_head) *
			(1 << size_order), GFP_ATOMIC);
	if (!newtbl->hash_buckets)
		goto free_table;

	newtbl->hashwlock = kmalloc(sizeof(spinlock_t) *
			(1 << size_order), GFP_ATOMIC);
	if (!newtbl->hashwlock)
		goto free_buckets;

	newtbl->size_order = size_order;
	newtbl->hash_mask = (1 << size_order) - 1;
	atomic_set(&newtbl->entries, 0);
	/* random hash seed to make chain lengths unpredictable */
	get_random_bytes(&newtbl->hash_rnd, sizeof(newtbl->hash_rnd));
	for (i = 0; i <= newtbl->hash_mask; i++)
		spin_lock_init(&newtbl->hashwlock[i]);

	return newtbl;

free_buckets:
	kfree(newtbl->hash_buckets);
free_table:
	kfree(newtbl);
	return NULL;
}
/* free the bucket array, the per-bucket locks and the table itself;
 * does not touch any entries still linked in the buckets */
static void __mesh_table_free(struct mesh_table *tbl)
{
	kfree(tbl->hash_buckets);
	kfree(tbl->hashwlock);
	kfree(tbl);
}
/*
 * mesh_table_free - drain every bucket and free the table
 * @tbl: table to destroy
 * @free_leafs: passed to tbl->free_node; true also frees the mesh_path
 *              leaves, false keeps them (used after a resize)
 */
static void mesh_table_free(struct mesh_table *tbl, bool free_leafs)
{
	struct hlist_head *mesh_hash;
	struct hlist_node *p, *q;
	int i;

	mesh_hash = tbl->hash_buckets;
	for (i = 0; i <= tbl->hash_mask; i++) {
		/* each bucket is drained under its own lock */
		spin_lock_bh(&tbl->hashwlock[i]);
		hlist_for_each_safe(p, q, &mesh_hash[i]) {
			tbl->free_node(p, free_leafs);
			atomic_dec(&tbl->entries);
		}
		spin_unlock_bh(&tbl->hashwlock[i]);
	}
	__mesh_table_free(tbl);
}
/*
 * mesh_table_grow - copy all entries of @oldtbl into the larger @newtbl
 *
 * Returns 0 on success, -EAGAIN if the old table is not full enough to
 * justify growing, or -ENOMEM if copying a node fails (every node
 * already copied into @newtbl is freed again in that case).
 */
static int mesh_table_grow(struct mesh_table *oldtbl,
			   struct mesh_table *newtbl)
{
	struct hlist_head *oldhash;
	struct hlist_node *p, *q;
	int i;

	/* only grow once the mean chain length has been reached */
	if (atomic_read(&oldtbl->entries)
			< oldtbl->mean_chain_len * (oldtbl->hash_mask + 1))
		return -EAGAIN;

	newtbl->free_node = oldtbl->free_node;
	newtbl->mean_chain_len = oldtbl->mean_chain_len;
	newtbl->copy_node = oldtbl->copy_node;
	atomic_set(&newtbl->entries, atomic_read(&oldtbl->entries));

	oldhash = oldtbl->hash_buckets;
	for (i = 0; i <= oldtbl->hash_mask; i++)
		hlist_for_each(p, &oldhash[i])
			if (oldtbl->copy_node(p, newtbl) < 0)
				goto errcopy;

	return 0;

errcopy:
	/* undo the partial copy; leaves stay owned by the old table */
	for (i = 0; i <= newtbl->hash_mask; i++) {
		hlist_for_each_safe(p, q, &newtbl->hash_buckets[i])
			oldtbl->free_node(p, 0);
	}
	return -ENOMEM;
}
/* Bucket index for (@addr, @sdata): hash the low four bytes of the MAC
 * address together with the interface index, seeded per-table. */
static u32 mesh_table_hash(u8 *addr, struct ieee80211_sub_if_data *sdata,
			   struct mesh_table *tbl)
{
	u32 addr_low = *(u32 *)(addr + 2);

	return jhash_2words(addr_low, sdata->dev->ifindex,
			    tbl->hash_rnd) & tbl->hash_mask;
}
/**
 *
 * mesh_path_assign_nexthop - update mesh path next hop
 *
 * @mpath: mesh path to update
 * @sta: next hop to assign
 *
 * Locking: mpath->state_lock must be held when calling this function
 */
void mesh_path_assign_nexthop(struct mesh_path *mpath, struct sta_info *sta)
{
	struct sk_buff *skb;
	struct ieee80211_hdr *hdr;
	struct sk_buff_head tmpq;
	unsigned long flags;

	rcu_assign_pointer(mpath->next_hop, sta);

	/* rewrite the receiver address (addr1) of every queued frame to
	 * the new next hop; done through a temporary queue while holding
	 * the frame-queue lock for the whole rewrite */
	__skb_queue_head_init(&tmpq);

	spin_lock_irqsave(&mpath->frame_queue.lock, flags);

	while ((skb = __skb_dequeue(&mpath->frame_queue)) != NULL) {
		hdr = (struct ieee80211_hdr *) skb->data;
		memcpy(hdr->addr1, sta->sta.addr, ETH_ALEN);
		__skb_queue_tail(&tmpq, skb);
	}

	skb_queue_splice(&tmpq, &mpath->frame_queue);
	spin_unlock_irqrestore(&mpath->frame_queue.lock, flags);
}
/**
 * mesh_path_lookup - look up a path in the mesh path table
 * @dst: hardware address (ETH_ALEN length) of destination
 * @sdata: local subif
 *
 * Returns: pointer to the mesh path structure, or NULL if not found
 *
 * Locking: must be called within a read rcu section.
 */
struct mesh_path *mesh_path_lookup(u8 *dst, struct ieee80211_sub_if_data *sdata)
{
	struct mesh_path *mpath;
	struct hlist_node *n;
	struct hlist_head *bucket;
	struct mesh_table *tbl;
	struct mpath_node *node;

	tbl = rcu_dereference(mesh_paths);

	bucket = &tbl->hash_buckets[mesh_table_hash(dst, sdata, tbl)];
	hlist_for_each_entry_rcu(node, n, bucket, list) {
		mpath = node->mpath;
		if (mpath->sdata == sdata &&
				memcmp(dst, mpath->dst, ETH_ALEN) == 0) {
			if (MPATH_EXPIRED(mpath)) {
				/* double-check under state_lock before
				 * clearing the ACTIVE flag */
				spin_lock_bh(&mpath->state_lock);
				if (MPATH_EXPIRED(mpath))
					mpath->flags &= ~MESH_PATH_ACTIVE;
				spin_unlock_bh(&mpath->state_lock);
			}
			return mpath;
		}
	}
	return NULL;
}
/* Same as mesh_path_lookup(), but against the proxy-path (MPP) table.
 * Locking: must be called within a read rcu section. */
struct mesh_path *mpp_path_lookup(u8 *dst, struct ieee80211_sub_if_data *sdata)
{
	struct mesh_path *mpath;
	struct hlist_node *n;
	struct hlist_head *bucket;
	struct mesh_table *tbl;
	struct mpath_node *node;

	tbl = rcu_dereference(mpp_paths);

	bucket = &tbl->hash_buckets[mesh_table_hash(dst, sdata, tbl)];
	hlist_for_each_entry_rcu(node, n, bucket, list) {
		mpath = node->mpath;
		if (mpath->sdata == sdata &&
		    memcmp(dst, mpath->dst, ETH_ALEN) == 0) {
			if (MPATH_EXPIRED(mpath)) {
				/* double-check under state_lock before
				 * clearing the ACTIVE flag */
				spin_lock_bh(&mpath->state_lock);
				if (MPATH_EXPIRED(mpath))
					mpath->flags &= ~MESH_PATH_ACTIVE;
				spin_unlock_bh(&mpath->state_lock);
			}
			return mpath;
		}
	}
	return NULL;
}
/**
 * mesh_path_lookup_by_idx - look up a path in the mesh path table by its index
 * @idx: index
 * @sdata: local subif, or NULL for all entries
 *
 * Returns: pointer to the mesh path structure, or NULL if not found.
 *
 * Locking: must be called within a read rcu section.
 *
 * Note: walks the table from the start, so cost grows with @idx.
 */
struct mesh_path *mesh_path_lookup_by_idx(int idx, struct ieee80211_sub_if_data *sdata)
{
	struct mesh_table *tbl = rcu_dereference(mesh_paths);
	struct mpath_node *node;
	struct hlist_node *p;
	int i;
	int j = 0;

	for_each_mesh_entry(tbl, p, node, i) {
		if (sdata && node->mpath->sdata != sdata)
			continue;
		if (j++ == idx) {
			if (MPATH_EXPIRED(node->mpath)) {
				/* double-check under state_lock before
				 * clearing the ACTIVE flag */
				spin_lock_bh(&node->mpath->state_lock);
				if (MPATH_EXPIRED(node->mpath))
					node->mpath->flags &= ~MESH_PATH_ACTIVE;
				spin_unlock_bh(&node->mpath->state_lock);
			}
			return node->mpath;
		}
	}

	return NULL;
}
/**
 * mesh_path_add - allocate and add a new path to the mesh path table
 * @dst: destination address of the path (ETH_ALEN length)
 * @sdata: local subif
 *
 * Returns: 0 on success; -ENOTSUPP for our own or multicast addresses,
 * -ENOSPC once MESH_MAX_MPATHS is reached, -ENOMEM, or -EEXIST.
 *
 * State: the initial state of the new path is set to 0
 */
int mesh_path_add(u8 *dst, struct ieee80211_sub_if_data *sdata)
{
	struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
	struct ieee80211_local *local = sdata->local;
	struct mesh_table *tbl;
	struct mesh_path *mpath, *new_mpath;
	struct mpath_node *node, *new_node;
	struct hlist_head *bucket;
	struct hlist_node *n;
	int grow = 0;
	int err = 0;
	u32 hash_idx;

	if (memcmp(dst, sdata->vif.addr, ETH_ALEN) == 0)
		/* never add ourselves as neighbours */
		return -ENOTSUPP;

	if (is_multicast_ether_addr(dst))
		return -ENOTSUPP;

	/* reserve a slot; atomic_add_unless caps the total path count */
	if (atomic_add_unless(&sdata->u.mesh.mpaths, 1, MESH_MAX_MPATHS) == 0)
		return -ENOSPC;

	err = -ENOMEM;
	new_mpath = kzalloc(sizeof(struct mesh_path), GFP_ATOMIC);
	if (!new_mpath)
		goto err_path_alloc;

	new_node = kmalloc(sizeof(struct mpath_node), GFP_ATOMIC);
	if (!new_node)
		goto err_node_alloc;

	/* read-held: the table may not be resized while we insert */
	read_lock_bh(&pathtbl_resize_lock);
	memcpy(new_mpath->dst, dst, ETH_ALEN);
	new_mpath->sdata = sdata;
	new_mpath->flags = 0;
	skb_queue_head_init(&new_mpath->frame_queue);
	new_node->mpath = new_mpath;
	new_mpath->timer.data = (unsigned long) new_mpath;
	new_mpath->timer.function = mesh_path_timer;
	new_mpath->exp_time = jiffies;
	spin_lock_init(&new_mpath->state_lock);
	init_timer(&new_mpath->timer);

	tbl = resize_dereference_mesh_paths();

	hash_idx = mesh_table_hash(dst, sdata, tbl);
	bucket = &tbl->hash_buckets[hash_idx];

	spin_lock_bh(&tbl->hashwlock[hash_idx]);

	err = -EEXIST;
	hlist_for_each_entry(node, n, bucket, list) {
		mpath = node->mpath;
		if (mpath->sdata == sdata && memcmp(dst, mpath->dst, ETH_ALEN) == 0)
			goto err_exists;
	}

	hlist_add_head_rcu(&new_node->list, bucket);
	if (atomic_inc_return(&tbl->entries) >=
	    tbl->mean_chain_len * (tbl->hash_mask + 1))
		grow = 1;

	mesh_paths_generation++;

	spin_unlock_bh(&tbl->hashwlock[hash_idx]);
	read_unlock_bh(&pathtbl_resize_lock);
	if (grow) {
		/* defer the table resize to process context */
		set_bit(MESH_WORK_GROW_MPATH_TABLE, &ifmsh->wrkq_flags);
		ieee80211_queue_work(&local->hw, &sdata->work);
	}
	return 0;

err_exists:
	spin_unlock_bh(&tbl->hashwlock[hash_idx]);
	read_unlock_bh(&pathtbl_resize_lock);
	kfree(new_node);
err_node_alloc:
	kfree(new_mpath);
err_path_alloc:
	/* release the slot reserved above */
	atomic_dec(&sdata->u.mesh.mpaths);
	return err;
}
/* RCU callback: the old table is unreachable now; free its buckets and
 * locks but keep the leaves, which were copied into the new table */
static void mesh_table_free_rcu(struct rcu_head *rcu)
{
	struct mesh_table *tbl = container_of(rcu, struct mesh_table, rcu_head);

	mesh_table_free(tbl, false);
}
/* Double the mesh path table and publish the new one via RCU; the old
 * table is freed after a grace period.  Keeps the old table if the new
 * one cannot be allocated or the old one is not yet full enough. */
void mesh_mpath_table_grow(void)
{
	struct mesh_table *oldtbl, *newtbl;

	write_lock_bh(&pathtbl_resize_lock);
	oldtbl = resize_dereference_mesh_paths();
	newtbl = mesh_table_alloc(oldtbl->size_order + 1);
	if (!newtbl)
		goto out;
	if (mesh_table_grow(oldtbl, newtbl) < 0) {
		__mesh_table_free(newtbl);
		goto out;
	}
	rcu_assign_pointer(mesh_paths, newtbl);
	call_rcu(&oldtbl->rcu_head, mesh_table_free_rcu);

out:
	write_unlock_bh(&pathtbl_resize_lock);
}
/* Same as mesh_mpath_table_grow(), but for the proxy-path (MPP) table */
void mesh_mpp_table_grow(void)
{
	struct mesh_table *oldtbl, *newtbl;

	write_lock_bh(&pathtbl_resize_lock);
	oldtbl = resize_dereference_mpp_paths();
	newtbl = mesh_table_alloc(oldtbl->size_order + 1);
	if (!newtbl)
		goto out;
	if (mesh_table_grow(oldtbl, newtbl) < 0) {
		__mesh_table_free(newtbl);
		goto out;
	}
	rcu_assign_pointer(mpp_paths, newtbl);
	call_rcu(&oldtbl->rcu_head, mesh_table_free_rcu);

out:
	write_unlock_bh(&pathtbl_resize_lock);
}
/**
 * mpp_path_add - add a proxy path (destination reachable through @mpp)
 * @dst: final destination address (ETH_ALEN length)
 * @mpp: mesh proxy through which @dst can be reached
 * @sdata: local subif
 *
 * Returns: 0 on success; -ENOTSUPP for our own or multicast addresses,
 * -ENOMEM, or -EEXIST.  Unlike mesh_path_add(), proxy paths have no
 * timer and do not count against MESH_MAX_MPATHS.
 */
int mpp_path_add(u8 *dst, u8 *mpp, struct ieee80211_sub_if_data *sdata)
{
	struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
	struct ieee80211_local *local = sdata->local;
	struct mesh_table *tbl;
	struct mesh_path *mpath, *new_mpath;
	struct mpath_node *node, *new_node;
	struct hlist_head *bucket;
	struct hlist_node *n;
	int grow = 0;
	int err = 0;
	u32 hash_idx;

	if (memcmp(dst, sdata->vif.addr, ETH_ALEN) == 0)
		/* never add ourselves as neighbours */
		return -ENOTSUPP;

	if (is_multicast_ether_addr(dst))
		return -ENOTSUPP;

	err = -ENOMEM;
	new_mpath = kzalloc(sizeof(struct mesh_path), GFP_ATOMIC);
	if (!new_mpath)
		goto err_path_alloc;

	new_node = kmalloc(sizeof(struct mpath_node), GFP_ATOMIC);
	if (!new_node)
		goto err_node_alloc;

	/* read-held: the table may not be resized while we insert */
	read_lock_bh(&pathtbl_resize_lock);
	memcpy(new_mpath->dst, dst, ETH_ALEN);
	memcpy(new_mpath->mpp, mpp, ETH_ALEN);
	new_mpath->sdata = sdata;
	new_mpath->flags = 0;
	skb_queue_head_init(&new_mpath->frame_queue);
	new_node->mpath = new_mpath;
	new_mpath->exp_time = jiffies;
	spin_lock_init(&new_mpath->state_lock);

	tbl = resize_dereference_mpp_paths();

	hash_idx = mesh_table_hash(dst, sdata, tbl);
	bucket = &tbl->hash_buckets[hash_idx];

	spin_lock_bh(&tbl->hashwlock[hash_idx]);

	err = -EEXIST;
	hlist_for_each_entry(node, n, bucket, list) {
		mpath = node->mpath;
		if (mpath->sdata == sdata && memcmp(dst, mpath->dst, ETH_ALEN) == 0)
			goto err_exists;
	}

	hlist_add_head_rcu(&new_node->list, bucket);
	if (atomic_inc_return(&tbl->entries) >=
	    tbl->mean_chain_len * (tbl->hash_mask + 1))
		grow = 1;

	spin_unlock_bh(&tbl->hashwlock[hash_idx]);
	read_unlock_bh(&pathtbl_resize_lock);
	if (grow) {
		/* defer the table resize to process context */
		set_bit(MESH_WORK_GROW_MPP_TABLE, &ifmsh->wrkq_flags);
		ieee80211_queue_work(&local->hw, &sdata->work);
	}
	return 0;

err_exists:
	spin_unlock_bh(&tbl->hashwlock[hash_idx]);
	read_unlock_bh(&pathtbl_resize_lock);
	kfree(new_node);
err_node_alloc:
	kfree(new_mpath);
err_path_alloc:
	return err;
}
/**
 * mesh_plink_broken - deactivates paths and sends perr when a link breaks
 *
 * @sta: broken peer link
 *
 * This function must be called from the rate control algorithm if enough
 * delivery errors suggest that a peer link is no longer usable.
 */
void mesh_plink_broken(struct sta_info *sta)
{
	struct mesh_table *tbl;
	static const u8 bcast[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
	struct mesh_path *mpath;
	struct mpath_node *node;
	struct hlist_node *p;
	struct ieee80211_sub_if_data *sdata = sta->sdata;
	int i;

	rcu_read_lock();
	tbl = rcu_dereference(mesh_paths);
	for_each_mesh_entry(tbl, p, node, i) {
		mpath = node->mpath;
		spin_lock_bh(&mpath->state_lock);
		/* only active, non-fixed paths through the broken peer */
		if (rcu_dereference(mpath->next_hop) == sta &&
		    mpath->flags & MESH_PATH_ACTIVE &&
		    !(mpath->flags & MESH_PATH_FIXED)) {
			/* deactivate, bump the sequence number, and
			 * broadcast a PERR for this destination */
			mpath->flags &= ~MESH_PATH_ACTIVE;
			++mpath->sn;
			spin_unlock_bh(&mpath->state_lock);
			mesh_path_error_tx(sdata->u.mesh.mshcfg.element_ttl,
					mpath->dst, cpu_to_le32(mpath->sn),
					cpu_to_le16(PERR_RCODE_DEST_UNREACH),
					bcast, sdata);
		} else
			spin_unlock_bh(&mpath->state_lock);
	}
	rcu_read_unlock();
}
/**
 * mesh_path_flush_by_nexthop - Deletes mesh paths if their next hop matches
 *
 * @sta - mesh peer to match
 *
 * RCU notes: this function is called when a mesh plink transitions from
 * PLINK_ESTAB to any other state, since PLINK_ESTAB state is the only one that
 * allows path creation. This will happen before the sta can be freed (because
 * sta_info_destroy() calls this) so any reader in a rcu read block will be
 * protected against the plink disappearing.
 *
 * NOTE(review): entries are removed via mesh_path_del() while iterating
 * under rcu_read_lock; hlist_for_each_entry_rcu is expected to tolerate
 * removal of the current node — confirm on this kernel version.
 */
void mesh_path_flush_by_nexthop(struct sta_info *sta)
{
	struct mesh_table *tbl;
	struct mesh_path *mpath;
	struct mpath_node *node;
	struct hlist_node *p;
	int i;

	rcu_read_lock();
	tbl = rcu_dereference(mesh_paths);
	for_each_mesh_entry(tbl, p, node, i) {
		mpath = node->mpath;
		if (rcu_dereference(mpath->next_hop) == sta)
			mesh_path_del(mpath->dst, mpath->sdata);
	}
	rcu_read_unlock();
}
/* Delete every mesh path belonging to @sdata (interface teardown).
 * Runs under rcu_read_lock; mesh_path_del() only takes non-sleeping
 * _bh locks. */
void mesh_path_flush(struct ieee80211_sub_if_data *sdata)
{
	struct mesh_table *tbl;
	struct mesh_path *mpath;
	struct mpath_node *node;
	struct hlist_node *p;
	int i;

	rcu_read_lock();
	tbl = rcu_dereference(mesh_paths);
	for_each_mesh_entry(tbl, p, node, i) {
		mpath = node->mpath;
		if (mpath->sdata == sdata)
			mesh_path_del(mpath->dst, mpath->sdata);
	}
	rcu_read_unlock();
}
/* RCU callback freeing one deleted path node and its mesh_path, after
 * stopping the path timer and releasing the interface's path count.
 * NOTE(review): del_timer_sync() executes here in RCU-callback (softirq)
 * context — confirm this is safe for mesh_path_timer on this kernel. */
static void mesh_path_node_reclaim(struct rcu_head *rp)
{
	struct mpath_node *node = container_of(rp, struct mpath_node, rcu);
	struct ieee80211_sub_if_data *sdata = node->mpath->sdata;

	del_timer_sync(&node->mpath->timer);
	atomic_dec(&sdata->u.mesh.mpaths);
	kfree(node->mpath);
	kfree(node);
}
/**
 * mesh_path_del - delete a mesh path from the table
 *
 * @addr: destination address of the path to delete (ETH_ALEN length)
 * @sdata: local subif
 *
 * Returns: 0 if successful, -ENXIO if no matching path exists.  The
 * node is unlinked immediately; the mesh_path itself is freed after an
 * RCU grace period via mesh_path_node_reclaim().
 */
int mesh_path_del(u8 *addr, struct ieee80211_sub_if_data *sdata)
{
	struct mesh_table *tbl;
	struct mesh_path *mpath;
	struct mpath_node *node;
	struct hlist_head *bucket;
	struct hlist_node *n;
	int hash_idx;
	int err = 0;

	read_lock_bh(&pathtbl_resize_lock);
	tbl = resize_dereference_mesh_paths();
	hash_idx = mesh_table_hash(addr, sdata, tbl);
	bucket = &tbl->hash_buckets[hash_idx];

	spin_lock_bh(&tbl->hashwlock[hash_idx]);
	hlist_for_each_entry(node, n, bucket, list) {
		mpath = node->mpath;
		if (mpath->sdata == sdata &&
		    memcmp(addr, mpath->dst, ETH_ALEN) == 0) {
			spin_lock_bh(&mpath->state_lock);
			/* mark resolving so concurrent users back off */
			mpath->flags |= MESH_PATH_RESOLVING;
			hlist_del_rcu(&node->list);
			call_rcu(&node->rcu, mesh_path_node_reclaim);
			atomic_dec(&tbl->entries);
			spin_unlock_bh(&mpath->state_lock);
			goto enddel;
		}
	}

	err = -ENXIO;
enddel:
	mesh_paths_generation++;
	spin_unlock_bh(&tbl->hashwlock[hash_idx]);
	read_unlock_bh(&pathtbl_resize_lock);
	return err;
}
/**
 * mesh_path_tx_pending - sends pending frames in a mesh path queue
 *
 * @mpath: mesh path to activate
 *
 * Locking: the state_lock of the mpath structure must NOT be held when calling
 * this function.
 */
void mesh_path_tx_pending(struct mesh_path *mpath)
{
	if (!(mpath->flags & MESH_PATH_ACTIVE))
		return;

	ieee80211_add_pending_skbs(mpath->sdata->local,
				   &mpath->frame_queue);
}
/**
 * mesh_path_discard_frame - discard a frame whose path could not be resolved
 *
 * @skb: frame to discard
 * @sdata: network subif the frame was to be sent through
 *
 * If the frame was being forwarded from another MP, a PERR frame will be sent
 * to the precursor. The precursor's address (i.e. the previous hop) was saved
 * in addr1 of the frame-to-be-forwarded, and would only be overwritten once
 * the destination is successfully resolved.
 *
 * Locking: the function must be called within a rcu_read_lock region
 */
void mesh_path_discard_frame(struct sk_buff *skb,
			     struct ieee80211_sub_if_data *sdata)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
	struct mesh_path *mpath;
	u32 sn = 0;

	/* addr4 != our address means we were forwarding this frame for
	 * another MP: tell the precursor (addr1) via a PERR */
	if (memcmp(hdr->addr4, sdata->vif.addr, ETH_ALEN) != 0) {
		u8 *ra, *da;

		da = hdr->addr3;
		ra = hdr->addr1;
		mpath = mesh_path_lookup(da, sdata);
		if (mpath)
			sn = ++mpath->sn;
		mesh_path_error_tx(sdata->u.mesh.mshcfg.element_ttl, skb->data,
				   cpu_to_le32(sn),
				   cpu_to_le16(PERR_RCODE_NO_ROUTE), ra, sdata);
	}

	kfree_skb(skb);
	sdata->u.mesh.mshstats.dropped_frames_no_route++;
}
/**
* mesh_path_flush_pending - free the pending queue of a mesh path
*
* @mpath: mesh path whose queue has to be freed
*
* Locking: the function must me called within a rcu_read_lock region
*/
void mesh_path_flush_pending(struct mesh_path *mpath)
{
struct sk_buff *skb;
while ((skb = skb_dequeue(&mpath->frame_queue)) &&
(mpath->flags & MESH_PATH_ACTIVE))
mesh_path_discard_frame(skb, mpath->sdata);
}
/**
 * mesh_path_fix_nexthop - force a specific next hop for a mesh path
 *
 * @mpath: the mesh path to modify
 * @next_hop: the next hop to force
 *
 * Locking: this function must be called holding mpath->state_lock
 */
void mesh_path_fix_nexthop(struct mesh_path *mpath, struct sta_info *next_hop)
{
	spin_lock_bh(&mpath->state_lock);
	mesh_path_assign_nexthop(mpath, next_hop);
	/* reset routing metrics and pin the path so it never expires and
	 * is skipped by mesh_plink_broken()/mesh_path_expire() */
	mpath->sn = 0xffff;
	mpath->metric = 0;
	mpath->hop_count = 0;
	mpath->exp_time = 0;
	mpath->flags |= MESH_PATH_FIXED;
	mesh_path_activate(mpath);
	spin_unlock_bh(&mpath->state_lock);
	/* now that the path is active, drain any queued frames */
	mesh_path_tx_pending(mpath);
}
/* bucket destructor: unlink the node; with @free_leafs also stop the
 * path timer and free the mesh_path leaf itself */
static void mesh_path_node_free(struct hlist_node *p, bool free_leafs)
{
	struct mesh_path *mpath;
	struct mpath_node *node = hlist_entry(p, struct mpath_node, list);
	mpath = node->mpath;
	hlist_del_rcu(p);
	if (free_leafs) {
		del_timer_sync(&mpath->timer);
		kfree(mpath);
	}
	kfree(node);
}
/* resize helper: allocate a new node pointing at the SAME mesh_path
 * leaf and insert it into @newtbl's matching bucket.  Returns 0 or
 * -ENOMEM. */
static int mesh_path_node_copy(struct hlist_node *p, struct mesh_table *newtbl)
{
	struct mesh_path *mpath;
	struct mpath_node *node, *new_node;
	u32 hash_idx;

	new_node = kmalloc(sizeof(struct mpath_node), GFP_ATOMIC);
	if (new_node == NULL)
		return -ENOMEM;

	node = hlist_entry(p, struct mpath_node, list);
	mpath = node->mpath;
	new_node->mpath = mpath;
	hash_idx = mesh_table_hash(mpath->dst, mpath->sdata, newtbl);
	hlist_add_head(&new_node->list,
			&newtbl->hash_buckets[hash_idx]);
	return 0;
}
/* Allocate the initial mesh-path and proxy-path tables.
 * Returns 0 on success or -ENOMEM. */
int mesh_pathtbl_init(void)
{
	struct mesh_table *tbl_path, *tbl_mpp;

	tbl_path = mesh_table_alloc(INIT_PATHS_SIZE_ORDER);
	if (!tbl_path)
		return -ENOMEM;
	tbl_path->free_node = &mesh_path_node_free;
	tbl_path->copy_node = &mesh_path_node_copy;
	tbl_path->mean_chain_len = MEAN_CHAIN_LEN;

	tbl_mpp = mesh_table_alloc(INIT_PATHS_SIZE_ORDER);
	if (!tbl_mpp) {
		mesh_table_free(tbl_path, true);
		return -ENOMEM;
	}
	tbl_mpp->free_node = &mesh_path_node_free;
	tbl_mpp->copy_node = &mesh_path_node_copy;
	tbl_mpp->mean_chain_len = MEAN_CHAIN_LEN;

	/* Need no locking since this is during init */
	RCU_INIT_POINTER(mesh_paths, tbl_path);
	RCU_INIT_POINTER(mpp_paths, tbl_mpp);

	return 0;
}
/* Periodic housekeeping for @sdata: delete paths that are neither being
 * resolved nor fixed and whose exp_time lies more than MESH_PATH_EXPIRE
 * in the past. */
void mesh_path_expire(struct ieee80211_sub_if_data *sdata)
{
	struct mesh_table *tbl;
	struct mesh_path *mpath;
	struct mpath_node *node;
	struct hlist_node *p;
	int i;

	rcu_read_lock();
	tbl = rcu_dereference(mesh_paths);
	for_each_mesh_entry(tbl, p, node, i) {
		if (node->mpath->sdata != sdata)
			continue;
		mpath = node->mpath;
		spin_lock_bh(&mpath->state_lock);
		if ((!(mpath->flags & MESH_PATH_RESOLVING)) &&
		    (!(mpath->flags & MESH_PATH_FIXED)) &&
		    time_after(jiffies, mpath->exp_time + MESH_PATH_EXPIRE)) {
			/* drop the lock before deleting; mesh_path_del
			 * re-takes the bucket and state locks itself */
			spin_unlock_bh(&mpath->state_lock);
			mesh_path_del(mpath->dst, mpath->sdata);
		} else
			spin_unlock_bh(&mpath->state_lock);
	}
	rcu_read_unlock();
}
/* Teardown counterpart of mesh_pathtbl_init(): free both tables
 * including their mesh_path leaves. */
void mesh_pathtbl_unregister(void)
{
	/* no need for locking during exit path */
	mesh_table_free(rcu_dereference_raw(mesh_paths), true);
	mesh_table_free(rcu_dereference_raw(mpp_paths), true);
}
| gpl-2.0 |
ptmr3/GalaxyS2_Kernel | drivers/misc/eeprom/at25.c | 4234 | 10401 | /*
* at25.c -- support most SPI EEPROMs, such as Atmel AT25 models
*
* Copyright (C) 2006 David Brownell
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*/
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/sched.h>
#include <linux/spi/spi.h>
#include <linux/spi/eeprom.h>
/*
* NOTE: this is an *EEPROM* driver. The vagaries of product naming
* mean that some AT25 products are EEPROMs, and others are FLASH.
* Handle FLASH chips with the drivers/mtd/devices/m25p80.c driver,
* not this one!
*/
/* Per-device driver state, allocated at probe time. */
struct at25_data {
	struct spi_device	*spi;		/* ref held via spi_dev_get() */
	struct memory_accessor	mem;		/* in-kernel read/write hooks */
	struct mutex		lock;		/* serializes SPI transactions */
	struct spi_eeprom	chip;		/* copy of the platform data */
	struct bin_attribute	bin;		/* sysfs "eeprom" file */
	unsigned		addrlen;	/* address bytes: 1, 2 or 3 */
};
#define AT25_WREN 0x06 /* latch the write enable */
#define AT25_WRDI 0x04 /* reset the write enable */
#define AT25_RDSR 0x05 /* read status register */
#define AT25_WRSR 0x01 /* write status register */
#define AT25_READ 0x03 /* read byte(s) */
#define AT25_WRITE 0x02 /* write byte(s)/sector */
#define AT25_SR_nRDY 0x01 /* nRDY = write-in-progress */
#define AT25_SR_WEN 0x02 /* write enable (latched) */
#define AT25_SR_BP0 0x04 /* BP for software writeprotect */
#define AT25_SR_BP1 0x08
#define AT25_SR_WPEN 0x80 /* writeprotect enable */
#define EE_MAXADDRLEN 3 /* 24 bit addresses, up to 2 MBytes */
/* Specs often allow 5 msec for a page write, sometimes 20 msec;
* it's important to recover from write timeouts.
*/
#define EE_TIMEOUT 25
/*-------------------------------------------------------------------------*/
#define io_limit PAGE_SIZE /* bytes */
/*
 * at25_ee_read - read @count bytes at @offset from the EEPROM into @buf
 *
 * Returns the number of bytes read (possibly clamped to the device
 * size), 0 when @offset is past the end, or a negative errno from the
 * SPI core.
 */
static ssize_t
at25_ee_read(
	struct at25_data	*at25,
	char			*buf,
	unsigned		offset,
	size_t			count
)
{
	u8			command[EE_MAXADDRLEN + 1];
	u8			*cp;
	ssize_t			status;
	struct spi_transfer	t[2];
	struct spi_message	m;

	/* Clamp the request to the device size. */
	if (unlikely(offset >= at25->bin.size))
		return 0;
	if ((offset + count) > at25->bin.size)
		count = at25->bin.size - offset;
	if (unlikely(!count))
		return count;

	cp = command;
	*cp++ = AT25_READ;

	/* 8/16/24-bit address is written MSB first; the switch falls
	 * through deliberately to emit only addrlen bytes. */
	switch (at25->addrlen) {
	default:	/* case 3 */
		*cp++ = offset >> 16;
		/* fall through */
	case 2:
		*cp++ = offset >> 8;
		/* fall through */
	case 1:
	case 0:	/* can't happen: for better codegen */
		*cp++ = offset >> 0;
	}

	/* One message: tx the command+address, then rx the data. */
	spi_message_init(&m);
	memset(t, 0, sizeof t);

	t[0].tx_buf = command;
	t[0].len = at25->addrlen + 1;
	spi_message_add_tail(&t[0], &m);

	t[1].rx_buf = buf;
	t[1].len = count;
	spi_message_add_tail(&t[1], &m);

	mutex_lock(&at25->lock);

	/* Read it all at once.
	 *
	 * REVISIT that's potentially a problem with large chips, if
	 * other devices on the bus need to be accessed regularly or
	 * this chip is clocked very slowly
	 */
	status = spi_sync(at25->spi, &m);
	dev_dbg(&at25->spi->dev,
		"read %Zd bytes at %d --> %d\n",
		count, offset, (int) status);

	mutex_unlock(&at25->lock);
	return status ? status : count;
}
/* sysfs read hook for the "eeprom" bin attribute: recover the owning
 * device from the kobject and hand off to the core read helper. */
static ssize_t
at25_bin_read(struct file *filp, struct kobject *kobj,
	      struct bin_attribute *bin_attr,
	      char *buf, loff_t off, size_t count)
{
	struct device *dev = container_of(kobj, struct device, kobj);
	struct at25_data *at25 = dev_get_drvdata(dev);

	return at25_ee_read(at25, buf, off, count);
}
/*
 * at25_ee_write - write @count bytes from @buf to the EEPROM at @off
 *
 * Writes are issued one page (at most) at a time because the chip's
 * internal address rolls over within a page.  After each page write the
 * status register is polled until the write-in-progress bit clears.
 *
 * Returns the number of bytes written, or a negative errno if nothing
 * was written.
 */
static ssize_t
at25_ee_write(struct at25_data *at25, const char *buf, loff_t off,
	      size_t count)
{
	ssize_t		status = 0;
	unsigned	written = 0;
	unsigned	buf_size;
	u8		*bounce;

	/* Clamp the request to the device size. */
	if (unlikely(off >= at25->bin.size))
		return -EFBIG;
	if ((off + count) > at25->bin.size)
		count = at25->bin.size - off;
	if (unlikely(!count))
		return count;

	/* Temp buffer starts with command and address */
	buf_size = at25->chip.page_size;
	if (buf_size > io_limit)
		buf_size = io_limit;
	bounce = kmalloc(buf_size + at25->addrlen + 1, GFP_KERNEL);
	if (!bounce)
		return -ENOMEM;

	/* For write, rollover is within the page ... so we write at
	 * most one page, then manually roll over to the next page.
	 */
	bounce[0] = AT25_WRITE;
	mutex_lock(&at25->lock);
	do {
		unsigned long	timeout, retries;
		unsigned	segment;
		unsigned	offset = (unsigned) off;
		u8		*cp = bounce + 1;
		int		sr;

		/* Every page write must be preceded by a WREN command;
		 * the chip clears the write-enable latch after each one. */
		*cp = AT25_WREN;
		status = spi_write(at25->spi, cp, 1);
		if (status < 0) {
			dev_dbg(&at25->spi->dev, "WREN --> %d\n",
					(int) status);
			break;
		}

		/* 8/16/24-bit address is written MSB first */
		switch (at25->addrlen) {
		default:	/* case 3 */
			*cp++ = offset >> 16;
		case 2:
			*cp++ = offset >> 8;
		case 1:
		case 0:	/* can't happen: for better codegen */
			*cp++ = offset >> 0;
		}

		/* Write as much of a page as we can */
		segment = buf_size - (offset % buf_size);
		if (segment > count)
			segment = count;
		memcpy(cp, buf, segment);
		status = spi_write(at25->spi, bounce,
				segment + at25->addrlen + 1);
		dev_dbg(&at25->spi->dev,
				"write %u bytes at %u --> %d\n",
				segment, offset, (int) status);
		if (status < 0)
			break;

		/* REVISIT this should detect (or prevent) failed writes
		 * to readonly sections of the EEPROM...
		 */

		/* Wait for non-busy status: poll at least 4 times, then
		 * keep polling until the timeout elapses. */
		timeout = jiffies + msecs_to_jiffies(EE_TIMEOUT);
		retries = 0;
		do {
			sr = spi_w8r8(at25->spi, AT25_RDSR);
			if (sr < 0 || (sr & AT25_SR_nRDY)) {
				dev_dbg(&at25->spi->dev,
					"rdsr --> %d (%02x)\n", sr, sr);
				/* at HZ=100, this is sloooow */
				msleep(1);
				continue;
			}
			/* NOTE(review): always true when reached (the busy
			 * case was handled above), so this just exits the
			 * poll loop once the chip reports ready. */
			if (!(sr & AT25_SR_nRDY))
				break;
		} while (retries++ < 3 || time_before_eq(jiffies, timeout));
		if ((sr < 0) || (sr & AT25_SR_nRDY)) {
			dev_err(&at25->spi->dev,
				"write %d bytes offset %d, "
				"timeout after %u msecs\n",
				segment, offset,
				jiffies_to_msecs(jiffies -
					(timeout - EE_TIMEOUT)));
			status = -ETIMEDOUT;
			break;
		}

		off += segment;
		buf += segment;
		count -= segment;
		written += segment;

	} while (count > 0);

	mutex_unlock(&at25->lock);

	kfree(bounce);
	return written ? written : status;
}
/* sysfs write hook for the "eeprom" bin attribute: recover the owning
 * device from the kobject and hand off to the core write helper. */
static ssize_t
at25_bin_write(struct file *filp, struct kobject *kobj,
	       struct bin_attribute *bin_attr,
	       char *buf, loff_t off, size_t count)
{
	struct device *dev = container_of(kobj, struct device, kobj);
	struct at25_data *at25 = dev_get_drvdata(dev);

	return at25_ee_write(at25, buf, off, count);
}
/*-------------------------------------------------------------------------*/
/* Let in-kernel code access the eeprom data. */
/* memory_accessor read hook: recover the at25_data embedding @mem and
 * delegate to the core read helper. */
static ssize_t at25_mem_read(struct memory_accessor *mem, char *buf,
			 off_t offset, size_t count)
{
	struct at25_data *at25 = container_of(mem, struct at25_data, mem);

	return at25_ee_read(at25, buf, offset, count);
}
/* memory_accessor write hook: recover the at25_data embedding @mem and
 * delegate to the core write helper. */
static ssize_t at25_mem_write(struct memory_accessor *mem, const char *buf,
			  off_t offset, size_t count)
{
	struct at25_data *at25 = container_of(mem, struct at25_data, mem);

	return at25_ee_write(at25, buf, offset, count);
}
/*-------------------------------------------------------------------------*/
/*
 * at25_probe - bind the driver to one SPI EEPROM
 *
 * Validates the platform data, pings the chip via its status register,
 * and exports the EEPROM contents through a sysfs bin file plus a
 * memory_accessor for in-kernel users.
 *
 * Fix: the original leaked the reference taken by spi_dev_get() when
 * sysfs_create_bin_file() failed; the error path now drops it.
 */
static int at25_probe(struct spi_device *spi)
{
	struct at25_data	*at25 = NULL;
	const struct spi_eeprom *chip;
	int			err;
	int			sr;
	int			addrlen;

	/* Chip description */
	chip = spi->dev.platform_data;
	if (!chip) {
		dev_dbg(&spi->dev, "no chip description\n");
		err = -ENODEV;
		goto fail;
	}

	/* For now we only support 8/16/24 bit addressing */
	if (chip->flags & EE_ADDR1)
		addrlen = 1;
	else if (chip->flags & EE_ADDR2)
		addrlen = 2;
	else if (chip->flags & EE_ADDR3)
		addrlen = 3;
	else {
		dev_dbg(&spi->dev, "unsupported address type\n");
		err = -EINVAL;
		goto fail;
	}

	/* Ping the chip ... the status register is pretty portable,
	 * unlike probing manufacturer IDs.  We do expect that system
	 * firmware didn't write it in the past few milliseconds!
	 */
	sr = spi_w8r8(spi, AT25_RDSR);
	if (sr < 0 || sr & AT25_SR_nRDY) {
		dev_dbg(&spi->dev, "rdsr --> %d (%02x)\n", sr, sr);
		err = -ENXIO;
		goto fail;
	}

	at25 = kzalloc(sizeof *at25, GFP_KERNEL);
	if (!at25) {
		err = -ENOMEM;
		goto fail;
	}

	mutex_init(&at25->lock);
	at25->chip = *chip;
	at25->spi = spi_dev_get(spi);
	dev_set_drvdata(&spi->dev, at25);
	at25->addrlen = addrlen;

	/* Export the EEPROM bytes through sysfs, since that's convenient.
	 * And maybe to other kernel code; it might hold a board's Ethernet
	 * address, or board-specific calibration data generated on the
	 * manufacturing floor.
	 *
	 * Default to root-only access to the data; EEPROMs often hold data
	 * that's sensitive for read and/or write, like ethernet addresses,
	 * security codes, board-specific manufacturing calibrations, etc.
	 */
	sysfs_bin_attr_init(&at25->bin);
	at25->bin.attr.name = "eeprom";
	at25->bin.attr.mode = S_IRUSR;
	at25->bin.read = at25_bin_read;
	at25->mem.read = at25_mem_read;

	at25->bin.size = at25->chip.byte_len;
	if (!(chip->flags & EE_READONLY)) {
		at25->bin.write = at25_bin_write;
		at25->bin.attr.mode |= S_IWUSR;
		at25->mem.write = at25_mem_write;
	}

	err = sysfs_create_bin_file(&spi->dev.kobj, &at25->bin);
	if (err)
		goto fail_put;

	if (chip->setup)
		chip->setup(&at25->mem, chip->context);

	dev_info(&spi->dev, "%Zd %s %s eeprom%s, pagesize %u\n",
		(at25->bin.size < 1024)
			? at25->bin.size
			: (at25->bin.size / 1024),
		(at25->bin.size < 1024) ? "Byte" : "KByte",
		at25->chip.name,
		(chip->flags & EE_READONLY) ? " (readonly)" : "",
		at25->chip.page_size);
	return 0;

fail_put:
	/* Balance the spi_dev_get() above; the device ref would otherwise
	 * leak on this error path. */
	spi_dev_put(at25->spi);
fail:
	dev_dbg(&spi->dev, "probe err %d\n", err);
	kfree(at25);
	return err;
}
/*
 * at25_remove - unbind the driver from one SPI EEPROM
 *
 * Fix: drop the spi_device reference taken by spi_dev_get() in
 * at25_probe(); the original leaked it on every unbind.
 */
static int __devexit at25_remove(struct spi_device *spi)
{
	struct at25_data	*at25;

	at25 = dev_get_drvdata(&spi->dev);
	sysfs_remove_bin_file(&spi->dev.kobj, &at25->bin);
	spi_dev_put(at25->spi);
	kfree(at25);
	return 0;
}
/*-------------------------------------------------------------------------*/
static struct spi_driver at25_driver = {
.driver = {
.name = "at25",
.owner = THIS_MODULE,
},
.probe = at25_probe,
.remove = __devexit_p(at25_remove),
};
static int __init at25_init(void)
{
return spi_register_driver(&at25_driver);
}
module_init(at25_init);
static void __exit at25_exit(void)
{
spi_unregister_driver(&at25_driver);
}
module_exit(at25_exit);
MODULE_DESCRIPTION("Driver for most SPI EEPROMs");
MODULE_AUTHOR("David Brownell");
MODULE_LICENSE("GPL");
MODULE_ALIAS("spi:at25");
| gpl-2.0 |
XirXes/PyramidION | drivers/misc/eeprom/at25.c | 4234 | 10401 | /*
* at25.c -- support most SPI EEPROMs, such as Atmel AT25 models
*
* Copyright (C) 2006 David Brownell
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*/
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/sched.h>
#include <linux/spi/spi.h>
#include <linux/spi/eeprom.h>
/*
* NOTE: this is an *EEPROM* driver. The vagaries of product naming
* mean that some AT25 products are EEPROMs, and others are FLASH.
* Handle FLASH chips with the drivers/mtd/devices/m25p80.c driver,
* not this one!
*/
struct at25_data {
struct spi_device *spi;
struct memory_accessor mem;
struct mutex lock;
struct spi_eeprom chip;
struct bin_attribute bin;
unsigned addrlen;
};
#define AT25_WREN 0x06 /* latch the write enable */
#define AT25_WRDI 0x04 /* reset the write enable */
#define AT25_RDSR 0x05 /* read status register */
#define AT25_WRSR 0x01 /* write status register */
#define AT25_READ 0x03 /* read byte(s) */
#define AT25_WRITE 0x02 /* write byte(s)/sector */
#define AT25_SR_nRDY 0x01 /* nRDY = write-in-progress */
#define AT25_SR_WEN 0x02 /* write enable (latched) */
#define AT25_SR_BP0 0x04 /* BP for software writeprotect */
#define AT25_SR_BP1 0x08
#define AT25_SR_WPEN 0x80 /* writeprotect enable */
#define EE_MAXADDRLEN 3 /* 24 bit addresses, up to 2 MBytes */
/* Specs often allow 5 msec for a page write, sometimes 20 msec;
* it's important to recover from write timeouts.
*/
#define EE_TIMEOUT 25
/*-------------------------------------------------------------------------*/
#define io_limit PAGE_SIZE /* bytes */
static ssize_t
at25_ee_read(
struct at25_data *at25,
char *buf,
unsigned offset,
size_t count
)
{
u8 command[EE_MAXADDRLEN + 1];
u8 *cp;
ssize_t status;
struct spi_transfer t[2];
struct spi_message m;
if (unlikely(offset >= at25->bin.size))
return 0;
if ((offset + count) > at25->bin.size)
count = at25->bin.size - offset;
if (unlikely(!count))
return count;
cp = command;
*cp++ = AT25_READ;
/* 8/16/24-bit address is written MSB first */
switch (at25->addrlen) {
default: /* case 3 */
*cp++ = offset >> 16;
case 2:
*cp++ = offset >> 8;
case 1:
case 0: /* can't happen: for better codegen */
*cp++ = offset >> 0;
}
spi_message_init(&m);
memset(t, 0, sizeof t);
t[0].tx_buf = command;
t[0].len = at25->addrlen + 1;
spi_message_add_tail(&t[0], &m);
t[1].rx_buf = buf;
t[1].len = count;
spi_message_add_tail(&t[1], &m);
mutex_lock(&at25->lock);
/* Read it all at once.
*
* REVISIT that's potentially a problem with large chips, if
* other devices on the bus need to be accessed regularly or
* this chip is clocked very slowly
*/
status = spi_sync(at25->spi, &m);
dev_dbg(&at25->spi->dev,
"read %Zd bytes at %d --> %d\n",
count, offset, (int) status);
mutex_unlock(&at25->lock);
return status ? status : count;
}
static ssize_t
at25_bin_read(struct file *filp, struct kobject *kobj,
struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
{
struct device *dev;
struct at25_data *at25;
dev = container_of(kobj, struct device, kobj);
at25 = dev_get_drvdata(dev);
return at25_ee_read(at25, buf, off, count);
}
static ssize_t
at25_ee_write(struct at25_data *at25, const char *buf, loff_t off,
size_t count)
{
ssize_t status = 0;
unsigned written = 0;
unsigned buf_size;
u8 *bounce;
if (unlikely(off >= at25->bin.size))
return -EFBIG;
if ((off + count) > at25->bin.size)
count = at25->bin.size - off;
if (unlikely(!count))
return count;
/* Temp buffer starts with command and address */
buf_size = at25->chip.page_size;
if (buf_size > io_limit)
buf_size = io_limit;
bounce = kmalloc(buf_size + at25->addrlen + 1, GFP_KERNEL);
if (!bounce)
return -ENOMEM;
/* For write, rollover is within the page ... so we write at
* most one page, then manually roll over to the next page.
*/
bounce[0] = AT25_WRITE;
mutex_lock(&at25->lock);
do {
unsigned long timeout, retries;
unsigned segment;
unsigned offset = (unsigned) off;
u8 *cp = bounce + 1;
int sr;
*cp = AT25_WREN;
status = spi_write(at25->spi, cp, 1);
if (status < 0) {
dev_dbg(&at25->spi->dev, "WREN --> %d\n",
(int) status);
break;
}
/* 8/16/24-bit address is written MSB first */
switch (at25->addrlen) {
default: /* case 3 */
*cp++ = offset >> 16;
case 2:
*cp++ = offset >> 8;
case 1:
case 0: /* can't happen: for better codegen */
*cp++ = offset >> 0;
}
/* Write as much of a page as we can */
segment = buf_size - (offset % buf_size);
if (segment > count)
segment = count;
memcpy(cp, buf, segment);
status = spi_write(at25->spi, bounce,
segment + at25->addrlen + 1);
dev_dbg(&at25->spi->dev,
"write %u bytes at %u --> %d\n",
segment, offset, (int) status);
if (status < 0)
break;
/* REVISIT this should detect (or prevent) failed writes
* to readonly sections of the EEPROM...
*/
/* Wait for non-busy status */
timeout = jiffies + msecs_to_jiffies(EE_TIMEOUT);
retries = 0;
do {
sr = spi_w8r8(at25->spi, AT25_RDSR);
if (sr < 0 || (sr & AT25_SR_nRDY)) {
dev_dbg(&at25->spi->dev,
"rdsr --> %d (%02x)\n", sr, sr);
/* at HZ=100, this is sloooow */
msleep(1);
continue;
}
if (!(sr & AT25_SR_nRDY))
break;
} while (retries++ < 3 || time_before_eq(jiffies, timeout));
if ((sr < 0) || (sr & AT25_SR_nRDY)) {
dev_err(&at25->spi->dev,
"write %d bytes offset %d, "
"timeout after %u msecs\n",
segment, offset,
jiffies_to_msecs(jiffies -
(timeout - EE_TIMEOUT)));
status = -ETIMEDOUT;
break;
}
off += segment;
buf += segment;
count -= segment;
written += segment;
} while (count > 0);
mutex_unlock(&at25->lock);
kfree(bounce);
return written ? written : status;
}
static ssize_t
at25_bin_write(struct file *filp, struct kobject *kobj,
struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
{
struct device *dev;
struct at25_data *at25;
dev = container_of(kobj, struct device, kobj);
at25 = dev_get_drvdata(dev);
return at25_ee_write(at25, buf, off, count);
}
/*-------------------------------------------------------------------------*/
/* Let in-kernel code access the eeprom data. */
static ssize_t at25_mem_read(struct memory_accessor *mem, char *buf,
off_t offset, size_t count)
{
struct at25_data *at25 = container_of(mem, struct at25_data, mem);
return at25_ee_read(at25, buf, offset, count);
}
static ssize_t at25_mem_write(struct memory_accessor *mem, const char *buf,
off_t offset, size_t count)
{
struct at25_data *at25 = container_of(mem, struct at25_data, mem);
return at25_ee_write(at25, buf, offset, count);
}
/*-------------------------------------------------------------------------*/
/*
 * at25_probe - bind the driver to one SPI EEPROM
 *
 * Validates the platform data, pings the chip via its status register,
 * and exports the EEPROM contents through a sysfs bin file plus a
 * memory_accessor for in-kernel users.
 *
 * Fix: the original leaked the reference taken by spi_dev_get() when
 * sysfs_create_bin_file() failed; the error path now drops it.
 */
static int at25_probe(struct spi_device *spi)
{
	struct at25_data	*at25 = NULL;
	const struct spi_eeprom *chip;
	int			err;
	int			sr;
	int			addrlen;

	/* Chip description */
	chip = spi->dev.platform_data;
	if (!chip) {
		dev_dbg(&spi->dev, "no chip description\n");
		err = -ENODEV;
		goto fail;
	}

	/* For now we only support 8/16/24 bit addressing */
	if (chip->flags & EE_ADDR1)
		addrlen = 1;
	else if (chip->flags & EE_ADDR2)
		addrlen = 2;
	else if (chip->flags & EE_ADDR3)
		addrlen = 3;
	else {
		dev_dbg(&spi->dev, "unsupported address type\n");
		err = -EINVAL;
		goto fail;
	}

	/* Ping the chip ... the status register is pretty portable,
	 * unlike probing manufacturer IDs.  We do expect that system
	 * firmware didn't write it in the past few milliseconds!
	 */
	sr = spi_w8r8(spi, AT25_RDSR);
	if (sr < 0 || sr & AT25_SR_nRDY) {
		dev_dbg(&spi->dev, "rdsr --> %d (%02x)\n", sr, sr);
		err = -ENXIO;
		goto fail;
	}

	at25 = kzalloc(sizeof *at25, GFP_KERNEL);
	if (!at25) {
		err = -ENOMEM;
		goto fail;
	}

	mutex_init(&at25->lock);
	at25->chip = *chip;
	at25->spi = spi_dev_get(spi);
	dev_set_drvdata(&spi->dev, at25);
	at25->addrlen = addrlen;

	/* Export the EEPROM bytes through sysfs, since that's convenient.
	 * And maybe to other kernel code; it might hold a board's Ethernet
	 * address, or board-specific calibration data generated on the
	 * manufacturing floor.
	 *
	 * Default to root-only access to the data; EEPROMs often hold data
	 * that's sensitive for read and/or write, like ethernet addresses,
	 * security codes, board-specific manufacturing calibrations, etc.
	 */
	sysfs_bin_attr_init(&at25->bin);
	at25->bin.attr.name = "eeprom";
	at25->bin.attr.mode = S_IRUSR;
	at25->bin.read = at25_bin_read;
	at25->mem.read = at25_mem_read;

	at25->bin.size = at25->chip.byte_len;
	if (!(chip->flags & EE_READONLY)) {
		at25->bin.write = at25_bin_write;
		at25->bin.attr.mode |= S_IWUSR;
		at25->mem.write = at25_mem_write;
	}

	err = sysfs_create_bin_file(&spi->dev.kobj, &at25->bin);
	if (err)
		goto fail_put;

	if (chip->setup)
		chip->setup(&at25->mem, chip->context);

	dev_info(&spi->dev, "%Zd %s %s eeprom%s, pagesize %u\n",
		(at25->bin.size < 1024)
			? at25->bin.size
			: (at25->bin.size / 1024),
		(at25->bin.size < 1024) ? "Byte" : "KByte",
		at25->chip.name,
		(chip->flags & EE_READONLY) ? " (readonly)" : "",
		at25->chip.page_size);
	return 0;

fail_put:
	/* Balance the spi_dev_get() above; the device ref would otherwise
	 * leak on this error path. */
	spi_dev_put(at25->spi);
fail:
	dev_dbg(&spi->dev, "probe err %d\n", err);
	kfree(at25);
	return err;
}
/*
 * at25_remove - unbind the driver from one SPI EEPROM
 *
 * Fix: drop the spi_device reference taken by spi_dev_get() in
 * at25_probe(); the original leaked it on every unbind.
 */
static int __devexit at25_remove(struct spi_device *spi)
{
	struct at25_data	*at25;

	at25 = dev_get_drvdata(&spi->dev);
	sysfs_remove_bin_file(&spi->dev.kobj, &at25->bin);
	spi_dev_put(at25->spi);
	kfree(at25);
	return 0;
}
/*-------------------------------------------------------------------------*/
static struct spi_driver at25_driver = {
.driver = {
.name = "at25",
.owner = THIS_MODULE,
},
.probe = at25_probe,
.remove = __devexit_p(at25_remove),
};
static int __init at25_init(void)
{
return spi_register_driver(&at25_driver);
}
module_init(at25_init);
static void __exit at25_exit(void)
{
spi_unregister_driver(&at25_driver);
}
module_exit(at25_exit);
MODULE_DESCRIPTION("Driver for most SPI EEPROMs");
MODULE_AUTHOR("David Brownell");
MODULE_LICENSE("GPL");
MODULE_ALIAS("spi:at25");
| gpl-2.0 |
RenderBroken/msm8974_Victara-Stock_render_kernel | drivers/mfd/tps65090.c | 4746 | 9506 | /*
* Core driver for TI TPS65090 PMIC family
*
* Copyright (c) 2012, NVIDIA CORPORATION. All rights reserved.
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/i2c.h>
#include <linux/mfd/core.h>
#include <linux/mfd/tps65090.h>
#include <linux/regmap.h>
#include <linux/err.h>
#define NUM_INT_REG 2
#define TOTAL_NUM_REG 0x18
/* interrupt status registers */
#define TPS65090_INT_STS 0x0
#define TPS65090_INT_STS2 0x1
/* interrupt mask registers */
#define TPS65090_INT_MSK 0x2
#define TPS65090_INT_MSK2 0x3
struct tps65090_irq_data {
u8 mask_reg;
u8 mask_pos;
};
#define TPS65090_IRQ(_reg, _mask_pos) \
{ \
.mask_reg = (_reg), \
.mask_pos = (_mask_pos), \
}
static const struct tps65090_irq_data tps65090_irqs[] = {
[0] = TPS65090_IRQ(0, 0),
[1] = TPS65090_IRQ(0, 1),
[2] = TPS65090_IRQ(0, 2),
[3] = TPS65090_IRQ(0, 3),
[4] = TPS65090_IRQ(0, 4),
[5] = TPS65090_IRQ(0, 5),
[6] = TPS65090_IRQ(0, 6),
[7] = TPS65090_IRQ(0, 7),
[8] = TPS65090_IRQ(1, 0),
[9] = TPS65090_IRQ(1, 1),
[10] = TPS65090_IRQ(1, 2),
[11] = TPS65090_IRQ(1, 3),
[12] = TPS65090_IRQ(1, 4),
[13] = TPS65090_IRQ(1, 5),
[14] = TPS65090_IRQ(1, 6),
[15] = TPS65090_IRQ(1, 7),
};
/*
 * Child devices registered via mfd_add_devices().
 *
 * Fix: the cells were named "tps65910-*" (copy/paste from the TPS65910
 * driver), so the TPS65090 sub-drivers could never bind to them.
 */
static struct mfd_cell tps65090s[] = {
	{
		.name = "tps65090-pmic",
	},
	{
		.name = "tps65090-regulator",
	},
};
/* Per-chip driver state, allocated (devm) at probe time. */
struct tps65090 {
	struct mutex		lock;		/* general-purpose device lock */
	struct device		*dev;		/* &client->dev shortcut */
	struct i2c_client	*client;
	struct regmap		*rmap;		/* I2C register map */
	struct irq_chip		irq_chip;	/* filled in by irq_init */
	struct mutex		irq_lock;	/* serializes irq bus accesses */
	int			irq_base;	/* first linux irq of the range */
	unsigned int		id;
};
/* Write one 8-bit register through the regmap.  Returns 0 or a negative
 * errno from regmap_write(). */
int tps65090_write(struct device *dev, int reg, uint8_t val)
{
	struct tps65090 *tps = dev_get_drvdata(dev);
	return regmap_write(tps->rmap, reg, val);
}
EXPORT_SYMBOL_GPL(tps65090_write);
/* Read one 8-bit register through the regmap into *val.  *val is only
 * written on success.  Returns 0 or a negative errno. */
int tps65090_read(struct device *dev, int reg, uint8_t *val)
{
	struct tps65090 *tps = dev_get_drvdata(dev);
	unsigned int temp_val;
	int ret;
	/* regmap works with unsigned int; narrow to u8 for callers */
	ret = regmap_read(tps->rmap, reg, &temp_val);
	if (!ret)
		*val = temp_val;
	return ret;
}
EXPORT_SYMBOL_GPL(tps65090_read);
/* Set a single bit (bit_num, 0-7) in a register via read-modify-write. */
int tps65090_set_bits(struct device *dev, int reg, uint8_t bit_num)
{
	struct tps65090 *tps = dev_get_drvdata(dev);
	return regmap_update_bits(tps->rmap, reg, BIT(bit_num), ~0u);
}
EXPORT_SYMBOL_GPL(tps65090_set_bits);
/* Clear a single bit (bit_num, 0-7) in a register via read-modify-write. */
int tps65090_clr_bits(struct device *dev, int reg, uint8_t bit_num)
{
	struct tps65090 *tps = dev_get_drvdata(dev);
	return regmap_update_bits(tps->rmap, reg, BIT(bit_num), 0u);
}
EXPORT_SYMBOL_GPL(tps65090_clr_bits);
/* irq_chip bus-lock hook: taken by the irq core around mask/unmask so
 * the (sleeping) I2C accesses are serialized. */
static void tps65090_irq_lock(struct irq_data *data)
{
	struct tps65090 *tps65090 = irq_data_get_irq_chip_data(data);

	mutex_lock(&tps65090->irq_lock);
}
/*
 * irq_chip mask hook: set the corresponding bit in the INT_MSK
 * register(s) to disable the interrupt source.
 *
 * Fix: the interrupts are registered as a legacy linear range starting
 * at irq_base (irq_set_chip_and_handler(), no irq domain), so the local
 * index must be computed as irq - irq_base, exactly as
 * tps65090_irq_unmask() does.  The original read ->hwirq, which is not
 * populated for these descriptors.
 */
static void tps65090_irq_mask(struct irq_data *irq_data)
{
	struct tps65090 *tps65090 = irq_data_get_irq_chip_data(irq_data);
	unsigned int __irq = irq_data->irq - tps65090->irq_base;
	const struct tps65090_irq_data *data = &tps65090_irqs[__irq];

	tps65090_set_bits(tps65090->dev, (TPS65090_INT_MSK + data->mask_reg),
		data->mask_pos);
}
/* irq_chip unmask hook: clear the mask bit in INT_MSK to re-enable the
 * interrupt source.  The local index is irq - irq_base because the irqs
 * were registered as a legacy linear range. */
static void tps65090_irq_unmask(struct irq_data *irq_data)
{
	struct tps65090 *tps65090 = irq_data_get_irq_chip_data(irq_data);
	unsigned int __irq = irq_data->irq - tps65090->irq_base;
	const struct tps65090_irq_data *data = &tps65090_irqs[__irq];

	tps65090_clr_bits(tps65090->dev, (TPS65090_INT_MSK + data->mask_reg),
		data->mask_pos);
}
/* irq_chip bus-sync-unlock hook: the mask bits were already written in
 * mask/unmask, so there is nothing to flush -- just drop the lock. */
static void tps65090_irq_sync_unlock(struct irq_data *data)
{
	struct tps65090 *tps65090 = irq_data_get_irq_chip_data(data);

	mutex_unlock(&tps65090->irq_lock);
}
/*
 * tps65090_irq - threaded interrupt handler
 *
 * Scans both interrupt status registers, acks (writes back) only the
 * pending bits that are not masked, and dispatches each pending source
 * as a nested irq.  Returns IRQ_HANDLED if anything was pending,
 * IRQ_NONE on read/write failure or a spurious interrupt.
 */
static irqreturn_t tps65090_irq(int irq, void *data)
{
	struct tps65090 *tps65090 = data;
	int ret = 0;
	u8 status, mask;
	unsigned long int acks = 0;	/* bitmap: bit i == irq_base + i pending */
	int i;

	for (i = 0; i < NUM_INT_REG; i++) {
		ret = tps65090_read(tps65090->dev, TPS65090_INT_MSK + i, &mask);
		if (ret < 0) {
			dev_err(tps65090->dev,
				"failed to read mask reg [addr:%d]\n",
				TPS65090_INT_MSK + i);
			return IRQ_NONE;
		}
		ret = tps65090_read(tps65090->dev, TPS65090_INT_STS + i,
			&status);
		if (ret < 0) {
			dev_err(tps65090->dev,
				"failed to read status reg [addr:%d]\n",
				TPS65090_INT_STS + i);
			return IRQ_NONE;
		}
		if (status) {
			/* Ack only those interrupts which are not masked */
			status &= (~mask);
			ret = tps65090_write(tps65090->dev,
					TPS65090_INT_STS + i, status);
			if (ret < 0) {
				dev_err(tps65090->dev,
					"failed to write interrupt status\n");
				return IRQ_NONE;
			}
			/* fold this register's bits into the dispatch bitmap */
			acks |= (status << (i * 8));
		}
	}

	for_each_set_bit(i, &acks, ARRAY_SIZE(tps65090_irqs))
		handle_nested_irq(tps65090->irq_base + i);
	return acks ? IRQ_HANDLED : IRQ_NONE;
}
/*
 * tps65090_irq_init - set up the chip's irq_chip and request its line
 * @tps65090: driver state
 * @irq: the i2c client's interrupt line
 * @irq_base: first linux irq number for the 16 sources (must be nonzero)
 *
 * Masks and acks everything, registers a legacy linear range of nested
 * irqs, and requests a threaded handler.  On success the device is also
 * marked as a wakeup source.  Returns 0 or a negative errno.
 */
static int __devinit tps65090_irq_init(struct tps65090 *tps65090, int irq,
	int irq_base)
{
	int i, ret;

	if (!irq_base) {
		dev_err(tps65090->dev, "IRQ base not set\n");
		return -EINVAL;
	}

	mutex_init(&tps65090->irq_lock);

	/* Start with every source masked ... */
	for (i = 0; i < NUM_INT_REG; i++)
		tps65090_write(tps65090->dev, TPS65090_INT_MSK + i, 0xFF);

	/* ... and any stale pending status acked. */
	for (i = 0; i < NUM_INT_REG; i++)
		tps65090_write(tps65090->dev, TPS65090_INT_STS + i, 0xff);

	tps65090->irq_base = irq_base;
	tps65090->irq_chip.name = "tps65090";
	tps65090->irq_chip.irq_mask = tps65090_irq_mask;
	tps65090->irq_chip.irq_unmask = tps65090_irq_unmask;
	tps65090->irq_chip.irq_bus_lock = tps65090_irq_lock;
	tps65090->irq_chip.irq_bus_sync_unlock = tps65090_irq_sync_unlock;

	for (i = 0; i < ARRAY_SIZE(tps65090_irqs); i++) {
		int __irq = i + tps65090->irq_base;
		irq_set_chip_data(__irq, tps65090);
		irq_set_chip_and_handler(__irq, &tps65090->irq_chip,
			handle_simple_irq);
		/* handlers run from the threaded parent, not hard irq */
		irq_set_nested_thread(__irq, 1);
#ifdef CONFIG_ARM
		set_irq_flags(__irq, IRQF_VALID);
#endif
	}

	ret = request_threaded_irq(irq, NULL, tps65090_irq, IRQF_ONESHOT,
				"tps65090", tps65090);
	if (!ret) {
		device_init_wakeup(tps65090->dev, 1);
		enable_irq_wake(irq);
	}

	return ret;
}
static bool is_volatile_reg(struct device *dev, unsigned int reg)
{
if ((reg == TPS65090_INT_STS) || (reg == TPS65090_INT_STS))
return true;
else
return false;
}
static const struct regmap_config tps65090_regmap_config = {
.reg_bits = 8,
.val_bits = 8,
.max_register = TOTAL_NUM_REG,
.num_reg_defaults_raw = TOTAL_NUM_REG,
.cache_type = REGCACHE_RBTREE,
.volatile_reg = is_volatile_reg,
};
/*
 * tps65090_i2c_probe - bind the driver to one TPS65090 chip
 *
 * Requires platform data (for irq_base).  Sets up the irq chip (when an
 * interrupt line is wired), the regmap, and the MFD child devices.
 *
 * Fix: on regmap_init_i2c() failure the original jumped to the error
 * path without assigning 'ret', returning 0 (probe "success" with a
 * broken regmap) or, when client->irq == 0, an uninitialized value.
 * Also dropped a stray ';' after the if-block.
 */
static int __devinit tps65090_i2c_probe(struct i2c_client *client,
	const struct i2c_device_id *id)
{
	struct tps65090_platform_data *pdata = client->dev.platform_data;
	struct tps65090 *tps65090;
	int ret;

	if (!pdata) {
		dev_err(&client->dev, "tps65090 requires platform data\n");
		return -EINVAL;
	}

	tps65090 = devm_kzalloc(&client->dev, sizeof(struct tps65090),
		GFP_KERNEL);
	if (tps65090 == NULL)
		return -ENOMEM;

	tps65090->client = client;
	tps65090->dev = &client->dev;
	i2c_set_clientdata(client, tps65090);

	mutex_init(&tps65090->lock);

	if (client->irq) {
		ret = tps65090_irq_init(tps65090, client->irq, pdata->irq_base);
		if (ret) {
			dev_err(&client->dev, "IRQ init failed with err: %d\n",
				ret);
			goto err_exit;
		}
	}

	tps65090->rmap = regmap_init_i2c(tps65090->client,
		&tps65090_regmap_config);
	if (IS_ERR(tps65090->rmap)) {
		ret = PTR_ERR(tps65090->rmap);
		dev_err(&client->dev, "regmap_init failed with err: %d\n",
			ret);
		goto err_irq_exit;
	}

	ret = mfd_add_devices(tps65090->dev, -1, tps65090s,
		ARRAY_SIZE(tps65090s), NULL, 0);
	if (ret) {
		dev_err(&client->dev, "add mfd devices failed with err: %d\n",
			ret);
		goto err_regmap_exit;
	}

	return 0;

err_regmap_exit:
	regmap_exit(tps65090->rmap);

err_irq_exit:
	if (client->irq)
		free_irq(client->irq, tps65090);
err_exit:
	return ret;
}
/* Unbind: tear down MFD children, regmap and irq.  The tps65090 struct
 * itself is devm-allocated and freed by the driver core. */
static int __devexit tps65090_i2c_remove(struct i2c_client *client)
{
	struct tps65090 *tps65090 = i2c_get_clientdata(client);

	mfd_remove_devices(tps65090->dev);
	regmap_exit(tps65090->rmap);
	if (client->irq)
		free_irq(client->irq, tps65090);

	return 0;
}
#ifdef CONFIG_PM
/* Legacy i2c suspend hook: quiesce the interrupt while sleeping (the
 * line is still armed as a wakeup source via enable_irq_wake). */
static int tps65090_i2c_suspend(struct i2c_client *client, pm_message_t state)
{
	if (client->irq)
		disable_irq(client->irq);
	return 0;
}
/* Legacy i2c resume hook: re-enable the interrupt disabled at suspend. */
static int tps65090_i2c_resume(struct i2c_client *client)
{
	if (client->irq)
		enable_irq(client->irq);
	return 0;
}
#endif
static const struct i2c_device_id tps65090_id_table[] = {
{ "tps65090", 0 },
{ },
};
MODULE_DEVICE_TABLE(i2c, tps65090_id_table);
static struct i2c_driver tps65090_driver = {
.driver = {
.name = "tps65090",
.owner = THIS_MODULE,
},
.probe = tps65090_i2c_probe,
.remove = __devexit_p(tps65090_i2c_remove),
#ifdef CONFIG_PM
.suspend = tps65090_i2c_suspend,
.resume = tps65090_i2c_resume,
#endif
.id_table = tps65090_id_table,
};
static int __init tps65090_init(void)
{
return i2c_add_driver(&tps65090_driver);
}
subsys_initcall(tps65090_init);
static void __exit tps65090_exit(void)
{
i2c_del_driver(&tps65090_driver);
}
module_exit(tps65090_exit);
MODULE_DESCRIPTION("TPS65090 core driver");
MODULE_AUTHOR("Venu Byravarasu <vbyravarasu@nvidia.com>");
MODULE_LICENSE("GPL v2");
| gpl-2.0 |
alteredlikeness/android_kernel_msm | drivers/mfd/tps65090.c | 4746 | 9506 | /*
* Core driver for TI TPS65090 PMIC family
*
* Copyright (c) 2012, NVIDIA CORPORATION. All rights reserved.
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/i2c.h>
#include <linux/mfd/core.h>
#include <linux/mfd/tps65090.h>
#include <linux/regmap.h>
#include <linux/err.h>
#define NUM_INT_REG 2
#define TOTAL_NUM_REG 0x18
/* interrupt status registers */
#define TPS65090_INT_STS 0x0
#define TPS65090_INT_STS2 0x1
/* interrupt mask registers */
#define TPS65090_INT_MSK 0x2
#define TPS65090_INT_MSK2 0x3
struct tps65090_irq_data {
u8 mask_reg;
u8 mask_pos;
};
#define TPS65090_IRQ(_reg, _mask_pos) \
{ \
.mask_reg = (_reg), \
.mask_pos = (_mask_pos), \
}
static const struct tps65090_irq_data tps65090_irqs[] = {
[0] = TPS65090_IRQ(0, 0),
[1] = TPS65090_IRQ(0, 1),
[2] = TPS65090_IRQ(0, 2),
[3] = TPS65090_IRQ(0, 3),
[4] = TPS65090_IRQ(0, 4),
[5] = TPS65090_IRQ(0, 5),
[6] = TPS65090_IRQ(0, 6),
[7] = TPS65090_IRQ(0, 7),
[8] = TPS65090_IRQ(1, 0),
[9] = TPS65090_IRQ(1, 1),
[10] = TPS65090_IRQ(1, 2),
[11] = TPS65090_IRQ(1, 3),
[12] = TPS65090_IRQ(1, 4),
[13] = TPS65090_IRQ(1, 5),
[14] = TPS65090_IRQ(1, 6),
[15] = TPS65090_IRQ(1, 7),
};
/*
 * Child devices registered via mfd_add_devices().
 *
 * Fix: the cells were named "tps65910-*" (copy/paste from the TPS65910
 * driver), so the TPS65090 sub-drivers could never bind to them.
 */
static struct mfd_cell tps65090s[] = {
	{
		.name = "tps65090-pmic",
	},
	{
		.name = "tps65090-regulator",
	},
};
struct tps65090 {
struct mutex lock;
struct device *dev;
struct i2c_client *client;
struct regmap *rmap;
struct irq_chip irq_chip;
struct mutex irq_lock;
int irq_base;
unsigned int id;
};
int tps65090_write(struct device *dev, int reg, uint8_t val)
{
struct tps65090 *tps = dev_get_drvdata(dev);
return regmap_write(tps->rmap, reg, val);
}
EXPORT_SYMBOL_GPL(tps65090_write);
int tps65090_read(struct device *dev, int reg, uint8_t *val)
{
struct tps65090 *tps = dev_get_drvdata(dev);
unsigned int temp_val;
int ret;
ret = regmap_read(tps->rmap, reg, &temp_val);
if (!ret)
*val = temp_val;
return ret;
}
EXPORT_SYMBOL_GPL(tps65090_read);
int tps65090_set_bits(struct device *dev, int reg, uint8_t bit_num)
{
struct tps65090 *tps = dev_get_drvdata(dev);
return regmap_update_bits(tps->rmap, reg, BIT(bit_num), ~0u);
}
EXPORT_SYMBOL_GPL(tps65090_set_bits);
int tps65090_clr_bits(struct device *dev, int reg, uint8_t bit_num)
{
struct tps65090 *tps = dev_get_drvdata(dev);
return regmap_update_bits(tps->rmap, reg, BIT(bit_num), 0u);
}
EXPORT_SYMBOL_GPL(tps65090_clr_bits);
static void tps65090_irq_lock(struct irq_data *data)
{
struct tps65090 *tps65090 = irq_data_get_irq_chip_data(data);
mutex_lock(&tps65090->irq_lock);
}
/*
 * irq_chip mask hook: set the corresponding bit in the INT_MSK
 * register(s) to disable the interrupt source.
 *
 * Fix: the interrupts are registered as a legacy linear range starting
 * at irq_base (irq_set_chip_and_handler(), no irq domain), so the local
 * index must be computed as irq - irq_base, exactly as
 * tps65090_irq_unmask() does.  The original read ->hwirq, which is not
 * populated for these descriptors.
 */
static void tps65090_irq_mask(struct irq_data *irq_data)
{
	struct tps65090 *tps65090 = irq_data_get_irq_chip_data(irq_data);
	unsigned int __irq = irq_data->irq - tps65090->irq_base;
	const struct tps65090_irq_data *data = &tps65090_irqs[__irq];

	tps65090_set_bits(tps65090->dev, (TPS65090_INT_MSK + data->mask_reg),
		data->mask_pos);
}
/* irq_chip .irq_unmask: clear the mask bit for one chip interrupt. */
static void tps65090_irq_unmask(struct irq_data *irq_data)
{
	struct tps65090 *chip = irq_data_get_irq_chip_data(irq_data);
	unsigned int index = irq_data->irq - chip->irq_base;
	const struct tps65090_irq_data *hw = &tps65090_irqs[index];

	tps65090_clr_bits(chip->dev, TPS65090_INT_MSK + hw->mask_reg,
			  hw->mask_pos);
}
/* irq_chip .irq_bus_sync_unlock: release the lock taken in irq_bus_lock. */
static void tps65090_irq_sync_unlock(struct irq_data *data)
{
	struct tps65090 *chip = irq_data_get_irq_chip_data(data);

	mutex_unlock(&chip->irq_lock);
}
/*
 * Threaded interrupt handler.
 *
 * For each mask/status register pair: read mask, read status,
 * acknowledge (write back) only the unmasked pending bits, and collect
 * them into a bitmap.  Finally dispatch a nested virq for every
 * acknowledged bit.  Any I2C failure aborts with IRQ_NONE.
 */
static irqreturn_t tps65090_irq(int irq, void *data)
{
	struct tps65090 *tps65090 = data;
	int ret = 0;
	u8 status, mask;
	unsigned long int acks = 0;	/* bitmap of acknowledged interrupts */
	int i;

	for (i = 0; i < NUM_INT_REG; i++) {
		ret = tps65090_read(tps65090->dev, TPS65090_INT_MSK + i, &mask);
		if (ret < 0) {
			dev_err(tps65090->dev,
				"failed to read mask reg [addr:%d]\n",
				TPS65090_INT_MSK + i);
			return IRQ_NONE;
		}
		ret = tps65090_read(tps65090->dev, TPS65090_INT_STS + i,
			&status);
		if (ret < 0) {
			dev_err(tps65090->dev,
				"failed to read status reg [addr:%d]\n",
				TPS65090_INT_STS + i);
			return IRQ_NONE;
		}
		if (status) {
			/* Ack only those interrupts which are not masked */
			status &= (~mask);
			/* NOTE(review): status appears write-to-clear here;
			 * masked-but-pending bits are left unacked. */
			ret = tps65090_write(tps65090->dev,
					TPS65090_INT_STS + i, status);
			if (ret < 0) {
				dev_err(tps65090->dev,
					"failed to write interrupt status\n");
				return IRQ_NONE;
			}
			/* each register contributes one byte of the bitmap */
			acks |= (status << (i * 8));
		}
	}

	/* fan out to the per-interrupt nested handlers */
	for_each_set_bit(i, &acks, ARRAY_SIZE(tps65090_irqs))
		handle_nested_irq(tps65090->irq_base + i);
	return acks ? IRQ_HANDLED : IRQ_NONE;
}
/*
 * tps65090_irq_init() - set up the chip's interrupt controller.
 * @tps65090: driver state
 * @irq: the parent (board) interrupt line
 * @irq_base: first Linux virq to map chip interrupts onto (must be
 *            non-zero; the driver does not allocate a range itself)
 *
 * Masks and clears all chip interrupts, registers a nested irq_chip
 * for each of them, then requests the parent line as a threaded,
 * wakeup-capable interrupt.  Returns 0 or a negative error code.
 */
static int __devinit tps65090_irq_init(struct tps65090 *tps65090, int irq,
	int irq_base)
{
	int i, ret;

	if (!irq_base) {
		dev_err(tps65090->dev, "IRQ base not set\n");
		return -EINVAL;
	}

	mutex_init(&tps65090->irq_lock);

	/* mask everything first, then clear any stale status bits */
	for (i = 0; i < NUM_INT_REG; i++)
		tps65090_write(tps65090->dev, TPS65090_INT_MSK + i, 0xFF);

	for (i = 0; i < NUM_INT_REG; i++)
		tps65090_write(tps65090->dev, TPS65090_INT_STS + i, 0xff);

	tps65090->irq_base = irq_base;
	tps65090->irq_chip.name = "tps65090";
	tps65090->irq_chip.irq_mask = tps65090_irq_mask;
	tps65090->irq_chip.irq_unmask = tps65090_irq_unmask;
	tps65090->irq_chip.irq_bus_lock = tps65090_irq_lock;
	tps65090->irq_chip.irq_bus_sync_unlock = tps65090_irq_sync_unlock;

	/* wire every chip interrupt to a nested-thread virq */
	for (i = 0; i < ARRAY_SIZE(tps65090_irqs); i++) {
		int __irq = i + tps65090->irq_base;
		irq_set_chip_data(__irq, tps65090);
		irq_set_chip_and_handler(__irq, &tps65090->irq_chip,
					 handle_simple_irq);
		irq_set_nested_thread(__irq, 1);
#ifdef CONFIG_ARM
		/* ARM required explicit validation of statically-mapped irqs */
		set_irq_flags(__irq, IRQF_VALID);
#endif
	}

	ret = request_threaded_irq(irq, NULL, tps65090_irq, IRQF_ONESHOT,
				"tps65090", tps65090);
	if (!ret) {
		/* the PMIC interrupt may wake the system */
		device_init_wakeup(tps65090->dev, 1);
		enable_irq_wake(irq);
	}

	return ret;
}
static bool is_volatile_reg(struct device *dev, unsigned int reg)
{
if ((reg == TPS65090_INT_STS) || (reg == TPS65090_INT_STS))
return true;
else
return false;
}
/* regmap description: 8-bit registers, 8-bit addresses, cached in an
 * rbtree except for the volatile interrupt-status registers. */
static const struct regmap_config tps65090_regmap_config = {
	.reg_bits = 8,
	.val_bits = 8,
	.max_register = TOTAL_NUM_REG,
	.num_reg_defaults_raw = TOTAL_NUM_REG,
	.cache_type = REGCACHE_RBTREE,
	.volatile_reg = is_volatile_reg,
};
/*
 * I2C probe: allocate state, create the regmap, set up interrupts and
 * register the child MFD cells.
 *
 * Fixes over the original:
 *  - the regmap is created *before* the interrupt is requested; the
 *    threaded IRQ handler does register I/O through tps65090->rmap, so
 *    the old order could dereference a NULL regmap if the chip asserted
 *    its interrupt during probe;
 *  - 'ret' is now set from PTR_ERR() when regmap_init_i2c() fails (it
 *    was previously left at 0/stale, so probe could report success on
 *    that path);
 *  - removed a stray ';' after the IS_ERR() block.
 */
static int __devinit tps65090_i2c_probe(struct i2c_client *client,
					const struct i2c_device_id *id)
{
	struct tps65090_platform_data *pdata = client->dev.platform_data;
	struct tps65090 *tps65090;
	int ret;

	if (!pdata) {
		dev_err(&client->dev, "tps65090 requires platform data\n");
		return -EINVAL;
	}

	tps65090 = devm_kzalloc(&client->dev, sizeof(struct tps65090),
		GFP_KERNEL);
	if (tps65090 == NULL)
		return -ENOMEM;

	tps65090->client = client;
	tps65090->dev = &client->dev;
	i2c_set_clientdata(client, tps65090);

	mutex_init(&tps65090->lock);

	tps65090->rmap = regmap_init_i2c(tps65090->client,
		&tps65090_regmap_config);
	if (IS_ERR(tps65090->rmap)) {
		ret = PTR_ERR(tps65090->rmap);
		dev_err(&client->dev, "regmap_init failed with err: %ld\n",
			PTR_ERR(tps65090->rmap));
		return ret;
	}

	if (client->irq) {
		ret = tps65090_irq_init(tps65090, client->irq, pdata->irq_base);
		if (ret) {
			dev_err(&client->dev, "IRQ init failed with err: %d\n",
				ret);
			goto err_regmap_exit;
		}
	}

	ret = mfd_add_devices(tps65090->dev, -1, tps65090s,
		ARRAY_SIZE(tps65090s), NULL, 0);
	if (ret) {
		dev_err(&client->dev, "add mfd devices failed with err: %d\n",
			ret);
		goto err_irq_exit;
	}
	return 0;

err_irq_exit:
	if (client->irq)
		free_irq(client->irq, tps65090);
err_regmap_exit:
	regmap_exit(tps65090->rmap);
	return ret;
}
/* Tear probe down in reverse order: cells, regmap, then the parent irq.
 * The state struct itself is devm-allocated and freed automatically. */
static int __devexit tps65090_i2c_remove(struct i2c_client *client)
{
	struct tps65090 *pmic = i2c_get_clientdata(client);

	mfd_remove_devices(pmic->dev);
	regmap_exit(pmic->rmap);
	if (client->irq)
		free_irq(client->irq, pmic);

	return 0;
}
#ifdef CONFIG_PM
/* Keep the parent interrupt line quiet while suspended. */
static int tps65090_i2c_suspend(struct i2c_client *client, pm_message_t state)
{
	if (!client->irq)
		return 0;
	disable_irq(client->irq);
	return 0;
}

/* Re-arm the interrupt disabled by tps65090_i2c_suspend(). */
static int tps65090_i2c_resume(struct i2c_client *client)
{
	if (!client->irq)
		return 0;
	enable_irq(client->irq);
	return 0;
}
#endif
/* I2C ids this driver binds to; exported for module autoloading. */
static const struct i2c_device_id tps65090_id_table[] = {
	{ "tps65090", 0 },
	{ },
};
MODULE_DEVICE_TABLE(i2c, tps65090_id_table);
/* Legacy-style i2c_driver; suspend/resume only when CONFIG_PM is set. */
static struct i2c_driver tps65090_driver = {
	.driver	= {
		.name	= "tps65090",
		.owner	= THIS_MODULE,
	},
	.probe		= tps65090_i2c_probe,
	.remove		= __devexit_p(tps65090_i2c_remove),
#ifdef CONFIG_PM
	.suspend	= tps65090_i2c_suspend,
	.resume		= tps65090_i2c_resume,
#endif
	.id_table	= tps65090_id_table,
};
/* Registered at subsys_initcall time so regulators/chargers that depend
 * on this PMIC can probe early. */
static int __init tps65090_init(void)
{
	return i2c_add_driver(&tps65090_driver);
}
subsys_initcall(tps65090_init);
/* Module unload: unregister the i2c driver. */
static void __exit tps65090_exit(void)
{
	i2c_del_driver(&tps65090_driver);
}
module_exit(tps65090_exit);

MODULE_DESCRIPTION("TPS65090 core driver");
MODULE_AUTHOR("Venu Byravarasu <vbyravarasu@nvidia.com>");
MODULE_LICENSE("GPL v2");
| gpl-2.0 |
Fechinator/FechdaKernel_V500 | arch/sh/kernel/cpu/init.c | 6538 | 8619 | /*
* arch/sh/kernel/cpu/init.c
*
* CPU init code
*
* Copyright (C) 2002 - 2009 Paul Mundt
* Copyright (C) 2003 Richard Curnow
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*/
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/log2.h>
#include <asm/mmu_context.h>
#include <asm/processor.h>
#include <asm/uaccess.h>
#include <asm/page.h>
#include <asm/cacheflush.h>
#include <asm/cache.h>
#include <asm/elf.h>
#include <asm/io.h>
#include <asm/smp.h>
#include <asm/sh_bios.h>
#include <asm/setup.h>
/* Compile-time capability flags mirroring Kconfig, testable as ints. */
#ifdef CONFIG_SH_FPU
#define cpu_has_fpu	1
#else
#define cpu_has_fpu	0
#endif

#ifdef CONFIG_SH_DSP
#define cpu_has_dsp	1
#else
#define cpu_has_dsp	0
#endif

/*
 * Generic wrapper for command line arguments to disable on-chip
 * peripherals (nofpu, nodsp, and so forth).
 */
/* Expands to a <x>_disabled flag (defaulting to "disabled" when support
 * is compiled out) plus a "no<x>" __setup handler that sets it. */
#define onchip_setup(x)					\
static int x##_disabled __cpuinitdata = !cpu_has_##x;	\
							\
static int __cpuinit x##_setup(char *opts)		\
{							\
	x##_disabled = 1;				\
	return 1;					\
}							\
__setup("no" __stringify(x), x##_setup);

onchip_setup(fpu);
onchip_setup(dsp);
#ifdef CONFIG_SPECULATIVE_EXECUTION
#define CPUOPM		0xff2f0000	/* CPU operation mode register */
#define CPUOPM_RABD	(1 << 5)	/* speculative-read disable bit */

/* Enable speculative execution by clearing the RABD disable bit. */
static void __cpuinit speculative_execution_init(void)
{
	/* Clear RABD */
	__raw_writel(__raw_readl(CPUOPM) & ~CPUOPM_RABD, CPUOPM);

	/* Flush the update */
	(void)__raw_readl(CPUOPM);
	ctrl_barrier();
}
#else
#define speculative_execution_init()	do { } while (0)
#endif
#ifdef CONFIG_CPU_SH4A
#define EXPMASK			0xff2f0004	/* exception-mask register */
#define EXPMASK_RTEDS		(1 << 0)	/* rte delay-slot instructions */
#define EXPMASK_BRDSSLP		(1 << 1)	/* slottable sleep */
#define EXPMASK_MMCAW		(1 << 4)	/* memory-mapped cache assoc. writes */

/* Disable deprecated behaviours that future SH-4A parts may fault on. */
static void __cpuinit expmask_init(void)
{
	unsigned long expmask = __raw_readl(EXPMASK);

	/*
	 * Future proofing.
	 *
	 * Disable support for slottable sleep instruction, non-nop
	 * instructions in the rte delay slot, and associative writes to
	 * the memory-mapped cache array.
	 */
	expmask &= ~(EXPMASK_RTEDS | EXPMASK_BRDSSLP | EXPMASK_MMCAW);

	__raw_writel(expmask, EXPMASK);
	ctrl_barrier();
}
#else
#define expmask_init()	do { } while (0)
#endif
/* 2nd-level cache init */
/* Weak default: CPUs with an L2 override this with their own setup. */
void __attribute__ ((weak)) l2_cache_init(void)
{
}
/*
 * Generic first-level cache init
 */
#ifdef CONFIG_SUPERH32
/* Flush any bootloader-dirtied cache lines, then (re)enable and
 * invalidate the caches with the configured write policy.  Runs from
 * the uncached mapping because it rewrites CCR. */
static void cache_init(void)
{
	unsigned long ccr, flags;

	jump_to_uncached();
	ccr = __raw_readl(CCR);

	/*
	 * At this point we don't know whether the cache is enabled or not - a
	 * bootloader may have enabled it.  There are at least 2 things that
	 * could be dirty in the cache at this point:
	 * 1. kernel command line set up by boot loader
	 * 2. spilled registers from the prolog of this function
	 * => before re-initialising the cache, we must do a purge of the whole
	 * cache out to memory for safety.  As long as nothing is spilled
	 * during the loop to lines that have already been done, this is safe.
	 * - RPC
	 */
	if (ccr & CCR_CACHE_ENABLE) {
		unsigned long ways, waysize, addrstart;

		waysize = current_cpu_data.dcache.sets;

#ifdef CCR_CACHE_ORA
		/*
		 * If the OC is already in RAM mode, we only have
		 * half of the entries to flush..
		 */
		if (ccr & CCR_CACHE_ORA)
			waysize >>= 1;
#endif

		waysize <<= current_cpu_data.dcache.entry_shift;

#ifdef CCR_CACHE_EMODE
		/* If EMODE is not set, we only have 1 way to flush. */
		if (!(ccr & CCR_CACHE_EMODE))
			ways = 1;
		else
#endif
			ways = current_cpu_data.dcache.ways;

		addrstart = CACHE_OC_ADDRESS_ARRAY;
		do {
			unsigned long addr;

			/* NOTE(review): writing 0 through the address array
			 * clears valid/dirty bits for each line, writing
			 * dirty data back first -- confirm against the CPU
			 * manual for each supported part. */
			for (addr = addrstart;
			     addr < addrstart + waysize;
			     addr += current_cpu_data.dcache.linesz)
				__raw_writel(0, addr);

			addrstart += current_cpu_data.dcache.way_incr;
		} while (--ways);
	}

	/*
	 * Default CCR values .. enable the caches
	 * and invalidate them immediately..
	 */
	flags = CCR_CACHE_ENABLE | CCR_CACHE_INVALIDATE;

#ifdef CCR_CACHE_EMODE
	/* Force EMODE if possible */
	if (current_cpu_data.dcache.ways > 1)
		flags |= CCR_CACHE_EMODE;
	else
		flags &= ~CCR_CACHE_EMODE;
#endif

#if defined(CONFIG_CACHE_WRITETHROUGH)
	/* Write-through */
	flags |= CCR_CACHE_WT;
#elif defined(CONFIG_CACHE_WRITEBACK)
	/* Write-back */
	flags |= CCR_CACHE_CB;
#else
	/* Off */
	flags &= ~CCR_CACHE_ENABLE;
#endif

	l2_cache_init();

	__raw_writel(flags, CCR);
	back_to_cached();
}
#else
#define cache_init()	do { } while (0)
#endif
/* Encode a cache description as (total size | log2 line size | assoc),
 * the format exported through the ELF auxiliary vector. */
#define CSHAPE(totalsize, linesize, assoc) \
	((totalsize & ~0xff) | (linesize << 4) | assoc)

#define CACHE_DESC_SHAPE(desc)	\
	CSHAPE((desc).way_size * (desc).ways, ilog2((desc).linesz), (desc).ways)

/* Publish L1/L2 cache shapes from the probed cpu data (boot CPU only). */
static void detect_cache_shape(void)
{
	l1d_cache_shape = CACHE_DESC_SHAPE(current_cpu_data.dcache);

	if (current_cpu_data.dcache.flags & SH_CACHE_COMBINED)
		l1i_cache_shape = l1d_cache_shape;
	else
		l1i_cache_shape = CACHE_DESC_SHAPE(current_cpu_data.icache);

	if (current_cpu_data.flags & CPU_HAS_L2_CACHE)
		l2_cache_shape = CACHE_DESC_SHAPE(current_cpu_data.scache);
	else
		l2_cache_shape = -1; /* No S-cache */
}
/*
 * Honour the "nofpu" command line option and leave the FPU disabled
 * until a task actually uses it (lazy FPU switching).
 *
 * Fix: printk() was missing a log level, which checkpatch flags and
 * which demotes the message to the default level.
 */
static void __cpuinit fpu_init(void)
{
	/* Disable the FPU */
	if (fpu_disabled && (current_cpu_data.flags & CPU_HAS_FPU)) {
		printk(KERN_NOTICE "FPU Disabled\n");
		current_cpu_data.flags &= ~CPU_HAS_FPU;
	}

	disable_fpu();
	clear_used_math();
}
#ifdef CONFIG_SH_DSP
/* Clear SR.DSP so the DSP unit is off until a task claims it. */
static void __cpuinit release_dsp(void)
{
	unsigned long sr;

	/* Clear SR.DSP bit */
	__asm__ __volatile__ (
		"stc\tsr, %0\n\t"
		"and\t%1, %0\n\t"
		"ldc\t%0, sr\n\t"
		: "=&r" (sr)
		: "r" (~SR_DSP)
	);
}

/* Probe for a DSP by attempting to set SR.DSP and reading it back;
 * the bit sticks only on parts that actually have the unit. */
static void __cpuinit dsp_init(void)
{
	unsigned long sr;

	/*
	 * Set the SR.DSP bit, wait for one instruction, and then read
	 * back the SR value.
	 */
	__asm__ __volatile__ (
		"stc\tsr, %0\n\t"
		"or\t%1, %0\n\t"
		"ldc\t%0, sr\n\t"
		"nop\n\t"
		"stc\tsr, %0\n\t"
		: "=&r" (sr)
		: "r" (SR_DSP)
	);

	/* If the DSP bit is still set, this CPU has a DSP */
	if (sr & SR_DSP)
		current_cpu_data.flags |= CPU_HAS_DSP;

	/* Disable the DSP */
	if (dsp_disabled && (current_cpu_data.flags & CPU_HAS_DSP)) {
		printk("DSP Disabled\n");
		current_cpu_data.flags &= ~CPU_HAS_DSP;
	}

	/* Now that we've determined the DSP status, clear the DSP bit. */
	release_dsp();
}
#else
static inline void __cpuinit dsp_init(void) { }
#endif /* CONFIG_SH_DSP */
/**
 * cpu_init
 *
 * This is our initial entry point for each CPU, and is invoked on the
 * boot CPU prior to calling start_kernel(). For SMP, a combination of
 * this and start_secondary() will bring up each processor to a ready
 * state prior to hand forking the idle loop.
 *
 * We do all of the basic processor init here, including setting up
 * the caches, FPU, DSP, etc. By the time start_kernel() is hit (and
 * subsequently platform_setup()) things like determining the CPU
 * subtype and initial configuration will all be done.
 *
 * Each processor family is still responsible for doing its own probing
 * and cache configuration in cpu_probe().
 */
asmlinkage void __cpuinit cpu_init(void)
{
	current_thread_info()->cpu = hard_smp_processor_id();

	/* First, probe the CPU */
	cpu_probe();

	if (current_cpu_data.type == CPU_SH_NONE)
		panic("Unknown CPU");

	/* First setup the rest of the I-cache info */
	current_cpu_data.icache.entry_mask = current_cpu_data.icache.way_incr -
				      current_cpu_data.icache.linesz;

	current_cpu_data.icache.way_size = current_cpu_data.icache.sets *
				    current_cpu_data.icache.linesz;

	/* And the D-cache too */
	current_cpu_data.dcache.entry_mask = current_cpu_data.dcache.way_incr -
				      current_cpu_data.dcache.linesz;

	current_cpu_data.dcache.way_size = current_cpu_data.dcache.sets *
				    current_cpu_data.dcache.linesz;

	/* Init the cache */
	cache_init();

	if (raw_smp_processor_id() == 0) {
		/* mmap alignment must respect cache way size for aliasing */
		shm_align_mask = max_t(unsigned long,
				       current_cpu_data.dcache.way_size - 1,
				       PAGE_SIZE - 1);

		/* Boot CPU sets the cache shape */
		detect_cache_shape();
	}

	fpu_init();
	dsp_init();

	/*
	 * Initialize the per-CPU ASID cache very early, since the
	 * TLB flushing routines depend on this being setup.
	 */
	current_cpu_data.asid_cache = NO_CONTEXT;

	current_cpu_data.phys_bits = __in_29bit_mode() ? 29 : 32;

	speculative_execution_init();
	expmask_init();

	/* Do the rest of the boot processor setup */
	if (raw_smp_processor_id() == 0) {
		/* Save off the BIOS VBR, if there is one */
		sh_bios_vbr_init();

		/*
		 * Setup VBR for boot CPU. Secondary CPUs do this through
		 * start_secondary().
		 */
		per_cpu_trap_init();

		/*
		 * Boot processor to setup the FP and extended state
		 * context info.
		 */
		init_thread_xstate();
	}
}
| gpl-2.0 |
ztotherad/nd7 | drivers/scsi/stex.c | 8330 | 44285 | /*
* SuperTrak EX Series Storage Controller driver for Linux
*
* Copyright (C) 2005-2009 Promise Technology Inc.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*
* Written By:
* Ed Lin <promise_linux@promise.com>
*
*/
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/time.h>
#include <linux/pci.h>
#include <linux/blkdev.h>
#include <linux/interrupt.h>
#include <linux/types.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/byteorder.h>
#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_dbg.h>
#include <scsi/scsi_eh.h>
#define DRV_NAME "stex"
#define ST_DRIVER_VERSION "4.6.0000.4"
#define ST_VER_MAJOR 4
#define ST_VER_MINOR 6
#define ST_OEM 0
#define ST_BUILD_VER 4
/* All hardware/protocol constants for the supported controller families
 * (shasta, vsc, yosemite, seq, yel).  Register offsets are relative to
 * the iomapped BAR; "MU" is the message unit used to exchange requests
 * and status payloads with the firmware. */
enum {
	/* MU register offset */
	IMR0	= 0x10,	/* MU_INBOUND_MESSAGE_REG0 */
	IMR1	= 0x14,	/* MU_INBOUND_MESSAGE_REG1 */
	OMR0	= 0x18,	/* MU_OUTBOUND_MESSAGE_REG0 */
	OMR1	= 0x1c,	/* MU_OUTBOUND_MESSAGE_REG1 */
	IDBL	= 0x20,	/* MU_INBOUND_DOORBELL */
	IIS	= 0x24,	/* MU_INBOUND_INTERRUPT_STATUS */
	IIM	= 0x28,	/* MU_INBOUND_INTERRUPT_MASK */
	ODBL	= 0x2c,	/* MU_OUTBOUND_DOORBELL */
	OIS	= 0x30,	/* MU_OUTBOUND_INTERRUPT_STATUS */
	OIM	= 0x3c,	/* MU_OUTBOUND_INTERRUPT_MASK */

	/* SS (st_yel) register offsets use a different layout */
	YIOA_STATUS	= 0x00,
	YH2I_INT	= 0x20,
	YINT_EN		= 0x34,
	YI2H_INT	= 0x9c,
	YI2H_INT_C	= 0xa0,
	YH2I_REQ	= 0xc0,
	YH2I_REQ_HI	= 0xc4,

	/* MU register value */
	MU_INBOUND_DOORBELL_HANDSHAKE		= (1 << 0),
	MU_INBOUND_DOORBELL_REQHEADCHANGED	= (1 << 1),
	MU_INBOUND_DOORBELL_STATUSTAILCHANGED	= (1 << 2),
	MU_INBOUND_DOORBELL_HMUSTOPPED		= (1 << 3),
	MU_INBOUND_DOORBELL_RESET		= (1 << 4),

	MU_OUTBOUND_DOORBELL_HANDSHAKE		= (1 << 0),
	MU_OUTBOUND_DOORBELL_REQUESTTAILCHANGED	= (1 << 1),
	MU_OUTBOUND_DOORBELL_STATUSHEADCHANGED	= (1 << 2),
	MU_OUTBOUND_DOORBELL_BUSCHANGE		= (1 << 3),
	MU_OUTBOUND_DOORBELL_HASEVENT		= (1 << 4),
	MU_OUTBOUND_DOORBELL_REQUEST_RESET	= (1 << 27),

	/* MU status code */
	MU_STATE_STARTING	= 1,
	MU_STATE_STARTED	= 2,
	MU_STATE_RESETTING	= 3,
	MU_STATE_FAILED		= 4,

	MU_MAX_DELAY		= 120,
	MU_HANDSHAKE_SIGNATURE	= 0x55aaaa55,
	MU_HANDSHAKE_SIGNATURE_HALF	= 0x5a5a0000,
	MU_HARD_RESET_WAIT	= 30000,
	HMU_PARTNER_TYPE	= 2,

	/* firmware returned values */
	SRB_STATUS_SUCCESS	= 0x01,
	SRB_STATUS_ERROR	= 0x04,
	SRB_STATUS_BUSY		= 0x05,
	SRB_STATUS_INVALID_REQUEST	= 0x06,
	SRB_STATUS_SELECTION_TIMEOUT	= 0x0A,
	SRB_SEE_SENSE		= 0x80,

	/* task attribute */
	TASK_ATTRIBUTE_SIMPLE	= 0x0,
	TASK_ATTRIBUTE_HEADOFQUEUE	= 0x1,
	TASK_ATTRIBUTE_ORDERED	= 0x2,
	TASK_ATTRIBUTE_ACA	= 0x4,

	SS_STS_NORMAL		= 0x80000000,
	SS_STS_DONE		= 0x40000000,
	SS_STS_HANDSHAKE	= 0x20000000,

	SS_HEAD_HANDSHAKE	= 0x80,

	SS_H2I_INT_RESET	= 0x100,

	SS_I2H_REQUEST_RESET	= 0x2000,

	SS_MU_OPERATIONAL	= 0x80000000,

	STEX_CDB_LENGTH		= 16,
	STATUS_VAR_LEN		= 128,

	/* sg flags */
	SG_CF_EOT		= 0x80,	/* end of table */
	SG_CF_64B		= 0x40,	/* 64 bit item */
	SG_CF_HOST		= 0x20,	/* sg in host memory */
	MSG_DATA_DIR_ND		= 0,
	MSG_DATA_DIR_IN		= 1,
	MSG_DATA_DIR_OUT	= 2,

	/* controller family ids (hba->cardtype) */
	st_shasta		= 0,
	st_vsc			= 1,
	st_yosemite		= 2,
	st_seq			= 3,
	st_yel			= 4,

	PASSTHRU_REQ_TYPE	= 0x00000001,
	PASSTHRU_REQ_NO_WAKEUP	= 0x00000100,
	ST_INTERNAL_TIMEOUT	= 180,

	ST_TO_CMD		= 0,
	ST_FROM_CMD		= 1,

	/* vendor specific commands of Promise */
	MGT_CMD			= 0xd8,
	SINBAND_MGT_CMD		= 0xd9,
	ARRAY_CMD		= 0xe0,
	CONTROLLER_CMD		= 0xe1,
	DEBUGGING_CMD		= 0xe2,
	PASSTHRU_CMD		= 0xe3,

	PASSTHRU_GET_ADAPTER	= 0x05,
	PASSTHRU_GET_DRVVER	= 0x10,

	CTLR_CONFIG_CMD		= 0x03,
	CTLR_SHUTDOWN		= 0x0d,

	CTLR_POWER_STATE_CHANGE	= 0x0e,
	CTLR_POWER_SAVING	= 0x01,

	PASSTHRU_SIGNATURE	= 0x4e415041,
	MGT_CMD_SIGNATURE	= 0xba,

	INQUIRY_EVPD		= 0x01,

	ST_ADDITIONAL_MEM	= 0x200000,
	ST_ADDITIONAL_MEM_MIN	= 0x80000,
};
/* One scatter/gather entry as consumed by non-yel firmware. */
struct st_sgitem {
	u8 ctrl;	/* SG_CF_xxx */
	u8 reserved[3];
	__le32 count;
	__le64 addr;
};

/* One scatter/gather entry in the st_yel (SS) layout: split 32-bit
 * address halves instead of one 64-bit field. */
struct st_ss_sgitem {
	__le32 addr;
	__le32 addr_hi;
	__le32 count;
};

/* Header preceding the per-request sg entry array. */
struct st_sgtable {
	__le16 sg_count;	/* entries actually used */
	__le16 max_sg_count;	/* table capacity (host sg_tablesize) */
	__le32 sz_in_byte;	/* total transfer length */
};

/* Per-request header used by the st_yel message format; lives
 * immediately before the struct req_msg in DMA memory. */
struct st_msg_header {
	__le64 handle;		/* request bus address (plus encoded info) */
	u8 flag;
	u8 channel;
	__le16 timeout;		/* in seconds */
	u32 reserved;
};

/* Parameters exchanged with the firmware during the MU handshake. */
struct handshake_frame {
	__le64 rb_phy;		/* request payload queue physical address */
	__le16 req_sz;		/* size of each request payload */
	__le16 req_cnt;		/* count of reqs the buffer can hold */
	__le16 status_sz;	/* size of each status payload */
	__le16 status_cnt;	/* count of status the buffer can hold */
	__le64 hosttime;	/* seconds from Jan 1, 1970 (GMT) */
	u8 partner_type;	/* who sends this frame */
	u8 reserved0[7];
	__le32 partner_ver_major;
	__le32 partner_ver_minor;
	__le32 partner_ver_oem;
	__le32 partner_ver_build;
	__le32 extra_offset;	/* NEW */
	__le32 extra_size;	/* NEW */
	__le32 scratch_size;
	u32 reserved1;
};
/* Request payload sent to the firmware; sg table follows in variable[]. */
struct req_msg {
	__le16 tag;		/* matches the SCSI midlayer block tag */
	u8 lun;
	u8 target;
	u8 task_attr;
	u8 task_manage;
	u8 data_dir;		/* MSG_DATA_DIR_* */
	u8 payload_sz;		/* payload size in 4-byte, not used */
	u8 cdb[STEX_CDB_LENGTH];
	u32 variable[0];	/* trailing sg table / extra data */
};

/* Completion payload returned by the firmware. */
struct status_msg {
	__le16 tag;
	u8 lun;
	u8 target;
	u8 srb_status;		/* SRB_STATUS_* / SRB_SEE_SENSE */
	u8 scsi_status;		/* SAM status byte */
	u8 reserved;
	u8 payload_sz;		/* payload size in 4-byte */
	u8 variable[STATUS_VAR_LEN];	/* sense or returned data */
};

/* Four-part version number exchanged with the firmware. */
struct ver_info {
	u32 major;
	u32 minor;
	u32 oem;
	u32 build;
	u32 reserved[2];
};

/* Adapter/driver description returned for PASSTHRU_GET_ADAPTER. */
struct st_frame {
	u32 base[6];
	u32 rom_addr;

	struct ver_info drv_ver;
	struct ver_info bios_ver;

	u32 bus;
	u32 slot;
	u32 irq_level;
	u32 irq_vec;
	u32 id;
	u32 subid;

	u32 dimm_size;
	u8 dimm_type;
	u8 reserved[3];

	u32 channel;
	u32 reserved1;
};

/* Driver version record returned for PASSTHRU_GET_DRVVER. */
struct st_drvver {
	u32 major;
	u32 minor;
	u32 oem;
	u32 build;
	u32 signature[2];
	u8 console_id;
	u8 host_no;
	u8 reserved0[2];
	u32 reserved[3];
};

/* Per-tag command context, one per possible outstanding request. */
struct st_ccb {
	struct req_msg *req;		/* in-flight request, NULL when idle */
	struct scsi_cmnd *cmd;		/* NULL for internal/passthru requests */

	void *sense_buffer;
	unsigned int sense_bufflen;
	int sg_count;

	u32 req_type;
	u8 srb_status;
	u8 scsi_status;
	u8 reserved[2];
};
/* Per-adapter state; the function pointers select the request format
 * and doorbell protocol for the detected controller family. */
struct st_hba {
	void __iomem *mmio_base;	/* iomapped PCI memory space */
	void *dma_mem;			/* request/status rings + scratch */
	dma_addr_t dma_handle;
	size_t dma_size;

	struct Scsi_Host *host;
	struct pci_dev *pdev;

	struct req_msg * (*alloc_rq) (struct st_hba *);
	int (*map_sg)(struct st_hba *, struct req_msg *, struct st_ccb *);
	void (*send) (struct st_hba *, struct req_msg *, u16);

	/* ring indices, each wraps at count+1 */
	u32 req_head;
	u32 req_tail;
	u32 status_head;
	u32 status_tail;

	struct status_msg *status_buffer;
	void *copy_buffer; /* temp buffer for driver-handled commands */
	struct st_ccb *ccb;		/* indexed by request tag */
	struct st_ccb *wait_ccb;	/* ccb a reset path is waiting on */
	__le32 *scratch;

	char work_q_name[20];
	struct workqueue_struct *work_q;
	struct work_struct reset_work;
	wait_queue_head_t reset_waitq;

	unsigned int mu_status;		/* MU_STATE_* */
	unsigned int cardtype;		/* st_shasta .. st_yel */
	int msi_enabled;
	int out_req_cnt;		/* requests currently owned by firmware */

	u32 extra_offset;
	u16 rq_count;
	u16 rq_size;
	u16 sts_count;
};

/* Static per-family parameters used at probe time. */
struct st_card_info {
	struct req_msg * (*alloc_rq) (struct st_hba *);
	int (*map_sg)(struct st_hba *, struct req_msg *, struct st_ccb *);
	void (*send) (struct st_hba *, struct req_msg *, u16);
	unsigned int max_id;
	unsigned int max_lun;
	unsigned int max_channel;
	u16 rq_count;
	u16 rq_size;
	u16 sts_count;
};
/* Optional MSI support, off by default. */
static int msi;
module_param(msi, int, 0);
MODULE_PARM_DESC(msi, "Enable Message Signaled Interrupts(0=off, 1=on)");

/* Canned standard INQUIRY response for the virtual RAID console LUN. */
static const char console_inq_page[] =
{
	0x03,0x00,0x03,0x03,0xFA,0x00,0x00,0x30,
	0x50,0x72,0x6F,0x6D,0x69,0x73,0x65,0x20,	/* "Promise " */
	0x52,0x41,0x49,0x44,0x20,0x43,0x6F,0x6E,	/* "RAID Con" */
	0x73,0x6F,0x6C,0x65,0x20,0x20,0x20,0x20,	/* "sole    " */
	0x31,0x2E,0x30,0x30,0x20,0x20,0x20,0x20,	/* "1.00    " */
	0x53,0x58,0x2F,0x52,0x53,0x41,0x46,0x2D,	/* "SX/RSAF-" */
	0x54,0x45,0x31,0x2E,0x30,0x30,0x20,0x20,	/* "TE1.00  " */
	0x0C,0x20,0x20,0x20,0x20,0x20,0x20,0x20
};

MODULE_AUTHOR("Ed Lin");
MODULE_DESCRIPTION("Promise Technology SuperTrak EX Controllers");
MODULE_LICENSE("GPL");
MODULE_VERSION(ST_DRIVER_VERSION);
/* Store the current wall-clock time (seconds since the epoch) as a
 * little-endian 64-bit value for the firmware handshake. */
static void stex_gettime(__le64 *time)
{
	struct timeval now;

	do_gettimeofday(&now);
	*time = cpu_to_le64(now.tv_sec);
}
/* Pop the next status payload off the ring; the tail index wraps at
 * sts_count+1 entries. */
static struct status_msg *stex_get_status(struct st_hba *hba)
{
	struct status_msg *status;

	status = hba->status_buffer + hba->status_tail;
	hba->status_tail = (hba->status_tail + 1) % (hba->sts_count + 1);

	return status;
}
/* Complete a command with CHECK CONDITION / ILLEGAL REQUEST,
 * "Invalid field in cdb" (ASC 0x24). */
static void stex_invalid_field(struct scsi_cmnd *cmd,
			       void (*done)(struct scsi_cmnd *))
{
	/* "Invalid field in cdb" */
	scsi_build_sense_buffer(0, cmd->sense_buffer, ILLEGAL_REQUEST, 0x24,
				0x0);
	cmd->result = (DRIVER_SENSE << 24) | SAM_STAT_CHECK_CONDITION;
	done(cmd);
}
/* Claim the next request slot in the DMA ring; the head index wraps at
 * rq_count+1 entries. */
static struct req_msg *stex_alloc_req(struct st_hba *hba)
{
	struct req_msg *req;

	req = hba->dma_mem + hba->req_head * hba->rq_size;
	hba->req_head = (hba->req_head + 1) % (hba->rq_count + 1);

	return req;
}
/* st_yel variant: the request payload sits after a st_msg_header in the
 * slot; the head index is advanced later, by stex_ss_send_cmd(). */
static struct req_msg *stex_ss_alloc_req(struct st_hba *hba)
{
	void *slot = hba->dma_mem + hba->req_head * hba->rq_size;

	return (struct req_msg *)(slot + sizeof(struct st_msg_header));
}
/*
 * Map the command's data buffer for DMA and fill the request's sg table
 * (st_sgitem layout).  Returns the number of mapped segments; 0 means
 * no data transfer.
 */
static int stex_map_sg(struct st_hba *hba,
	struct req_msg *req, struct st_ccb *ccb)
{
	struct scsi_cmnd *cmd;
	struct scatterlist *sg;
	struct st_sgtable *dst;
	struct st_sgitem *table;
	int i, nseg;

	cmd = ccb->cmd;
	nseg = scsi_dma_map(cmd);
	BUG_ON(nseg < 0);
	if (nseg) {
		dst = (struct st_sgtable *)req->variable;

		ccb->sg_count = nseg;
		dst->sg_count = cpu_to_le16((u16)nseg);
		dst->max_sg_count = cpu_to_le16(hba->host->sg_tablesize);
		dst->sz_in_byte = cpu_to_le32(scsi_bufflen(cmd));

		table = (struct st_sgitem *)(dst + 1);
		scsi_for_each_sg(cmd, sg, nseg, i) {
			table[i].count = cpu_to_le32((u32)sg_dma_len(sg));
			table[i].addr = cpu_to_le64(sg_dma_address(sg));
			table[i].ctrl = SG_CF_64B | SG_CF_HOST;
		}
		/* i == nseg after the loop, so --i marks the last entry */
		table[--i].ctrl |= SG_CF_EOT;
	}

	return nseg;
}
/*
 * st_yel variant of stex_map_sg(): same contract, but fills the
 * split-address st_ss_sgitem layout and has no end-of-table marker.
 */
static int stex_ss_map_sg(struct st_hba *hba,
	struct req_msg *req, struct st_ccb *ccb)
{
	struct scsi_cmnd *cmd;
	struct scatterlist *sg;
	struct st_sgtable *dst;
	struct st_ss_sgitem *table;
	int i, nseg;

	cmd = ccb->cmd;
	nseg = scsi_dma_map(cmd);
	BUG_ON(nseg < 0);
	if (nseg) {
		dst = (struct st_sgtable *)req->variable;

		ccb->sg_count = nseg;
		dst->sg_count = cpu_to_le16((u16)nseg);
		dst->max_sg_count = cpu_to_le16(hba->host->sg_tablesize);
		dst->sz_in_byte = cpu_to_le32(scsi_bufflen(cmd));

		table = (struct st_ss_sgitem *)(dst + 1);
		scsi_for_each_sg(cmd, sg, nseg, i) {
			table[i].count = cpu_to_le32((u32)sg_dma_len(sg));
			table[i].addr =
				cpu_to_le32(sg_dma_address(sg) & 0xffffffff);
			/* double 16-bit shift avoids UB on 32-bit dma_addr_t */
			table[i].addr_hi =
				cpu_to_le32((sg_dma_address(sg) >> 16) >> 16);
		}
	}

	return nseg;
}
/*
 * Service PASSTHRU_GET_ADAPTER in the driver: copy the caller's frame
 * out of the sg buffer, fill in the PCI/driver identification fields,
 * and copy it back.
 */
static void stex_controller_info(struct st_hba *hba, struct st_ccb *ccb)
{
	struct st_frame *p;
	size_t count = sizeof(struct st_frame);

	p = hba->copy_buffer;
	scsi_sg_copy_to_buffer(ccb->cmd, p, count);
	memset(p->base, 0, sizeof(u32)*6);
	*(unsigned long *)(p->base) = pci_resource_start(hba->pdev, 0);
	p->rom_addr = 0;

	p->drv_ver.major = ST_VER_MAJOR;
	p->drv_ver.minor = ST_VER_MINOR;
	p->drv_ver.oem = ST_OEM;
	p->drv_ver.build = ST_BUILD_VER;

	p->bus = hba->pdev->bus->number;
	p->slot = hba->pdev->devfn;
	p->irq_level = 0;
	p->irq_vec = hba->pdev->irq;
	p->id = hba->pdev->vendor << 16 | hba->pdev->device;
	p->subid =
		hba->pdev->subsystem_vendor << 16 | hba->pdev->subsystem_device;

	scsi_sg_copy_from_buffer(ccb->cmd, p, count);
}
/*
 * Hand a prepared request to non-yel firmware: record it against its
 * tag, publish the new ring head via IMR0 and ring the inbound
 * doorbell.  The trailing read flushes posted PCI writes.
 */
static void
stex_send_cmd(struct st_hba *hba, struct req_msg *req, u16 tag)
{
	req->tag = cpu_to_le16(tag);

	hba->ccb[tag].req = req;
	hba->out_req_cnt++;

	writel(hba->req_head, hba->mmio_base + IMR0);
	writel(MU_INBOUND_DOORBELL_REQHEADCHANGED, hba->mmio_base + IDBL);
	readl(hba->mmio_base + IDBL); /* flush */
}
/*
 * st_yel variant of stex_send_cmd(): fill the st_msg_header that sits
 * in front of the request, then post the request's bus address through
 * the YH2I_REQ_HI/YH2I_REQ register pair (high half first).
 */
static void
stex_ss_send_cmd(struct st_hba *hba, struct req_msg *req, u16 tag)
{
	struct scsi_cmnd *cmd;
	struct st_msg_header *msg_h;
	dma_addr_t addr;

	req->tag = cpu_to_le16(tag);

	hba->ccb[tag].req = req;
	hba->out_req_cnt++;

	cmd = hba->ccb[tag].cmd;
	msg_h = (struct st_msg_header *)req - 1;
	if (likely(cmd)) {
		msg_h->channel = (u8)cmd->device->channel;
		msg_h->timeout = cpu_to_le16(cmd->request->timeout/HZ);
	}

	addr = hba->dma_handle + hba->req_head * hba->rq_size;
	/* NOTE(review): encodes the sg count into the low bits of the
	 * handle address; the (count+4)/11 formula is a firmware-defined
	 * convention -- confirm against the SS interface spec. */
	addr += (hba->ccb[tag].sg_count+4)/11;
	msg_h->handle = cpu_to_le64(addr);

	++hba->req_head;
	hba->req_head %= hba->rq_count+1;

	writel((addr >> 16) >> 16, hba->mmio_base + YH2I_REQ_HI);
	readl(hba->mmio_base + YH2I_REQ_HI); /* flush */
	writel(addr, hba->mmio_base + YH2I_REQ);
	readl(hba->mmio_base + YH2I_REQ); /* flush */
}
/* scsi_host .slave_alloc: advertise tagged queuing regardless of what
 * the (virtual) Inquiry data says, and enable TCQ at full host depth. */
static int
stex_slave_alloc(struct scsi_device *sdev)
{
	/* Cheat: usually extracted from Inquiry data */
	sdev->tagged_supported = 1;
	scsi_activate_tcq(sdev, sdev->host->can_queue);
	return 0;
}
/* scsi_host .slave_config: force 10-byte read/write and mode-sense,
 * a 60 second request timeout, and tagged queuing. */
static int
stex_slave_config(struct scsi_device *sdev)
{
	sdev->use_10_for_rw = 1;
	sdev->use_10_for_ms = 1;
	sdev->tagged_supported = 1;
	blk_queue_rq_timeout(sdev->request_queue, 60 * HZ);

	return 0;
}
/* scsi_host .slave_destroy: undo the TCQ setup from slave_alloc. */
static void
stex_slave_destroy(struct scsi_device *sdev)
{
	scsi_deactivate_tcq(sdev, 1);
}
/*
 * Queue one SCSI command.  A handful of commands (MODE SENSE caching
 * page, REPORT LUNS, console INQUIRY, driver-version passthru) are
 * emulated entirely in the driver; everything else is packed into a
 * request message and handed to the firmware.  Returns 0 or
 * SCSI_MLQUEUE_HOST_BUSY.
 */
static int
stex_queuecommand_lck(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
{
	struct st_hba *hba;
	struct Scsi_Host *host;
	unsigned int id, lun;
	struct req_msg *req;
	u16 tag;

	host = cmd->device->host;
	id = cmd->device->id;
	lun = cmd->device->lun;
	hba = (struct st_hba *) &host->hostdata[0];

	/* let the midlayer retry while a reset is in progress */
	if (unlikely(hba->mu_status == MU_STATE_RESETTING))
		return SCSI_MLQUEUE_HOST_BUSY;

	switch (cmd->cmnd[0]) {
	case MODE_SENSE_10:
	{
		static char ms10_caching_page[12] =
			{ 0, 0x12, 0, 0, 0, 0, 0, 0, 0x8, 0xa, 0x4, 0 };
		unsigned char page;

		page = cmd->cmnd[2] & 0x3f;
		if (page == 0x8 || page == 0x3f) {
			scsi_sg_copy_from_buffer(cmd, ms10_caching_page,
						 sizeof(ms10_caching_page));
			cmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8;
			done(cmd);
		} else
			stex_invalid_field(cmd, done);
		return 0;
	}
	case REPORT_LUNS:
		/*
		 * The shasta firmware does not report actual luns in the
		 * target, so fail the command to force sequential lun scan.
		 * Also, the console device does not support this command.
		 */
		if (hba->cardtype == st_shasta || id == host->max_id - 1) {
			stex_invalid_field(cmd, done);
			return 0;
		}
		break;
	case TEST_UNIT_READY:
		/* the console device (last target id) is always ready */
		if (id == host->max_id - 1) {
			cmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8;
			done(cmd);
			return 0;
		}
		break;
	case INQUIRY:
		if (lun >= host->max_lun) {
			cmd->result = DID_NO_CONNECT << 16;
			done(cmd);
			return 0;
		}
		if (id != host->max_id - 1)
			break;
		/* standard INQUIRY of the console device is answered here */
		if (!lun && !cmd->device->channel &&
			(cmd->cmnd[1] & INQUIRY_EVPD) == 0) {
			scsi_sg_copy_from_buffer(cmd, (void *)console_inq_page,
						 sizeof(console_inq_page));
			cmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8;
			done(cmd);
		} else
			stex_invalid_field(cmd, done);
		return 0;
	case PASSTHRU_CMD:
		if (cmd->cmnd[1] == PASSTHRU_GET_DRVVER) {
			struct st_drvver ver;
			size_t cp_len = sizeof(ver);

			ver.major = ST_VER_MAJOR;
			ver.minor = ST_VER_MINOR;
			ver.oem = ST_OEM;
			ver.build = ST_BUILD_VER;
			ver.signature[0] = PASSTHRU_SIGNATURE;
			ver.console_id = host->max_id - 1;
			ver.host_no = hba->host->host_no;
			cp_len = scsi_sg_copy_from_buffer(cmd, &ver, cp_len);
			cmd->result = sizeof(ver) == cp_len ?
				DID_OK << 16 | COMMAND_COMPLETE << 8 :
				DID_ERROR << 16 | COMMAND_COMPLETE << 8;
			done(cmd);
			return 0;
		}
		/* fallthrough: other passthru sub-commands go to firmware */
	default:
		break;
	}

	cmd->scsi_done = done;

	tag = cmd->request->tag;

	if (unlikely(tag >= host->can_queue))
		return SCSI_MLQUEUE_HOST_BUSY;

	req = hba->alloc_rq(hba);

	req->lun = lun;
	req->target = id;

	/* cdb */
	memcpy(req->cdb, cmd->cmnd, STEX_CDB_LENGTH);

	if (cmd->sc_data_direction == DMA_FROM_DEVICE)
		req->data_dir = MSG_DATA_DIR_IN;
	else if (cmd->sc_data_direction == DMA_TO_DEVICE)
		req->data_dir = MSG_DATA_DIR_OUT;
	else
		req->data_dir = MSG_DATA_DIR_ND;

	hba->ccb[tag].cmd = cmd;
	hba->ccb[tag].sense_bufflen = SCSI_SENSE_BUFFERSIZE;
	hba->ccb[tag].sense_buffer = cmd->sense_buffer;

	if (!hba->map_sg(hba, req, &hba->ccb[tag])) {
		hba->ccb[tag].sg_count = 0;
		memset(&req->variable[0], 0, 8);
	}

	hba->send(hba, req, tag);
	return 0;
}

static DEF_SCSI_QCMD(stex_queuecommand)
/*
 * Translate the firmware's srb_status/scsi_status pair into a midlayer
 * result code and complete the command.
 */
static void stex_scsi_done(struct st_ccb *ccb)
{
	struct scsi_cmnd *cmd = ccb->cmd;
	int result;

	if (ccb->srb_status == SRB_STATUS_SUCCESS || ccb->srb_status == 0) {
		/* firmware succeeded: pass the SAM status through */
		result = ccb->scsi_status;
		switch (ccb->scsi_status) {
		case SAM_STAT_GOOD:
			result |= DID_OK << 16 | COMMAND_COMPLETE << 8;
			break;
		case SAM_STAT_CHECK_CONDITION:
			result |= DRIVER_SENSE << 24;
			break;
		case SAM_STAT_BUSY:
			result |= DID_BUS_BUSY << 16 | COMMAND_COMPLETE << 8;
			break;
		default:
			result |= DID_ERROR << 16 | COMMAND_COMPLETE << 8;
			break;
		}
	}
	else if (ccb->srb_status & SRB_SEE_SENSE)
		/* sense data was copied in stex_copy_data() */
		result = DRIVER_SENSE << 24 | SAM_STAT_CHECK_CONDITION;
	else switch (ccb->srb_status) {
		case SRB_STATUS_SELECTION_TIMEOUT:
			result = DID_NO_CONNECT << 16 | COMMAND_COMPLETE << 8;
			break;
		case SRB_STATUS_BUSY:
			result = DID_BUS_BUSY << 16 | COMMAND_COMPLETE << 8;
			break;
		case SRB_STATUS_INVALID_REQUEST:
		case SRB_STATUS_ERROR:
		default:
			result = DID_ERROR << 16 | COMMAND_COMPLETE << 8;
			break;
	}

	cmd->result = result;
	cmd->scsi_done(cmd);
}
/*
 * Copy the variable part of a status payload to its destination: sense
 * data on a non-GOOD status, otherwise returned command data into the
 * command's sg buffer.
 */
static void stex_copy_data(struct st_ccb *ccb,
	struct status_msg *resp, unsigned int variable)
{
	if (resp->scsi_status != SAM_STAT_GOOD) {
		if (ccb->sense_buffer != NULL)
			memcpy(ccb->sense_buffer, resp->variable,
				min(variable, ccb->sense_bufflen));
		return;
	}

	if (ccb->cmd != NULL)
		scsi_sg_copy_from_buffer(ccb->cmd, resp->variable, variable);
}
/*
 * st_yosemite post-processing for MGT_CMD completions: the first 32-bit
 * word of the status payload holds the actual transfer length, which is
 * used to set the residual.
 */
static void stex_check_cmd(struct st_hba *hba,
	struct st_ccb *ccb, struct status_msg *resp)
{
	if (ccb->cmd->cmnd[0] == MGT_CMD &&
		resp->scsi_status != SAM_STAT_CHECK_CONDITION)
		scsi_set_resid(ccb->cmd, scsi_bufflen(ccb->cmd) -
			le32_to_cpu(*(__le32 *)&resp->variable[0]));
}
/*
 * Drain the outbound status ring after a STATUSHEADCHANGED doorbell and
 * complete the corresponding commands.  Runs with the host lock held
 * (called from stex_intr() and stex_abort()).
 */
static void stex_mu_intr(struct st_hba *hba, u32 doorbell)
{
void __iomem *base = hba->mmio_base;
struct status_msg *resp;
struct st_ccb *ccb;
unsigned int size;
u16 tag;
if (unlikely(!(doorbell & MU_OUTBOUND_DOORBELL_STATUSHEADCHANGED)))
return;
/* status payloads */
hba->status_head = readl(base + OMR1);
if (unlikely(hba->status_head > hba->sts_count)) {
printk(KERN_WARNING DRV_NAME "(%s): invalid status head\n",
pci_name(hba->pdev));
return;
}
/*
* it's not a valid status payload if:
* 1. there are no pending requests(e.g. during init stage)
* 2. there are some pending requests, but the controller is in
* reset status, and its type is not st_yosemite
* firmware of st_yosemite in reset status will return pending requests
* to driver, so we allow it to pass
*/
if (unlikely(hba->out_req_cnt <= 0 ||
(hba->mu_status == MU_STATE_RESETTING &&
hba->cardtype != st_yosemite))) {
/* discard everything and just acknowledge the new head */
hba->status_tail = hba->status_head;
goto update_status;
}
while (hba->status_tail != hba->status_head) {
resp = stex_get_status(hba);
tag = le16_to_cpu(resp->tag);
if (unlikely(tag >= hba->host->can_queue)) {
printk(KERN_WARNING DRV_NAME
"(%s): invalid tag\n", pci_name(hba->pdev));
continue;
}
hba->out_req_cnt--;
ccb = &hba->ccb[tag];
if (unlikely(hba->wait_ccb == ccb))
hba->wait_ccb = NULL; /* tell stex_abort() we completed it */
if (unlikely(ccb->req == NULL)) {
printk(KERN_WARNING DRV_NAME
"(%s): lagging req\n", pci_name(hba->pdev));
continue;
}
size = resp->payload_sz * sizeof(u32); /* payload size */
if (unlikely(size < sizeof(*resp) - STATUS_VAR_LEN ||
size > sizeof(*resp))) {
printk(KERN_WARNING DRV_NAME "(%s): bad status size\n",
pci_name(hba->pdev));
} else {
size -= sizeof(*resp) - STATUS_VAR_LEN; /* copy size */
if (size)
stex_copy_data(ccb, resp, size);
}
ccb->req = NULL;
ccb->srb_status = resp->srb_status;
ccb->scsi_status = resp->scsi_status;
if (likely(ccb->cmd != NULL)) {
if (hba->cardtype == st_yosemite)
stex_check_cmd(hba, ccb, resp);
if (unlikely(ccb->cmd->cmnd[0] == PASSTHRU_CMD &&
ccb->cmd->cmnd[1] == PASSTHRU_GET_ADAPTER))
stex_controller_info(hba, ccb);
scsi_dma_unmap(ccb->cmd);
stex_scsi_done(ccb);
} else
ccb->req_type = 0; /* internal request finished */
}
update_status:
/* acknowledge the consumed entries back to the controller */
writel(hba->status_head, base + IMR1);
readl(base + IMR1); /* flush */
}
/*
 * Interrupt handler for non-st_yel controllers: read and clear the
 * outbound doorbell, process the status ring, and schedule a deferred
 * reset if the firmware requested one (st_shasta only).
 */
static irqreturn_t stex_intr(int irq, void *__hba)
{
struct st_hba *hba = __hba;
void __iomem *base = hba->mmio_base;
u32 data;
unsigned long flags;
spin_lock_irqsave(hba->host->host_lock, flags);
data = readl(base + ODBL);
/* all-ones typically means the device is gone; 0 means not ours */
if (data && data != 0xffffffff) {
/* clear the interrupt */
writel(data, base + ODBL);
readl(base + ODBL); /* flush */
stex_mu_intr(hba, data);
spin_unlock_irqrestore(hba->host->host_lock, flags);
if (unlikely(data & MU_OUTBOUND_DOORBELL_REQUEST_RESET &&
hba->cardtype == st_shasta))
queue_work(hba->work_q, &hba->reset_work);
return IRQ_HANDLED;
}
spin_unlock_irqrestore(hba->host->host_lock, flags);
return IRQ_NONE;
}
/*
 * Status processing for st_yel controllers.  Completion notifications
 * arrive in a scratch array of 32-bit words (tag in the low bits, flag
 * bits on top); the full status_msg is only consulted when SS_STS_DONE
 * is not set.  Runs with the host lock held.
 */
static void stex_ss_mu_intr(struct st_hba *hba)
{
struct status_msg *resp;
struct st_ccb *ccb;
__le32 *scratch;
unsigned int size;
int count = 0;
u32 value;
u16 tag;
if (unlikely(hba->out_req_cnt <= 0 ||
hba->mu_status == MU_STATE_RESETTING))
return;
/* bounded by sts_count so a misbehaving device cannot spin us forever */
while (count < hba->sts_count) {
scratch = hba->scratch + hba->status_tail;
value = le32_to_cpu(*scratch);
if (unlikely(!(value & SS_STS_NORMAL)))
return; /* slot not (yet) filled by firmware */
resp = hba->status_buffer + hba->status_tail;
*scratch = 0; /* hand the slot back to the firmware */
++count;
++hba->status_tail;
hba->status_tail %= hba->sts_count+1;
tag = (u16)value;
if (unlikely(tag >= hba->host->can_queue)) {
printk(KERN_WARNING DRV_NAME
"(%s): invalid tag\n", pci_name(hba->pdev));
continue;
}
hba->out_req_cnt--;
ccb = &hba->ccb[tag];
if (unlikely(hba->wait_ccb == ccb))
hba->wait_ccb = NULL; /* tell stex_abort() we completed it */
if (unlikely(ccb->req == NULL)) {
printk(KERN_WARNING DRV_NAME
"(%s): lagging req\n", pci_name(hba->pdev));
continue;
}
ccb->req = NULL;
if (likely(value & SS_STS_DONE)) { /* normal case */
ccb->srb_status = SRB_STATUS_SUCCESS;
ccb->scsi_status = SAM_STAT_GOOD;
} else {
/* error path: pull full status from the status ring */
ccb->srb_status = resp->srb_status;
ccb->scsi_status = resp->scsi_status;
size = resp->payload_sz * sizeof(u32);
if (unlikely(size < sizeof(*resp) - STATUS_VAR_LEN ||
size > sizeof(*resp))) {
printk(KERN_WARNING DRV_NAME
"(%s): bad status size\n",
pci_name(hba->pdev));
} else {
size -= sizeof(*resp) - STATUS_VAR_LEN;
if (size)
stex_copy_data(ccb, resp, size);
}
if (likely(ccb->cmd != NULL))
stex_check_cmd(hba, ccb, resp);
}
if (likely(ccb->cmd != NULL)) {
scsi_dma_unmap(ccb->cmd);
stex_scsi_done(ccb);
} else
ccb->req_type = 0; /* internal request finished */
}
}
/*
 * Interrupt handler for st_yel controllers: acknowledge the doorbell,
 * drain the scratch-based status array, and schedule a deferred reset
 * if the firmware asked for one.
 */
static irqreturn_t stex_ss_intr(int irq, void *__hba)
{
struct st_hba *hba = __hba;
void __iomem *base = hba->mmio_base;
u32 data;
unsigned long flags;
spin_lock_irqsave(hba->host->host_lock, flags);
data = readl(base + YI2H_INT);
/* all-ones typically means the device is gone; 0 means not ours */
if (data && data != 0xffffffff) {
/* clear the interrupt */
writel(data, base + YI2H_INT_C);
stex_ss_mu_intr(hba);
spin_unlock_irqrestore(hba->host->host_lock, flags);
if (unlikely(data & SS_I2H_REQUEST_RESET))
queue_work(hba->work_q, &hba->reset_work);
return IRQ_HANDLED;
}
spin_unlock_irqrestore(hba->host->host_lock, flags);
return IRQ_NONE;
}
/*
 * Firmware handshake for non-st_yel controllers: wait for the firmware
 * signature, negotiate the queue depth, publish the DMA ring layout in
 * a handshake frame (temporarily built in the status buffer), and wait
 * for the firmware to acknowledge.  Returns 0 on success, -1 on
 * timeout.  The register write/flush ordering is significant.
 */
static int stex_common_handshake(struct st_hba *hba)
{
void __iomem *base = hba->mmio_base;
struct handshake_frame *h;
dma_addr_t status_phys;
u32 data;
unsigned long before;
if (readl(base + OMR0) != MU_HANDSHAKE_SIGNATURE) {
writel(MU_INBOUND_DOORBELL_HANDSHAKE, base + IDBL);
readl(base + IDBL);
before = jiffies;
while (readl(base + OMR0) != MU_HANDSHAKE_SIGNATURE) {
if (time_after(jiffies, before + MU_MAX_DELAY * HZ)) {
printk(KERN_ERR DRV_NAME
"(%s): no handshake signature\n",
pci_name(hba->pdev));
return -1;
}
rmb();
msleep(1);
}
}
udelay(10);
/* firmware may report a smaller queue depth in the low 16 bits */
data = readl(base + OMR1);
if ((data & 0xffff0000) == MU_HANDSHAKE_SIGNATURE_HALF) {
data &= 0x0000ffff;
if (hba->host->can_queue > data) {
hba->host->can_queue = data;
hba->host->cmd_per_lun = data;
}
}
/* build the handshake frame in the (currently unused) status buffer */
h = (struct handshake_frame *)hba->status_buffer;
h->rb_phy = cpu_to_le64(hba->dma_handle);
h->req_sz = cpu_to_le16(hba->rq_size);
h->req_cnt = cpu_to_le16(hba->rq_count+1);
h->status_sz = cpu_to_le16(sizeof(struct status_msg));
h->status_cnt = cpu_to_le16(hba->sts_count+1);
stex_gettime(&h->hosttime);
h->partner_type = HMU_PARTNER_TYPE;
if (hba->extra_offset) {
h->extra_offset = cpu_to_le32(hba->extra_offset);
h->extra_size = cpu_to_le32(hba->dma_size - hba->extra_offset);
} else
h->extra_offset = h->extra_size = 0;
/* tell the firmware where the handshake frame lives (64-bit address) */
status_phys = hba->dma_handle + (hba->rq_count+1) * hba->rq_size;
writel(status_phys, base + IMR0);
readl(base + IMR0);
writel((status_phys >> 16) >> 16, base + IMR1);
readl(base + IMR1);
writel((status_phys >> 16) >> 16, base + OMR0); /* old fw compatible */
readl(base + OMR0);
writel(MU_INBOUND_DOORBELL_HANDSHAKE, base + IDBL);
readl(base + IDBL); /* flush */
udelay(10);
before = jiffies;
while (readl(base + OMR0) != MU_HANDSHAKE_SIGNATURE) {
if (time_after(jiffies, before + MU_MAX_DELAY * HZ)) {
printk(KERN_ERR DRV_NAME
"(%s): no signature after handshake frame\n",
pci_name(hba->pdev));
return -1;
}
rmb();
msleep(1);
}
/* clear the message-unit registers now that the handshake is done */
writel(0, base + IMR0);
readl(base + IMR0);
writel(0, base + OMR0);
readl(base + OMR0);
writel(0, base + IMR1);
readl(base + IMR1);
writel(0, base + OMR1);
readl(base + OMR1); /* flush */
return 0;
}
/*
 * Firmware handshake for st_yel controllers: wait until the firmware
 * reports itself operational, build a handshake frame (preceded by an
 * st_msg_header) at the start of the coherent DMA region, hand its bus
 * address to the firmware, and poll the scratch area for the
 * acknowledge flag.  Returns 0 on success, -1 on timeout.
 */
static int stex_ss_handshake(struct st_hba *hba)
{
void __iomem *base = hba->mmio_base;
struct st_msg_header *msg_h;
struct handshake_frame *h;
__le32 *scratch;
u32 data, scratch_size;
unsigned long before;
int ret = 0;
before = jiffies;
while ((readl(base + YIOA_STATUS) & SS_MU_OPERATIONAL) == 0) {
if (time_after(jiffies, before + MU_MAX_DELAY * HZ)) {
printk(KERN_ERR DRV_NAME
"(%s): firmware not operational\n",
pci_name(hba->pdev));
return -1;
}
msleep(1);
}
/* the message header sits at the very start of the DMA region */
msg_h = (struct st_msg_header *)hba->dma_mem;
msg_h->handle = cpu_to_le64(hba->dma_handle);
msg_h->flag = SS_HEAD_HANDSHAKE;
h = (struct handshake_frame *)(msg_h + 1);
h->rb_phy = cpu_to_le64(hba->dma_handle);
h->req_sz = cpu_to_le16(hba->rq_size);
h->req_cnt = cpu_to_le16(hba->rq_count+1);
h->status_sz = cpu_to_le16(sizeof(struct status_msg));
h->status_cnt = cpu_to_le16(hba->sts_count+1);
stex_gettime(&h->hosttime);
h->partner_type = HMU_PARTNER_TYPE;
h->extra_offset = h->extra_size = 0;
scratch_size = (hba->sts_count+1)*sizeof(u32);
h->scratch_size = cpu_to_le32(scratch_size);
/* mask bit 2 in YINT_EN while handing over the request address */
data = readl(base + YINT_EN);
data &= ~4;
writel(data, base + YINT_EN);
writel((hba->dma_handle >> 16) >> 16, base + YH2I_REQ_HI);
readl(base + YH2I_REQ_HI);
writel(hba->dma_handle, base + YH2I_REQ);
readl(base + YH2I_REQ); /* flush */
/* firmware signals completion through the first scratch word */
scratch = hba->scratch;
before = jiffies;
while (!(le32_to_cpu(*scratch) & SS_STS_HANDSHAKE)) {
if (time_after(jiffies, before + MU_MAX_DELAY * HZ)) {
printk(KERN_ERR DRV_NAME
"(%s): no signature after handshake frame\n",
pci_name(hba->pdev));
ret = -1;
break;
}
rmb();
msleep(1);
}
memset(scratch, 0, scratch_size);
msg_h->flag = 0;
return ret;
}
/*
 * Run the cardtype-appropriate firmware handshake, then update the
 * message-unit state and ring indices under the host lock.  Waiters
 * blocked on a reset are woken regardless of the outcome.  Returns the
 * handshake result (0 on success).
 */
static int stex_handshake(struct st_hba *hba)
{
	unsigned long irq_flags;
	unsigned int prev_state;
	int ret;

	if (hba->cardtype == st_yel)
		ret = stex_ss_handshake(hba);
	else
		ret = stex_common_handshake(hba);

	spin_lock_irqsave(hba->host->host_lock, irq_flags);
	prev_state = hba->mu_status;
	if (ret) {
		hba->mu_status = MU_STATE_FAILED;
	} else {
		/* fresh firmware session: all rings restart from slot 0 */
		hba->req_head = 0;
		hba->req_tail = 0;
		hba->status_head = 0;
		hba->status_tail = 0;
		hba->out_req_cnt = 0;
		hba->mu_status = MU_STATE_STARTED;
	}
	if (prev_state == MU_STATE_RESETTING)
		wake_up_all(&hba->reset_waitq);
	spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
	return ret;
}
/*
 * SCSI EH abort callback.  The hardware has no real abort primitive:
 * we mark the command as awaited (hba->wait_ccb), then manually poll
 * and process any pending completion interrupt.  If the interrupt path
 * clears wait_ccb the command actually completed (SUCCESS); otherwise
 * we detach the request and report FAILED.
 */
static int stex_abort(struct scsi_cmnd *cmd)
{
struct Scsi_Host *host = cmd->device->host;
struct st_hba *hba = (struct st_hba *)host->hostdata;
u16 tag = cmd->request->tag;
void __iomem *base;
u32 data;
int result = SUCCESS;
unsigned long flags;
printk(KERN_INFO DRV_NAME
"(%s): aborting command\n", pci_name(hba->pdev));
scsi_print_command(cmd);
base = hba->mmio_base;
spin_lock_irqsave(host->host_lock, flags);
/* only proceed if this tag still maps to the same, pending command */
if (tag < host->can_queue &&
hba->ccb[tag].req && hba->ccb[tag].cmd == cmd)
hba->wait_ccb = &hba->ccb[tag];
else
goto out;
/* force-process a pending completion interrupt, if any */
if (hba->cardtype == st_yel) {
data = readl(base + YI2H_INT);
if (data == 0 || data == 0xffffffff)
goto fail_out;
writel(data, base + YI2H_INT_C);
stex_ss_mu_intr(hba);
} else {
data = readl(base + ODBL);
if (data == 0 || data == 0xffffffff)
goto fail_out;
writel(data, base + ODBL);
readl(base + ODBL); /* flush */
stex_mu_intr(hba, data);
}
/* interrupt path clears wait_ccb when it completes our command */
if (hba->wait_ccb == NULL) {
printk(KERN_WARNING DRV_NAME
"(%s): lost interrupt\n", pci_name(hba->pdev));
goto out;
}
fail_out:
scsi_dma_unmap(cmd);
hba->wait_ccb->req = NULL; /* nullify the req's future return */
hba->wait_ccb = NULL;
result = FAILED;
out:
spin_unlock_irqrestore(host->host_lock, flags);
return result;
}
/*
 * Hard-reset an st_shasta controller by toggling the secondary bus
 * reset bit on the upstream bridge.  PCI config space is saved before
 * and restored after, since a bus reset wipes it.
 */
static void stex_hard_reset(struct st_hba *hba)
{
struct pci_bus *bus;
int i;
u16 pci_cmd;
u8 pci_bctl;
/* save all 64 bytes of the legacy config header */
for (i = 0; i < 16; i++)
pci_read_config_dword(hba->pdev, i * 4,
&hba->pdev->saved_config_space[i]);
/* Reset secondary bus. Our controller(MU/ATU) is the only device on
secondary bus. Consult Intel 80331/3 developer's manual for detail */
bus = hba->pdev->bus;
pci_read_config_byte(bus->self, PCI_BRIDGE_CONTROL, &pci_bctl);
pci_bctl |= PCI_BRIDGE_CTL_BUS_RESET;
pci_write_config_byte(bus->self, PCI_BRIDGE_CONTROL, pci_bctl);
/*
* 1 ms may be enough for 8-port controllers. But 16-port controllers
* require more time to finish bus reset. Use 100 ms here for safety
*/
msleep(100);
pci_bctl &= ~PCI_BRIDGE_CTL_BUS_RESET;
pci_write_config_byte(bus->self, PCI_BRIDGE_CONTROL, pci_bctl);
/* wait for the device to respond to config cycles again */
for (i = 0; i < MU_HARD_RESET_WAIT; i++) {
pci_read_config_word(hba->pdev, PCI_COMMAND, &pci_cmd);
if (pci_cmd != 0xffff && (pci_cmd & PCI_COMMAND_MASTER))
break;
msleep(1);
}
/* give the firmware time to come back up before restoring config */
ssleep(5);
for (i = 0; i < 16; i++)
pci_write_config_dword(hba->pdev, i * 4,
hba->pdev->saved_config_space[i]);
}
/*
 * Soft-reset an st_yosemite controller: ring the reset doorbell, then
 * poll until the firmware has returned all outstanding requests (its
 * firmware completes pending requests during reset — see the comment
 * in stex_mu_intr()).  Updates mu_status and wakes reset waiters.
 */
static int stex_yos_reset(struct st_hba *hba)
{
void __iomem *base;
unsigned long flags, before;
int ret = 0;
base = hba->mmio_base;
writel(MU_INBOUND_DOORBELL_RESET, base + IDBL);
readl(base + IDBL); /* flush */
before = jiffies;
while (hba->out_req_cnt > 0) {
if (time_after(jiffies, before + ST_INTERNAL_TIMEOUT * HZ)) {
printk(KERN_WARNING DRV_NAME
"(%s): reset timeout\n", pci_name(hba->pdev));
ret = -1;
break;
}
msleep(1);
}
spin_lock_irqsave(hba->host->host_lock, flags);
if (ret == -1)
hba->mu_status = MU_STATE_FAILED;
else
hba->mu_status = MU_STATE_STARTED;
wake_up_all(&hba->reset_waitq);
spin_unlock_irqrestore(hba->host->host_lock, flags);
return ret;
}
static void stex_ss_reset(struct st_hba *hba)
{
writel(SS_H2I_INT_RESET, hba->mmio_base + YH2I_INT);
readl(hba->mmio_base + YH2I_INT);
ssleep(5);
}
/*
 * Reset the controller.  Serializes concurrent resets via mu_status /
 * reset_waitq: if another reset is already in flight we wait for it and
 * return its outcome.  Otherwise perform the cardtype-specific reset,
 * fail all outstanding commands with DID_RESET, and re-handshake.
 * Returns 0 on success, -1 on failure.
 */
static int stex_do_reset(struct st_hba *hba)
{
struct st_ccb *ccb;
unsigned long flags;
unsigned int mu_status = MU_STATE_RESETTING;
u16 tag;
spin_lock_irqsave(hba->host->host_lock, flags);
if (hba->mu_status == MU_STATE_STARTING) {
spin_unlock_irqrestore(hba->host->host_lock, flags);
printk(KERN_INFO DRV_NAME "(%s): request reset during init\n",
pci_name(hba->pdev));
return 0;
}
/* another reset is in progress: wait until it finishes */
while (hba->mu_status == MU_STATE_RESETTING) {
spin_unlock_irqrestore(hba->host->host_lock, flags);
wait_event_timeout(hba->reset_waitq,
hba->mu_status != MU_STATE_RESETTING,
MU_MAX_DELAY * HZ);
spin_lock_irqsave(hba->host->host_lock, flags);
mu_status = hba->mu_status;
}
/* the loop was entered: report the other reset's outcome */
if (mu_status != MU_STATE_RESETTING) {
spin_unlock_irqrestore(hba->host->host_lock, flags);
return (mu_status == MU_STATE_STARTED) ? 0 : -1;
}
hba->mu_status = MU_STATE_RESETTING;
spin_unlock_irqrestore(hba->host->host_lock, flags);
/* st_yosemite handles its own completion draining and state update */
if (hba->cardtype == st_yosemite)
return stex_yos_reset(hba);
if (hba->cardtype == st_shasta)
stex_hard_reset(hba);
else if (hba->cardtype == st_yel)
stex_ss_reset(hba);
/* fail every command that was outstanding across the reset */
spin_lock_irqsave(hba->host->host_lock, flags);
for (tag = 0; tag < hba->host->can_queue; tag++) {
ccb = &hba->ccb[tag];
if (ccb->req == NULL)
continue;
ccb->req = NULL;
if (ccb->cmd) {
scsi_dma_unmap(ccb->cmd);
ccb->cmd->result = DID_RESET << 16;
ccb->cmd->scsi_done(ccb->cmd);
ccb->cmd = NULL;
}
}
spin_unlock_irqrestore(hba->host->host_lock, flags);
if (stex_handshake(hba) == 0)
return 0;
printk(KERN_WARNING DRV_NAME "(%s): resetting: handshake failed\n",
pci_name(hba->pdev));
return -1;
}
static int stex_reset(struct scsi_cmnd *cmd)
{
struct st_hba *hba;
hba = (struct st_hba *) &cmd->device->host->hostdata[0];
printk(KERN_INFO DRV_NAME
"(%s): resetting host\n", pci_name(hba->pdev));
scsi_print_command(cmd);
return stex_do_reset(hba) ? FAILED : SUCCESS;
}
/* Workqueue handler for firmware-requested resets (queued from IRQ). */
static void stex_reset_work(struct work_struct *work)
{
struct st_hba *hba = container_of(work, struct st_hba, reset_work);
stex_do_reset(hba);
}
/*
 * Report a synthetic BIOS disk geometry: 64 heads / 32 sectors for
 * volumes below 0x200000 sectors (1 GB), 255/63 otherwise; cylinders
 * are derived from the capacity.
 */
static int stex_biosparam(struct scsi_device *sdev,
	struct block_device *bdev, sector_t capacity, int geom[])
{
	int heads;
	int sectors;

	if (capacity < 0x200000) {
		heads = 64;
		sectors = 32;
	} else {
		heads = 255;
		sectors = 63;
	}

	/* capacity becomes the cylinder count */
	sector_div(capacity, heads * sectors);

	geom[0] = heads;
	geom[1] = sectors;
	geom[2] = capacity;

	return 0;
}
/* SCSI host template shared by all supported controller types. */
static struct scsi_host_template driver_template = {
.module = THIS_MODULE,
.name = DRV_NAME,
.proc_name = DRV_NAME,
.bios_param = stex_biosparam,
.queuecommand = stex_queuecommand,
.slave_alloc = stex_slave_alloc,
.slave_configure = stex_slave_config,
.slave_destroy = stex_slave_destroy,
.eh_abort_handler = stex_abort,
.eh_host_reset_handler = stex_reset,
.this_id = -1,
};
static struct pci_device_id stex_pci_tbl[] = {
/* st_shasta */
{ 0x105a, 0x8350, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
st_shasta }, /* SuperTrak EX8350/8300/16350/16300 */
{ 0x105a, 0xc350, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
st_shasta }, /* SuperTrak EX12350 */
{ 0x105a, 0x4302, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
st_shasta }, /* SuperTrak EX4350 */
{ 0x105a, 0xe350, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
st_shasta }, /* SuperTrak EX24350 */
/* st_vsc */
{ 0x105a, 0x7250, PCI_ANY_ID, PCI_ANY_ID, 0, 0, st_vsc },
/* st_yosemite */
{ 0x105a, 0x8650, 0x105a, PCI_ANY_ID, 0, 0, st_yosemite },
/* st_seq */
{ 0x105a, 0x3360, PCI_ANY_ID, PCI_ANY_ID, 0, 0, st_seq },
/* st_yel */
{ 0x105a, 0x8650, 0x1033, PCI_ANY_ID, 0, 0, st_yel },
{ 0x105a, 0x8760, PCI_ANY_ID, PCI_ANY_ID, 0, 0, st_yel },
{ } /* terminate list */
};
static struct st_card_info stex_card_info[] = {
/* st_shasta */
{
.max_id = 17,
.max_lun = 8,
.max_channel = 0,
.rq_count = 32,
.rq_size = 1048,
.sts_count = 32,
.alloc_rq = stex_alloc_req,
.map_sg = stex_map_sg,
.send = stex_send_cmd,
},
/* st_vsc */
{
.max_id = 129,
.max_lun = 1,
.max_channel = 0,
.rq_count = 32,
.rq_size = 1048,
.sts_count = 32,
.alloc_rq = stex_alloc_req,
.map_sg = stex_map_sg,
.send = stex_send_cmd,
},
/* st_yosemite */
{
.max_id = 2,
.max_lun = 256,
.max_channel = 0,
.rq_count = 256,
.rq_size = 1048,
.sts_count = 256,
.alloc_rq = stex_alloc_req,
.map_sg = stex_map_sg,
.send = stex_send_cmd,
},
/* st_seq */
{
.max_id = 129,
.max_lun = 1,
.max_channel = 0,
.rq_count = 32,
.rq_size = 1048,
.sts_count = 32,
.alloc_rq = stex_alloc_req,
.map_sg = stex_map_sg,
.send = stex_send_cmd,
},
/* st_yel */
{
.max_id = 129,
.max_lun = 256,
.max_channel = 3,
.rq_count = 801,
.rq_size = 512,
.sts_count = 801,
.alloc_rq = stex_ss_alloc_req,
.map_sg = stex_ss_map_sg,
.send = stex_ss_send_cmd,
},
};
/*
 * Configure DMA masks: prefer full 64-bit streaming + coherent masks,
 * falling back to 32-bit if either 64-bit mask is refused.  Returns 0
 * on success or the last pci_set_*_dma_mask() error.
 */
static int stex_set_dma_mask(struct pci_dev *pdev)
{
	int err;

	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
	if (err == 0) {
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
		if (err == 0)
			return 0;
	}

	/* 64-bit not usable: fall back to 32-bit addressing */
	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
	if (err)
		return err;
	return pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
}
/*
 * Install the interrupt handler appropriate for the cardtype, using MSI
 * when the "msi" module parameter asks for it.  An MSI setup failure is
 * only logged — we still attach via legacy INTx.  On request_irq()
 * failure any enabled MSI is torn down again.
 */
static int stex_request_irq(struct st_hba *hba)
{
	struct pci_dev *pdev = hba->pdev;
	int err;

	hba->msi_enabled = 0;
	if (msi) {
		err = pci_enable_msi(pdev);
		if (err == 0)
			hba->msi_enabled = 1;
		else
			printk(KERN_ERR DRV_NAME
				"(%s): error %d setting up MSI\n",
				pci_name(pdev), err);
	}

	err = request_irq(pdev->irq,
		(hba->cardtype == st_yel) ? stex_ss_intr : stex_intr,
		IRQF_SHARED, DRV_NAME, hba);
	if (err && hba->msi_enabled)
		pci_disable_msi(pdev);

	return err;
}
/* Release the controller IRQ and tear down MSI if it was enabled. */
static void stex_free_irq(struct st_hba *hba)
{
struct pci_dev *pdev = hba->pdev;
free_irq(pdev->irq, hba);
if (hba->msi_enabled)
pci_disable_msi(pdev);
}
/*
 * PCI probe: enable and map the device, size and allocate one coherent
 * DMA region holding the request ring, (st_yel) scratch array, status
 * ring and copy buffer, set up the IRQ and workqueue, perform the
 * firmware handshake, and register the SCSI host.  Error paths unwind
 * in strict reverse order of acquisition.
 */
static int __devinit
stex_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
struct st_hba *hba;
struct Scsi_Host *host;
const struct st_card_info *ci = NULL;
u32 sts_offset, cp_offset, scratch_offset;
int err;
err = pci_enable_device(pdev);
if (err)
return err;
pci_set_master(pdev);
host = scsi_host_alloc(&driver_template, sizeof(struct st_hba));
if (!host) {
printk(KERN_ERR DRV_NAME "(%s): scsi_host_alloc failed\n",
pci_name(pdev));
err = -ENOMEM;
goto out_disable;
}
hba = (struct st_hba *)host->hostdata;
memset(hba, 0, sizeof(struct st_hba));
err = pci_request_regions(pdev, DRV_NAME);
if (err < 0) {
printk(KERN_ERR DRV_NAME "(%s): request regions failed\n",
pci_name(pdev));
goto out_scsi_host_put;
}
hba->mmio_base = pci_ioremap_bar(pdev, 0);
if ( !hba->mmio_base) {
printk(KERN_ERR DRV_NAME "(%s): memory map failed\n",
pci_name(pdev));
err = -ENOMEM;
goto out_release_regions;
}
err = stex_set_dma_mask(pdev);
if (err) {
printk(KERN_ERR DRV_NAME "(%s): set dma mask failed\n",
pci_name(pdev));
goto out_iounmap;
}
hba->cardtype = (unsigned int) id->driver_data;
ci = &stex_card_info[hba->cardtype];
/*
 * DMA region layout: requests | (st_yel only: scratch words) |
 * status ring | copy buffer | (st_seq / odd st_vsc: extra area).
 */
sts_offset = scratch_offset = (ci->rq_count+1) * ci->rq_size;
if (hba->cardtype == st_yel)
sts_offset += (ci->sts_count+1) * sizeof(u32);
cp_offset = sts_offset + (ci->sts_count+1) * sizeof(struct status_msg);
hba->dma_size = cp_offset + sizeof(struct st_frame);
if (hba->cardtype == st_seq ||
(hba->cardtype == st_vsc && (pdev->subsystem_device & 1))) {
hba->extra_offset = hba->dma_size;
hba->dma_size += ST_ADDITIONAL_MEM;
}
hba->dma_mem = dma_alloc_coherent(&pdev->dev,
hba->dma_size, &hba->dma_handle, GFP_KERNEL);
if (!hba->dma_mem) {
/* Retry minimum coherent mapping for st_seq and st_vsc */
if (hba->cardtype == st_seq ||
(hba->cardtype == st_vsc && (pdev->subsystem_device & 1))) {
printk(KERN_WARNING DRV_NAME
"(%s): allocating min buffer for controller\n",
pci_name(pdev));
hba->dma_size = hba->extra_offset
+ ST_ADDITIONAL_MEM_MIN;
hba->dma_mem = dma_alloc_coherent(&pdev->dev,
hba->dma_size, &hba->dma_handle, GFP_KERNEL);
}
if (!hba->dma_mem) {
err = -ENOMEM;
printk(KERN_ERR DRV_NAME "(%s): dma mem alloc failed\n",
pci_name(pdev));
goto out_iounmap;
}
}
hba->ccb = kcalloc(ci->rq_count, sizeof(struct st_ccb), GFP_KERNEL);
if (!hba->ccb) {
err = -ENOMEM;
printk(KERN_ERR DRV_NAME "(%s): ccb alloc failed\n",
pci_name(pdev));
goto out_pci_free;
}
if (hba->cardtype == st_yel)
hba->scratch = (__le32 *)(hba->dma_mem + scratch_offset);
hba->status_buffer = (struct status_msg *)(hba->dma_mem + sts_offset);
hba->copy_buffer = hba->dma_mem + cp_offset;
/* cache the per-cardtype parameters and operations on the hba */
hba->rq_count = ci->rq_count;
hba->rq_size = ci->rq_size;
hba->sts_count = ci->sts_count;
hba->alloc_rq = ci->alloc_rq;
hba->map_sg = ci->map_sg;
hba->send = ci->send;
hba->mu_status = MU_STATE_STARTING;
if (hba->cardtype == st_yel)
host->sg_tablesize = 38;
else
host->sg_tablesize = 32;
host->can_queue = ci->rq_count;
host->cmd_per_lun = ci->rq_count;
host->max_id = ci->max_id;
host->max_lun = ci->max_lun;
host->max_channel = ci->max_channel;
host->unique_id = host->host_no;
host->max_cmd_len = STEX_CDB_LENGTH;
hba->host = host;
hba->pdev = pdev;
init_waitqueue_head(&hba->reset_waitq);
/* private workqueue used to run firmware-requested resets */
snprintf(hba->work_q_name, sizeof(hba->work_q_name),
"stex_wq_%d", host->host_no);
hba->work_q = create_singlethread_workqueue(hba->work_q_name);
if (!hba->work_q) {
printk(KERN_ERR DRV_NAME "(%s): create workqueue failed\n",
pci_name(pdev));
err = -ENOMEM;
goto out_ccb_free;
}
INIT_WORK(&hba->reset_work, stex_reset_work);
err = stex_request_irq(hba);
if (err) {
printk(KERN_ERR DRV_NAME "(%s): request irq failed\n",
pci_name(pdev));
goto out_free_wq;
}
err = stex_handshake(hba);
if (err)
goto out_free_irq;
err = scsi_init_shared_tag_map(host, host->can_queue);
if (err) {
printk(KERN_ERR DRV_NAME "(%s): init shared queue failed\n",
pci_name(pdev));
goto out_free_irq;
}
pci_set_drvdata(pdev, hba);
err = scsi_add_host(host, &pdev->dev);
if (err) {
printk(KERN_ERR DRV_NAME "(%s): scsi_add_host failed\n",
pci_name(pdev));
goto out_free_irq;
}
scsi_scan_host(host);
return 0;
out_free_irq:
stex_free_irq(hba);
out_free_wq:
destroy_workqueue(hba->work_q);
out_ccb_free:
kfree(hba->ccb);
out_pci_free:
dma_free_coherent(&pdev->dev, hba->dma_size,
hba->dma_mem, hba->dma_handle);
out_iounmap:
iounmap(hba->mmio_base);
out_release_regions:
pci_release_regions(pdev);
out_scsi_host_put:
scsi_host_put(host);
out_disable:
pci_disable_device(pdev);
return err;
}
/*
 * Send a shutdown (st_yosemite/st_yel) or power-saving (others)
 * command to the firmware on tag 0 and poll for its completion.
 * NOTE(review): uses tag 0 unconditionally — presumably safe because
 * this runs at remove/shutdown time when no commands are in flight;
 * confirm against callers.
 */
static void stex_hba_stop(struct st_hba *hba)
{
struct req_msg *req;
struct st_msg_header *msg_h;
unsigned long flags;
unsigned long before;
u16 tag = 0;
spin_lock_irqsave(hba->host->host_lock, flags);
req = hba->alloc_rq(hba);
/* st_yel requests are preceded by an st_msg_header; clear it too */
if (hba->cardtype == st_yel) {
msg_h = (struct st_msg_header *)req - 1;
memset(msg_h, 0, hba->rq_size);
} else
memset(req, 0, hba->rq_size);
if (hba->cardtype == st_yosemite || hba->cardtype == st_yel) {
req->cdb[0] = MGT_CMD;
req->cdb[1] = MGT_CMD_SIGNATURE;
req->cdb[2] = CTLR_CONFIG_CMD;
req->cdb[3] = CTLR_SHUTDOWN;
} else {
req->cdb[0] = CONTROLLER_CMD;
req->cdb[1] = CTLR_POWER_STATE_CHANGE;
req->cdb[2] = CTLR_POWER_SAVING;
}
/* internal request: no scsi_cmnd, completion signaled via req_type */
hba->ccb[tag].cmd = NULL;
hba->ccb[tag].sg_count = 0;
hba->ccb[tag].sense_bufflen = 0;
hba->ccb[tag].sense_buffer = NULL;
hba->ccb[tag].req_type = PASSTHRU_REQ_TYPE;
hba->send(hba, req, tag);
spin_unlock_irqrestore(hba->host->host_lock, flags);
before = jiffies;
/* the interrupt path clears req_type when the firmware answers */
while (hba->ccb[tag].req_type & PASSTHRU_REQ_TYPE) {
if (time_after(jiffies, before + ST_INTERNAL_TIMEOUT * HZ)) {
hba->ccb[tag].req_type = 0;
return;
}
msleep(1);
}
}
/* Release every resource acquired by stex_probe(), in reverse order. */
static void stex_hba_free(struct st_hba *hba)
{
stex_free_irq(hba);
destroy_workqueue(hba->work_q);
iounmap(hba->mmio_base);
pci_release_regions(hba->pdev);
kfree(hba->ccb);
dma_free_coherent(&hba->pdev->dev, hba->dma_size,
hba->dma_mem, hba->dma_handle);
}
/*
 * PCI remove: detach from the SCSI midlayer first so no new commands
 * arrive, then ask the firmware to shut down and free everything.
 */
static void stex_remove(struct pci_dev *pdev)
{
struct st_hba *hba = pci_get_drvdata(pdev);
scsi_remove_host(hba->host);
pci_set_drvdata(pdev, NULL);
stex_hba_stop(hba);
stex_hba_free(hba);
scsi_host_put(hba->host);
pci_disable_device(pdev);
}
/* System shutdown hook: just quiesce the firmware; no teardown needed. */
static void stex_shutdown(struct pci_dev *pdev)
{
struct st_hba *hba = pci_get_drvdata(pdev);
stex_hba_stop(hba);
}
/* Export the ID table for module autoloading and register the driver. */
MODULE_DEVICE_TABLE(pci, stex_pci_tbl);
static struct pci_driver stex_pci_driver = {
.name = DRV_NAME,
.id_table = stex_pci_tbl,
.probe = stex_probe,
.remove = __devexit_p(stex_remove),
.shutdown = stex_shutdown,
};
/* Module init: announce the driver version and register with PCI. */
static int __init stex_init(void)
{
printk(KERN_INFO DRV_NAME
": Promise SuperTrak EX Driver version: %s\n",
ST_DRIVER_VERSION);
return pci_register_driver(&stex_pci_driver);
}
/* Module exit: unregister from the PCI core. */
static void __exit stex_exit(void)
{
pci_unregister_driver(&stex_pci_driver);
}
module_init(stex_init);
module_exit(stex_exit);
| gpl-2.0 |
ashishtanwer/mptcp_android-4.2.2_Nexus4 | arch/mips/pci/ops-msc.c | 9610 | 4072 | /*
* Copyright (C) 1999, 2000, 2004, 2005 MIPS Technologies, Inc.
* All rights reserved.
* Authors: Carsten Langgaard <carstenl@mips.com>
* Maciej W. Rozycki <macro@mips.com>
* Copyright (C) 2005 Ralf Baechle (ralf@linux-mips.org)
*
* This program is free software; you can distribute it and/or modify it
* under the terms of the GNU General Public License (Version 2) as
* published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
*
* MIPS boards specific PCI support.
*
*/
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <asm/mips-boards/msc01_pci.h>
#define PCI_ACCESS_READ 0
#define PCI_ACCESS_WRITE 1
/*
* PCI configuration cycle AD bus definition
*/
/* Type 0 */
#define PCI_CFG_TYPE0_REG_SHF 0
#define PCI_CFG_TYPE0_FUNC_SHF 8
/* Type 1 */
#define PCI_CFG_TYPE1_REG_SHF 0
#define PCI_CFG_TYPE1_FUNC_SHF 8
#define PCI_CFG_TYPE1_DEV_SHF 11
#define PCI_CFG_TYPE1_BUS_SHF 16
/*
 * Issue one 32-bit PCI configuration read or write through the MSC01
 * host controller.  @where must address a 32-bit register (callers
 * handle sub-word access).  Returns 0 on success, -1 if the cycle
 * ended in a master or target abort.
 */
static int msc_pcibios_config_access(unsigned char access_type,
	struct pci_bus *bus, unsigned int devfn, int where, u32 * data)
{
unsigned char busnum = bus->number;
u32 intr;
/* Clear status register bits. */
MSC_WRITE(MSC01_PCI_INTSTAT,
(MSC01_PCI_INTCFG_MA_BIT | MSC01_PCI_INTCFG_TA_BIT));
/* program bus/device/function/register into the config address */
MSC_WRITE(MSC01_PCI_CFGADDR,
((busnum << MSC01_PCI_CFGADDR_BNUM_SHF) |
(PCI_SLOT(devfn) << MSC01_PCI_CFGADDR_DNUM_SHF) |
(PCI_FUNC(devfn) << MSC01_PCI_CFGADDR_FNUM_SHF) |
((where / 4) << MSC01_PCI_CFGADDR_RNUM_SHF)));
/* Perform access */
if (access_type == PCI_ACCESS_WRITE)
MSC_WRITE(MSC01_PCI_CFGDATA, *data);
else
MSC_READ(MSC01_PCI_CFGDATA, *data);
/* Detect Master/Target abort */
MSC_READ(MSC01_PCI_INTSTAT, intr);
if (intr & (MSC01_PCI_INTCFG_MA_BIT | MSC01_PCI_INTCFG_TA_BIT)) {
/* Error occurred */
/* Clear bits */
MSC_WRITE(MSC01_PCI_INTSTAT,
(MSC01_PCI_INTCFG_MA_BIT | MSC01_PCI_INTCFG_TA_BIT));
return -1;
}
return 0;
}
/*
 * We can't address 8 and 16 bit words directly. Instead we have to
 * read/write a 32bit word and mask/modify the data we actually want.
 */
static int msc_pcibios_read(struct pci_bus *bus, unsigned int devfn,
	int where, int size, u32 *val)
{
	u32 data = 0;

	/* reject misaligned 16- and 32-bit accesses */
	if ((size == 2) && (where & 1))
		return PCIBIOS_BAD_REGISTER_NUMBER;
	else if ((size == 4) && (where & 3))
		return PCIBIOS_BAD_REGISTER_NUMBER;

	/*
	 * Return a standard PCIBIOS_* error code on an aborted config
	 * cycle instead of a bare -1: this function's contract (see the
	 * success/invalid-register paths) is the pcibios return-code
	 * convention.
	 */
	if (msc_pcibios_config_access(PCI_ACCESS_READ, bus, devfn, where,
				      &data))
		return PCIBIOS_DEVICE_NOT_FOUND;

	/* extract the requested byte or halfword from the 32-bit word */
	if (size == 1)
		*val = (data >> ((where & 3) << 3)) & 0xff;
	else if (size == 2)
		*val = (data >> ((where & 3) << 3)) & 0xffff;
	else
		*val = data;

	return PCIBIOS_SUCCESSFUL;
}
/*
 * Write @size bytes at config offset @where.  Sub-word writes are done
 * as a read-modify-write of the containing 32-bit register.
 */
static int msc_pcibios_write(struct pci_bus *bus, unsigned int devfn,
	int where, int size, u32 val)
{
	u32 data = 0;

	/* reject misaligned 16- and 32-bit accesses */
	if ((size == 2) && (where & 1))
		return PCIBIOS_BAD_REGISTER_NUMBER;
	else if ((size == 4) && (where & 3))
		return PCIBIOS_BAD_REGISTER_NUMBER;

	if (size == 4)
		data = val;
	else {
		/*
		 * Fetch the current 32-bit word, then splice the new
		 * byte/halfword into place.  Report a standard PCIBIOS_*
		 * error code on an aborted cycle instead of a bare -1,
		 * matching the rest of this function's return values.
		 */
		if (msc_pcibios_config_access(PCI_ACCESS_READ, bus, devfn,
					      where, &data))
			return PCIBIOS_DEVICE_NOT_FOUND;

		if (size == 1)
			data = (data & ~(0xff << ((where & 3) << 3))) |
				(val << ((where & 3) << 3));
		else if (size == 2)
			data = (data & ~(0xffff << ((where & 3) << 3))) |
				(val << ((where & 3) << 3));
	}

	if (msc_pcibios_config_access(PCI_ACCESS_WRITE, bus, devfn, where,
				      &data))
		return PCIBIOS_DEVICE_NOT_FOUND;

	return PCIBIOS_SUCCESSFUL;
}
/* Config-space accessors exported to the MIPS PCI core. */
struct pci_ops msc_pci_ops = {
.read = msc_pcibios_read,
.write = msc_pcibios_write
};
| gpl-2.0 |
glfernando/kernel_m7 | drivers/xen/xen-pciback/conf_space_quirks.c | 9866 | 3132 | /*
* PCI Backend - Handle special overlays for broken devices.
*
* Author: Ryan Wilson <hap9@epoch.ncsc.mil>
* Author: Chris Bookholt <hap10@epoch.ncsc.mil>
*/
#include <linux/kernel.h>
#include <linux/pci.h>
#include "pciback.h"
#include "conf_space.h"
#include "conf_space_quirks.h"
LIST_HEAD(xen_pcibk_quirks);
/*
 * Return @id if it matches @dev, treating PCI_ANY_ID fields as
 * wildcards and applying class_mask to the class comparison;
 * NULL otherwise.
 */
static inline const struct pci_device_id *
match_one_device(const struct pci_device_id *id, const struct pci_dev *dev)
{
	if (id->vendor != PCI_ANY_ID && id->vendor != dev->vendor)
		return NULL;
	if (id->device != PCI_ANY_ID && id->device != dev->device)
		return NULL;
	if (id->subvendor != PCI_ANY_ID &&
	    id->subvendor != dev->subsystem_vendor)
		return NULL;
	if (id->subdevice != PCI_ANY_ID &&
	    id->subdevice != dev->subsystem_device)
		return NULL;
	if ((id->class ^ dev->class) & id->class_mask)
		return NULL;
	return id;
}
/*
 * Find the quirk entry registered for @dev on the global quirk list;
 * returns NULL (after a debug message) if no entry matches.
 */
static struct xen_pcibk_config_quirk *xen_pcibk_find_quirk(struct pci_dev *dev)
{
struct xen_pcibk_config_quirk *tmp_quirk;
list_for_each_entry(tmp_quirk, &xen_pcibk_quirks, quirks_list)
if (match_one_device(&tmp_quirk->devid, dev) != NULL)
goto out;
/* walked the whole list without a match */
tmp_quirk = NULL;
printk(KERN_DEBUG DRV_NAME
": quirk didn't match any device known\n");
out:
return tmp_quirk;
}
/* Append a quirk entry to the global quirk list. */
static inline void register_quirk(struct xen_pcibk_config_quirk *quirk)
{
list_add_tail(&quirk->quirks_list, &xen_pcibk_quirks);
}
/*
 * Return 1 if a config-space field overlay is already registered at
 * register offset @reg for @dev, 0 otherwise.
 */
int xen_pcibk_field_is_dup(struct pci_dev *dev, unsigned int reg)
{
	struct xen_pcibk_dev_data *dev_data = pci_get_drvdata(dev);
	struct config_field_entry *entry;

	list_for_each_entry(entry, &dev_data->config_fields, list) {
		if (OFFSET(entry) == reg)
			return 1;
	}
	return 0;
}
/*
 * Attach the default read/write accessors matching @field->size (1, 2
 * or 4 bytes) and register the field overlay on @dev.  Returns 0 on
 * success or -EINVAL for any other size.
 */
int xen_pcibk_config_quirks_add_field(struct pci_dev *dev, struct config_field
*field)
{
int err = 0;
switch (field->size) {
case 1:
field->u.b.read = xen_pcibk_read_config_byte;
field->u.b.write = xen_pcibk_write_config_byte;
break;
case 2:
field->u.w.read = xen_pcibk_read_config_word;
field->u.w.write = xen_pcibk_write_config_word;
break;
case 4:
field->u.dw.read = xen_pcibk_read_config_dword;
field->u.dw.write = xen_pcibk_write_config_dword;
break;
default:
err = -EINVAL;
goto out;
}
xen_pcibk_config_add_field(dev, field);
out:
return err;
}
/*
 * Allocate and register a quirk entry keyed on @dev's exact IDs (no
 * wildcards, class ignored).  GFP_ATOMIC because this may be called
 * from a non-sleeping context.  Returns 0 or -ENOMEM.
 */
int xen_pcibk_config_quirks_init(struct pci_dev *dev)
{
struct xen_pcibk_config_quirk *quirk;
int ret = 0;
quirk = kzalloc(sizeof(*quirk), GFP_ATOMIC);
if (!quirk) {
ret = -ENOMEM;
goto out;
}
quirk->devid.vendor = dev->vendor;
quirk->devid.device = dev->device;
quirk->devid.subvendor = dev->subsystem_vendor;
quirk->devid.subdevice = dev->subsystem_device;
/* class 0 with mask 0 never excludes a match in match_one_device() */
quirk->devid.class = 0;
quirk->devid.class_mask = 0;
quirk->devid.driver_data = 0UL;
quirk->pdev = dev;
register_quirk(quirk);
out:
return ret;
}
/* Free a dynamically allocated config_field overlay. */
void xen_pcibk_config_field_free(struct config_field *field)
{
kfree(field);
}
int xen_pcibk_config_quirk_release(struct pci_dev *dev)
{
struct xen_pcibk_config_quirk *quirk;
int ret = 0;
quirk = xen_pcibk_find_quirk(dev);
if (!quirk) {
ret = -ENXIO;
goto out;
}
list_del(&quirk->quirks_list);
kfree(quirk);
out:
return ret;
}
| gpl-2.0 |
SharkBa1t/cse524 | arch/powerpc/platforms/85xx/twr_p102x.c | 395 | 3717 | /*
* Copyright 2010-2011, 2013 Freescale Semiconductor, Inc.
*
* Author: Michael Johnston <michael.johnston@freescale.com>
*
* Description:
* TWR-P102x Board Setup
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*/
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/of_platform.h>
#include <asm/pci-bridge.h>
#include <asm/udbg.h>
#include <asm/mpic.h>
#include <asm/qe.h>
#include <asm/qe_ic.h>
#include <asm/fsl_guts.h>
#include <sysdev/fsl_soc.h>
#include <sysdev/fsl_pci.h>
#include "smp.h"
#include "mpc85xx.h"
/*
 * Board interrupt-controller setup: bring up the MPIC and, when the
 * QUICC Engine is configured, cascade the QE interrupt controller
 * into it.
 */
static void __init twr_p1025_pic_init(void)
{
	struct mpic *mpic;
#ifdef CONFIG_QUICC_ENGINE
	struct device_node *np;
#endif

	/* 256 interrupt sources; a NULL node lets mpic_alloc find it in the DT. */
	mpic = mpic_alloc(NULL, 0, MPIC_BIG_ENDIAN |
			MPIC_SINGLE_DEST_CPU,
			0, 256, " OpenPIC  ");

	BUG_ON(mpic == NULL);
	mpic_init(mpic);

#ifdef CONFIG_QUICC_ENGINE
	/* Hook the QE interrupt controller into the MPIC cascade. */
	np = of_find_compatible_node(NULL, NULL, "fsl,qe-ic");
	if (np) {
		qe_ic_init(np, 0, qe_ic_cascade_low_mpic,
				qe_ic_cascade_high_mpic);
		of_node_put(np);
	} else
		pr_err("Could not find qe-ic node\n");
#endif
}
/* ************************************************************************
*
* Setup the architecture
*
*/
/*
 * Board architecture setup: SMP bring-up, PCI assignment and, when the
 * QUICC Engine is configured, pin-mux setup routing the muxed pins to
 * the QE instead of the local bus.
 */
static void __init twr_p1025_setup_arch(void)
{
#ifdef CONFIG_QUICC_ENGINE
	struct device_node *np;
#endif

	if (ppc_md.progress)
		ppc_md.progress("twr_p1025_setup_arch()", 0);

	mpc85xx_smp_init();

	fsl_pci_assign_primary();

#ifdef CONFIG_QUICC_ENGINE
	mpc85xx_qe_init();
	mpc85xx_qe_par_io_init();
#if IS_ENABLED(CONFIG_UCC_GETH) || IS_ENABLED(CONFIG_SERIAL_QE)
	if (machine_is(twr_p1025)) {
		struct ccsr_guts __iomem *guts;

		/* The pin-mux control lives in the global utilities block. */
		np = of_find_compatible_node(NULL, NULL, "fsl,p1021-guts");
		if (np) {
			guts = of_iomap(np, 0);
			if (!guts)
				pr_err("twr_p1025: could not map global utilities register\n");
			else {
				/* P1025 has pins muxed for QE and other functions. To
				 * enable QE UEC mode, we need to set bit QE0 for UCC1
				 * in Eth mode, QE0 and QE3 for UCC5 in Eth mode, QE9
				 * and QE12 for QE MII management signals in PMUXCR
				 * register.
				 * Set QE mux bits in PMUXCR */
				setbits32(&guts->pmuxcr, MPC85xx_PMUXCR_QE(0) |
						MPC85xx_PMUXCR_QE(3) |
						MPC85xx_PMUXCR_QE(9) |
						MPC85xx_PMUXCR_QE(12));
				iounmap(guts);
#if IS_ENABLED(CONFIG_SERIAL_QE)
				/* On P1025TWR board, the UCC7 acted as UART port.
				 * However, The UCC7's CTS pin is low level in default,
				 * it will impact the transmission in full duplex
				 * communication. So disable the Flow control pin PA18.
				 * The UCC7 UART just can use RXD and TXD pins.
				 */
				par_io_config_pin(0, 18, 0, 0, 0, 0);
#endif
				/* Drive PB29 to CPLD low - CPLD will then change
				 * muxing from LBC to QE */
				par_io_config_pin(1, 29, 1, 0, 0, 0);
				par_io_data_set(1, 29, 0);
			}
			of_node_put(np);
		}
	}
#endif
#endif	/* CONFIG_QUICC_ENGINE */

	pr_info("TWR-P1025 board from Freescale Semiconductor\n");
}
machine_arch_initcall(twr_p1025, mpc85xx_common_publish_devices);
/*
 * Machine probe: match against the "fsl,TWR-P1025" compatible string
 * in the flattened device tree root.
 */
static int __init twr_p1025_probe(void)
{
	unsigned long dt_root = of_get_flat_dt_root();

	return of_flat_dt_is_compatible(dt_root, "fsl,TWR-P1025");
}
define_machine(twr_p1025) {
.name = "TWR-P1025",
.probe = twr_p1025_probe,
.setup_arch = twr_p1025_setup_arch,
.init_IRQ = twr_p1025_pic_init,
#ifdef CONFIG_PCI
.pcibios_fixup_bus = fsl_pcibios_fixup_bus,
#endif
.get_irq = mpic_get_irq,
.restart = fsl_rstcr_restart,
.calibrate_decr = generic_calibrate_decr,
.progress = udbg_progress,
};
| gpl-2.0 |
virt2real/v2r_kernel | drivers/staging/hv/RingBuffer.c | 395 | 12336 | /*
*
* Copyright (c) 2009, Microsoft Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program; if not, write to the Free Software Foundation, Inc., 59 Temple
* Place - Suite 330, Boston, MA 02111-1307 USA.
*
* Authors:
* Haiyang Zhang <haiyangz@microsoft.com>
* Hank Janssen <hjanssen@microsoft.com>
*
*/
#include <linux/kernel.h>
#include <linux/mm.h>
#include "osd.h"
#include "logging.h"
#include "RingBuffer.h"
/* #defines */
/* Amount of space to write to */
#define BYTES_AVAIL_TO_WRITE(r, w, z) ((w) >= (r))?((z) - ((w) - (r))):((r) - (w))
/*++
Name:
GetRingBufferAvailBytes()
Description:
Get number of bytes available to read and to write to
for the specified ring buffer
--*/
/*
 * Compute, under a single snapshot of the indices, how many bytes can
 * currently be read from and written to the ring.
 */
static inline void
GetRingBufferAvailBytes(RING_BUFFER_INFO *rbi, u32 *read, u32 *write)
{
	u32 read_loc,write_loc;

	/* Capture the read/write indices before they changed */
	read_loc = rbi->RingBuffer->ReadIndex;
	write_loc = rbi->RingBuffer->WriteIndex;

	/* Free space first; readable bytes are whatever remains. */
	*write = BYTES_AVAIL_TO_WRITE(read_loc, write_loc, rbi->RingDataSize);
	*read = rbi->RingDataSize - *write;
}
/*++
Name:
GetNextWriteLocation()
Description:
Get the next write location for the specified ring buffer
--*/
/* Return the current write offset into the ring's data area. */
static inline u32
GetNextWriteLocation(RING_BUFFER_INFO* RingInfo)
{
	u32 write_index = RingInfo->RingBuffer->WriteIndex;

	/* The index must always lie inside the data area. */
	ASSERT(write_index < RingInfo->RingDataSize);
	return write_index;
}
/*++
Name:
SetNextWriteLocation()
Description:
Set the next write location for the specified ring buffer
--*/
/*
 * Publish the new write offset.  RingBufferWrite() issues mb() before
 * calling this so the data is visible before the index moves.
 */
static inline void
SetNextWriteLocation(RING_BUFFER_INFO* RingInfo, u32 NextWriteLocation)
{
	RingInfo->RingBuffer->WriteIndex = NextWriteLocation;
}
/*++
Name:
GetNextReadLocation()
Description:
Get the next read location for the specified ring buffer
--*/
/* Return the current read offset into the ring's data area. */
static inline u32
GetNextReadLocation(RING_BUFFER_INFO* RingInfo)
{
	u32 read_index = RingInfo->RingBuffer->ReadIndex;

	/* The index must always lie inside the data area. */
	ASSERT(read_index < RingInfo->RingDataSize);
	return read_index;
}
/*++
Name:
GetNextReadLocationWithOffset()
Description:
Get the next read location + offset for the specified ring buffer.
This allows the caller to skip
--*/
/*
 * Return the read offset advanced by @Offset bytes, wrapping around
 * inside the ring's data area; lets callers skip header bytes.
 */
static inline u32
GetNextReadLocationWithOffset(RING_BUFFER_INFO* RingInfo, u32 Offset)
{
	u32 location = RingInfo->RingBuffer->ReadIndex;

	ASSERT(location < RingInfo->RingDataSize);
	location = (location + Offset) % RingInfo->RingDataSize;
	return location;
}
/*++
Name:
SetNextReadLocation()
Description:
Set the next read location for the specified ring buffer
--*/
/*
 * Publish the new read offset.  RingBufferRead() issues mb() before
 * calling this so all data reads complete before the index moves.
 */
static inline void
SetNextReadLocation(RING_BUFFER_INFO* RingInfo, u32 NextReadLocation)
{
	RingInfo->RingBuffer->ReadIndex = NextReadLocation;
}
/*++
Name:
GetRingBuffer()
Description:
Get the start of the ring buffer
--*/
/* Return a pointer to the start of the ring's data area. */
static inline void *
GetRingBuffer(RING_BUFFER_INFO* RingInfo)
{
	return (void *)RingInfo->RingBuffer->Buffer;
}
/*++
Name:
GetRingBufferSize()
Description:
Get the size of the ring buffer
--*/
/* Return the size of the data area (excludes the control header). */
static inline u32
GetRingBufferSize(RING_BUFFER_INFO* RingInfo)
{
	return RingInfo->RingDataSize;
}
/*++
Name:
GetRingBufferIndices()
Description:
Get the read and write indices as u64 of the specified ring buffer
--*/
/*
 * Pack the ring's write and read indices into one u64: write index in
 * the high 32 bits, read index in the low 32 bits.
 *
 * Fix: the original used the logical-OR operator (||), which collapses
 * the whole expression to 0 or 1; bitwise OR (|) is required to combine
 * the two fields.
 */
static inline u64
GetRingBufferIndices(RING_BUFFER_INFO* RingInfo)
{
	return ((u64)RingInfo->RingBuffer->WriteIndex << 32) |
		RingInfo->RingBuffer->ReadIndex;
}
/*++
Name:
DumpRingInfo()
Description:
Dump out to console the ring buffer info
--*/
/* Debug helper: log the ring's pointers, free space and indices. */
void DumpRingInfo(RING_BUFFER_INFO *RingInfo, char *Prefix)
{
	u32 bytesAvailToWrite;
	u32 bytesAvailToRead;

	GetRingBufferAvailBytes(RingInfo, &bytesAvailToRead, &bytesAvailToWrite);

	DPRINT(VMBUS, DEBUG_RING_LVL, "%s <<ringinfo %p buffer %p avail write %u avail read %u read idx %u write idx %u>>",
		Prefix,
		RingInfo,
		RingInfo->RingBuffer->Buffer,
		bytesAvailToWrite,
		bytesAvailToRead,
		RingInfo->RingBuffer->ReadIndex,
		RingInfo->RingBuffer->WriteIndex);
}
/* Internal routines */
static u32
CopyToRingBuffer(
RING_BUFFER_INFO *RingInfo,
u32 StartWriteOffset,
void * Src,
u32 SrcLen);
static u32
CopyFromRingBuffer(
RING_BUFFER_INFO *RingInfo,
void * Dest,
u32 DestLen,
u32 StartReadOffset);
/*++
Name:
RingBufferGetDebugInfo()
Description:
Get various debug metrics for the specified ring buffer
--*/
/*
 * Fill @DebugInfo with the ring's current free space, indices and
 * interrupt mask.  A ring that was never initialized is left untouched.
 */
void RingBufferGetDebugInfo(RING_BUFFER_INFO *RingInfo,
			    RING_BUFFER_DEBUG_INFO *DebugInfo)
{
	u32 avail_write;
	u32 avail_read;

	if (!RingInfo->RingBuffer)
		return;

	GetRingBufferAvailBytes(RingInfo, &avail_read, &avail_write);

	DebugInfo->BytesAvailToRead = avail_read;
	DebugInfo->BytesAvailToWrite = avail_write;
	DebugInfo->CurrentReadIndex = RingInfo->RingBuffer->ReadIndex;
	DebugInfo->CurrentWriteIndex = RingInfo->RingBuffer->WriteIndex;
	DebugInfo->CurrentInterruptMask = RingInfo->RingBuffer->InterruptMask;
}
/*++
Name:
GetRingBufferInterruptMask()
Description:
Get the interrupt mask for the specified ring buffer
--*/
/* Return the ring's current interrupt mask. */
u32 GetRingBufferInterruptMask(RING_BUFFER_INFO *rbi)
{
	return rbi->RingBuffer->InterruptMask;
}
/*++
Name:
RingBufferInit()
Description:
Initialize the ring buffer
--*/
/*
 * Initialize @RingInfo over the caller-supplied @Buffer of @BufferLen
 * bytes.  The first page of the buffer holds the RING_BUFFER control
 * header; the remainder is the data area.  Always returns 0.
 */
int RingBufferInit(RING_BUFFER_INFO *RingInfo, void *Buffer, u32 BufferLen)
{
	/* The control header is expected to occupy exactly one page. */
	ASSERT(sizeof(RING_BUFFER) == PAGE_SIZE);

	memset(RingInfo, 0, sizeof(RING_BUFFER_INFO));

	RingInfo->RingBuffer = (RING_BUFFER *)Buffer;
	RingInfo->RingBuffer->ReadIndex = 0;
	RingInfo->RingBuffer->WriteIndex = 0;

	RingInfo->RingSize = BufferLen;
	RingInfo->RingDataSize = BufferLen - sizeof(RING_BUFFER);

	spin_lock_init(&RingInfo->ring_lock);

	return 0;
}
/*++
Name:
RingBufferCleanup()
Description:
Cleanup the ring buffer
--*/
/*
 * Intentionally empty: the buffer handed to RingBufferInit() is
 * presumably owned and freed by the caller — confirm with callers.
 */
void RingBufferCleanup(RING_BUFFER_INFO* RingInfo)
{
}
/*++
Name:
RingBufferWrite()
Description:
Write to the ring buffer
--*/
/*
 * Copy the scatterlist payload into the outbound ring, followed by a
 * trailing u64 holding the previous write/read indices, then publish
 * the new write index.  Runs under the ring spinlock with interrupts
 * disabled.  Returns 0 on success, -1 if the ring lacks space.
 */
int RingBufferWrite(RING_BUFFER_INFO *OutRingInfo,
		    struct scatterlist *sglist, u32 sgcount)
{
	int i=0;
	u32 byteAvailToWrite;
	u32 byteAvailToRead;
	u32 totalBytesToWrite=0;

	struct scatterlist *sg;
	volatile u32 nextWriteLocation;
	u64 prevIndices=0;
	unsigned long flags;

	DPRINT_ENTER(VMBUS);

	/* Total payload plus the trailing u64 index marker. */
	for_each_sg(sglist, sg, sgcount, i)
	{
		totalBytesToWrite += sg->length;
	}

	totalBytesToWrite += sizeof(u64);

	spin_lock_irqsave(&OutRingInfo->ring_lock, flags);

	GetRingBufferAvailBytes(OutRingInfo, &byteAvailToRead, &byteAvailToWrite);

	DPRINT_DBG(VMBUS, "Writing %u bytes...", totalBytesToWrite);

	/* DumpRingInfo(OutRingInfo, "BEFORE "); */

	/* If there is only room for the packet, assume it is full. Otherwise, the next time around, we think the ring buffer */
	/* is empty since the read index == write index */
	if (byteAvailToWrite <= totalBytesToWrite)
	{
		DPRINT_DBG(VMBUS, "No more space left on outbound ring buffer (needed %u, avail %u)", totalBytesToWrite, byteAvailToWrite);

		spin_unlock_irqrestore(&OutRingInfo->ring_lock, flags);
		DPRINT_EXIT(VMBUS);
		return -1;
	}

	/* Write to the ring buffer */
	nextWriteLocation = GetNextWriteLocation(OutRingInfo);

	for_each_sg(sglist, sg, sgcount, i)
	{
		nextWriteLocation = CopyToRingBuffer(OutRingInfo,
						     nextWriteLocation,
						     sg_virt(sg),
						     sg->length);
	}

	/* Set previous packet start */
	prevIndices = GetRingBufferIndices(OutRingInfo);

	nextWriteLocation = CopyToRingBuffer(OutRingInfo,
					     nextWriteLocation,
					     &prevIndices,
					     sizeof(u64));

	/* Make sure we flush all writes before updating the writeIndex */
	mb();

	/* Now, update the write location */
	SetNextWriteLocation(OutRingInfo, nextWriteLocation);

	/* DumpRingInfo(OutRingInfo, "AFTER "); */

	spin_unlock_irqrestore(&OutRingInfo->ring_lock, flags);

	DPRINT_EXIT(VMBUS);

	return 0;
}
/*++
Name:
RingBufferPeek()
Description:
Read without advancing the read index
--*/
/*
 * Copy @BufferLen bytes from the ring into @Buffer without advancing
 * the read index.  Returns 0 on success, -1 if fewer than @BufferLen
 * bytes are available.
 */
int RingBufferPeek(RING_BUFFER_INFO *InRingInfo, void *Buffer, u32 BufferLen)
{
	u32 bytesAvailToWrite;
	u32 bytesAvailToRead;
	u32 nextReadLocation=0;
	unsigned long flags;

	spin_lock_irqsave(&InRingInfo->ring_lock, flags);

	GetRingBufferAvailBytes(InRingInfo, &bytesAvailToRead, &bytesAvailToWrite);

	/* Make sure there is something to read */
	if (bytesAvailToRead < BufferLen )
	{
		/* DPRINT_DBG(VMBUS, "got callback but not enough to read <avail to read %d read size %d>!!", bytesAvailToRead, BufferLen); */

		spin_unlock_irqrestore(&InRingInfo->ring_lock, flags);
		return -1;
	}

	/* Convert to byte offset */
	nextReadLocation = GetNextReadLocation(InRingInfo);

	nextReadLocation = CopyFromRingBuffer(InRingInfo,
					      Buffer,
					      BufferLen,
					      nextReadLocation);

	spin_unlock_irqrestore(&InRingInfo->ring_lock, flags);

	return 0;
}
/*++
Name:
RingBufferRead()
Description:
Read and advance the read index
--*/
/*
 * Copy @BufferLen bytes (skipping @Offset header bytes) from the ring
 * into @Buffer, consume the trailing u64 index marker, and advance the
 * read index past both.  Returns 0 on success, -1 if not enough data.
 */
int RingBufferRead(RING_BUFFER_INFO *InRingInfo, void *Buffer,
		   u32 BufferLen, u32 Offset)
{
	u32 bytesAvailToWrite;
	u32 bytesAvailToRead;
	u32 nextReadLocation=0;
	u64 prevIndices=0;
	unsigned long flags;

	ASSERT(BufferLen > 0);

	spin_lock_irqsave(&InRingInfo->ring_lock, flags);

	GetRingBufferAvailBytes(InRingInfo, &bytesAvailToRead, &bytesAvailToWrite);

	DPRINT_DBG(VMBUS, "Reading %u bytes...", BufferLen);

	/* DumpRingInfo(InRingInfo, "BEFORE "); */

	/* Make sure there is something to read */
	if (bytesAvailToRead < BufferLen )
	{
		DPRINT_DBG(VMBUS, "got callback but not enough to read <avail to read %d read size %d>!!", bytesAvailToRead, BufferLen);

		spin_unlock_irqrestore(&InRingInfo->ring_lock, flags);
		return -1;
	}

	nextReadLocation = GetNextReadLocationWithOffset(InRingInfo, Offset);

	nextReadLocation = CopyFromRingBuffer(InRingInfo,
					      Buffer,
					      BufferLen,
					      nextReadLocation);

	/* Discard the u64 index marker written by RingBufferWrite(). */
	nextReadLocation = CopyFromRingBuffer(InRingInfo,
					      &prevIndices,
					      sizeof(u64),
					      nextReadLocation);

	/* Make sure all reads are done before we update the read index since */
	/* the writer may start writing to the read area once the read index is updated */
	mb();

	/* Update the read index */
	SetNextReadLocation(InRingInfo, nextReadLocation);

	/* DumpRingInfo(InRingInfo, "AFTER "); */

	spin_unlock_irqrestore(&InRingInfo->ring_lock, flags);

	return 0;
}
/*++
Name:
CopyToRingBuffer()
Description:
Helper routine to copy from source to ring buffer.
Assume there is enough room. Handles wrap-around in dest case only!!
--*/
/*
 * Copy @SrcLen bytes from @Src into the ring starting at
 * @StartWriteOffset, splitting the copy when it crosses the end of the
 * ring.  Assumes the caller verified there is room.  Returns the write
 * offset just past the copied data.
 */
static u32
CopyToRingBuffer(
	RING_BUFFER_INFO	*RingInfo,
	u32			StartWriteOffset,
	void			*Src,
	u32			SrcLen)
{
	void *ring = GetRingBuffer(RingInfo);
	u32 ring_size = GetRingBufferSize(RingInfo);
	u32 head_room = ring_size - StartWriteOffset;

	if (SrcLen > head_room) {
		/* The copy straddles the end of the ring: split it. */
		DPRINT_DBG(VMBUS, "wrap-around detected!");

		memcpy(ring + StartWriteOffset, Src, head_room);
		memcpy(ring, Src + head_room, SrcLen - head_room);
	} else {
		memcpy(ring + StartWriteOffset, Src, SrcLen);
	}

	return (StartWriteOffset + SrcLen) % ring_size;
}
/*++
Name:
CopyFromRingBuffer()
Description:
Helper routine to copy to source from ring buffer.
Assume there is enough room. Handles wrap-around in src case only!!
--*/
/*
 * Copy @DestLen bytes out of the ring into @Dest starting at
 * @StartReadOffset, splitting the copy when the source wraps past the
 * end of the ring.  Assumes the caller verified enough data exists.
 * Returns the read offset just past the copied data.
 */
static u32
CopyFromRingBuffer(
	RING_BUFFER_INFO	*RingInfo,
	void			*Dest,
	u32			DestLen,
	u32			StartReadOffset)
{
	void *ring = GetRingBuffer(RingInfo);
	u32 ring_size = GetRingBufferSize(RingInfo);
	u32 head_room = ring_size - StartReadOffset;

	if (DestLen > head_room) {
		/* The source region straddles the end of the ring. */
		DPRINT_DBG(VMBUS, "src wrap-around detected!");

		memcpy(Dest, ring + StartReadOffset, head_room);
		memcpy(Dest + head_room, ring, DestLen - head_room);
	} else {
		memcpy(Dest, ring + StartReadOffset, DestLen);
	}

	return (StartReadOffset + DestLen) % ring_size;
}
/* eof */
| gpl-2.0 |
faux123/mainline-crack | drivers/gpio/gpio-max7301.c | 651 | 2705 | /*
* Copyright (C) 2006 Juergen Beisert, Pengutronix
* Copyright (C) 2008 Guennadi Liakhovetski, Pengutronix
* Copyright (C) 2009 Wolfram Sang, Pengutronix
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* Check max730x.c for further details.
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/spi/spi.h>
#include <linux/spi/max7301.h>
/* A write to the MAX7301 means one message with one transfer */
static int max7301_spi_write(struct device *dev, unsigned int reg,
				unsigned int val)
{
	struct spi_device *spi = to_spi_device(dev);
	/* 16-bit frame: 7-bit register address in the high byte, data low. */
	u16 word = ((reg & 0x7F) << 8) | (val & 0xFF);

	/* NOTE(review): word is sent in CPU byte order; the 16-bit
	 * bits_per_word setting in probe presumably makes this correct —
	 * confirm on big-endian hosts. */
	return spi_write(spi, (const u8 *)&word, sizeof(word));
}
/* A read from the MAX7301 means two transfers; here, one message each */
static int max7301_spi_read(struct device *dev, unsigned int reg)
{
	int ret;
	u16 word;
	struct spi_device *spi = to_spi_device(dev);

	/* First transfer: issue the read command (0x80 flags a read). */
	word = 0x8000 | (reg << 8);
	ret = spi_write(spi, (const u8 *)&word, sizeof(word));
	if (ret)
		return ret;
	/*
	 * This relies on the fact, that a transfer with NULL tx_buf shifts out
	 * zero bytes (=NOOP for MAX7301)
	 */
	ret = spi_read(spi, (u8 *)&word, sizeof(word));
	if (ret)
		return ret;
	/* The register value is returned in the low byte. */
	return word & 0xff;
}
static int max7301_probe(struct spi_device *spi)
{
struct max7301 *ts;
int ret;
/* bits_per_word cannot be configured in platform data */
spi->bits_per_word = 16;
ret = spi_setup(spi);
if (ret < 0)
return ret;
ts = devm_kzalloc(&spi->dev, sizeof(struct max7301), GFP_KERNEL);
if (!ts)
return -ENOMEM;
ts->read = max7301_spi_read;
ts->write = max7301_spi_write;
ts->dev = &spi->dev;
ret = __max730x_probe(ts);
return ret;
}
/* SPI remove: delegate teardown to the shared max730x core. */
static int max7301_remove(struct spi_device *spi)
{
	return __max730x_remove(&spi->dev);
}
static const struct spi_device_id max7301_id[] = {
{ "max7301", 0 },
{ }
};
MODULE_DEVICE_TABLE(spi, max7301_id);
static struct spi_driver max7301_driver = {
.driver = {
.name = "max7301",
},
.probe = max7301_probe,
.remove = max7301_remove,
.id_table = max7301_id,
};
/* Module init: register the SPI driver. */
static int __init max7301_init(void)
{
	return spi_register_driver(&max7301_driver);
}
/* register after spi postcore initcall and before
* subsys initcalls that may rely on these GPIOs
*/
subsys_initcall(max7301_init);
/* Module exit: unregister the SPI driver. */
static void __exit max7301_exit(void)
{
	spi_unregister_driver(&max7301_driver);
}
module_exit(max7301_exit);
MODULE_AUTHOR("Juergen Beisert, Wolfram Sang");
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("MAX7301 GPIO-Expander");
| gpl-2.0 |
SlimRoms/kernel_samsung_tuna | drivers/staging/tm6000/tm6000-dvb.c | 2699 | 10732 | /*
* tm6000-dvb.c - dvb-t support for TM5600/TM6000/TM6010 USB video capture devices
*
* Copyright (C) 2007 Michel Ludwig <michel.ludwig@gmail.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation version 2
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/usb.h>
#include "tm6000.h"
#include "tm6000-regs.h"
#include "zl10353.h"
#include <media/tuner.h>
#include "tuner-xc2028.h"
#include "xc5000.h"
MODULE_DESCRIPTION("DVB driver extension module for tm5600/6000/6010 based TV cards");
MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
MODULE_LICENSE("GPL");
MODULE_SUPPORTED_DEVICE("{{Trident, tm5600},"
"{{Trident, tm6000},"
"{{Trident, tm6010}");
static int debug;
module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, "enable debug message");
/*
 * Log a human-readable description of a USB transfer error, either for
 * a whole URB (packet < 0) or for one isoc/bulk packet within it.
 */
static inline void print_err_status(struct tm6000_core *dev,
				    int packet, int status)
{
	char *errmsg;

	switch (status) {
	case -ENOENT:
		errmsg = "unlinked synchronuously";
		break;
	case -ECONNRESET:
		errmsg = "unlinked asynchronuously";
		break;
	case -ENOSR:
		errmsg = "Buffer error (overrun)";
		break;
	case -EPIPE:
		errmsg = "Stalled (device not responding)";
		break;
	case -EOVERFLOW:
		errmsg = "Babble (bad cable?)";
		break;
	case -EPROTO:
		errmsg = "Bit-stuff error (bad cable?)";
		break;
	case -EILSEQ:
		errmsg = "CRC/Timeout (could be anything)";
		break;
	case -ETIME:
		errmsg = "Device does not respond";
		break;
	default:
		errmsg = "Unknown";
		break;
	}

	if (packet < 0)
		dprintk(dev, 1, "URB status %d [%s].\n",
			status, errmsg);
	else
		dprintk(dev, 1, "URB packet %d, status %d [%s].\n",
			packet, status, errmsg);
}
/*
 * Bulk URB completion handler: feed the received TS data into the
 * software demux and resubmit the URB while streaming is active.
 */
static void tm6000_urb_received(struct urb *urb)
{
	int ret;
	struct tm6000_core *dev = urb->context;

	if (urb->status != 0)
		print_err_status(dev, 0, urb->status);
	else if (urb->actual_length > 0)
		dvb_dmx_swfilter(&dev->dvb->demux, urb->transfer_buffer,
				 urb->actual_length);

	/* Keep the pipeline running as long as a feed is open. */
	if (dev->dvb->streams > 0) {
		ret = usb_submit_urb(urb, GFP_ATOMIC);
		if (ret < 0) {
			/* Resubmit failed: drop the URB and its buffer. */
			printk(KERN_ERR "tm6000: error %s\n", __FUNCTION__);
			kfree(urb->transfer_buffer);
			usb_free_urb(urb);
		}
	}
}
/*
 * Allocate the bulk URB and its transfer buffer, reset the bulk-in
 * pipe and submit the URB to start the DVB transport stream.
 * Returns 0 on success or a negative errno.
 *
 * Fixes over the original:
 *  - the URB and its buffer were leaked when usb_clear_halt() failed;
 *  - dvb->bulk_urb was left pointing at freed memory in the error
 *    paths, risking a double free in tm6000_stop_stream() /
 *    unregister_dvb().
 */
int tm6000_start_stream(struct tm6000_core *dev)
{
	int ret;
	unsigned int pipe, size;
	struct tm6000_dvb *dvb = dev->dvb;

	printk(KERN_INFO "tm6000: got start stream request %s\n", __func__);

	if (dev->mode != TM6000_MODE_DIGITAL) {
		tm6000_init_digital_mode(dev);
		dev->mode = TM6000_MODE_DIGITAL;
	}

	dvb->bulk_urb = usb_alloc_urb(0, GFP_KERNEL);
	if (dvb->bulk_urb == NULL) {
		printk(KERN_ERR "tm6000: couldn't allocate urb\n");
		return -ENOMEM;
	}

	pipe = usb_rcvbulkpipe(dev->udev, dev->bulk_in.endp->desc.bEndpointAddress
			       & USB_ENDPOINT_NUMBER_MASK);

	size = usb_maxpacket(dev->udev, pipe, usb_pipeout(pipe));
	size = size * 15; /* 512 x 8 or 12 or 15 */

	dvb->bulk_urb->transfer_buffer = kzalloc(size, GFP_KERNEL);
	if (dvb->bulk_urb->transfer_buffer == NULL) {
		usb_free_urb(dvb->bulk_urb);
		dvb->bulk_urb = NULL;
		printk(KERN_ERR "tm6000: couldn't allocate transfer buffer!\n");
		return -ENOMEM;
	}

	usb_fill_bulk_urb(dvb->bulk_urb, dev->udev, pipe,
			  dvb->bulk_urb->transfer_buffer,
			  size,
			  tm6000_urb_received, dev);

	ret = usb_clear_halt(dev->udev, pipe);
	if (ret < 0) {
		printk(KERN_ERR "tm6000: error %i in %s during pipe reset\n",
		       ret, __func__);
		/* Don't leak the URB and buffer allocated above. */
		kfree(dvb->bulk_urb->transfer_buffer);
		usb_free_urb(dvb->bulk_urb);
		dvb->bulk_urb = NULL;
		return ret;
	} else
		printk(KERN_ERR "tm6000: pipe resetted\n");

	ret = usb_submit_urb(dvb->bulk_urb, GFP_KERNEL);
	if (ret) {
		printk(KERN_ERR "tm6000: submit of urb failed (error=%i)\n",
		       ret);
		kfree(dvb->bulk_urb->transfer_buffer);
		usb_free_urb(dvb->bulk_urb);
		dvb->bulk_urb = NULL;
		return ret;
	}

	return 0;
}
/*
 * Stop the transport stream: kill the in-flight bulk URB, then free
 * its transfer buffer and the URB itself.
 */
void tm6000_stop_stream(struct tm6000_core *dev)
{
	struct tm6000_dvb *dvb = dev->dvb;

	if (dvb->bulk_urb) {
		printk(KERN_INFO "urb killing\n");
		/* usb_kill_urb() waits for the completion handler to finish. */
		usb_kill_urb(dvb->bulk_urb);
		printk(KERN_INFO "urb buffer free\n");
		kfree(dvb->bulk_urb->transfer_buffer);
		usb_free_urb(dvb->bulk_urb);
		dvb->bulk_urb = NULL;
	}
}
/*
 * Demux start_feed callback: keep a reference count of open feeds and
 * start the USB stream when the first one opens.  Always returns 0.
 */
int tm6000_start_feed(struct dvb_demux_feed *feed)
{
	struct dvb_demux *demux = feed->demux;
	struct tm6000_core *dev = demux->priv;
	struct tm6000_dvb *dvb = dev->dvb;

	printk(KERN_INFO "tm6000: got start feed request %s\n", __FUNCTION__);

	mutex_lock(&dvb->mutex);
	/* First feed starts the hardware stream; later feeds just count. */
	if (dvb->streams++ == 0)
		tm6000_start_stream(dev);
	mutex_unlock(&dvb->mutex);

	return 0;
}
/*
 * Demux stop_feed callback: drop one feed reference and stop the USB
 * stream when the last feed closes.  Always returns 0.
 */
int tm6000_stop_feed(struct dvb_demux_feed *feed)
{
	struct dvb_demux *demux = feed->demux;
	struct tm6000_core *dev = demux->priv;
	struct tm6000_dvb *dvb = dev->dvb;

	printk(KERN_INFO "tm6000: got stop feed request %s\n", __FUNCTION__);

	mutex_lock(&dvb->mutex);

	printk(KERN_INFO "stream %#x\n", dvb->streams);
	/* NOTE(review): no underflow guard; assumes the demux core always
	 * pairs stop_feed with a prior start_feed — confirm. */
	--(dvb->streams);

	if (dvb->streams == 0) {
		printk(KERN_INFO "stop stream\n");
		tm6000_stop_stream(dev);
		/* mutex_destroy(&tm6000_dev->streaming_mutex); */
	}
	mutex_unlock(&dvb->mutex);
	/* mutex_destroy(&tm6000_dev->streaming_mutex); */

	return 0;
}
int tm6000_dvb_attach_frontend(struct tm6000_core *dev)
{
struct tm6000_dvb *dvb = dev->dvb;
if (dev->caps.has_zl10353) {
struct zl10353_config config = {
.demod_address = dev->demod_addr,
.no_tuner = 1,
.parallel_ts = 1,
.if2 = 45700,
.disable_i2c_gate_ctrl = 1,
};
dvb->frontend = dvb_attach(zl10353_attach, &config,
&dev->i2c_adap);
} else {
printk(KERN_ERR "tm6000: no frontend defined for the device!\n");
return -1;
}
return (!dvb->frontend) ? -1 : 0;
}
DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr);
int register_dvb(struct tm6000_core *dev)
{
int ret = -1;
struct tm6000_dvb *dvb = dev->dvb;
mutex_init(&dvb->mutex);
dvb->streams = 0;
/* attach the frontend */
ret = tm6000_dvb_attach_frontend(dev);
if (ret < 0) {
printk(KERN_ERR "tm6000: couldn't attach the frontend!\n");
goto err;
}
ret = dvb_register_adapter(&dvb->adapter, "Trident TVMaster 6000 DVB-T",
THIS_MODULE, &dev->udev->dev, adapter_nr);
dvb->adapter.priv = dev;
if (dvb->frontend) {
switch (dev->tuner_type) {
case TUNER_XC2028: {
struct xc2028_config cfg = {
.i2c_adap = &dev->i2c_adap,
.i2c_addr = dev->tuner_addr,
};
dvb->frontend->callback = tm6000_tuner_callback;
ret = dvb_register_frontend(&dvb->adapter, dvb->frontend);
if (ret < 0) {
printk(KERN_ERR
"tm6000: couldn't register frontend\n");
goto adapter_err;
}
if (!dvb_attach(xc2028_attach, dvb->frontend, &cfg)) {
printk(KERN_ERR "tm6000: couldn't register "
"frontend (xc3028)\n");
ret = -EINVAL;
goto frontend_err;
}
printk(KERN_INFO "tm6000: XC2028/3028 asked to be "
"attached to frontend!\n");
break;
}
case TUNER_XC5000: {
struct xc5000_config cfg = {
.i2c_address = dev->tuner_addr,
};
dvb->frontend->callback = tm6000_xc5000_callback;
ret = dvb_register_frontend(&dvb->adapter, dvb->frontend);
if (ret < 0) {
printk(KERN_ERR
"tm6000: couldn't register frontend\n");
goto adapter_err;
}
if (!dvb_attach(xc5000_attach, dvb->frontend, &dev->i2c_adap, &cfg)) {
printk(KERN_ERR "tm6000: couldn't register "
"frontend (xc5000)\n");
ret = -EINVAL;
goto frontend_err;
}
printk(KERN_INFO "tm6000: XC5000 asked to be "
"attached to frontend!\n");
break;
}
}
} else
printk(KERN_ERR "tm6000: no frontend found\n");
dvb->demux.dmx.capabilities = DMX_TS_FILTERING | DMX_SECTION_FILTERING
| DMX_MEMORY_BASED_FILTERING;
dvb->demux.priv = dev;
dvb->demux.filternum = 8;
dvb->demux.feednum = 8;
dvb->demux.start_feed = tm6000_start_feed;
dvb->demux.stop_feed = tm6000_stop_feed;
dvb->demux.write_to_decoder = NULL;
ret = dvb_dmx_init(&dvb->demux);
if (ret < 0) {
printk("tm6000: dvb_dmx_init failed (errno = %d)\n", ret);
goto frontend_err;
}
dvb->dmxdev.filternum = dev->dvb->demux.filternum;
dvb->dmxdev.demux = &dev->dvb->demux.dmx;
dvb->dmxdev.capabilities = 0;
ret = dvb_dmxdev_init(&dvb->dmxdev, &dvb->adapter);
if (ret < 0) {
printk("tm6000: dvb_dmxdev_init failed (errno = %d)\n", ret);
goto dvb_dmx_err;
}
return 0;
dvb_dmx_err:
dvb_dmx_release(&dvb->demux);
frontend_err:
if (dvb->frontend) {
dvb_frontend_detach(dvb->frontend);
dvb_unregister_frontend(dvb->frontend);
}
adapter_err:
dvb_unregister_adapter(&dvb->adapter);
err:
return ret;
}
/*
 * Tear down everything set up by register_dvb().
 *
 * Fix over the original: a registered frontend must be unregistered
 * with dvb_unregister_frontend() *before* dvb_frontend_detach(); the
 * original performed the calls in the opposite order.
 */
void unregister_dvb(struct tm6000_core *dev)
{
	struct tm6000_dvb *dvb = dev->dvb;

	if (dvb->bulk_urb != NULL) {
		struct urb *bulk_urb = dvb->bulk_urb;

		kfree(bulk_urb->transfer_buffer);
		bulk_urb->transfer_buffer = NULL;
		usb_unlink_urb(bulk_urb);
		usb_free_urb(bulk_urb);
	}

	if (dvb->frontend) {
		dvb_unregister_frontend(dvb->frontend);
		dvb_frontend_detach(dvb->frontend);
	}

	dvb_dmxdev_release(&dvb->dmxdev);
	dvb_dmx_release(&dvb->demux);
	dvb_unregister_adapter(&dvb->adapter);
	mutex_destroy(&dvb->mutex);
}
/*
 * Extension init hook: allocate the DVB state and register the DVB
 * stack for devices that advertise DVB support.  A register_dvb()
 * failure is deliberately not fatal for the device as a whole.
 */
static int dvb_init(struct tm6000_core *dev)
{
	struct tm6000_dvb *dvb;
	int rc;

	if (!dev || !dev->caps.has_dvb)
		return 0;

	dvb = kzalloc(sizeof(struct tm6000_dvb), GFP_KERNEL);
	if (!dvb) {
		printk(KERN_INFO "Cannot allocate memory\n");
		return -ENOMEM;
	}

	dev->dvb = dvb;
	rc = register_dvb(dev);
	if (rc < 0) {
		kfree(dvb);
		dev->dvb = NULL;
	}

	return 0;
}
/*
 * Extension teardown hook: unregister the DVB stack and free the DVB
 * state if it was ever set up.  Always returns 0.
 */
static int dvb_fini(struct tm6000_core *dev)
{
	if (!dev || !dev->caps.has_dvb)
		return 0;

	if (dev->dvb) {
		unregister_dvb(dev);
		kfree(dev->dvb);
		dev->dvb = NULL;
	}

	return 0;
}
static struct tm6000_ops dvb_ops = {
.type = TM6000_DVB,
.name = "TM6000 dvb Extension",
.init = dvb_init,
.fini = dvb_fini,
};
/* Module init: hook this extension into the tm6000 core. */
static int __init tm6000_dvb_register(void)
{
	return tm6000_register_extension(&dvb_ops);
}
/* Module exit: unhook this extension from the tm6000 core. */
static void __exit tm6000_dvb_unregister(void)
{
	tm6000_unregister_extension(&dvb_ops);
}
module_init(tm6000_dvb_register);
module_exit(tm6000_dvb_unregister);
| gpl-2.0 |
fedya/android_kernel_msm8974 | security/smack/smack_lsm.c | 3979 | 89666 | /*
* Simplified MAC Kernel (smack) security module
*
* This file contains the smack hook function implementations.
*
* Authors:
* Casey Schaufler <casey@schaufler-ca.com>
* Jarkko Sakkinen <jarkko.sakkinen@intel.com>
*
* Copyright (C) 2007 Casey Schaufler <casey@schaufler-ca.com>
* Copyright (C) 2009 Hewlett-Packard Development Company, L.P.
* Paul Moore <paul@paul-moore.com>
* Copyright (C) 2010 Nokia Corporation
* Copyright (C) 2011 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2,
* as published by the Free Software Foundation.
*/
#include <linux/xattr.h>
#include <linux/pagemap.h>
#include <linux/mount.h>
#include <linux/stat.h>
#include <linux/kd.h>
#include <asm/ioctls.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/slab.h>
#include <linux/mutex.h>
#include <linux/pipe_fs_i.h>
#include <net/netlabel.h>
#include <net/cipso_ipv4.h>
#include <linux/audit.h>
#include <linux/magic.h>
#include <linux/dcache.h>
#include <linux/personality.h>
#include <linux/msg.h>
#include <linux/shm.h>
#include <linux/binfmts.h>
#include "smack.h"
#define task_security(task) (task_cred_xxx((task), security))
#define TRANS_TRUE "TRUE"
#define TRANS_TRUE_SIZE 4
/**
* smk_fetch - Fetch the smack label from a file.
* @ip: a pointer to the inode
* @dp: a pointer to the dentry
*
* Returns a pointer to the master list entry for the Smack label
* or NULL if there was no label to fetch.
*/
static char *smk_fetch(const char *name, struct inode *ip, struct dentry *dp)
{
	char buffer[SMK_LABELLEN];
	int len;

	/* Filesystems without xattr support cannot carry a label. */
	if (ip->i_op->getxattr == NULL)
		return NULL;

	len = ip->i_op->getxattr(dp, name, buffer, SMK_LABELLEN);
	if (len < 0)
		return NULL;

	/* Interns the label and returns the canonical master-list copy. */
	return smk_import(buffer, len);
}
/**
* new_inode_smack - allocate an inode security blob
* @smack: a pointer to the Smack label to use in the blob
*
* Returns the new blob or NULL if there's no memory available
*/
struct inode_smack *new_inode_smack(char *smack)
{
	struct inode_smack *blob;

	blob = kzalloc(sizeof(*blob), GFP_KERNEL);
	if (blob == NULL)
		return NULL;

	blob->smk_inode = smack;
	blob->smk_flags = 0;	/* kzalloc zeroes, kept for clarity */
	mutex_init(&blob->smk_lock);

	return blob;
}
/**
* new_task_smack - allocate a task security blob
* @smack: a pointer to the Smack label to use in the blob
*
* Returns the new blob or NULL if there's no memory available
*/
static struct task_smack *new_task_smack(char *task, char *forked, gfp_t gfp)
{
	struct task_smack *blob;

	blob = kzalloc(sizeof(*blob), gfp);
	if (blob == NULL)
		return NULL;

	blob->smk_task = task;
	blob->smk_forked = forked;
	INIT_LIST_HEAD(&blob->smk_rules);
	mutex_init(&blob->smk_rules_lock);

	return blob;
}
/**
* smk_copy_rules - copy a rule set
* @nhead - new rules header pointer
* @ohead - old rules header pointer
*
* Returns 0 on success, -ENOMEM on error
*/
static int smk_copy_rules(struct list_head *nhead, struct list_head *ohead,
				gfp_t gfp)
{
	struct smack_rule *orp;

	INIT_LIST_HEAD(nhead);

	list_for_each_entry_rcu(orp, ohead, list) {
		struct smack_rule *nrp = kzalloc(sizeof(*nrp), gfp);

		/* A partially copied list is left for the caller to free. */
		if (nrp == NULL)
			return -ENOMEM;
		*nrp = *orp;
		list_add_rcu(&nrp->list, nhead);
	}
	return 0;
}
/*
* LSM hooks.
* We he, that is fun!
*/
/**
* smack_ptrace_access_check - Smack approval on PTRACE_ATTACH
* @ctp: child task pointer
* @mode: ptrace attachment mode
*
* Returns 0 if access is OK, an error code otherwise
*
* Do the capability checks, and require read and write.
*/
static int smack_ptrace_access_check(struct task_struct *ctp, unsigned int mode)
{
	int rc;
	struct smk_audit_info ad;
	char *tsp;

	/* Capability-based check runs first and can veto outright. */
	rc = cap_ptrace_access_check(ctp, mode);
	if (rc != 0)
		return rc;

	/* Require read AND write access to the child's label. */
	tsp = smk_of_task(task_security(ctp));
	smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_TASK);
	smk_ad_setfield_u_tsk(&ad, ctp);

	rc = smk_curacc(tsp, MAY_READWRITE, &ad);
	return rc;
}
/**
* smack_ptrace_traceme - Smack approval on PTRACE_TRACEME
* @ptp: parent task pointer
*
* Returns 0 if access is OK, an error code otherwise
*
* Do the capability checks, and require read and write.
*/
static int smack_ptrace_traceme(struct task_struct *ptp)
{
	int rc;
	struct smk_audit_info ad;
	char *tsp;

	/* Capability-based check runs first and can veto outright. */
	rc = cap_ptrace_traceme(ptp);
	if (rc != 0)
		return rc;

	/* Require read AND write access to the would-be tracer's label. */
	tsp = smk_of_task(task_security(ptp));
	smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_TASK);
	smk_ad_setfield_u_tsk(&ad, ptp);

	rc = smk_curacc(tsp, MAY_READWRITE, &ad);
	return rc;
}
/**
 * smack_syslog - Smack approval on syslog
 * @typefrom_file: message type (unused in the decision)
 *
 * Require that the task has the floor label, unless it holds
 * CAP_MAC_OVERRIDE.
 *
 * Returns 0 on success, error code otherwise.
 */
static int smack_syslog(int typefrom_file)
{
	int rc = 0;
	char *sp = smk_of_current();

	/* Privilege overrides the label requirement. */
	if (capable(CAP_MAC_OVERRIDE))
		return 0;

	/* Label pointers come from a single master list, so compare by pointer. */
	if (sp != smack_known_floor.smk_known)
		rc = -EACCES;
	return rc;
}
/*
* Superblock Hooks.
*/
/**
 * smack_sb_alloc_security - allocate a superblock blob
 * @sb: the superblock getting the blob
 *
 * Allocates the blob and fills in conservative defaults: floor for
 * the root, default and floor labels, hat for the hat label.
 *
 * Returns 0 on success or -ENOMEM on error.
 */
static int smack_sb_alloc_security(struct super_block *sb)
{
	struct superblock_smack *sbsp = kzalloc(sizeof(*sbsp), GFP_KERNEL);

	if (sbsp == NULL)
		return -ENOMEM;

	spin_lock_init(&sbsp->smk_sblock);
	sbsp->smk_initialized = 0;
	sbsp->smk_root = smack_known_floor.smk_known;
	sbsp->smk_default = smack_known_floor.smk_known;
	sbsp->smk_floor = smack_known_floor.smk_known;
	sbsp->smk_hat = smack_known_hat.smk_known;
	sb->s_security = sbsp;

	return 0;
}
/**
 * smack_sb_free_security - free a superblock blob
 * @sb: the superblock holding the blob
 *
 * Releases the blob and clears the superblock's pointer to it.
 */
static void smack_sb_free_security(struct super_block *sb)
{
	struct superblock_smack *sbsp = sb->s_security;

	sb->s_security = NULL;
	kfree(sbsp);
}
/**
 * smack_sb_copy_data - copy mount options data for processing
 * @orig: where to start
 * @smackopts: mount options string
 *
 * Returns 0 on success or -ENOMEM on error.
 *
 * Copy the Smack specific mount options out of the mount
 * options list into @smackopts; the remaining options are
 * written back over @orig.
 */
static int smack_sb_copy_data(char *orig, char *smackopts)
{
	char *cp, *commap, *otheropts, *dp;

	/* Scratch page for the non-Smack options. */
	otheropts = (char *)get_zeroed_page(GFP_KERNEL);
	if (otheropts == NULL)
		return -ENOMEM;

	/* Walk the comma separated list one option at a time. */
	for (cp = orig, commap = orig; commap != NULL; cp = commap + 1) {
		/*
		 * strstr(cp, X) == cp is a prefix test: a Smack option
		 * goes to smackopts, anything else is set aside.
		 */
		if (strstr(cp, SMK_FSDEFAULT) == cp)
			dp = smackopts;
		else if (strstr(cp, SMK_FSFLOOR) == cp)
			dp = smackopts;
		else if (strstr(cp, SMK_FSHAT) == cp)
			dp = smackopts;
		else if (strstr(cp, SMK_FSROOT) == cp)
			dp = smackopts;
		else
			dp = otheropts;

		/* Terminate this option in place so strcat copies just it. */
		commap = strchr(cp, ',');
		if (commap != NULL)
			*commap = '\0';

		/* Re-join the destination list with commas. */
		if (*dp != '\0')
			strcat(dp, ",");
		strcat(dp, cp);
	}

	/* Hand the non-Smack options back to the caller in place. */
	strcpy(orig, otheropts);
	free_page((unsigned long)otheropts);
	return 0;
}
/**
 * smack_sb_kern_mount - Smack specific mount processing
 * @sb: the file system superblock
 * @flags: the mount flags
 * @data: the smack mount options
 *
 * Parses the smackfsdef/smackfsfloor/smackfshat/smackfsroot options
 * and labels the root inode. Only the first mount of a superblock
 * does this work.
 *
 * Returns 0 on success, an error code on failure
 */
static int smack_sb_kern_mount(struct super_block *sb, int flags, void *data)
{
	struct dentry *root = sb->s_root;
	struct inode *inode = root->d_inode;
	struct superblock_smack *sp = sb->s_security;
	struct inode_smack *isp;
	char *op;
	char *commap;
	char *nsp;

	/* Initialize the blob exactly once per superblock. */
	spin_lock(&sp->smk_sblock);
	if (sp->smk_initialized != 0) {
		spin_unlock(&sp->smk_sblock);
		return 0;
	}
	sp->smk_initialized = 1;
	spin_unlock(&sp->smk_sblock);

	/* Split the option string on commas and import each label. */
	for (op = data; op != NULL; op = commap) {
		commap = strchr(op, ',');
		if (commap != NULL)
			*commap++ = '\0';

		/*
		 * An unimportable label (smk_import returns NULL) silently
		 * leaves the default from smack_sb_alloc_security in place.
		 */
		if (strncmp(op, SMK_FSHAT, strlen(SMK_FSHAT)) == 0) {
			op += strlen(SMK_FSHAT);
			nsp = smk_import(op, 0);
			if (nsp != NULL)
				sp->smk_hat = nsp;
		} else if (strncmp(op, SMK_FSFLOOR, strlen(SMK_FSFLOOR)) == 0) {
			op += strlen(SMK_FSFLOOR);
			nsp = smk_import(op, 0);
			if (nsp != NULL)
				sp->smk_floor = nsp;
		} else if (strncmp(op, SMK_FSDEFAULT,
				   strlen(SMK_FSDEFAULT)) == 0) {
			op += strlen(SMK_FSDEFAULT);
			nsp = smk_import(op, 0);
			if (nsp != NULL)
				sp->smk_default = nsp;
		} else if (strncmp(op, SMK_FSROOT, strlen(SMK_FSROOT)) == 0) {
			op += strlen(SMK_FSROOT);
			nsp = smk_import(op, 0);
			if (nsp != NULL)
				sp->smk_root = nsp;
		}
	}

	/*
	 * Initialize the root inode.
	 */
	isp = inode->i_security;
	if (isp == NULL)
		inode->i_security = new_inode_smack(sp->smk_root);
	else
		isp->smk_inode = sp->smk_root;

	return 0;
}
/**
 * smack_sb_statfs - Smack check on statfs
 * @dentry: identifies the file system in question
 *
 * Returns 0 if current can read the floor of the filesystem,
 * and error code otherwise
 */
static int smack_sb_statfs(struct dentry *dentry)
{
	struct superblock_smack *sbp = dentry->d_sb->s_security;
	struct smk_audit_info ad;

	smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_DENTRY);
	smk_ad_setfield_u_fs_path_dentry(&ad, dentry);

	/* Reading filesystem statistics requires read on the floor label. */
	return smk_curacc(sbp->smk_floor, MAY_READ, &ad);
}
/**
 * smack_sb_mount - Smack check for mounting
 * @dev_name: unused
 * @path: mount point
 * @type: unused
 * @flags: unused
 * @data: unused
 *
 * Returns 0 if current can write the floor of the filesystem
 * being mounted on, an error code otherwise.
 */
static int smack_sb_mount(char *dev_name, struct path *path,
			  char *type, unsigned long flags, void *data)
{
	/* The check is against the filesystem containing the mount point. */
	struct superblock_smack *sbp = path->dentry->d_sb->s_security;
	struct smk_audit_info ad;

	smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_PATH);
	smk_ad_setfield_u_fs_path(&ad, *path);

	return smk_curacc(sbp->smk_floor, MAY_WRITE, &ad);
}
/**
 * smack_sb_umount - Smack check for unmounting
 * @mnt: file system to unmount
 * @flags: unused
 *
 * Returns 0 if current can write the floor of the filesystem
 * being unmounted, an error code otherwise.
 */
static int smack_sb_umount(struct vfsmount *mnt, int flags)
{
	struct superblock_smack *sbp;
	struct smk_audit_info ad;
	struct path path;

	/* Build a path from the mount so audit records the location. */
	path.dentry = mnt->mnt_root;
	path.mnt = mnt;

	smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_PATH);
	smk_ad_setfield_u_fs_path(&ad, path);

	sbp = path.dentry->d_sb->s_security;
	return smk_curacc(sbp->smk_floor, MAY_WRITE, &ad);
}
/*
* BPRM hooks
*/
/**
 * smack_bprm_set_creds - set creds for exec
 * @bprm: the exec information
 *
 * Applies the SMACK64EXEC label of the executable, if any, to the
 * new credentials. Refused for unsafe (e.g. traced) execs.
 *
 * Returns 0 on success, an error code otherwise.
 */
static int smack_bprm_set_creds(struct linux_binprm *bprm)
{
	struct inode *inode = bprm->file->f_path.dentry->d_inode;
	struct task_smack *bsp = bprm->cred->security;
	struct inode_smack *isp;
	int rc;

	rc = cap_bprm_set_creds(bprm);
	if (rc != 0)
		return rc;

	/* Only the first call per exec does the work. */
	if (bprm->cred_prepared)
		return 0;

	/* No SMACK64EXEC label, or no change: nothing to do. */
	isp = inode->i_security;
	if (isp->smk_task == NULL || isp->smk_task == bsp->smk_task)
		return 0;

	/* Don't allow a label transition under an unsafe exec. */
	if (bprm->unsafe)
		return -EPERM;

	bsp->smk_task = isp->smk_task;
	/* A label change is treated like a setid exec. */
	bprm->per_clear |= PER_CLEAR_ON_SETID;

	return 0;
}
/**
 * smack_bprm_committing_creds - Prepare to install the new credentials
 * from bprm.
 *
 * @bprm: binprm for exec
 *
 * If the exec changed the Smack label, clear the parent-death signal
 * so the parent cannot signal across the label transition.
 */
static void smack_bprm_committing_creds(struct linux_binprm *bprm)
{
	struct task_smack *bsp = bprm->cred->security;

	/* smk_task differing from smk_forked means the exec changed labels. */
	if (bsp->smk_task != bsp->smk_forked)
		current->pdeath_signal = 0;
}
/**
 * smack_bprm_secureexec - Return the decision to use secureexec.
 * @bprm: binprm for exec
 *
 * A label transition on exec (smk_task differing from smk_forked)
 * forces secure exec, as does a positive capability decision.
 *
 * Returns 1 for secureexec, 0 otherwise, or the capability error.
 */
static int smack_bprm_secureexec(struct linux_binprm *bprm)
{
	struct task_smack *tsp = current_security();
	int rc = cap_bprm_secureexec(bprm);

	if (rc != 0)
		return rc;
	if (tsp->smk_task != tsp->smk_forked)
		return 1;
	return 0;
}
/*
* Inode hooks
*/
/**
 * smack_inode_alloc_security - allocate an inode blob
 * @inode: the inode in need of a blob
 *
 * The new inode is labeled with the current task's label.
 *
 * Returns 0 if it gets a blob, -ENOMEM otherwise
 */
static int smack_inode_alloc_security(struct inode *inode)
{
	inode->i_security = new_inode_smack(smk_of_current());

	return inode->i_security == NULL ? -ENOMEM : 0;
}
/**
 * smack_inode_free_security - free an inode blob
 * @inode: the inode with a blob
 *
 * Releases the blob and clears the pointer in the inode.
 */
static void smack_inode_free_security(struct inode *inode)
{
	struct inode_smack *isp = inode->i_security;

	inode->i_security = NULL;
	kfree(isp);
}
/**
 * smack_inode_init_security - copy out the smack from an inode
 * @inode: the newly created inode
 * @dir: containing directory
 * @qstr: unused
 * @name: where to put the attribute name
 * @value: where to put the attribute value
 * @len: where to put the length of the attribute
 *
 * Returns 0 if it all works out, -ENOMEM if there's no memory
 */
static int smack_inode_init_security(struct inode *inode, struct inode *dir,
				     const struct qstr *qstr, char **name,
				     void **value, size_t *len)
{
	struct smack_known *skp;
	char *csp = smk_of_current();
	char *isp = smk_of_inode(inode);
	char *dsp = smk_of_inode(dir);
	int may;

	if (name) {
		*name = kstrdup(XATTR_SMACK_SUFFIX, GFP_KERNEL);
		if (*name == NULL)
			return -ENOMEM;
	}

	if (value) {
		/* Look up the current->directory access rule under RCU. */
		skp = smk_find_entry(csp);
		rcu_read_lock();
		may = smk_access_entry(csp, dsp, &skp->smk_rules);
		rcu_read_unlock();

		/*
		 * If the access rule allows transmutation and
		 * the directory requests transmutation then
		 * by all means transmute.
		 */
		if (may > 0 && ((may & MAY_TRANSMUTE) != 0) &&
		    smk_inode_transmutable(dir))
			isp = dsp;

		*value = kstrdup(isp, GFP_KERNEL);
		if (*value == NULL)
			return -ENOMEM;
	}

	/* Note: after transmutation isp may be the directory's label. */
	if (len)
		*len = strlen(isp) + 1;

	return 0;
}
/**
 * smack_inode_link - Smack check on link
 * @old_dentry: the existing object
 * @dir: unused
 * @new_dentry: the new object
 *
 * Requires write on the existing object, and on the target
 * object too if the link would replace one.
 *
 * Returns 0 if access is permitted, an error code otherwise
 */
static int smack_inode_link(struct dentry *old_dentry, struct inode *dir,
			    struct dentry *new_dentry)
{
	char *isp;
	struct smk_audit_info ad;
	int rc;

	smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_DENTRY);
	smk_ad_setfield_u_fs_path_dentry(&ad, old_dentry);

	isp = smk_of_inode(old_dentry->d_inode);
	rc = smk_curacc(isp, MAY_WRITE, &ad);

	/* A non-NULL new_dentry inode means an object would be replaced. */
	if (rc == 0 && new_dentry->d_inode != NULL) {
		isp = smk_of_inode(new_dentry->d_inode);
		smk_ad_setfield_u_fs_path_dentry(&ad, new_dentry);
		rc = smk_curacc(isp, MAY_WRITE, &ad);
	}

	return rc;
}
/**
 * smack_inode_unlink - Smack check on inode deletion
 * @dir: containing directory object
 * @dentry: file to unlink
 *
 * Returns 0 if current can write the containing directory
 * and the object, error code otherwise
 */
static int smack_inode_unlink(struct inode *dir, struct dentry *dentry)
{
	struct inode *ip = dentry->d_inode;
	struct smk_audit_info ad;
	int rc;

	smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_DENTRY);
	smk_ad_setfield_u_fs_path_dentry(&ad, dentry);

	/*
	 * You need write access to the thing you're unlinking
	 */
	rc = smk_curacc(smk_of_inode(ip), MAY_WRITE, &ad);
	if (rc == 0) {
		/*
		 * You also need write access to the containing directory
		 */
		/* Re-point the audit record at the directory. */
		smk_ad_setfield_u_fs_path_dentry(&ad, NULL);
		smk_ad_setfield_u_fs_inode(&ad, dir);
		rc = smk_curacc(smk_of_inode(dir), MAY_WRITE, &ad);
	}

	return rc;
}
/**
 * smack_inode_rmdir - Smack check on directory deletion
 * @dir: containing directory object
 * @dentry: directory to unlink
 *
 * Returns 0 if current can write the containing directory
 * and the directory, error code otherwise
 */
static int smack_inode_rmdir(struct inode *dir, struct dentry *dentry)
{
	struct smk_audit_info ad;
	int rc;

	smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_DENTRY);
	smk_ad_setfield_u_fs_path_dentry(&ad, dentry);

	/*
	 * You need write access to the thing you're removing
	 */
	rc = smk_curacc(smk_of_inode(dentry->d_inode), MAY_WRITE, &ad);
	if (rc == 0) {
		/*
		 * You also need write access to the containing directory
		 */
		/* Re-point the audit record at the directory. */
		smk_ad_setfield_u_fs_path_dentry(&ad, NULL);
		smk_ad_setfield_u_fs_inode(&ad, dir);
		rc = smk_curacc(smk_of_inode(dir), MAY_WRITE, &ad);
	}

	return rc;
}
/**
 * smack_inode_rename - Smack check on rename
 * @old_inode: containing directory of the old entry (unused in the check)
 * @old_dentry: the object being renamed
 * @new_inode: containing directory of the new entry (unused in the check)
 * @new_dentry: the target, checked if it would replace an object
 *
 * Read and write access is required on both the old and
 * new objects.
 *
 * Returns 0 if access is permitted, an error code otherwise
 */
static int smack_inode_rename(struct inode *old_inode,
			      struct dentry *old_dentry,
			      struct inode *new_inode,
			      struct dentry *new_dentry)
{
	int rc;
	char *isp;
	struct smk_audit_info ad;

	smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_DENTRY);
	smk_ad_setfield_u_fs_path_dentry(&ad, old_dentry);

	isp = smk_of_inode(old_dentry->d_inode);
	rc = smk_curacc(isp, MAY_READWRITE, &ad);

	/* A non-NULL target inode means the rename would replace it. */
	if (rc == 0 && new_dentry->d_inode != NULL) {
		isp = smk_of_inode(new_dentry->d_inode);
		smk_ad_setfield_u_fs_path_dentry(&ad, new_dentry);
		rc = smk_curacc(isp, MAY_READWRITE, &ad);
	}

	return rc;
}
/**
 * smack_inode_permission - Smack version of permission()
 * @inode: the inode in question
 * @mask: the access requested
 *
 * This is the important Smack hook.
 *
 * Returns 0 if access is permitted, -EACCES otherwise
 */
static int smack_inode_permission(struct inode *inode, int mask)
{
	struct smk_audit_info ad;
	int no_block = mask & MAY_NOT_BLOCK;

	mask &= (MAY_READ|MAY_WRITE|MAY_EXEC|MAY_APPEND);

	/*
	 * No permission to check. Existence test. Yup, it's there.
	 */
	if (mask == 0)
		return 0;

	/* May be droppable after audit */
	/* In RCU-walk mode we cannot block for auditing; retry in ref-walk. */
	if (no_block)
		return -ECHILD;

	smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_INODE);
	smk_ad_setfield_u_fs_inode(&ad, inode);
	return smk_curacc(smk_of_inode(inode), mask, &ad);
}
/**
 * smack_inode_setattr - Smack check for setting attributes
 * @dentry: the object
 * @iattr: for the force flag
 *
 * Returns 0 if access is permitted, an error code otherwise
 */
static int smack_inode_setattr(struct dentry *dentry, struct iattr *iattr)
{
	struct smk_audit_info ad;

	/*
	 * Need to allow for clearing the setuid bit.
	 */
	/* ATTR_FORCE is a kernel-internal request, not a user action. */
	if (iattr->ia_valid & ATTR_FORCE)
		return 0;

	smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_DENTRY);
	smk_ad_setfield_u_fs_path_dentry(&ad, dentry);

	return smk_curacc(smk_of_inode(dentry->d_inode), MAY_WRITE, &ad);
}
/**
 * smack_inode_getattr - Smack check for getting attributes
 * @mnt: vfsmount, used only for the audit record
 * @dentry: the object
 *
 * Returns 0 if access is permitted, an error code otherwise
 */
static int smack_inode_getattr(struct vfsmount *mnt, struct dentry *dentry)
{
	struct smk_audit_info ad;
	struct path path;

	/* Assemble a path so audit can report the full location. */
	path.dentry = dentry;
	path.mnt = mnt;

	smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_PATH);
	smk_ad_setfield_u_fs_path(&ad, path);
	return smk_curacc(smk_of_inode(dentry->d_inode), MAY_READ, &ad);
}
/**
 * smack_inode_setxattr - Smack check for setting xattrs
 * @dentry: the object
 * @name: name of the attribute
 * @value: proposed attribute value, validated for Smack attributes
 * @size: size of @value
 * @flags: unused
 *
 * This protects the Smack attribute explicitly: setting any of the
 * Smack xattrs requires CAP_MAC_ADMIN and a valid label value.
 *
 * Returns 0 if access is permitted, an error code otherwise
 */
static int smack_inode_setxattr(struct dentry *dentry, const char *name,
				const void *value, size_t size, int flags)
{
	struct smk_audit_info ad;
	int rc = 0;

	if (strcmp(name, XATTR_NAME_SMACK) == 0 ||
	    strcmp(name, XATTR_NAME_SMACKIPIN) == 0 ||
	    strcmp(name, XATTR_NAME_SMACKIPOUT) == 0 ||
	    strcmp(name, XATTR_NAME_SMACKEXEC) == 0 ||
	    strcmp(name, XATTR_NAME_SMACKMMAP) == 0) {
		if (!capable(CAP_MAC_ADMIN))
			rc = -EPERM;
		/*
		 * check label validity here so import wont fail on
		 * post_setxattr
		 */
		/* Note: an invalid value turns a -EPERM into -EINVAL. */
		if (size == 0 || size >= SMK_LABELLEN ||
		    smk_import(value, size) == NULL)
			rc = -EINVAL;
	} else if (strcmp(name, XATTR_NAME_SMACKTRANSMUTE) == 0) {
		if (!capable(CAP_MAC_ADMIN))
			rc = -EPERM;
		/* The transmute attribute only accepts the literal "TRUE". */
		if (size != TRANS_TRUE_SIZE ||
		    strncmp(value, TRANS_TRUE, TRANS_TRUE_SIZE) != 0)
			rc = -EINVAL;
	} else
		rc = cap_inode_setxattr(dentry, name, value, size, flags);

	smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_DENTRY);
	smk_ad_setfield_u_fs_path_dentry(&ad, dentry);

	/* Beyond the attribute rules, writing any xattr needs write access. */
	if (rc == 0)
		rc = smk_curacc(smk_of_inode(dentry->d_inode), MAY_WRITE, &ad);

	return rc;
}
/**
 * smack_inode_post_setxattr - Apply the Smack update approved above
 * @dentry: object
 * @name: attribute name
 * @value: attribute value
 * @size: attribute size
 * @flags: unused
 *
 * Set the pointer in the inode blob to the entry found
 * in the master label list. smack_inode_setxattr() already
 * validated the value; a failed import here falls back to
 * the invalid label rather than leaving a stale pointer.
 */
static void smack_inode_post_setxattr(struct dentry *dentry, const char *name,
				      const void *value, size_t size, int flags)
{
	char *nsp;
	struct inode_smack *isp = dentry->d_inode->i_security;

	if (strcmp(name, XATTR_NAME_SMACK) == 0) {
		nsp = smk_import(value, size);
		if (nsp != NULL)
			isp->smk_inode = nsp;
		else
			isp->smk_inode = smack_known_invalid.smk_known;
	} else if (strcmp(name, XATTR_NAME_SMACKEXEC) == 0) {
		nsp = smk_import(value, size);
		if (nsp != NULL)
			isp->smk_task = nsp;
		else
			isp->smk_task = smack_known_invalid.smk_known;
	} else if (strcmp(name, XATTR_NAME_SMACKMMAP) == 0) {
		nsp = smk_import(value, size);
		if (nsp != NULL)
			isp->smk_mmap = nsp;
		else
			isp->smk_mmap = smack_known_invalid.smk_known;
	} else if (strcmp(name, XATTR_NAME_SMACKTRANSMUTE) == 0)
		isp->smk_flags |= SMK_INODE_TRANSMUTE;

	return;
}
/**
 * smack_inode_getxattr - Smack check on getxattr
 * @dentry: the object
 * @name: unused
 *
 * Reading any extended attribute requires read access to the object.
 *
 * Returns 0 if access is permitted, an error code otherwise
 */
static int smack_inode_getxattr(struct dentry *dentry, const char *name)
{
	char *isp = smk_of_inode(dentry->d_inode);
	struct smk_audit_info ad;

	smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_DENTRY);
	smk_ad_setfield_u_fs_path_dentry(&ad, dentry);

	return smk_curacc(isp, MAY_READ, &ad);
}
/**
 * smack_inode_removexattr - Smack check on removexattr
 * @dentry: the object
 * @name: name of the attribute
 *
 * Removing the Smack attribute requires CAP_MAC_ADMIN; any other
 * attribute goes through the normal capability check.
 *
 * Returns 0 if access is permitted, an error code otherwise
 */
static int smack_inode_removexattr(struct dentry *dentry, const char *name)
{
	struct inode_smack *isp;
	struct smk_audit_info ad;
	int rc = 0;

	/*
	 * Bug fix: the SMACKMMAP comparison was missing "== 0", so the
	 * branch was taken for every name EXCEPT SMACK64MMAP, wrongly
	 * requiring CAP_MAC_ADMIN for ordinary xattrs and bypassing
	 * cap_inode_removexattr().
	 */
	if (strcmp(name, XATTR_NAME_SMACK) == 0 ||
	    strcmp(name, XATTR_NAME_SMACKIPIN) == 0 ||
	    strcmp(name, XATTR_NAME_SMACKIPOUT) == 0 ||
	    strcmp(name, XATTR_NAME_SMACKEXEC) == 0 ||
	    strcmp(name, XATTR_NAME_SMACKTRANSMUTE) == 0 ||
	    strcmp(name, XATTR_NAME_SMACKMMAP) == 0) {
		if (!capable(CAP_MAC_ADMIN))
			rc = -EPERM;
	} else
		rc = cap_inode_removexattr(dentry, name);

	smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_DENTRY);
	smk_ad_setfield_u_fs_path_dentry(&ad, dentry);

	/* Removing any xattr also requires write access to the object. */
	if (rc == 0)
		rc = smk_curacc(smk_of_inode(dentry->d_inode), MAY_WRITE, &ad);

	if (rc == 0) {
		/* Drop the cached exec and mmap labels on any removal. */
		isp = dentry->d_inode->i_security;
		isp->smk_task = NULL;
		isp->smk_mmap = NULL;
	}

	return rc;
}
/**
 * smack_inode_getsecurity - get smack xattrs
 * @inode: the object
 * @name: attribute name
 * @buffer: where to put the result
 * @alloc: unused
 *
 * The SMACK64 attribute lives on every inode; the IPIN/IPOUT
 * attributes exist only on sockets. The returned label is a
 * pointer into the master list; the caller must not free it.
 *
 * Returns the size of the attribute or an error code
 */
static int smack_inode_getsecurity(const struct inode *inode,
				   const char *name, void **buffer,
				   bool alloc)
{
	struct socket_smack *ssp;
	struct socket *sock;
	struct super_block *sbp;
	struct inode *ip = (struct inode *)inode;
	char *isp;

	/* SMACK64 works for any inode. */
	if (strcmp(name, XATTR_SMACK_SUFFIX) == 0) {
		isp = smk_of_inode(inode);
		*buffer = isp;
		return strlen(isp) + 1;
	}

	/*
	 * The rest of the Smack xattrs are only on sockets.
	 */
	sbp = ip->i_sb;
	if (sbp->s_magic != SOCKFS_MAGIC)
		return -EOPNOTSUPP;

	sock = SOCKET_I(ip);
	if (sock == NULL || sock->sk == NULL)
		return -EOPNOTSUPP;

	ssp = sock->sk->sk_security;

	if (strcmp(name, XATTR_SMACK_IPIN) == 0)
		isp = ssp->smk_in;
	else if (strcmp(name, XATTR_SMACK_IPOUT) == 0)
		isp = ssp->smk_out;
	else
		return -EOPNOTSUPP;

	/*
	 * Cleanup: the original kept an "rc" that was always 0, making
	 * the trailing "if (rc == 0)" an always-true branch; the dead
	 * variable and branch are removed with identical behavior.
	 */
	*buffer = isp;
	return strlen(isp) + 1;
}
/**
 * smack_inode_listsecurity - list the Smack attributes
 * @inode: the object
 * @buffer: where they go
 * @buffer_size: size of buffer
 *
 * Copies the SMACK64 attribute name into @buffer if it fits.
 *
 * Returns the length copied on success, -EINVAL otherwise
 */
static int smack_inode_listsecurity(struct inode *inode, char *buffer,
				    size_t buffer_size)
{
	int len = strlen(XATTR_NAME_SMACK);

	if (buffer == NULL || len > buffer_size)
		return -EINVAL;

	memcpy(buffer, XATTR_NAME_SMACK, len);
	return len;
}
/**
 * smack_inode_getsecid - Extract inode's security id
 * @inode: inode to extract the info from
 * @secid: where result will be saved
 *
 * Maps the inode's Smack label to its u32 secid.
 */
static void smack_inode_getsecid(const struct inode *inode, u32 *secid)
{
	struct inode_smack *isp = inode->i_security;

	*secid = smack_to_secid(isp->smk_inode);
}
/*
* File Hooks
*/
/**
 * smack_file_permission - Smack check on file operations
 * @file: unused
 * @mask: unused
 *
 * Returns 0
 *
 * Should access checks be done on each read or write?
 * UNICOS and SELinux say yes.
 * Trusted Solaris, Trusted Irix, and just about everyone else says no.
 *
 * I'll say no for now. Smack does not do the frequent
 * label changing that SELinux does.
 */
static int smack_file_permission(struct file *file, int mask)
{
	/* Access was checked at open time; per-operation checks are skipped. */
	return 0;
}
/**
 * smack_file_alloc_security - assign a file security blob
 * @file: the object
 *
 * The security blob for a file is a pointer to the master
 * label list, so no allocation is done.
 *
 * Returns 0
 */
static int smack_file_alloc_security(struct file *file)
{
	/* The file is labeled with the label of the task opening it. */
	file->f_security = smk_of_current();
	return 0;
}
/**
 * smack_file_free_security - clear a file security blob
 * @file: the object
 *
 * The security blob for a file is a pointer to the master
 * label list, so no memory is freed.
 */
static void smack_file_free_security(struct file *file)
{
	file->f_security = NULL;
}
/**
 * smack_file_ioctl - Smack check on ioctls
 * @file: the object
 * @cmd: what to do
 * @arg: unused
 *
 * Relies heavily on the correct use of the ioctl command conventions:
 * the _IOC_DIR bits of @cmd decide which accesses are required.
 *
 * Returns 0 if allowed, error code otherwise
 */
static int smack_file_ioctl(struct file *file, unsigned int cmd,
			    unsigned long arg)
{
	int rc = 0;
	struct smk_audit_info ad;

	smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_PATH);
	smk_ad_setfield_u_fs_path(&ad, file->f_path);

	/* A command that writes needs write; one that reads needs read. */
	if (_IOC_DIR(cmd) & _IOC_WRITE)
		rc = smk_curacc(file->f_security, MAY_WRITE, &ad);

	if (rc == 0 && (_IOC_DIR(cmd) & _IOC_READ))
		rc = smk_curacc(file->f_security, MAY_READ, &ad);

	return rc;
}
/**
 * smack_file_lock - Smack check on file locking
 * @file: the object
 * @cmd: unused
 *
 * Locking is a covert channel, so it requires write access.
 *
 * Returns 0 if current has write access, error code otherwise
 */
static int smack_file_lock(struct file *file, unsigned int cmd)
{
	struct smk_audit_info ad;

	smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_PATH);
	smk_ad_setfield_u_fs_path(&ad, file->f_path);
	return smk_curacc(file->f_security, MAY_WRITE, &ad);
}
/**
 * smack_file_fcntl - Smack check on fcntl
 * @file: the object
 * @cmd: what action to check
 * @arg: unused
 *
 * Generally these operations are harmless.
 * File locking operations present an obvious mechanism
 * for passing information, so they require write access.
 *
 * Returns 0 if current has access, error code otherwise
 */
static int smack_file_fcntl(struct file *file, unsigned int cmd,
			    unsigned long arg)
{
	struct smk_audit_info ad;
	int rc = 0;

	switch (cmd) {
	/* Commands that can signal or pass information require write. */
	case F_GETLK:
	case F_SETLK:
	case F_SETLKW:
	case F_SETOWN:
	case F_SETSIG:
		smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_PATH);
		smk_ad_setfield_u_fs_path(&ad, file->f_path);
		rc = smk_curacc(file->f_security, MAY_WRITE, &ad);
		break;
	default:
		/* Everything else is allowed without a Smack check. */
		break;
	}

	return rc;
}
/**
 * smack_file_mmap :
 * Check permissions for a mmap operation. The @file may be NULL, e.g.
 * if mapping anonymous memory.
 * @file contains the file structure for file to map (may be NULL).
 * @reqprot contains the protection requested by the application.
 * @prot contains the protection that will be applied by the kernel.
 * @flags contains the operational flags.
 * Return 0 if permission is granted.
 *
 * When the file carries a SMACK64MMAP label, the mapping is allowed
 * only if that label has at least as much access as current for
 * every rule involving current's label.
 */
static int smack_file_mmap(struct file *file,
			   unsigned long reqprot, unsigned long prot,
			   unsigned long flags, unsigned long addr,
			   unsigned long addr_only)
{
	struct smack_known *skp;
	struct smack_known *mkp;
	struct smack_rule *srp;
	struct task_smack *tsp;
	char *sp;
	char *msmack;
	char *osmack;
	struct inode_smack *isp;
	struct dentry *dp;
	int may;
	int mmay;
	int tmay;
	int rc;

	/* do DAC check on address space usage */
	rc = cap_file_mmap(file, reqprot, prot, flags, addr, addr_only);
	if (rc || addr_only)
		return rc;

	/* Anonymous mappings and files without inodes carry no mmap label. */
	if (file == NULL || file->f_dentry == NULL)
		return 0;
	dp = file->f_dentry;
	if (dp->d_inode == NULL)
		return 0;

	isp = dp->d_inode->i_security;
	if (isp->smk_mmap == NULL)
		return 0;
	msmack = isp->smk_mmap;

	tsp = current_security();
	sp = smk_of_current();
	skp = smk_find_entry(sp);
	/*
	 * Bug fix: the original reassigned skp inside the loop below.
	 * list_for_each_entry_rcu() re-evaluates its head expression
	 * (&skp->smk_rules) in the termination test on every iteration,
	 * so clobbering skp corrupted the walk of current's rule list.
	 * The SMACK64MMAP entry is loop-invariant; fetch it once here.
	 */
	mkp = smk_find_entry(msmack);
	rc = 0;

	rcu_read_lock();
	/*
	 * For each Smack rule associated with the subject
	 * label verify that the SMACK64MMAP also has access
	 * to that rule's object label.
	 */
	list_for_each_entry_rcu(srp, &skp->smk_rules, list) {
		osmack = srp->smk_object;
		/*
		 * Matching labels always allows access.
		 */
		if (msmack == osmack)
			continue;
		/*
		 * If there is a matching local rule take
		 * that into account as well.
		 */
		may = smk_access_entry(srp->smk_subject, osmack,
				       &tsp->smk_rules);
		if (may == -ENOENT)
			may = srp->smk_access;
		else
			may &= srp->smk_access;
		/*
		 * If may is zero the SMACK64MMAP subject can't
		 * possibly have less access.
		 */
		if (may == 0)
			continue;
		/*
		 * Fetch the global list entry.
		 * If there isn't one a SMACK64MMAP subject
		 * can't have as much access as current.
		 */
		mmay = smk_access_entry(msmack, osmack, &mkp->smk_rules);
		if (mmay == -ENOENT) {
			rc = -EACCES;
			break;
		}
		/*
		 * If there is a local entry it modifies the
		 * potential access, too.
		 */
		tmay = smk_access_entry(msmack, osmack, &tsp->smk_rules);
		if (tmay != -ENOENT)
			mmay &= tmay;

		/*
		 * If there is any access available to current that is
		 * not available to a SMACK64MMAP subject
		 * deny access.
		 */
		if ((may | mmay) != mmay) {
			rc = -EACCES;
			break;
		}
	}

	rcu_read_unlock();

	return rc;
}
/**
 * smack_file_set_fowner - set the file security blob value
 * @file: object in question
 *
 * Re-labels the file with the current task's label.
 *
 * Returns 0
 * Further research may be required on this one.
 */
static int smack_file_set_fowner(struct file *file)
{
	file->f_security = smk_of_current();
	return 0;
}
/**
 * smack_file_send_sigiotask - Smack on sigio
 * @tsk: The target task
 * @fown: the object the signal come from
 * @signum: unused
 *
 * Allow a privileged task to get signals even if it shouldn't
 *
 * Returns 0 if a subject with the object's smack could
 * write to the task, an error code otherwise.
 */
static int smack_file_send_sigiotask(struct task_struct *tsk,
				     struct fown_struct *fown, int signum)
{
	struct file *file;
	int rc;
	char *tsp = smk_of_task(tsk->cred->security);
	struct smk_audit_info ad;

	/*
	 * struct fown_struct is never outside the context of a struct file
	 */
	file = container_of(fown, struct file, f_owner);

	/* we don't log here as rc can be overriden */
	rc = smk_access(file->f_security, tsp, MAY_WRITE, NULL);
	/* CAP_MAC_OVERRIDE on the target lets the signal through anyway. */
	if (rc != 0 && has_capability(tsk, CAP_MAC_OVERRIDE))
		rc = 0;

	/* Log the final decision, privilege included. */
	smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_TASK);
	smk_ad_setfield_u_tsk(&ad, tsk);
	smack_log(file->f_security, tsp, MAY_WRITE, rc, &ad);
	return rc;
}
/**
* smack_file_receive - Smack file receive check
* @file: the object
*
* Returns 0 if current has access, error code otherwise
*/
static int smack_file_receive(struct file *file)
{
int may = 0;
struct smk_audit_info ad;
smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_TASK);
smk_ad_setfield_u_fs_path(&ad, file->f_path);
/*
* This code relies on bitmasks.
*/
if (file->f_mode & FMODE_READ)
may = MAY_READ;
if (file->f_mode & FMODE_WRITE)
may |= MAY_WRITE;
return smk_curacc(file->f_security, may, &ad);
}
/**
* smack_dentry_open - Smack dentry open processing
* @file: the object
* @cred: unused
*
* Set the security blob in the file structure.
*
* Returns 0
*/
static int smack_dentry_open(struct file *file, const struct cred *cred)
{
struct inode_smack *isp = file->f_path.dentry->d_inode->i_security;
file->f_security = isp->smk_inode;
return 0;
}
/*
* Task hooks
*/
/**
 * smack_cred_alloc_blank - "allocate" blank task-level security credentials
 * @cred: the new credentials
 * @gfp: the atomicity of any memory allocations
 *
 * Prepare a blank set of credentials for modification. This must allocate all
 * the memory the LSM module might require such that cred_transfer() can
 * complete without error.
 *
 * Returns 0 on success, -ENOMEM otherwise.
 */
static int smack_cred_alloc_blank(struct cred *cred, gfp_t gfp)
{
	struct task_smack *tsp;

	/* Labels are NULL here; cred_transfer() fills them in later. */
	tsp = new_task_smack(NULL, NULL, gfp);
	if (tsp == NULL)
		return -ENOMEM;

	cred->security = tsp;

	return 0;
}
/**
 * smack_cred_free - "free" task-level security credentials
 * @cred: the credentials in question
 *
 * Frees the rule list and the task blob. The labels themselves
 * point into the master list and are never freed here.
 */
static void smack_cred_free(struct cred *cred)
{
	struct task_smack *tsp = cred->security;
	struct smack_rule *rp;
	struct list_head *l;
	struct list_head *n;

	/* A blank or already-freed cred has nothing to release. */
	if (tsp == NULL)
		return;
	cred->security = NULL;

	/* Release every rule copied by smk_copy_rules(). */
	list_for_each_safe(l, n, &tsp->smk_rules) {
		rp = list_entry(l, struct smack_rule, list);
		list_del(&rp->list);
		kfree(rp);
	}
	kfree(tsp);
}
/**
 * smack_cred_prepare - prepare new set of credentials for modification
 * @new: the new credentials
 * @old: the original credentials
 * @gfp: the atomicity of any memory allocations
 *
 * Prepare a new set of credentials for modification.
 *
 * Returns 0 on success, -ENOMEM otherwise.
 */
static int smack_cred_prepare(struct cred *new, const struct cred *old,
			      gfp_t gfp)
{
	struct task_smack *old_tsp = old->security;
	struct task_smack *new_tsp;
	struct smack_rule *rp;
	struct list_head *l;
	struct list_head *n;
	int rc;

	new_tsp = new_task_smack(old_tsp->smk_task, old_tsp->smk_task, gfp);
	if (new_tsp == NULL)
		return -ENOMEM;

	rc = smk_copy_rules(&new_tsp->smk_rules, &old_tsp->smk_rules, gfp);
	if (rc != 0) {
		/*
		 * Bug fix: the original returned here without freeing,
		 * leaking new_tsp and any rules copied before the
		 * failure (the blob was never attached to new->security,
		 * so smack_cred_free() would not see it).
		 */
		list_for_each_safe(l, n, &new_tsp->smk_rules) {
			rp = list_entry(l, struct smack_rule, list);
			list_del(&rp->list);
			kfree(rp);
		}
		kfree(new_tsp);
		return rc;
	}

	new->security = new_tsp;
	return 0;
}
/**
 * smack_cred_transfer - Transfer the old credentials to the new credentials
 * @new: the new credentials
 * @old: the original credentials
 *
 * Fill in a set of blank credentials from another set of credentials.
 * The blank blob was allocated by smack_cred_alloc_blank().
 */
static void smack_cred_transfer(struct cred *new, const struct cred *old)
{
	struct task_smack *old_tsp = old->security;
	struct task_smack *new_tsp = new->security;

	new_tsp->smk_task = old_tsp->smk_task;
	/* The transfer counts as a fork for label-change tracking. */
	new_tsp->smk_forked = old_tsp->smk_task;
	mutex_init(&new_tsp->smk_rules_lock);
	INIT_LIST_HEAD(&new_tsp->smk_rules);

	/* cbs copy rule list */
}
/**
 * smack_kernel_act_as - Set the subjective context in a set of credentials
 * @new: points to the set of credentials to be modified.
 * @secid: specifies the security ID to be set
 *
 * Set the security data for a kernel service.
 *
 * Returns 0 on success, -EINVAL if the secid maps to no label.
 */
static int smack_kernel_act_as(struct cred *new, u32 secid)
{
	char *smack = smack_from_secid(secid);
	struct task_smack *new_tsp;

	if (smack == NULL)
		return -EINVAL;

	new_tsp = new->security;
	new_tsp->smk_task = smack;
	return 0;
}
/**
 * smack_kernel_create_files_as - Set the file creation label in a set of creds
 * @new: points to the set of credentials to be modified
 * @inode: points to the inode to use as a reference
 *
 * Set the file creation context in a set of credentials to the same
 * as the objective context of the specified inode
 *
 * Returns 0.
 */
static int smack_kernel_create_files_as(struct cred *new,
					struct inode *inode)
{
	struct inode_smack *isp = inode->i_security;
	struct task_smack *tsp = new->security;
	char *label = isp->smk_inode;

	tsp->smk_task = label;
	tsp->smk_forked = label;

	return 0;
}
/**
 * smk_curacc_on_task - helper to log task related access
 * @p: the task object
 * @access: the access requested
 * @caller: name of the calling function for audit
 *
 * Checks current's access to @p's label, shared by the
 * smack_task_* hooks below.
 *
 * Return 0 if access is permitted
 */
static int smk_curacc_on_task(struct task_struct *p, int access,
			      const char *caller)
{
	struct smk_audit_info ad;

	smk_ad_init(&ad, caller, LSM_AUDIT_DATA_TASK);
	smk_ad_setfield_u_tsk(&ad, p);
	return smk_curacc(smk_of_task(task_security(p)), access, &ad);
}
/**
 * smack_task_setpgid - Smack check on setting pgid
 * @p: the task object
 * @pgid: unused
 *
 * Return 0 if write access is permitted
 */
static int smack_task_setpgid(struct task_struct *p, pid_t pgid)
{
	return smk_curacc_on_task(p, MAY_WRITE, __func__);
}
/**
 * smack_task_getpgid - Smack access check for getpgid
 * @p: the object task
 *
 * Returns 0 if current can read the object task, error code otherwise
 */
static int smack_task_getpgid(struct task_struct *p)
{
	return smk_curacc_on_task(p, MAY_READ, __func__);
}
/**
 * smack_task_getsid - Smack access check for getsid
 * @p: the object task
 *
 * Returns 0 if current can read the object task, error code otherwise
 */
static int smack_task_getsid(struct task_struct *p)
{
	return smk_curacc_on_task(p, MAY_READ, __func__);
}
/**
 * smack_task_getsecid - get the secid of the task
 * @p: the object task
 * @secid: where to put the result
 *
 * Sets the secid to contain a u32 version of the smack label.
 */
static void smack_task_getsecid(struct task_struct *p, u32 *secid)
{
	*secid = smack_to_secid(smk_of_task(task_security(p)));
}
/**
 * smack_task_setnice - Smack check on setting nice
 * @p: the task object
 * @nice: the proposed nice value, checked only by commoncap
 *
 * Return 0 if write access is permitted
 */
static int smack_task_setnice(struct task_struct *p, int nice)
{
	int rc = cap_task_setnice(p, nice);

	/* The capability check must pass before Smack is consulted. */
	if (rc != 0)
		return rc;

	return smk_curacc_on_task(p, MAY_WRITE, __func__);
}
/**
 * smack_task_setioprio - Smack check on setting ioprio
 * @p: the task object
 * @ioprio: the proposed priority, checked only by commoncap
 *
 * Return 0 if write access is permitted
 */
static int smack_task_setioprio(struct task_struct *p, int ioprio)
{
	int rc = cap_task_setioprio(p, ioprio);

	/* The capability check must pass before Smack is consulted. */
	if (rc != 0)
		return rc;

	return smk_curacc_on_task(p, MAY_WRITE, __func__);
}
/**
 * smack_task_getioprio - Smack check on reading ioprio
 * @p: the task object
 *
 * Return 0 if read access is permitted
 */
static int smack_task_getioprio(struct task_struct *p)
{
	return smk_curacc_on_task(p, MAY_READ, __func__);
}
/**
 * smack_task_setscheduler - Smack check on setting scheduler
 * @p: the task object
 *
 * Return 0 if write access is permitted
 */
static int smack_task_setscheduler(struct task_struct *p)
{
	int rc = cap_task_setscheduler(p);

	/* The capability check must pass before Smack is consulted. */
	if (rc != 0)
		return rc;

	return smk_curacc_on_task(p, MAY_WRITE, __func__);
}
/**
 * smack_task_getscheduler - Smack check on reading scheduler
 * @p: the task object
 *
 * Return 0 if read access is permitted
 */
static int smack_task_getscheduler(struct task_struct *p)
{
	return smk_curacc_on_task(p, MAY_READ, __func__);
}
/**
 * smack_task_movememory - Smack check on moving memory
 * @p: the task object
 *
 * Return 0 if write access is permitted
 */
static int smack_task_movememory(struct task_struct *p)
{
	int rc = smk_curacc_on_task(p, MAY_WRITE, __func__);

	return rc;
}
/**
 * smack_task_kill - Smack check on signal delivery
 * @p: the task object
 * @info: unused
 * @sig: unused
 * @secid: identifies the smack to use in lieu of current's
 *
 * Return 0 if write access is permitted
 *
 * The secid behavior is an artifact of an SELinux hack
 * in the USB code. Someday it may go away.
 */
static int smack_task_kill(struct task_struct *p, struct siginfo *info,
			   int sig, u32 secid)
{
	struct smk_audit_info ad;

	smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_TASK);
	smk_ad_setfield_u_tsk(&ad, p);
	/*
	 * Sending a signal requires that the sender
	 * can write the receiver.
	 */
	if (secid == 0)
		return smk_curacc(smk_of_task(task_security(p)), MAY_WRITE,
				  &ad);
	/*
	 * If the secid isn't 0 we're dealing with some USB IO
	 * specific behavior. This is not clean. For one thing
	 * we can't take privilege into account: unlike smk_curacc()
	 * above, smk_access() applies no capability overrides, so a
	 * privileged sender can still be denied on this path.
	 */
	return smk_access(smack_from_secid(secid),
			  smk_of_task(task_security(p)), MAY_WRITE, &ad);
}
/**
 * smack_task_wait - Smack access check for waiting
 * @p: task to wait for
 *
 * Returns 0 if current can wait for p, error code otherwise
 */
static int smack_task_wait(struct task_struct *p)
{
	struct smk_audit_info ad;
	char *sp = smk_of_current();
	char *tsp = smk_of_forked(task_security(p));
	int rc;

	/* we don't log here: the decision may yet be overridden below,
	 * so the audit record is emitted once, at out_log. */
	rc = smk_access(tsp, sp, MAY_WRITE, NULL);
	if (rc == 0)
		goto out_log;
	/*
	 * Allow the operation to succeed if either task
	 * has privilege to perform operations that might
	 * account for the smack labels having gotten to
	 * be different in the first place.
	 *
	 * This breaks the strict subject/object access
	 * control ideal, taking the object's privilege
	 * state into account in the decision as well as
	 * the smack value.
	 */
	if (capable(CAP_MAC_OVERRIDE) || has_capability(p, CAP_MAC_OVERRIDE))
		rc = 0;
	/* we log only if we didn't get overridden */
 out_log:
	smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_TASK);
	smk_ad_setfield_u_tsk(&ad, p);
	smack_log(tsp, sp, MAY_WRITE, rc, &ad);
	return rc;
}
/**
 * smack_task_to_inode - copy task smack into the inode blob
 * @p: task to copy from
 * @inode: inode to copy to
 *
 * Sets the smack pointer in the inode security blob
 */
static void smack_task_to_inode(struct task_struct *p, struct inode *inode)
{
	struct inode_smack *isp;

	isp = inode->i_security;
	isp->smk_inode = smk_of_task(task_security(p));
}
/*
* Socket hooks.
*/
/**
 * smack_sk_alloc_security - Allocate a socket blob
 * @sk: the socket
 * @family: unused
 * @gfp_flags: memory allocation flags
 *
 * Assign Smack pointers to current
 *
 * Returns 0 on success, -ENOMEM is there's no memory
 */
static int smack_sk_alloc_security(struct sock *sk, int family, gfp_t gfp_flags)
{
	struct socket_smack *ssp;

	ssp = kzalloc(sizeof(*ssp), gfp_flags);
	if (ssp == NULL)
		return -ENOMEM;

	/* Both directions start out labeled as the creating task. */
	ssp->smk_in = smk_of_current();
	ssp->smk_out = ssp->smk_in;
	ssp->smk_packet = NULL;

	sk->sk_security = ssp;
	return 0;
}
/**
 * smack_sk_free_security - Free a socket blob
 * @sk: the socket
 *
 * Frees the socket_smack blob; the labels it pointed at live on the
 * global label list and are not freed here.
 */
static void smack_sk_free_security(struct sock *sk)
{
	kfree(sk->sk_security);
}
/**
* smack_host_label - check host based restrictions
* @sip: the object end
*
* looks for host based access restrictions
*
* This version will only be appropriate for really small sets of single label
* hosts. The caller is responsible for ensuring that the RCU read lock is
* taken before calling this function.
*
* Returns the label of the far end or NULL if it's not special.
*/
static char *smack_host_label(struct sockaddr_in *sip)
{
struct smk_netlbladdr *snp;
struct in_addr *siap = &sip->sin_addr;
if (siap->s_addr == 0)
return NULL;
list_for_each_entry_rcu(snp, &smk_netlbladdr_list, list)
/*
* we break after finding the first match because
* the list is sorted from longest to shortest mask
* so we have found the most specific match
*/
if ((&snp->smk_host.sin_addr)->s_addr ==
(siap->s_addr & (&snp->smk_mask)->s_addr)) {
/* we have found the special CIPSO option */
if (snp->smk_label == smack_cipso_option)
return NULL;
return snp->smk_label;
}
return NULL;
}
/**
* smack_set_catset - convert a capset to netlabel mls categories
* @catset: the Smack categories
* @sap: where to put the netlabel categories
*
* Allocates and fills attr.mls.cat
*/
static void smack_set_catset(char *catset, struct netlbl_lsm_secattr *sap)
{
unsigned char *cp;
unsigned char m;
int cat;
int rc;
int byte;
if (!catset)
return;
sap->flags |= NETLBL_SECATTR_MLS_CAT;
sap->attr.mls.cat = netlbl_secattr_catmap_alloc(GFP_ATOMIC);
sap->attr.mls.cat->startbit = 0;
for (cat = 1, cp = catset, byte = 0; byte < SMK_LABELLEN; cp++, byte++)
for (m = 0x80; m != 0; m >>= 1, cat++) {
if ((m & *cp) == 0)
continue;
rc = netlbl_secattr_catmap_setbit(sap->attr.mls.cat,
cat, GFP_ATOMIC);
}
}
/**
 * smack_to_secattr - fill a secattr from a smack value
 * @smack: the smack value
 * @nlsp: where the result goes
 *
 * Casey says that CIPSO is good enough for now.
 * It can be used to effect.
 * It can also be abused to effect when necessary.
 * Apologies to the TSIG group in general and GW in particular.
 */
static void smack_to_secattr(char *smack, struct netlbl_lsm_secattr *nlsp)
{
	struct smack_cipso cipso;
	int rc;

	nlsp->domain = smack;
	nlsp->flags = NETLBL_SECATTR_DOMAIN | NETLBL_SECATTR_MLS_LVL;

	/*
	 * If the label has an explicit CIPSO mapping use it,
	 * otherwise fall back on direct mapping: the well-known
	 * direct level with the label bytes as the category set.
	 */
	rc = smack_to_cipso(smack, &cipso);
	if (rc == 0) {
		nlsp->attr.mls.lvl = cipso.smk_level;
		smack_set_catset(cipso.smk_catset, nlsp);
	} else {
		nlsp->attr.mls.lvl = smack_cipso_direct;
		smack_set_catset(smack, nlsp);
	}
}
/**
 * smack_netlabel - Set the secattr on a socket
 * @sk: the socket
 * @labeled: socket label scheme (SMACK_UNLABELED_SOCKET or
 *           SMACK_CIPSO_SOCKET)
 *
 * Convert the outbound smack value (smk_out) to a
 * secattr and attach it to the socket.
 *
 * Returns 0 on success or an error code
 */
static int smack_netlabel(struct sock *sk, int labeled)
{
	struct socket_smack *ssp = sk->sk_security;
	struct netlbl_lsm_secattr secattr;
	int rc = 0;

	/*
	 * Usually the netlabel code will handle changing the
	 * packet labeling based on the label.
	 * The case of a single label host is different, because
	 * a single label host should never get a labeled packet
	 * even though the label is usually associated with a packet
	 * label.
	 */
	/* Serialize against softirq use of the socket while the
	 * netlabel state is swapped. */
	local_bh_disable();
	bh_lock_sock_nested(sk);

	if (ssp->smk_out == smack_net_ambient ||
	    labeled == SMACK_UNLABELED_SOCKET)
		netlbl_sock_delattr(sk);
	else {
		netlbl_secattr_init(&secattr);
		smack_to_secattr(ssp->smk_out, &secattr);
		rc = netlbl_sock_setattr(sk, sk->sk_family, &secattr);
		netlbl_secattr_destroy(&secattr);
	}

	bh_unlock_sock(sk);
	local_bh_enable();

	return rc;
}
/**
 * smack_netlabel_send - Set the secattr on a socket and perform access checks
 * @sk: the socket
 * @sap: the destination address
 *
 * Set the correct secattr for the given socket based on the destination
 * address and perform any outbound access checks needed.
 *
 * Returns 0 on success or an error code.
 *
 */
static int smack_netlabel_send(struct sock *sk, struct sockaddr_in *sap)
{
	int rc;
	int sk_lbl;
	char *hostsp;
	struct socket_smack *ssp = sk->sk_security;
	struct smk_audit_info ad;

	/* The host list is RCU protected; hostsp must not be used
	 * after the rcu_read_unlock() below. */
	rcu_read_lock();
	hostsp = smack_host_label(sap);
	if (hostsp != NULL) {
		/*
		 * Single label host: no packet labeling, but the
		 * sender must be able to write to the host label.
		 */
#ifdef CONFIG_AUDIT
		struct lsm_network_audit net;

		smk_ad_init_net(&ad, __func__, LSM_AUDIT_DATA_NET, &net);
		ad.a.u.net->family = sap->sin_family;
		ad.a.u.net->dport = sap->sin_port;
		ad.a.u.net->v4info.daddr = sap->sin_addr.s_addr;
#endif
		sk_lbl = SMACK_UNLABELED_SOCKET;
		rc = smk_access(ssp->smk_out, hostsp, MAY_WRITE, &ad);
	} else {
		sk_lbl = SMACK_CIPSO_SOCKET;
		rc = 0;
	}
	rcu_read_unlock();
	if (rc != 0)
		return rc;

	return smack_netlabel(sk, sk_lbl);
}
/**
 * smack_inode_setsecurity - set smack xattrs
 * @inode: the object
 * @name: attribute name
 * @value: attribute value
 * @size: size of the attribute
 * @flags: unused
 *
 * Sets the named attribute in the appropriate blob
 *
 * Returns 0 on success, or an error code
 */
static int smack_inode_setsecurity(struct inode *inode, const char *name,
				   const void *value, size_t size, int flags)
{
	char *sp;
	struct inode_smack *nsp = inode->i_security;
	struct socket_smack *ssp;
	struct socket *sock;
	int rc = 0;

	if (value == NULL || size > SMK_LABELLEN || size == 0)
		return -EACCES;

	/* Import interns the label on the global list; sp is shared,
	 * never freed by this path. */
	sp = smk_import(value, size);
	if (sp == NULL)
		return -EINVAL;

	if (strcmp(name, XATTR_SMACK_SUFFIX) == 0) {
		nsp->smk_inode = sp;
		nsp->smk_flags |= SMK_INODE_INSTANT;
		return 0;
	}
	/*
	 * The rest of the Smack xattrs are only on sockets.
	 */
	if (inode->i_sb->s_magic != SOCKFS_MAGIC)
		return -EOPNOTSUPP;

	sock = SOCKET_I(inode);
	if (sock == NULL || sock->sk == NULL)
		return -EOPNOTSUPP;

	ssp = sock->sk->sk_security;

	if (strcmp(name, XATTR_SMACK_IPIN) == 0)
		ssp->smk_in = sp;
	else if (strcmp(name, XATTR_SMACK_IPOUT) == 0) {
		ssp->smk_out = sp;
		if (sock->sk->sk_family != PF_UNIX) {
			/* Refresh the on-the-wire labeling to match the
			 * new outbound label. Failure is logged, not
			 * returned: the label change itself stands. */
			rc = smack_netlabel(sock->sk, SMACK_CIPSO_SOCKET);
			if (rc != 0)
				printk(KERN_WARNING
					"Smack: \"%s\" netlbl error %d.\n",
					__func__, -rc);
		}
	} else
		return -EOPNOTSUPP;

	return 0;
}
/**
 * smack_socket_post_create - finish socket setup
 * @sock: the socket
 * @family: protocol family
 * @type: unused
 * @protocol: unused
 * @kern: unused
 *
 * Sets the netlabel information on the socket
 *
 * Returns 0 on success, and error code otherwise
 */
static int smack_socket_post_create(struct socket *sock, int family,
				    int type, int protocol, int kern)
{
	/* Only IPv4 sockets with a sock get outbound labeling. */
	if (family != PF_INET)
		return 0;
	if (sock->sk == NULL)
		return 0;
	/*
	 * Set the outbound netlbl.
	 */
	return smack_netlabel(sock->sk, SMACK_CIPSO_SOCKET);
}
/**
* smack_socket_connect - connect access check
* @sock: the socket
* @sap: the other end
* @addrlen: size of sap
*
* Verifies that a connection may be possible
*
* Returns 0 on success, and error code otherwise
*/
static int smack_socket_connect(struct socket *sock, struct sockaddr *sap,
int addrlen)
{
if (sock->sk == NULL || sock->sk->sk_family != PF_INET)
return 0;
if (addrlen < sizeof(struct sockaddr_in))
return -EINVAL;
return smack_netlabel_send(sock->sk, (struct sockaddr_in *)sap);
}
/**
 * smack_flags_to_may - convert S_ to MAY_ values
 * @flags: the S_ value
 *
 * Returns the equivalent MAY_ value
 */
static int smack_flags_to_may(int flags)
{
	return ((flags & S_IRUGO) ? MAY_READ : 0) |
	       ((flags & S_IWUGO) ? MAY_WRITE : 0) |
	       ((flags & S_IXUGO) ? MAY_EXEC : 0);
}
/**
 * smack_msg_msg_alloc_security - Set the security blob for msg_msg
 * @msg: the object
 *
 * Returns 0
 */
static int smack_msg_msg_alloc_security(struct msg_msg *msg)
{
	char *csp = smk_of_current();

	msg->security = csp;
	return 0;
}
/**
 * smack_msg_msg_free_security - Clear the security blob for msg_msg
 * @msg: the object
 *
 * Clears the blob pointer. The label itself lives on the global
 * list and is not freed.
 */
static void smack_msg_msg_free_security(struct msg_msg *msg)
{
	msg->security = NULL;
}
/**
 * smack_of_shm - the smack pointer for the shm
 * @shp: the object
 *
 * Returns a pointer to the smack value stashed in the
 * shm's kern_ipc_perm security blob.
 */
static char *smack_of_shm(struct shmid_kernel *shp)
{
	return (char *)shp->shm_perm.security;
}
/**
 * smack_shm_alloc_security - Set the security blob for shm
 * @shp: the object
 *
 * Labels the new segment with the allocating task's label.
 * Returns 0
 */
static int smack_shm_alloc_security(struct shmid_kernel *shp)
{
	shp->shm_perm.security = smk_of_current();
	return 0;
}
/**
 * smack_shm_free_security - Clear the security blob for shm
 * @shp: the object
 *
 * Clears the blob pointer; the label itself is never freed.
 */
static void smack_shm_free_security(struct shmid_kernel *shp)
{
	shp->shm_perm.security = NULL;
}
/**
 * smk_curacc_shm : check if current has access on shm
 * @shp : the object
 * @access : access requested
 *
 * Returns 0 if current has the requested access, error code otherwise
 */
static int smk_curacc_shm(struct shmid_kernel *shp, int access)
{
	char *ssp = smack_of_shm(shp);
	struct smk_audit_info ad;

#ifdef CONFIG_AUDIT
	smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_IPC);
	ad.a.u.ipc_id = shp->shm_perm.id;
#endif
	/* NOTE(review): with !CONFIG_AUDIT &ad is passed uninitialized;
	 * presumably smk_curacc() never reads it then — confirm. */
	return smk_curacc(ssp, access, &ad);
}
/**
 * smack_shm_associate - Smack access check for shm
 * @shp: the object
 * @shmflg: access requested
 *
 * Returns 0 if current has the requested access, error code otherwise
 */
static int smack_shm_associate(struct shmid_kernel *shp, int shmflg)
{
	return smk_curacc_shm(shp, smack_flags_to_may(shmflg));
}
/**
 * smack_shm_shmctl - Smack access check for shm
 * @shp: the object
 * @cmd: what it wants to do
 *
 * Maps the control command to the access it implies and checks it.
 *
 * Returns 0 if current has the requested access, error code otherwise
 */
static int smack_shm_shmctl(struct shmid_kernel *shp, int cmd)
{
	int access;

	switch (cmd) {
	case IPC_STAT:
	case SHM_STAT:
		access = MAY_READ;
		break;
	case IPC_SET:
	case SHM_LOCK:
	case SHM_UNLOCK:
	case IPC_RMID:
		access = MAY_READWRITE;
		break;
	case IPC_INFO:
	case SHM_INFO:
		/* System level information: no object involved. */
		return 0;
	default:
		return -EINVAL;
	}

	return smk_curacc_shm(shp, access);
}
/**
 * smack_shm_shmat - Smack access for shmat
 * @shp: the object
 * @shmaddr: unused
 * @shmflg: access requested
 *
 * Returns 0 if current has the requested access, error code otherwise
 */
static int smack_shm_shmat(struct shmid_kernel *shp, char __user *shmaddr,
			   int shmflg)
{
	return smk_curacc_shm(shp, smack_flags_to_may(shmflg));
}
/**
 * smack_of_sem - the smack pointer for the sem
 * @sma: the object
 *
 * Returns a pointer to the smack value stashed in the
 * semaphore set's kern_ipc_perm security blob.
 */
static char *smack_of_sem(struct sem_array *sma)
{
	return (char *)sma->sem_perm.security;
}
/**
 * smack_sem_alloc_security - Set the security blob for sem
 * @sma: the object
 *
 * Labels the new semaphore set with the allocating task's label.
 * Returns 0
 */
static int smack_sem_alloc_security(struct sem_array *sma)
{
	sma->sem_perm.security = smk_of_current();
	return 0;
}
/**
 * smack_sem_free_security - Clear the security blob for sem
 * @sma: the object
 *
 * Clears the blob pointer; the label itself is never freed.
 */
static void smack_sem_free_security(struct sem_array *sma)
{
	sma->sem_perm.security = NULL;
}
/**
 * smk_curacc_sem : check if current has access on sem
 * @sma : the object
 * @access : access requested
 *
 * Returns 0 if current has the requested access, error code otherwise
 */
static int smk_curacc_sem(struct sem_array *sma, int access)
{
	char *ssp = smack_of_sem(sma);
	struct smk_audit_info ad;

#ifdef CONFIG_AUDIT
	smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_IPC);
	ad.a.u.ipc_id = sma->sem_perm.id;
#endif
	/* NOTE(review): with !CONFIG_AUDIT &ad is passed uninitialized;
	 * presumably smk_curacc() never reads it then — confirm. */
	return smk_curacc(ssp, access, &ad);
}
/**
 * smack_sem_associate - Smack access check for sem
 * @sma: the object
 * @semflg: access requested
 *
 * Returns 0 if current has the requested access, error code otherwise
 */
static int smack_sem_associate(struct sem_array *sma, int semflg)
{
	return smk_curacc_sem(sma, smack_flags_to_may(semflg));
}
/**
 * smack_sem_semctl - Smack access check for sem
 * @sma: the object
 * @cmd: what it wants to do
 *
 * Maps the control command to the access it implies and checks it.
 *
 * Returns 0 if current has the requested access, error code otherwise
 */
static int smack_sem_semctl(struct sem_array *sma, int cmd)
{
	int access;

	switch (cmd) {
	case GETPID:
	case GETNCNT:
	case GETZCNT:
	case GETVAL:
	case GETALL:
	case IPC_STAT:
	case SEM_STAT:
		access = MAY_READ;
		break;
	case SETVAL:
	case SETALL:
	case IPC_RMID:
	case IPC_SET:
		access = MAY_READWRITE;
		break;
	case IPC_INFO:
	case SEM_INFO:
		/* System level information: no object involved. */
		return 0;
	default:
		return -EINVAL;
	}

	return smk_curacc_sem(sma, access);
}
/**
 * smack_sem_semop - Smack checks of semaphore operations
 * @sma: the object
 * @sops: unused
 * @nsops: unused
 * @alter: unused
 *
 * Treated as read and write in all cases.
 *
 * Returns 0 if access is allowed, error code otherwise
 */
static int smack_sem_semop(struct sem_array *sma, struct sembuf *sops,
			   unsigned nsops, int alter)
{
	int rc = smk_curacc_sem(sma, MAY_READWRITE);

	return rc;
}
/**
 * smack_msg_queue_alloc_security - Set the security blob for msg
 * @msq: the object
 *
 * Labels the new queue with the allocating task's label.
 * Returns 0
 */
static int smack_msg_queue_alloc_security(struct msg_queue *msq)
{
	msq->q_perm.security = smk_of_current();
	return 0;
}
/**
 * smack_msg_queue_free_security - Clear the security blob for msg
 * @msq: the object
 *
 * Clears the blob pointer; the label itself is never freed.
 */
static void smack_msg_queue_free_security(struct msg_queue *msq)
{
	msq->q_perm.security = NULL;
}
/**
 * smack_of_msq - the smack pointer for the msq
 * @msq: the object
 *
 * Returns a pointer to the smack value stashed in the
 * queue's kern_ipc_perm security blob.
 */
static char *smack_of_msq(struct msg_queue *msq)
{
	return (char *)msq->q_perm.security;
}
/**
 * smk_curacc_msq : helper to check if current has access on msq
 * @msq : the msq
 * @access : access requested
 *
 * return 0 if current has access, error otherwise
 */
static int smk_curacc_msq(struct msg_queue *msq, int access)
{
	char *msp = smack_of_msq(msq);
	struct smk_audit_info ad;

#ifdef CONFIG_AUDIT
	smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_IPC);
	ad.a.u.ipc_id = msq->q_perm.id;
#endif
	/* NOTE(review): with !CONFIG_AUDIT &ad is passed uninitialized;
	 * presumably smk_curacc() never reads it then — confirm. */
	return smk_curacc(msp, access, &ad);
}
/**
 * smack_msg_queue_associate - Smack access check for msg_queue
 * @msq: the object
 * @msqflg: access requested
 *
 * Returns 0 if current has the requested access, error code otherwise
 */
static int smack_msg_queue_associate(struct msg_queue *msq, int msqflg)
{
	return smk_curacc_msq(msq, smack_flags_to_may(msqflg));
}
/**
 * smack_msg_queue_msgctl - Smack access check for msg_queue
 * @msq: the object
 * @cmd: what it wants to do
 *
 * Maps the control command to the access it implies and checks it.
 *
 * Returns 0 if current has the requested access, error code otherwise
 */
static int smack_msg_queue_msgctl(struct msg_queue *msq, int cmd)
{
	int access;

	switch (cmd) {
	case IPC_STAT:
	case MSG_STAT:
		access = MAY_READ;
		break;
	case IPC_SET:
	case IPC_RMID:
		access = MAY_READWRITE;
		break;
	case IPC_INFO:
	case MSG_INFO:
		/* System level information: no object involved. */
		return 0;
	default:
		return -EINVAL;
	}

	return smk_curacc_msq(msq, access);
}
/**
 * smack_msg_queue_msgsnd - Smack access check for msg_queue
 * @msq: the object
 * @msg: unused
 * @msqflg: access requested
 *
 * Returns 0 if current has the requested access, error code otherwise
 */
static int smack_msg_queue_msgsnd(struct msg_queue *msq, struct msg_msg *msg,
				  int msqflg)
{
	return smk_curacc_msq(msq, smack_flags_to_may(msqflg));
}
/**
 * smack_msg_queue_msgrcv - Smack access check for msg_queue receive
 * @msq: the object
 * @msg: unused
 * @target: unused
 * @type: unused
 * @mode: unused
 *
 * Returns 0 if current has read and write access, error code otherwise
 */
static int smack_msg_queue_msgrcv(struct msg_queue *msq, struct msg_msg *msg,
				  struct task_struct *target, long type,
				  int mode)
{
	int rc = smk_curacc_msq(msq, MAY_READWRITE);

	return rc;
}
/**
 * smack_ipc_permission - Smack access for ipc_permission()
 * @ipp: the object permissions
 * @flag: access requested
 *
 * Returns 0 if current has the requested access, error code otherwise
 */
static int smack_ipc_permission(struct kern_ipc_perm *ipp, short flag)
{
	char *isp = ipp->security;
	int may = smack_flags_to_may(flag);
	struct smk_audit_info ad;

#ifdef CONFIG_AUDIT
	smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_IPC);
	ad.a.u.ipc_id = ipp->id;
#endif
	/* NOTE(review): with !CONFIG_AUDIT &ad is passed uninitialized;
	 * presumably smk_curacc() never reads it then — confirm. */
	return smk_curacc(isp, may, &ad);
}
/**
 * smack_ipc_getsecid - Extract smack security id
 * @ipp: the object permissions
 * @secid: where result will be saved
 */
static void smack_ipc_getsecid(struct kern_ipc_perm *ipp, u32 *secid)
{
	char *isp = ipp->security;

	*secid = smack_to_secid(isp);
}
/**
 * smack_d_instantiate - Make sure the blob is correct on an inode
 * @opt_dentry: dentry where inode will be attached
 * @inode: the object
 *
 * Set the inode's security blob if it hasn't been done already.
 */
static void smack_d_instantiate(struct dentry *opt_dentry, struct inode *inode)
{
	struct super_block *sbp;
	struct superblock_smack *sbsp;
	struct inode_smack *isp;
	char *csp = smk_of_current();
	char *fetched;
	char *final;
	char trattr[TRANS_TRUE_SIZE];
	int transflag = 0;
	struct dentry *dp;

	if (inode == NULL)
		return;

	isp = inode->i_security;

	mutex_lock(&isp->smk_lock);
	/*
	 * If the inode is already instantiated
	 * take the quick way out
	 */
	if (isp->smk_flags & SMK_INODE_INSTANT)
		goto unlockandout;

	sbp = inode->i_sb;
	sbsp = sbp->s_security;
	/*
	 * We're going to use the superblock default label
	 * if there's no label on the file.
	 */
	final = sbsp->smk_default;

	/*
	 * If this is the root inode the superblock
	 * may be in the process of initialization.
	 * If that is the case use the root value out
	 * of the superblock.
	 */
	if (opt_dentry->d_parent == opt_dentry) {
		isp->smk_inode = sbsp->smk_root;
		isp->smk_flags |= SMK_INODE_INSTANT;
		goto unlockandout;
	}

	/*
	 * This is pretty hackish.
	 * Casey says that we shouldn't have to do
	 * file system specific code, but it does help
	 * with keeping it simple.
	 */
	switch (sbp->s_magic) {
	case SMACK_MAGIC:
		/*
		 * Casey says that it's a little embarrassing
		 * that the smack file system doesn't do
		 * extended attributes.
		 */
		final = smack_known_star.smk_known;
		break;
	case PIPEFS_MAGIC:
		/*
		 * Casey says pipes are easy (?)
		 */
		final = smack_known_star.smk_known;
		break;
	case DEVPTS_SUPER_MAGIC:
		/*
		 * devpts seems content with the label of the task.
		 * Programs that change smack have to treat the
		 * pty with respect.
		 */
		final = csp;
		break;
	case SOCKFS_MAGIC:
		/*
		 * Socket access is controlled by the socket
		 * structures associated with the task involved.
		 */
		final = smack_known_star.smk_known;
		break;
	case PROC_SUPER_MAGIC:
		/*
		 * Casey says procfs appears not to care.
		 * The superblock default suffices.
		 */
		break;
	case TMPFS_MAGIC:
		/*
		 * Device labels should come from the filesystem,
		 * but watch out, because they're volitile,
		 * getting recreated on every reboot.
		 */
		final = smack_known_star.smk_known;
		/*
		 * No break.
		 *
		 * Deliberate fallthrough into the default case:
		 * if a smack value has been set we want to use it,
		 * but since tmpfs isn't giving us the opportunity
		 * to set mount options simulate setting the
		 * superblock default.
		 */
	default:
		/*
		 * This isn't an understood special case.
		 * Get the value from the xattr.
		 */

		/*
		 * UNIX domain sockets use lower level socket data.
		 */
		if (S_ISSOCK(inode->i_mode)) {
			final = smack_known_star.smk_known;
			break;
		}
		/*
		 * No xattr support means, alas, no SMACK label.
		 * Use the aforeapplied default.
		 * It would be curious if the label of the task
		 * does not match that assigned.
		 */
		if (inode->i_op->getxattr == NULL)
			break;
		/*
		 * Get the dentry for xattr.
		 */
		dp = dget(opt_dentry);
		fetched = smk_fetch(XATTR_NAME_SMACK, inode, dp);
		if (fetched != NULL) {
			final = fetched;
			/*
			 * Directories may additionally carry the
			 * transmute attribute; errors from getxattr
			 * are deliberately ignored — trattr simply
			 * won't compare equal to TRANS_TRUE.
			 */
			if (S_ISDIR(inode->i_mode)) {
				trattr[0] = '\0';
				inode->i_op->getxattr(dp,
					XATTR_NAME_SMACKTRANSMUTE,
					trattr, TRANS_TRUE_SIZE);
				if (strncmp(trattr, TRANS_TRUE,
					    TRANS_TRUE_SIZE) == 0)
					transflag = SMK_INODE_TRANSMUTE;
			}
		}
		isp->smk_task = smk_fetch(XATTR_NAME_SMACKEXEC, inode, dp);
		isp->smk_mmap = smk_fetch(XATTR_NAME_SMACKMMAP, inode, dp);

		dput(dp);
		break;
	}

	if (final == NULL)
		isp->smk_inode = csp;
	else
		isp->smk_inode = final;

	isp->smk_flags |= (SMK_INODE_INSTANT | transflag);

unlockandout:
	mutex_unlock(&isp->smk_lock);
	return;
}
/**
 * smack_getprocattr - Smack process attribute access
 * @p: the object task
 * @name: the name of the attribute in /proc/.../attr
 * @value: where to put the result
 *
 * Places a copy of the task Smack into value. The caller owns
 * the returned string.
 *
 * Returns the length of the smack label or an error code
 */
static int smack_getprocattr(struct task_struct *p, char *name, char **value)
{
	char *cp;

	/* "current" is the only attribute Smack exports. */
	if (strcmp(name, "current") != 0)
		return -EINVAL;

	cp = kstrdup(smk_of_task(task_security(p)), GFP_KERNEL);
	if (cp == NULL)
		return -ENOMEM;

	*value = cp;
	return strlen(cp);
}
/**
 * smack_setprocattr - Smack process attribute setting
 * @p: the object task
 * @name: the name of the attribute in /proc/.../attr
 * @value: the value to set
 * @size: the size of the value
 *
 * Sets the Smack value of the task. Only setting self
 * is permitted and only with privilege
 *
 * Returns the length of the smack label or an error code
 */
static int smack_setprocattr(struct task_struct *p, char *name,
			     void *value, size_t size)
{
	int rc;
	struct task_smack *tsp;
	struct task_smack *oldtsp;
	struct cred *new;
	char *newsmack;

	/*
	 * Changing another process' Smack value is too dangerous
	 * and supports no sane use case.
	 */
	if (p != current)
		return -EPERM;

	if (!capable(CAP_MAC_ADMIN))
		return -EPERM;

	if (value == NULL || size == 0 || size >= SMK_LABELLEN)
		return -EINVAL;

	if (strcmp(name, "current") != 0)
		return -EINVAL;

	newsmack = smk_import(value, size);
	if (newsmack == NULL)
		return -EINVAL;

	/*
	 * No process is ever allowed the web ("@") label.
	 */
	if (newsmack == smack_known_web.smk_known)
		return -EPERM;

	oldtsp = p->cred->security;
	new = prepare_creds();
	if (new == NULL)
		return -ENOMEM;

	tsp = new_task_smack(newsmack, oldtsp->smk_forked, GFP_KERNEL);
	if (tsp == NULL) {
		/*
		 * Was kfree(new): a bare kfree() on a prepared cred
		 * leaks the references (keys, groups, security blob)
		 * that prepare_creds() took. abort_creds() is the
		 * proper way to discard an uncommitted credential.
		 */
		abort_creds(new);
		return -ENOMEM;
	}
	rc = smk_copy_rules(&tsp->smk_rules, &oldtsp->smk_rules, GFP_KERNEL);
	if (rc != 0) {
		/*
		 * Was a bare "return rc", which leaked both the
		 * prepared credential and tsp.
		 * NOTE(review): rules smk_copy_rules() may have copied
		 * before failing remain on tsp->smk_rules and are still
		 * leaked here; releasing them needs a list-walk helper
		 * not visible in this file — confirm and fix separately.
		 */
		kfree(tsp);
		abort_creds(new);
		return rc;
	}

	new->security = tsp;
	commit_creds(new);
	return size;
}
/**
 * smack_unix_stream_connect - Smack access on UDS
 * @sock: one sock
 * @other: the other sock
 * @newsk: the new, server-side sock of the connection
 *
 * Return 0 if a subject with the smack of sock could access
 * an object with the smack of other, otherwise an error code
 */
static int smack_unix_stream_connect(struct sock *sock,
				     struct sock *other, struct sock *newsk)
{
	struct socket_smack *ssp = sock->sk_security;
	struct socket_smack *osp = other->sk_security;
	struct socket_smack *nsp = newsk->sk_security;
	struct smk_audit_info ad;
	int rc = 0;

#ifdef CONFIG_AUDIT
	struct lsm_network_audit net;

	smk_ad_init_net(&ad, __func__, LSM_AUDIT_DATA_NET, &net);
	smk_ad_setfield_u_net_sk(&ad, other);
#endif

	/* CAP_MAC_OVERRIDE bypasses the connect-time write check. */
	if (!capable(CAP_MAC_OVERRIDE))
		rc = smk_access(ssp->smk_out, osp->smk_in, MAY_WRITE, &ad);

	/*
	 * Cross reference the peer labels for SO_PEERSEC:
	 * each side records the other's outbound label as its
	 * packet label.
	 */
	if (rc == 0) {
		nsp->smk_packet = ssp->smk_out;
		ssp->smk_packet = osp->smk_out;
	}

	return rc;
}
/**
 * smack_unix_may_send - Smack access on UDS
 * @sock: one socket
 * @other: the other socket
 *
 * Return 0 if a subject with the smack of sock could access
 * an object with the smack of other, otherwise an error code
 */
static int smack_unix_may_send(struct socket *sock, struct socket *other)
{
	struct socket_smack *ssp = sock->sk->sk_security;
	struct socket_smack *osp = other->sk->sk_security;
	struct smk_audit_info ad;
	int rc = 0;

#ifdef CONFIG_AUDIT
	struct lsm_network_audit net;

	smk_ad_init_net(&ad, __func__, LSM_AUDIT_DATA_NET, &net);
	smk_ad_setfield_u_net_sk(&ad, other->sk);
#endif

	/* CAP_MAC_OVERRIDE bypasses the send-time write check. */
	if (!capable(CAP_MAC_OVERRIDE))
		rc = smk_access(ssp->smk_out, osp->smk_in, MAY_WRITE, &ad);

	return rc;
}
/**
 * smack_socket_sendmsg - Smack check based on destination host
 * @sock: the socket
 * @msg: the message
 * @size: the size of the message
 *
 * Return 0 if the current subject can write to the destination
 * host. This is only a question if the destination is a single
 * label host.
 */
static int smack_socket_sendmsg(struct socket *sock, struct msghdr *msg,
				int size)
{
	struct sockaddr_in *sip = (struct sockaddr_in *) msg->msg_name;

	/*
	 * Perfectly reasonable for this to be NULL: a connected
	 * socket carries no per-message destination address.
	 * Only IPv4 destinations are checked here.
	 */
	if (sip == NULL)
		return 0;
	if (sip->sin_family != AF_INET)
		return 0;

	return smack_netlabel_send(sock->sk, sip);
}
/**
 * smack_from_secattr - Convert a netlabel attr.mls.lvl/attr.mls.cat pair to smack
 * @sap: netlabel secattr
 * @ssp: socket security information (may be NULL)
 *
 * Returns a pointer to a Smack label found on the label list.
 */
static char *smack_from_secattr(struct netlbl_lsm_secattr *sap,
				struct socket_smack *ssp)
{
	struct smack_known *skp;
	char smack[SMK_LABELLEN];
	char *sp;
	int pcat;

	if ((sap->flags & NETLBL_SECATTR_MLS_LVL) != 0) {
		/*
		 * Looks like a CIPSO packet.
		 * If there are flags but no level netlabel isn't
		 * behaving the way we expect it to.
		 *
		 * Get the categories, if any
		 * Without guidance regarding the smack value
		 * for the packet fall back on the network
		 * ambient value.
		 */
		/* Rebuild the label's category bitmap from the
		 * netlabel catmap. */
		memset(smack, '\0', SMK_LABELLEN);
		if ((sap->flags & NETLBL_SECATTR_MLS_CAT) != 0)
			for (pcat = -1;;) {
				pcat = netlbl_secattr_catmap_walk(
					sap->attr.mls.cat, pcat + 1);
				if (pcat < 0)
					break;
				smack_catset_bit(pcat, smack);
			}
		/*
		 * If it is CIPSO using smack direct mapping
		 * we are already done. WeeHee.
		 */
		if (sap->attr.mls.lvl == smack_cipso_direct) {
			/*
			 * The label sent is usually on the label list.
			 *
			 * If it is not we may still want to allow the
			 * delivery.
			 *
			 * If the recipient is accepting all packets
			 * because it is using the star ("*") label
			 * for SMACK64IPIN provide the web ("@") label
			 * so that a directed response will succeed.
			 * This is not very correct from a MAC point
			 * of view, but gets around the problem that
			 * locking prevents adding the newly discovered
			 * label to the list.
			 * The case where the recipient is not using
			 * the star label should obviously fail.
			 * The easy way to do this is to provide the
			 * star label as the subject label.
			 */
			skp = smk_find_entry(smack);
			if (skp != NULL)
				return skp->smk_known;
			if (ssp != NULL &&
			    ssp->smk_in == smack_known_star.smk_known)
				return smack_known_web.smk_known;
			return smack_known_star.smk_known;
		}
		/*
		 * Look it up in the supplied table if it is not
		 * a direct mapping. The same star/web fallback as
		 * above applies when the lookup fails.
		 */
		sp = smack_from_cipso(sap->attr.mls.lvl, smack);
		if (sp != NULL)
			return sp;
		if (ssp != NULL && ssp->smk_in == smack_known_star.smk_known)
			return smack_known_web.smk_known;
		return smack_known_star.smk_known;
	}
	if ((sap->flags & NETLBL_SECATTR_SECID) != 0) {
		/*
		 * Looks like a fallback, which gives us a secid.
		 */
		sp = smack_from_secid(sap->attr.secid);
		/*
		 * This has got to be a bug because it is
		 * impossible to specify a fallback without
		 * specifying the label, which will ensure
		 * it has a secid, and the only way to get a
		 * secid is from a fallback.
		 */
		BUG_ON(sp == NULL);
		return sp;
	}
	/*
	 * Without guidance regarding the smack value
	 * for the packet fall back on the network
	 * ambient value.
	 */
	return smack_net_ambient;
}
/**
 * smack_socket_sock_rcv_skb - Smack packet delivery access check
 * @sk: socket
 * @skb: packet
 *
 * Returns 0 if the packet should be delivered, an error code otherwise
 */
static int smack_socket_sock_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
	struct netlbl_lsm_secattr secattr;
	struct socket_smack *ssp = sk->sk_security;
	char *csp;
	int rc;
	struct smk_audit_info ad;
#ifdef CONFIG_AUDIT
	struct lsm_network_audit net;
#endif
	if (sk->sk_family != PF_INET && sk->sk_family != PF_INET6)
		return 0;

	/*
	 * Translate what netlabel gave us. If the lookup fails,
	 * attribute the packet to the network ambient label.
	 */
	netlbl_secattr_init(&secattr);

	rc = netlbl_skbuff_getattr(skb, sk->sk_family, &secattr);
	if (rc == 0)
		csp = smack_from_secattr(&secattr, ssp);
	else
		csp = smack_net_ambient;

	netlbl_secattr_destroy(&secattr);

#ifdef CONFIG_AUDIT
	smk_ad_init_net(&ad, __func__, LSM_AUDIT_DATA_NET, &net);
	ad.a.u.net->family = sk->sk_family;
	ad.a.u.net->netif = skb->skb_iif;
	ipv4_skb_to_auditdata(skb, &ad.a, NULL);
#endif
	/*
	 * Receiving a packet requires that the other end
	 * be able to write here. Read access is not required.
	 * This is the simplist possible security model
	 * for networking.
	 */
	rc = smk_access(csp, ssp->smk_in, MAY_WRITE, &ad);
	if (rc != 0)
		/* Denied: let netlabel report the error (e.g. ICMP). */
		netlbl_skbuff_err(skb, rc, 0);
	return rc;
}
/**
 * smack_socket_getpeersec_stream - pull in packet label
 * @sock: the socket
 * @optval: user's destination
 * @optlen: size thereof
 * @len: max thereof
 *
 * returns zero on success, an error code otherwise
 */
static int smack_socket_getpeersec_stream(struct socket *sock,
					  char __user *optval,
					  int __user *optlen, unsigned len)
{
	struct socket_smack *ssp;
	char *rcp = "";
	int slen = 1;
	int rc = 0;

	ssp = sock->sk->sk_security;
	/* No peer label recorded yet: report the empty string. */
	if (ssp->smk_packet != NULL) {
		rcp = ssp->smk_packet;
		slen = strlen(rcp) + 1;
	}

	if (slen > len)
		rc = -ERANGE;
	else if (copy_to_user(optval, rcp, slen) != 0)
		rc = -EFAULT;

	/* The required length is reported even on ERANGE, so the
	 * caller can retry with a big enough buffer. */
	if (put_user(slen, optlen) != 0)
		rc = -EFAULT;

	return rc;
}
/**
 * smack_socket_getpeersec_dgram - pull in packet label
 * @sock: the peer socket
 * @skb: packet data
 * @secid: pointer to where to put the secid of the packet
 *
 * Returns 0 on success, -EINVAL if no secid could be determined
 */
static int smack_socket_getpeersec_dgram(struct socket *sock,
					 struct sk_buff *skb, u32 *secid)

{
	struct netlbl_lsm_secattr secattr;
	struct socket_smack *ssp = NULL;
	char *sp;
	int family = PF_UNSPEC;
	u32 s = 0;	/* 0 is the invalid secid */
	int rc;

	/* Prefer the family indicated by the packet itself,
	 * falling back on the socket's family. */
	if (skb != NULL) {
		if (skb->protocol == htons(ETH_P_IP))
			family = PF_INET;
		else if (skb->protocol == htons(ETH_P_IPV6))
			family = PF_INET6;
	}
	if (family == PF_UNSPEC && sock != NULL)
		family = sock->sk->sk_family;

	if (family == PF_UNIX) {
		/* UDS: the peer's outbound label is authoritative. */
		ssp = sock->sk->sk_security;
		s = smack_to_secid(ssp->smk_out);
	} else if (family == PF_INET || family == PF_INET6) {
		/*
		 * Translate what netlabel gave us.
		 */
		if (sock != NULL && sock->sk != NULL)
			ssp = sock->sk->sk_security;
		netlbl_secattr_init(&secattr);
		rc = netlbl_skbuff_getattr(skb, family, &secattr);
		if (rc == 0) {
			sp = smack_from_secattr(&secattr, ssp);
			s = smack_to_secid(sp);
		}
		netlbl_secattr_destroy(&secattr);
	}
	*secid = s;
	if (s == 0)
		return -EINVAL;
	return 0;
}
/**
 * smack_sock_graft - Initialize a newly created socket with an existing sock
 * @sk: child sock
 * @parent: parent socket (unused)
 *
 * Label the child sock's input and output with the label of the
 * current task.  Only IPv4/IPv6 socks are touched.
 */
static void smack_sock_graft(struct sock *sk, struct socket *parent)
{
	struct socket_smack *ssp;

	if (sk == NULL)
		return;
	if (sk->sk_family != PF_INET && sk->sk_family != PF_INET6)
		return;

	ssp = sk->sk_security;
	ssp->smk_in = smk_of_current();
	ssp->smk_out = ssp->smk_in;
	/* ssp->smk_packet is already set in smack_inet_csk_clone() */
}
/**
 * smack_inet_conn_request - Smack access check on connect
 * @sk: socket involved
 * @skb: packet
 * @req: the connection's request_sock
 *
 * Returns 0 if a task with the packet label could write to
 * the socket, otherwise an error code
 */
static int smack_inet_conn_request(struct sock *sk, struct sk_buff *skb,
				   struct request_sock *req)
{
	u16 family = sk->sk_family;
	struct socket_smack *ssp = sk->sk_security;
	struct netlbl_lsm_secattr secattr;
	struct sockaddr_in addr;
	struct iphdr *hdr;
	char *sp;
	int rc;
	struct smk_audit_info ad;
#ifdef CONFIG_AUDIT
	struct lsm_network_audit net;
#endif

	/* handle mapped IPv4 packets arriving via IPv6 sockets */
	if (family == PF_INET6 && skb->protocol == htons(ETH_P_IP))
		family = PF_INET;

	/* Derive the peer's Smack label from the packet's netlabel
	 * attributes; fall back to the "huh" (unknown) label. */
	netlbl_secattr_init(&secattr);
	rc = netlbl_skbuff_getattr(skb, family, &secattr);
	if (rc == 0)
		sp = smack_from_secattr(&secattr, ssp);
	else
		sp = smack_known_huh.smk_known;
	netlbl_secattr_destroy(&secattr);

#ifdef CONFIG_AUDIT
	smk_ad_init_net(&ad, __func__, LSM_AUDIT_DATA_NET, &net);
	ad.a.u.net->family = family;
	ad.a.u.net->netif = skb->skb_iif;
	ipv4_skb_to_auditdata(skb, &ad.a, NULL);
#endif
	/*
	 * Receiving a packet requires that the other end be able to write
	 * here. Read access is not required.
	 */
	rc = smk_access(sp, ssp->smk_in, MAY_WRITE, &ad);
	if (rc != 0)
		return rc;
	/*
	 * Save the peer's label in the request_sock so we can later setup
	 * smk_packet in the child socket so that SO_PEERCRED can report it.
	 */
	req->peer_secid = smack_to_secid(sp);
	/*
	 * We need to decide if we want to label the incoming connection here
	 * if we do we only need to label the request_sock and the stack will
	 * propagate the wire-label to the sock when it is created.
	 */
	hdr = ip_hdr(skb);
	addr.sin_addr.s_addr = hdr->saddr;
	rcu_read_lock();
	if (smack_host_label(&addr) == NULL) {
		/* no single-label host entry: label the request with the
		 * peer's attributes so the wire label propagates */
		rcu_read_unlock();
		netlbl_secattr_init(&secattr);
		smack_to_secattr(sp, &secattr);
		rc = netlbl_req_setattr(req, &secattr);
		netlbl_secattr_destroy(&secattr);
	} else {
		/* single-label host: strip any netlabel from the request */
		rcu_read_unlock();
		netlbl_req_delattr(req);
	}

	return rc;
}
/**
 * smack_inet_csk_clone - Copy the connection information to the new socket
 * @sk: the new socket
 * @req: the connection's request_sock
 *
 * Transfer the connection's peer label, saved in the request_sock by
 * smack_inet_conn_request(), to the newly created socket.
 */
static void smack_inet_csk_clone(struct sock *sk,
				 const struct request_sock *req)
{
	struct socket_smack *ssp = sk->sk_security;

	ssp->smk_packet = (req->peer_secid != 0)
		? smack_from_secid(req->peer_secid)
		: NULL;
}
/*
* Key management security hooks
*
* Casey has not tested key support very heavily.
* The permission check is most likely too restrictive.
* If you care about keys please have a look.
*/
#ifdef CONFIG_KEYS
/**
 * smack_key_alloc - Set the key security blob
 * @key: object
 * @cred: the credentials to use
 * @flags: unused
 *
 * No allocation required: the blob is the label of the allocating
 * task, taken from @cred.
 *
 * Returns 0
 */
static int smack_key_alloc(struct key *key, const struct cred *cred,
			   unsigned long flags)
{
	key->security = smk_of_task(cred->security);
	return 0;
}
/**
 * smack_key_free - Clear the key security blob
 * @key: the object
 *
 * Clear the blob pointer.  Nothing is freed: the label is owned by
 * the global label list, not by the key.
 */
static void smack_key_free(struct key *key)
{
	key->security = NULL;
}
/**
 * smack_key_permission - Smack access on a key
 * @key_ref: gets to the object
 * @cred: the credentials to use
 * @perm: unused
 *
 * Return 0 if the task has read and write access to the object,
 * an error code otherwise
 */
static int smack_key_permission(key_ref_t key_ref,
				const struct cred *cred, key_perm_t perm)
{
	struct key *keyp;
	struct smk_audit_info ad;
	char *tsp = smk_of_task(cred->security);

	keyp = key_ref_to_ptr(key_ref);
	if (keyp == NULL)
		return -EINVAL;
	/*
	 * If the key hasn't been initialized give it access so that
	 * it may do so.
	 */
	if (keyp->security == NULL)
		return 0;
	/*
	 * This should not occur
	 */
	if (tsp == NULL)
		return -EACCES;
#ifdef CONFIG_AUDIT
	smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_KEY);
	ad.a.u.key_struct.key = keyp->serial;
	ad.a.u.key_struct.key_desc = keyp->description;
#endif
	/* both read and write are demanded, regardless of @perm */
	return smk_access(tsp, keyp->security,
			  MAY_READWRITE, &ad);
}
#endif /* CONFIG_KEYS */
/*
* Smack Audit hooks
*
* Audit requires a unique representation of each Smack specific
* rule. This unique representation is used to distinguish the
* object to be audited from remaining kernel objects and also
* works as a glue between the audit hooks.
*
* Since repository entries are added but never deleted, we'll use
* the smack_known label address related to the given audit rule as
* the needed unique representation. This also better fits the smack
* model where nearly everything is a label.
*/
#ifdef CONFIG_AUDIT
/**
 * smack_audit_rule_init - Initialize a smack audit rule
 * @field: audit rule fields given from user-space (audit.h)
 * @op: required testing operator (=, !=, >, <, ...)
 * @rulestr: smack label to be audited
 * @vrule: pointer to save our own audit rule representation
 *
 * Prepare to audit cases where (@field @op @rulestr) is true.
 * The label to be audited is created if necessary.
 *
 * Returns 0 on success, -EINVAL for an unsupported field or operator,
 * -ENOMEM when the label cannot be imported.
 */
static int smack_audit_rule_init(u32 field, u32 op, char *rulestr, void **vrule)
{
	char **rule = (char **)vrule;
	*rule = NULL;

	if (field != AUDIT_SUBJ_USER && field != AUDIT_OBJ_USER)
		return -EINVAL;

	if (op != Audit_equal && op != Audit_not_equal)
		return -EINVAL;

	*rule = smk_import(rulestr, 0);
	/*
	 * smk_import() returns NULL on failure.  Report the error here
	 * rather than leaving a NULL rule behind, which
	 * smack_audit_rule_match() would later log as "missing rule".
	 */
	if (*rule == NULL)
		return -ENOMEM;

	return 0;
}
/**
 * smack_audit_rule_known - Distinguish Smack audit rules
 * @krule: rule of interest, in Audit kernel representation format
 *
 * Scan the rule's fields for the two field types Smack handles.
 * If one is found, the audit_rule_match hook will be called later
 * to make the final judgement.
 *
 * Returns 1 when the rule belongs to Smack, 0 otherwise.
 */
static int smack_audit_rule_known(struct audit_krule *krule)
{
	int i;

	for (i = 0; i < krule->field_count; i++) {
		struct audit_field *f = &krule->fields[i];

		if (f->type == AUDIT_SUBJ_USER || f->type == AUDIT_OBJ_USER)
			return 1;
	}

	return 0;
}
/**
 * smack_audit_rule_match - Audit given object ?
 * @secid: security id for identifying the object to test
 * @field: audit rule flags given from user-space
 * @op: required testing operator
 * @vrule: smack internal rule presentation
 * @actx: audit context associated with the check
 *
 * The core Audit hook. It's used to take the decision of
 * whether to audit or not to audit a given object.
 */
static int smack_audit_rule_match(u32 secid, u32 field, u32 op, void *vrule,
				  struct audit_context *actx)
{
	char *rule = vrule;
	char *smack;

	if (rule == NULL) {
		audit_log(actx, GFP_KERNEL, AUDIT_SELINUX_ERR,
			  "Smack: missing rule\n");
		return -ENOENT;
	}

	if (field != AUDIT_SUBJ_USER && field != AUDIT_OBJ_USER)
		return 0;

	smack = smack_from_secid(secid);

	/*
	 * No string comparison needed: matching labels share the same
	 * smack_known entry, so pointer equality is sufficient.
	 */
	switch (op) {
	case Audit_equal:
		return rule == smack;
	case Audit_not_equal:
		return rule != smack;
	default:
		return 0;
	}
}
/**
 * smack_audit_rule_free - free smack rule representation
 * @vrule: rule to be freed.
 *
 * No memory was allocated by smack_audit_rule_init() (the rule is a
 * pointer into the global label list), so there is nothing to free.
 */
static void smack_audit_rule_free(void *vrule)
{
	/* No-op */
}
#endif /* CONFIG_AUDIT */
/**
 * smack_secid_to_secctx - return the smack label for a secid
 * @secid: incoming integer
 * @secdata: destination, may be NULL if only the length is wanted
 * @seclen: how long it is
 *
 * Exists for networking code.  The returned pointer refers to the
 * global label list, so no copy is made and nothing need be freed.
 */
static int smack_secid_to_secctx(u32 secid, char **secdata, u32 *seclen)
{
	char *sp = smack_from_secid(secid);

	if (secdata)
		*secdata = sp;
	*seclen = strlen(sp);
	return 0;
}
/**
 * smack_secctx_to_secid - return the secid for a smack label
 * @secdata: smack label
 * @seclen: how long result is (unused, labels are NUL terminated)
 * @secid: outgoing integer
 *
 * Exists for audit and networking code.
 */
static int smack_secctx_to_secid(const char *secdata, u32 seclen, u32 *secid)
{
	*secid = smack_to_secid(secdata);
	return 0;
}
/**
 * smack_release_secctx - don't do anything.
 * @secdata: unused
 * @seclen: unused
 *
 * Exists to make sure nothing gets done, and properly.  The context
 * handed out by smack_secid_to_secctx() is not a copy, so it must
 * not be freed here.
 */
static void smack_release_secctx(char *secdata, u32 seclen)
{
}
/* Apply a security context received from the filesystem to @inode by
 * setting its Smack attribute. */
static int smack_inode_notifysecctx(struct inode *inode, void *ctx, u32 ctxlen)
{
	return smack_inode_setsecurity(inode, XATTR_SMACK_SUFFIX, ctx, ctxlen, 0);
}
/* Write a security context to @dentry's Smack xattr, bypassing the
 * permission checks (the caller has already vetted the operation). */
static int smack_inode_setsecctx(struct dentry *dentry, void *ctx, u32 ctxlen)
{
	return __vfs_setxattr_noperm(dentry, XATTR_NAME_SMACK, ctx, ctxlen, 0);
}
/* Fetch @inode's Smack label into *@ctx and its length into *@ctxlen.
 * Returns 0 on success or the negative error from
 * smack_inode_getsecurity(). */
static int smack_inode_getsecctx(struct inode *inode, void **ctx, u32 *ctxlen)
{
	int len = smack_inode_getsecurity(inode, XATTR_SMACK_SUFFIX,
					  ctx, true);

	if (len < 0)
		return len;

	*ctxlen = len;
	return 0;
}
/*
 * The Smack hook table registered with the LSM framework.  Hooks not
 * listed here fall back to the framework defaults.
 */
struct security_operations smack_ops = {
	.name =				"smack",

	.ptrace_access_check =		smack_ptrace_access_check,
	.ptrace_traceme =		smack_ptrace_traceme,
	.syslog = 			smack_syslog,

	/* superblock hooks */
	.sb_alloc_security = 		smack_sb_alloc_security,
	.sb_free_security = 		smack_sb_free_security,
	.sb_copy_data = 		smack_sb_copy_data,
	.sb_kern_mount = 		smack_sb_kern_mount,
	.sb_statfs = 			smack_sb_statfs,
	.sb_mount = 			smack_sb_mount,
	.sb_umount = 			smack_sb_umount,

	.bprm_set_creds =		smack_bprm_set_creds,
	.bprm_committing_creds =	smack_bprm_committing_creds,
	.bprm_secureexec =		smack_bprm_secureexec,

	/* inode hooks */
	.inode_alloc_security = 	smack_inode_alloc_security,
	.inode_free_security = 		smack_inode_free_security,
	.inode_init_security = 		smack_inode_init_security,
	.inode_link = 			smack_inode_link,
	.inode_unlink = 		smack_inode_unlink,
	.inode_rmdir = 			smack_inode_rmdir,
	.inode_rename = 		smack_inode_rename,
	.inode_permission = 		smack_inode_permission,
	.inode_setattr = 		smack_inode_setattr,
	.inode_getattr = 		smack_inode_getattr,
	.inode_setxattr = 		smack_inode_setxattr,
	.inode_post_setxattr = 		smack_inode_post_setxattr,
	.inode_getxattr = 		smack_inode_getxattr,
	.inode_removexattr = 		smack_inode_removexattr,
	.inode_getsecurity = 		smack_inode_getsecurity,
	.inode_setsecurity = 		smack_inode_setsecurity,
	.inode_listsecurity = 		smack_inode_listsecurity,
	.inode_getsecid =		smack_inode_getsecid,

	/* file hooks */
	.file_permission = 		smack_file_permission,
	.file_alloc_security = 		smack_file_alloc_security,
	.file_free_security = 		smack_file_free_security,
	.file_ioctl = 			smack_file_ioctl,
	.file_lock = 			smack_file_lock,
	.file_fcntl = 			smack_file_fcntl,
	.file_mmap =			smack_file_mmap,
	.file_set_fowner = 		smack_file_set_fowner,
	.file_send_sigiotask = 		smack_file_send_sigiotask,
	.file_receive = 		smack_file_receive,

	.dentry_open =			smack_dentry_open,

	/* credential hooks */
	.cred_alloc_blank =		smack_cred_alloc_blank,
	.cred_free =			smack_cred_free,
	.cred_prepare =			smack_cred_prepare,
	.cred_transfer =		smack_cred_transfer,
	.kernel_act_as =		smack_kernel_act_as,
	.kernel_create_files_as =	smack_kernel_create_files_as,

	/* task hooks */
	.task_setpgid = 		smack_task_setpgid,
	.task_getpgid = 		smack_task_getpgid,
	.task_getsid = 			smack_task_getsid,
	.task_getsecid = 		smack_task_getsecid,
	.task_setnice = 		smack_task_setnice,
	.task_setioprio = 		smack_task_setioprio,
	.task_getioprio = 		smack_task_getioprio,
	.task_setscheduler = 		smack_task_setscheduler,
	.task_getscheduler = 		smack_task_getscheduler,
	.task_movememory = 		smack_task_movememory,
	.task_kill = 			smack_task_kill,
	.task_wait = 			smack_task_wait,
	.task_to_inode = 		smack_task_to_inode,

	/* System V IPC hooks */
	.ipc_permission = 		smack_ipc_permission,
	.ipc_getsecid =			smack_ipc_getsecid,

	.msg_msg_alloc_security = 	smack_msg_msg_alloc_security,
	.msg_msg_free_security = 	smack_msg_msg_free_security,

	.msg_queue_alloc_security = 	smack_msg_queue_alloc_security,
	.msg_queue_free_security = 	smack_msg_queue_free_security,
	.msg_queue_associate = 		smack_msg_queue_associate,
	.msg_queue_msgctl = 		smack_msg_queue_msgctl,
	.msg_queue_msgsnd = 		smack_msg_queue_msgsnd,
	.msg_queue_msgrcv = 		smack_msg_queue_msgrcv,

	.shm_alloc_security = 		smack_shm_alloc_security,
	.shm_free_security = 		smack_shm_free_security,
	.shm_associate = 		smack_shm_associate,
	.shm_shmctl = 			smack_shm_shmctl,
	.shm_shmat = 			smack_shm_shmat,

	.sem_alloc_security = 		smack_sem_alloc_security,
	.sem_free_security = 		smack_sem_free_security,
	.sem_associate = 		smack_sem_associate,
	.sem_semctl = 			smack_sem_semctl,
	.sem_semop = 			smack_sem_semop,

	.d_instantiate = 		smack_d_instantiate,

	.getprocattr = 			smack_getprocattr,
	.setprocattr = 			smack_setprocattr,

	/* networking hooks */
	.unix_stream_connect = 		smack_unix_stream_connect,
	.unix_may_send = 		smack_unix_may_send,

	.socket_post_create = 		smack_socket_post_create,
	.socket_connect =		smack_socket_connect,
	.socket_sendmsg =		smack_socket_sendmsg,
	.socket_sock_rcv_skb = 		smack_socket_sock_rcv_skb,
	.socket_getpeersec_stream =	smack_socket_getpeersec_stream,
	.socket_getpeersec_dgram =	smack_socket_getpeersec_dgram,
	.sk_alloc_security = 		smack_sk_alloc_security,
	.sk_free_security = 		smack_sk_free_security,
	.sock_graft = 			smack_sock_graft,
	.inet_conn_request = 		smack_inet_conn_request,
	.inet_csk_clone =		smack_inet_csk_clone,

 /* key management security hooks */
#ifdef CONFIG_KEYS
	.key_alloc = 			smack_key_alloc,
	.key_free = 			smack_key_free,
	.key_permission = 		smack_key_permission,
#endif /* CONFIG_KEYS */

 /* Audit hooks */
#ifdef CONFIG_AUDIT
	.audit_rule_init =		smack_audit_rule_init,
	.audit_rule_known =		smack_audit_rule_known,
	.audit_rule_match =		smack_audit_rule_match,
	.audit_rule_free =		smack_audit_rule_free,
#endif /* CONFIG_AUDIT */

	.secid_to_secctx = 		smack_secid_to_secctx,
	.secctx_to_secid = 		smack_secctx_to_secid,
	.release_secctx = 		smack_release_secctx,
	.inode_notifysecctx =		smack_inode_notifysecctx,
	.inode_setsecctx =		smack_inode_setsecctx,
	.inode_getsecctx =		smack_inode_getsecctx,
};
/*
 * Initialize the built-in Smack labels (huh, hat, star, floor,
 * invalid, web): their CIPSO locks, their rule-list locks and lists,
 * and finally link them all onto the global smack_known_list.
 */
static __init void init_smack_known_list(void)
{
	/*
	 * Initialize CIPSO locks
	 */
	spin_lock_init(&smack_known_huh.smk_cipsolock);
	spin_lock_init(&smack_known_hat.smk_cipsolock);
	spin_lock_init(&smack_known_star.smk_cipsolock);
	spin_lock_init(&smack_known_floor.smk_cipsolock);
	spin_lock_init(&smack_known_invalid.smk_cipsolock);
	spin_lock_init(&smack_known_web.smk_cipsolock);
	/*
	 * Initialize rule list locks
	 */
	mutex_init(&smack_known_huh.smk_rules_lock);
	mutex_init(&smack_known_hat.smk_rules_lock);
	mutex_init(&smack_known_floor.smk_rules_lock);
	mutex_init(&smack_known_star.smk_rules_lock);
	mutex_init(&smack_known_invalid.smk_rules_lock);
	mutex_init(&smack_known_web.smk_rules_lock);
	/*
	 * Initialize rule lists
	 */
	INIT_LIST_HEAD(&smack_known_huh.smk_rules);
	INIT_LIST_HEAD(&smack_known_hat.smk_rules);
	INIT_LIST_HEAD(&smack_known_star.smk_rules);
	INIT_LIST_HEAD(&smack_known_floor.smk_rules);
	INIT_LIST_HEAD(&smack_known_invalid.smk_rules);
	INIT_LIST_HEAD(&smack_known_web.smk_rules);
	/*
	 * Create the known labels list
	 */
	list_add(&smack_known_huh.list, &smack_known_list);
	list_add(&smack_known_hat.list, &smack_known_list);
	list_add(&smack_known_star.list, &smack_known_list);
	list_add(&smack_known_floor.list, &smack_known_list);
	list_add(&smack_known_invalid.list, &smack_known_list);
	list_add(&smack_known_web.list, &smack_known_list);
}
/**
 * smack_init - initialize the smack system
 *
 * Returns 0 on success, -ENOMEM if the initial task blob cannot be
 * allocated.  Panics if LSM registration fails.
 */
static __init int smack_init(void)
{
	struct cred *cred;
	struct task_smack *tsp;

	if (!security_module_enable(&smack_ops))
		return 0;

	/* The initial task runs with the floor label in and out. */
	tsp = new_task_smack(smack_known_floor.smk_known,
				smack_known_floor.smk_known, GFP_KERNEL);
	if (tsp == NULL)
		return -ENOMEM;

	printk(KERN_INFO "Smack:  Initializing.\n");

	/*
	 * Set the security state for the initial task.
	 */
	cred = (struct cred *) current->cred;
	cred->security = tsp;

	/* initialize the smack_known_list */
	init_smack_known_list();

	/*
	 * Register with LSM
	 */
	if (register_security(&smack_ops))
		panic("smack: Unable to register with kernel.\n");

	return 0;
}
/*
* Smack requires early initialization in order to label
* all processes and objects when they are created.
*/
security_initcall(smack_init);
| gpl-2.0 |
santod/android_GE_kernel_htc_m7vzw | arch/powerpc/platforms/83xx/suspend.c | 4491 | 9234 | /*
* MPC83xx suspend support
*
* Author: Scott Wood <scottwood@freescale.com>
*
* Copyright (c) 2006-2007 Freescale Semiconductor, Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
* by the Free Software Foundation.
*/
#include <linux/init.h>
#include <linux/pm.h>
#include <linux/types.h>
#include <linux/ioport.h>
#include <linux/interrupt.h>
#include <linux/wait.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/suspend.h>
#include <linux/fsl_devices.h>
#include <linux/of_platform.h>
#include <linux/export.h>
#include <asm/reg.h>
#include <asm/io.h>
#include <asm/time.h>
#include <asm/mpc6xx.h>
#include <asm/switch_to.h>
#include <sysdev/fsl_soc.h>
#define PMCCR1_NEXT_STATE 0x0C /* Next state for power management */
#define PMCCR1_NEXT_STATE_SHIFT 2
#define PMCCR1_CURR_STATE 0x03 /* Current state for power management*/
#define IMMR_SYSCR_OFFSET 0x100
#define IMMR_RCW_OFFSET 0x900
#define RCW_PCI_HOST 0x80000000
void mpc83xx_enter_deep_sleep(phys_addr_t immrbase);
/* Layout of the MPC83xx power management controller register block. */
struct mpc83xx_pmc {
	u32 config;
#define PMCCR_DLPEN 2 /* DDR SDRAM low power enable */
#define PMCCR_SLPEN 1 /* System low power enable */

	u32 event;
	u32 mask;
/* All but PMCI are deep-sleep only */
#define PMCER_GPIO   0x100
#define PMCER_PCI    0x080
#define PMCER_USB    0x040
#define PMCER_ETSEC1 0x020
#define PMCER_ETSEC2 0x010
#define PMCER_TIMER  0x008
#define PMCER_INT1   0x004
#define PMCER_INT2   0x002
#define PMCER_PMCI   0x001
#define PMCER_ALL    0x1FF

	/* deep-sleep only */
	u32 config1;
#define PMCCR1_USE_STATE  0x80000000
#define PMCCR1_PME_EN     0x00000080
#define PMCCR1_ASSERT_PME 0x00000040
#define PMCCR1_POWER_OFF  0x00000020

	/* deep-sleep only */
	u32 config2;
};
/* Reset configuration word registers. */
struct mpc83xx_rcw {
	u32 rcwlr;
	u32 rcwhr;
};

/* Clock module registers. */
struct mpc83xx_clock {
	u32 spmr;
	u32 occr;
	u32 sccr;
};

/* System configuration registers (big-endian, memory mapped). */
struct mpc83xx_syscr {
	__be32 sgprl;
	__be32 sgprh;
	__be32 spridr;
	__be32 :32;
	__be32 spcr;
	__be32 sicrl;
	__be32 sicrh;
};

/* Registers saved across deep sleep and restored on resume. */
struct mpc83xx_saved {
	u32 sicrl;
	u32 sicrh;
	u32 sccr;
};

/* Per-compatible capabilities, attached to the of_device_id table. */
struct pmc_type {
	int has_deep_sleep;
};
/* Driver-wide state, populated once in pmc_probe(). */
static struct platform_device *pmc_dev;
static int has_deep_sleep, deep_sleeping;
static int pmc_irq;
static struct mpc83xx_pmc __iomem *pmc_regs;
static struct mpc83xx_clock __iomem *clock_regs;
static struct mpc83xx_syscr __iomem *syscr_regs;
static struct mpc83xx_saved saved_regs;
static int is_pci_agent, wake_from_pci;
static phys_addr_t immrbase;
static int pci_pm_state;		/* last PM state requested by the PCI host */
static DECLARE_WAIT_QUEUE_HEAD(agent_wq);
/* Report whether a deep-sleep transition is in progress; exported for
 * drivers that must behave differently across deep sleep. */
int fsl_deep_sleep(void)
{
	return deep_sleeping;
}
EXPORT_SYMBOL(fsl_deep_sleep);
/*
 * When acting as a PCI agent, mirror the host-requested power state
 * (PMCCR1 "next state" field) into the "current state" field and wake
 * the agent thread so it can carry out the transition.
 *
 * Returns 1 if a state change was latched, 0 otherwise.
 */
static int mpc83xx_change_state(void)
{
	u32 curr_state;
	u32 reg_cfg1 = in_be32(&pmc_regs->config1);

	if (is_pci_agent) {
		pci_pm_state = (reg_cfg1 & PMCCR1_NEXT_STATE) >>
		               PMCCR1_NEXT_STATE_SHIFT;
		curr_state = reg_cfg1 & PMCCR1_CURR_STATE;

		if (curr_state != pci_pm_state) {
			reg_cfg1 &= ~PMCCR1_CURR_STATE;
			reg_cfg1 |= pci_pm_state;
			out_be32(&pmc_regs->config1, reg_cfg1);

			wake_up(&agent_wq);
			return 1;
		}
	}

	return 0;
}
/*
 * PMC interrupt handler: latch any host-requested PCI power state
 * change and acknowledge pending PMC events by writing them back.
 */
static irqreturn_t pmc_irq_handler(int irq, void *dev_id)
{
	int handled = 0;
	u32 event = in_be32(&pmc_regs->event);

	if (mpc83xx_change_state())
		handled = 1;

	if (event) {
		/* acknowledge the events we observed */
		out_be32(&pmc_regs->event, event);
		handled = 1;
	}

	return handled ? IRQ_HANDLED : IRQ_NONE;
}
/* Restore the SICRL/SICRH/SCCR values saved before deep sleep. */
static void mpc83xx_suspend_restore_regs(void)
{
	out_be32(&syscr_regs->sicrl, saved_regs.sicrl);
	out_be32(&syscr_regs->sicrh, saved_regs.sicrh);
	out_be32(&clock_regs->sccr, saved_regs.sccr);
}
/* Save SICRL/SICRH/SCCR, which are lost across deep sleep. */
static void mpc83xx_suspend_save_regs(void)
{
	saved_regs.sicrl = in_be32(&syscr_regs->sicrl);
	saved_regs.sicrh = in_be32(&syscr_regs->sicrh);
	saved_regs.sccr = in_be32(&clock_regs->sccr);
}
/*
 * Enter standby or deep sleep.  Deep sleep powers off the core
 * (requiring boot firmware to resume) so registers are saved first;
 * standby just idles the core via mpc6xx_enter_standby().
 *
 * Returns 0 on success, -EAGAIN if a racing PCI state change means
 * sleep should not be entered.
 */
static int mpc83xx_suspend_enter(suspend_state_t state)
{
	int ret = -EAGAIN;

	/* Don't go to sleep if there's a race where pci_pm_state changes
	 * between the agent thread checking it and the PM code disabling
	 * interrupts.
	 */
	if (wake_from_pci) {
		if (pci_pm_state != (deep_sleeping ? 3 : 2))
			goto out;

		out_be32(&pmc_regs->config1,
		         in_be32(&pmc_regs->config1) | PMCCR1_PME_EN);
	}

	/* Put the system into low-power mode and the RAM
	 * into self-refresh mode once the core goes to
	 * sleep.
	 */

	out_be32(&pmc_regs->config, PMCCR_SLPEN | PMCCR_DLPEN);

	/* If it has deep sleep (i.e. it's an 831x or compatible),
	 * disable power to the core upon entering sleep mode.  This will
	 * require going through the boot firmware upon a wakeup event.
	 */

	if (deep_sleeping) {
		mpc83xx_suspend_save_regs();

		out_be32(&pmc_regs->mask, PMCER_ALL);

		out_be32(&pmc_regs->config1,
		         in_be32(&pmc_regs->config1) | PMCCR1_POWER_OFF);

		enable_kernel_fp();

		mpc83xx_enter_deep_sleep(immrbase);

		out_be32(&pmc_regs->config1,
		         in_be32(&pmc_regs->config1) & ~PMCCR1_POWER_OFF);

		/* back from deep sleep: leave only PMCI events unmasked */
		out_be32(&pmc_regs->mask, PMCER_PMCI);

		mpc83xx_suspend_restore_regs();
	} else {
		out_be32(&pmc_regs->mask, PMCER_PMCI);

		mpc6xx_enter_standby();
	}

	ret = 0;

out:
	/* always clear PME enable again on the way out */
	out_be32(&pmc_regs->config1,
	         in_be32(&pmc_regs->config1) & ~PMCCR1_PME_EN);

	return ret;
}
/* Suspend sequence finished (or aborted): clear the deep-sleep flag. */
static void mpc83xx_suspend_end(void)
{
	deep_sleeping = 0;
}
/* Only standby and mem (deep sleep) states are supported. */
static int mpc83xx_suspend_valid(suspend_state_t state)
{
	return state == PM_SUSPEND_STANDBY || state == PM_SUSPEND_MEM;
}
/*
 * Record whether the upcoming suspend is a deep sleep: PM_SUSPEND_MEM
 * maps to deep sleep when the hardware supports it, PM_SUSPEND_STANDBY
 * never does.
 */
static int mpc83xx_suspend_begin(suspend_state_t state)
{
	if (state == PM_SUSPEND_STANDBY) {
		deep_sleeping = 0;
		return 0;
	}

	if (state == PM_SUSPEND_MEM) {
		if (has_deep_sleep)
			deep_sleeping = 1;
		return 0;
	}

	return -EINVAL;
}
/*
 * Kernel thread run when acting as a PCI agent: waits for the host to
 * request a low-power state (pci_pm_state >= 2, set from the PMC
 * interrupt) and then drives the corresponding system suspend.
 */
static int agent_thread_fn(void *data)
{
	while (1) {
		wait_event_interruptible(agent_wq, pci_pm_state >= 2);
		try_to_freeze();

		/* re-check after signals/freezing before acting */
		if (signal_pending(current) || pci_pm_state < 2)
			continue;

		/* With a preemptible kernel (or SMP), this could race with
		 * a userspace-driven suspend request.  It's probably best
		 * to avoid mixing the two with such a configuration (or
		 * else fix it by adding a mutex to state_store that we can
		 * synchronize with).
		 */

		wake_from_pci = 1;

		pm_suspend(pci_pm_state == 3 ? PM_SUSPEND_MEM :
		           PM_SUSPEND_STANDBY);

		wake_from_pci = 0;
	}

	return 0;
}
/* Configure PMC for PCI-agent operation and start the agent thread. */
static void mpc83xx_set_agent(void)
{
	out_be32(&pmc_regs->config1, PMCCR1_USE_STATE);
	out_be32(&pmc_regs->mask, PMCER_PMCI);

	kthread_run(agent_thread_fn, NULL, "PCI power mgt");
}
static int mpc83xx_is_pci_agent(void)
{
struct mpc83xx_rcw __iomem *rcw_regs;
int ret;
rcw_regs = ioremap(get_immrbase() + IMMR_RCW_OFFSET,
sizeof(struct mpc83xx_rcw));
if (!rcw_regs)
return -ENOMEM;
ret = !(in_be32(&rcw_regs->rcwhr) & RCW_PCI_HOST);
iounmap(rcw_regs);
return ret;
}
static const struct platform_suspend_ops mpc83xx_suspend_ops = {
.valid = mpc83xx_suspend_valid,
.begin = mpc83xx_suspend_begin,
.enter = mpc83xx_suspend_enter,
.end = mpc83xx_suspend_end,
};
static struct of_device_id pmc_match[];
static int pmc_probe(struct platform_device *ofdev)
{
const struct of_device_id *match;
struct device_node *np = ofdev->dev.of_node;
struct resource res;
struct pmc_type *type;
int ret = 0;
match = of_match_device(pmc_match, &ofdev->dev);
if (!match)
return -EINVAL;
type = match->data;
if (!of_device_is_available(np))
return -ENODEV;
has_deep_sleep = type->has_deep_sleep;
immrbase = get_immrbase();
pmc_dev = ofdev;
is_pci_agent = mpc83xx_is_pci_agent();
if (is_pci_agent < 0)
return is_pci_agent;
ret = of_address_to_resource(np, 0, &res);
if (ret)
return -ENODEV;
pmc_irq = irq_of_parse_and_map(np, 0);
if (pmc_irq != NO_IRQ) {
ret = request_irq(pmc_irq, pmc_irq_handler, IRQF_SHARED,
"pmc", ofdev);
if (ret)
return -EBUSY;
}
pmc_regs = ioremap(res.start, sizeof(struct mpc83xx_pmc));
if (!pmc_regs) {
ret = -ENOMEM;
goto out;
}
ret = of_address_to_resource(np, 1, &res);
if (ret) {
ret = -ENODEV;
goto out_pmc;
}
clock_regs = ioremap(res.start, sizeof(struct mpc83xx_pmc));
if (!clock_regs) {
ret = -ENOMEM;
goto out_pmc;
}
if (has_deep_sleep) {
syscr_regs = ioremap(immrbase + IMMR_SYSCR_OFFSET,
sizeof(*syscr_regs));
if (!syscr_regs) {
ret = -ENOMEM;
goto out_syscr;
}
}
if (is_pci_agent)
mpc83xx_set_agent();
suspend_set_ops(&mpc83xx_suspend_ops);
return 0;
out_syscr:
iounmap(clock_regs);
out_pmc:
iounmap(pmc_regs);
out:
if (pmc_irq != NO_IRQ)
free_irq(pmc_irq, ofdev);
return ret;
}
/* The PMC driver cannot be unbound once probed. */
static int pmc_remove(struct platform_device *ofdev)
{
	return -EPERM;
}
/* note: the stray ';' that followed the closing brace was removed */
/* Capability data referenced by pmc_match[]: [0] = 831x-style parts
 * with deep sleep, [1] = 834x-style parts without. */
static struct pmc_type pmc_types[] = {
	{
		.has_deep_sleep = 1,
	},
	{
		.has_deep_sleep = 0,
	}
};
/* Device-tree match table; .data points into pmc_types[] above. */
static struct of_device_id pmc_match[] = {
	{
		.compatible = "fsl,mpc8313-pmc",
		.data = &pmc_types[0],
	},
	{
		.compatible = "fsl,mpc8349-pmc",
		.data = &pmc_types[1],
	},
	{}
};
/* Platform driver glue for the MPC83xx PMC. */
static struct platform_driver pmc_driver = {
	.driver = {
		.name = "mpc83xx-pmc",
		.owner = THIS_MODULE,
		.of_match_table = pmc_match,
	},
	.probe = pmc_probe,
	.remove = pmc_remove
};
/* Register the PMC platform driver at module init. */
static int pmc_init(void)
{
	return platform_driver_register(&pmc_driver);
}
module_init(pmc_init);
| gpl-2.0 |
SomethingExplosive/android_kernel_lge_mako | arch/s390/mm/gup.c | 4491 | 5622 | /*
* Lockless get_user_pages_fast for s390
*
* Copyright IBM Corp. 2010
* Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
*/
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/vmstat.h>
#include <linux/pagemap.h>
#include <linux/rwsem.h>
#include <asm/pgtable.h>
/*
* The performance critical leaf functions are made noinline otherwise gcc
* inlines everything into a single function which results in too much
* register pressure.
*/
/*
 * Walk the PTEs covering [addr, end) without taking locks.  Each page
 * gets a speculative reference; if the PTE changed under us the
 * reference is dropped and we bail out so the caller falls back to
 * the slow path.
 *
 * Returns 1 on success (all pages pinned into @pages, *nr advanced),
 * 0 to request the slow path.
 */
static inline int gup_pte_range(pmd_t *pmdp, pmd_t pmd, unsigned long addr,
		unsigned long end, int write, struct page **pages, int *nr)
{
	unsigned long mask;
	pte_t *ptep, pte;
	struct page *page;

	/* reject invalid/special PTEs, and read-only ones on a write gup */
	mask = (write ? _PAGE_RO : 0) | _PAGE_INVALID | _PAGE_SPECIAL;

	ptep = ((pte_t *) pmd_deref(pmd)) + pte_index(addr);
	do {
		pte = *ptep;
		barrier();	/* re-read pte, don't let it be cached */
		if ((pte_val(pte) & mask) != 0)
			return 0;
		VM_BUG_ON(!pfn_valid(pte_pfn(pte)));
		page = pte_page(pte);
		if (!page_cache_get_speculative(page))
			return 0;
		/* verify the PTE didn't change while we took the ref */
		if (unlikely(pte_val(pte) != pte_val(*ptep))) {
			put_page(page);
			return 0;
		}
		pages[*nr] = page;
		(*nr)++;

	} while (ptep++, addr += PAGE_SIZE, addr != end);

	return 1;
}
/*
 * Pin the subpages of a huge PMD mapping covering [addr, end).
 * References are taken on the head page in one batch; if the PMD
 * changed underneath, everything is rolled back and 0 is returned to
 * request the slow path.
 */
static inline int gup_huge_pmd(pmd_t *pmdp, pmd_t pmd, unsigned long addr,
		unsigned long end, int write, struct page **pages, int *nr)
{
	unsigned long mask, result;
	struct page *head, *page, *tail;
	int refs;

	/* segment entry must be valid, and writable on a write gup */
	result = write ? 0 : _SEGMENT_ENTRY_RO;
	mask = result | _SEGMENT_ENTRY_INV;
	if ((pmd_val(pmd) & mask) != result)
		return 0;
	VM_BUG_ON(!pfn_valid(pmd_val(pmd) >> PAGE_SHIFT));

	/* collect the subpages first, then take all refs at once */
	refs = 0;
	head = pmd_page(pmd);
	page = head + ((addr & ~PMD_MASK) >> PAGE_SHIFT);
	tail = page;
	do {
		VM_BUG_ON(compound_head(page) != head);
		pages[*nr] = page;
		(*nr)++;
		page++;
		refs++;
	} while (addr += PAGE_SIZE, addr != end);

	if (!page_cache_add_speculative(head, refs)) {
		*nr -= refs;
		return 0;
	}

	/* the PMD changed while we took the refs: undo everything */
	if (unlikely(pmd_val(pmd) != pmd_val(*pmdp))) {
		*nr -= refs;
		while (refs--)
			put_page(head);
		return 0;
	}

	/*
	 * Any tail page need their mapcount reference taken before we
	 * return.
	 */
	while (refs--) {
		if (PageTail(tail))
			get_huge_page_tail(tail);
		tail++;
	}

	return 1;
}
/*
 * Walk the PMDs under @pud for [addr, end), dispatching to the huge
 * or normal PTE handler.  Returns 1 on success, 0 to fall back to
 * the slow path.
 */
static inline int gup_pmd_range(pud_t *pudp, pud_t pud, unsigned long addr,
		unsigned long end, int write, struct page **pages, int *nr)
{
	unsigned long next;
	pmd_t *pmdp, pmd;

	/* on 31-bit the PUD is folded into the PMD level */
	pmdp = (pmd_t *) pudp;
#ifdef CONFIG_64BIT
	if ((pud_val(pud) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3)
		pmdp = (pmd_t *) pud_deref(pud);
	pmdp += pmd_index(addr);
#endif
	do {
		pmd = *pmdp;
		barrier();	/* snapshot the PMD before using it */
		next = pmd_addr_end(addr, end);
		if (pmd_none(pmd))
			return 0;
		if (unlikely(pmd_huge(pmd))) {
			if (!gup_huge_pmd(pmdp, pmd, addr, next,
					  write, pages, nr))
				return 0;
		} else if (!gup_pte_range(pmdp, pmd, addr, next,
					  write, pages, nr))
			return 0;
	} while (pmdp++, addr = next, addr != end);

	return 1;
}
/*
 * Walk the PUDs under @pgd for [addr, end).  Returns 1 on success,
 * 0 to fall back to the slow path.
 */
static inline int gup_pud_range(pgd_t *pgdp, pgd_t pgd, unsigned long addr,
		unsigned long end, int write, struct page **pages, int *nr)
{
	unsigned long next;
	pud_t *pudp, pud;

	/* on 31-bit the PGD is folded into the PUD level */
	pudp = (pud_t *) pgdp;
#ifdef CONFIG_64BIT
	if ((pgd_val(pgd) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R2)
		pudp = (pud_t *) pgd_deref(pgd);
	pudp += pud_index(addr);
#endif
	do {
		pud = *pudp;
		barrier();	/* snapshot the PUD before using it */
		next = pud_addr_end(addr, end);
		if (pud_none(pud))
			return 0;
		if (!gup_pmd_range(pudp, pud, addr, next, write, pages, nr))
			return 0;
	} while (pudp++, addr = next, addr != end);

	return 1;
}
/**
 * get_user_pages_fast() - pin user pages in memory
 * @start:	starting user address
 * @nr_pages:	number of pages from start to pin
 * @write:	whether pages will be written to
 * @pages:	array that receives pointers to the pages pinned.
 *		Should be at least nr_pages long.
 *
 * Attempt to pin user pages in memory without taking mm->mmap_sem.
 * If not successful, it will fall back to taking the lock and
 * calling get_user_pages().
 *
 * Returns number of pages pinned. This may be fewer than the number
 * requested. If nr_pages is 0 or negative, returns 0. If no pages
 * were pinned, returns -errno.
 */
int get_user_pages_fast(unsigned long start, int nr_pages, int write,
			struct page **pages)
{
	struct mm_struct *mm = current->mm;
	unsigned long addr, len, end;
	unsigned long next;
	pgd_t *pgdp, pgd;
	int nr = 0;

	start &= PAGE_MASK;
	addr = start;
	len = (unsigned long) nr_pages << PAGE_SHIFT;
	end = start + len;
	/* overflow past the end of the address space: take the slow path */
	if (end < start)
		goto slow_irqon;

	/*
	 * local_irq_disable() doesn't prevent pagetable teardown, but does
	 * prevent the pagetables from being freed on s390.
	 *
	 * So long as we atomically load page table pointers versus teardown,
	 * we can follow the address down to the the page and take a ref on it.
	 */
	local_irq_disable();
	pgdp = pgd_offset(mm, addr);
	do {
		pgd = *pgdp;
		barrier();	/* snapshot the PGD before descending */
		next = pgd_addr_end(addr, end);
		if (pgd_none(pgd))
			goto slow;
		if (!gup_pud_range(pgdp, pgd, addr, next, write, pages, &nr))
			goto slow;
	} while (pgdp++, addr = next, addr != end);
	local_irq_enable();

	/* fast path succeeded for the entire range */
	VM_BUG_ON(nr != (end - start) >> PAGE_SHIFT);
	return nr;

	/* slow path: pin whatever remains with the regular, locked gup.
	 * The inner block exists only to scope 'ret' after the labels. */
	{
		int ret;
slow:
		local_irq_enable();
slow_irqon:
		/* Try to get the remaining pages with get_user_pages */
		start += nr << PAGE_SHIFT;
		pages += nr;

		down_read(&mm->mmap_sem);
		ret = get_user_pages(current, mm, start,
			(end - start) >> PAGE_SHIFT, write, 0, pages, NULL);
		up_read(&mm->mmap_sem);

		/* Have to be a bit careful with return values */
		if (nr > 0) {
			if (ret < 0)
				ret = nr;	/* report the fast-path pins */
			else
				ret += nr;
		}

		return ret;
	}
}
| gpl-2.0 |
genesi/linux-testing | arch/x86/kernel/cpu/hypervisor.c | 5259 | 2060 | /*
* Common hypervisor code
*
* Copyright (C) 2008, VMware, Inc.
* Author : Alok N Kataria <akataria@vmware.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
* NON INFRINGEMENT. See the GNU General Public License for more
* details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
*/
#include <linux/module.h>
#include <asm/processor.h>
#include <asm/hypervisor.h>
/*
 * Hypervisor detect order. This is specified explicitly here because
 * some hypervisors might implement compatibility modes for other
 * hypervisors and therefore need to be detected in specific sequence.
 */
static const __initconst struct hypervisor_x86 * const hypervisors[] =
{
#ifdef CONFIG_XEN_PVHVM
	&x86_hyper_xen_hvm,
#endif
	&x86_hyper_vmware,
	&x86_hyper_ms_hyperv,
};

/* The detected hypervisor, or NULL when running on bare metal. */
const struct hypervisor_x86 *x86_hyper;
EXPORT_SYMBOL(x86_hyper);
/*
 * Walk the hypervisor table in order and record the first interface whose
 * ->detect() callback reports a hit in x86_hyper.  Leaves x86_hyper NULL
 * when no hypervisor is detected.
 */
static inline void __init
detect_hypervisor_vendor(void)
{
	const struct hypervisor_x86 * const *probe;

	for (probe = hypervisors;
	     probe < hypervisors + ARRAY_SIZE(hypervisors); probe++) {
		const struct hypervisor_x86 *candidate = *probe;

		if (!candidate->detect())
			continue;

		x86_hyper = candidate;
		printk(KERN_INFO "Hypervisor detected: %s\n", candidate->name);
		break;
	}
}
/*
 * Apply per-CPU feature fixups from the detected hypervisor (if any) to
 * the given CPU's capability data.  No-op on bare metal or when the
 * hypervisor provides no set_cpu_features hook.
 */
void __cpuinit init_hypervisor(struct cpuinfo_x86 *c)
{
	if (!x86_hyper)
		return;
	if (x86_hyper->set_cpu_features)
		x86_hyper->set_cpu_features(c);
}
/*
 * Boot-time platform setup: detect which hypervisor we run under, apply
 * its CPU feature fixups for the boot CPU, then run its platform-level
 * initialization hook if it has one.
 */
void __init init_hypervisor_platform(void)
{
	detect_hypervisor_vendor();

	if (x86_hyper == NULL)
		return;

	init_hypervisor(&boot_cpu_data);

	if (x86_hyper->init_platform != NULL)
		x86_hyper->init_platform();
}
| gpl-2.0 |
edoko/android_samsung_galaxy_pop | drivers/gpu/drm/vmwgfx/vmwgfx_marker.c | 8587 | 4359 | /**************************************************************************
*
* Copyright (C) 2010 VMware, Inc., Palo Alto, CA., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
#include "vmwgfx_drv.h"
/*
 * One entry in the FIFO lag-tracking queue: a command sequence number
 * together with the raw-monotonic time it was submitted.
 */
struct vmw_marker {
	struct list_head head;		/* link in vmw_marker_queue::head */
	uint32_t seqno;			/* sequence number of this marker */
	struct timespec submitted;	/* submission time (raw monotonic) */
};
/* Initialize an empty marker queue with zero accumulated lag. */
void vmw_marker_queue_init(struct vmw_marker_queue *queue)
{
	spin_lock_init(&queue->lock);
	INIT_LIST_HEAD(&queue->head);
	queue->lag = ns_to_timespec(0);
	getrawmonotonic(&queue->lag_time);
}
/*
 * Free every marker still on the queue.  The entries are deliberately not
 * unlinked first: the queue is being torn down, so the list head is left
 * stale and must not be walked again after this returns.
 */
void vmw_marker_queue_takedown(struct vmw_marker_queue *queue)
{
	struct vmw_marker *marker, *next;

	spin_lock(&queue->lock);
	list_for_each_entry_safe(marker, next, &queue->head, head) {
		kfree(marker);
	}
	spin_unlock(&queue->lock);
}
/*
 * Append a marker for @seqno to the queue, timestamped with the current
 * raw-monotonic time.  Returns 0 on success or -ENOMEM if the marker
 * could not be allocated.
 */
int vmw_marker_push(struct vmw_marker_queue *queue,
		    uint32_t seqno)
{
	struct vmw_marker *entry;

	entry = kmalloc(sizeof(*entry), GFP_KERNEL);
	if (unlikely(entry == NULL))
		return -ENOMEM;

	entry->seqno = seqno;
	getrawmonotonic(&entry->submitted);

	spin_lock(&queue->lock);
	list_add_tail(&entry->head, &queue->head);
	spin_unlock(&queue->lock);

	return 0;
}
/*
 * Retire every marker whose seqno has been signaled and refresh the lag
 * estimate from the oldest retired marker.  Returns 0 if the lag was
 * updated, -EBUSY if no queued marker had been signaled yet.
 */
int vmw_marker_pull(struct vmw_marker_queue *queue,
		    uint32_t signaled_seqno)
{
	struct vmw_marker *marker, *next;
	struct timespec now;
	bool updated = false;

	spin_lock(&queue->lock);
	getrawmonotonic(&now);

	/* Empty queue: nothing outstanding, so the lag is zero. */
	if (list_empty(&queue->head)) {
		queue->lag = ns_to_timespec(0);
		queue->lag_time = now;
		updated = true;
		goto out_unlock;
	}

	list_for_each_entry_safe(marker, next, &queue->head, head) {
		/*
		 * Wraparound-safe "not yet signaled" test: an unsigned
		 * difference above 2^30 means marker->seqno is still ahead
		 * of signaled_seqno, so the marker stays queued.
		 */
		if (signaled_seqno - marker->seqno > (1 << 30))
			continue;

		queue->lag = timespec_sub(now, marker->submitted);
		queue->lag_time = now;
		updated = true;
		list_del(&marker->head);
		kfree(marker);
	}

out_unlock:
	spin_unlock(&queue->lock);

	return (updated) ? 0 : -EBUSY;
}
/*
 * Component-wise timespec addition with a single carry normalization.
 * Assumes both inputs are already normalized (tv_nsec < 1e9), so one
 * borrow step suffices.
 */
static struct timespec vmw_timespec_add(struct timespec t1,
					struct timespec t2)
{
	struct timespec sum;

	sum.tv_sec = t1.tv_sec + t2.tv_sec;
	sum.tv_nsec = t1.tv_nsec + t2.tv_nsec;
	if (sum.tv_nsec >= 1000000000L) {
		sum.tv_nsec -= 1000000000L;
		sum.tv_sec += 1;
	}

	return sum;
}
/*
 * Age the lag estimate by the time elapsed since it was last updated and
 * return the new value.
 *
 * NOTE(review): queue->lag is read for the return value after the lock is
 * dropped, so a concurrent update could be observed -- presumably
 * acceptable for a heuristic; confirm against callers' expectations.
 */
static struct timespec vmw_fifo_lag(struct vmw_marker_queue *queue)
{
	struct timespec now;

	spin_lock(&queue->lock);
	getrawmonotonic(&now);
	queue->lag = vmw_timespec_add(queue->lag,
				      timespec_sub(now, queue->lag_time));
	queue->lag_time = now;
	spin_unlock(&queue->lock);
	return queue->lag;
}
/* Return true when the current FIFO lag is at most @us microseconds. */
static bool vmw_lag_lt(struct vmw_marker_queue *queue,
		       uint32_t us)
{
	struct timespec limit = ns_to_timespec((s64) us * 1000);
	struct timespec current_lag = vmw_fifo_lag(queue);

	return timespec_compare(&current_lag, &limit) < 1;
}
/*
 * Block until the FIFO lag drops below @us microseconds.  Each iteration
 * waits (up to 3 seconds) for the oldest outstanding marker's seqno --
 * or the most recent seqno when the queue is empty -- and then retires
 * any signaled markers.  Returns 0 on success or the error from
 * vmw_wait_seqno().
 */
int vmw_wait_lag(struct vmw_private *dev_priv,
		 struct vmw_marker_queue *queue, uint32_t us)
{
	struct vmw_marker *marker;
	uint32_t seqno;
	int ret;

	while (!vmw_lag_lt(queue, us)) {
		spin_lock(&queue->lock);
		if (list_empty(&queue->head))
			seqno = atomic_read(&dev_priv->marker_seq);
		else {
			marker = list_first_entry(&queue->head,
						  struct vmw_marker, head);
			seqno = marker->seqno;
		}
		spin_unlock(&queue->lock);

		ret = vmw_wait_seqno(dev_priv, false, seqno, true,
				     3*HZ);

		if (unlikely(ret != 0))
			return ret;

		/* A pull failure (-EBUSY) just means we loop again. */
		(void) vmw_marker_pull(queue, seqno);
	}
	return 0;
}
| gpl-2.0 |
NormandyCM11/android_kernel_nokia_normandy | fs/afs/volume.c | 11659 | 10135 | /* AFS volume management
*
* Copyright (C) 2002, 2007 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/sched.h>
#include "internal.h"
/* Printable names for the AFSVL_* volume type indices. */
static const char *afs_voltypes[] = { "R/W", "R/O", "BAK" };
/*
* lookup a volume by name
* - this can be one of the following:
* "%[cell:]volume[.]" R/W volume
* "#[cell:]volume[.]" R/O or R/W volume (rwparent=0),
* or R/W (rwparent=1) volume
* "%[cell:]volume.readonly" R/O volume
* "#[cell:]volume.readonly" R/O volume
* "%[cell:]volume.backup" Backup volume
* "#[cell:]volume.backup" Backup volume
*
* The cell name is optional, and defaults to the current cell.
*
* See "The Rules of Mount Point Traversal" in Chapter 5 of the AFS SysAdmin
* Guide
* - Rule 1: Explicit type suffix forces access of that type or nothing
* (no suffix, then use Rule 2 & 3)
* - Rule 2: If parent volume is R/O, then mount R/O volume by preference, R/W
* if not available
* - Rule 3: If parent volume is R/W, then only mount R/W volume unless
* explicitly told otherwise
*/
/*
 * Look up (or create) the afs_volume matching the parsed mount
 * parameters.  On success, returns a volume with an elevated usage
 * count; on failure, returns ERR_PTR().  May rewrite params->type when
 * the caller did not force a specific volume type.
 */
struct afs_volume *afs_volume_lookup(struct afs_mount_params *params)
{
	struct afs_vlocation *vlocation = NULL;
	struct afs_volume *volume = NULL;
	struct afs_server *server = NULL;
	char srvtmask;
	int ret, loop;

	_enter("{%*.*s,%d}",
	       params->volnamesz, params->volnamesz, params->volname, params->rwpath);

	/* lookup the volume location record */
	vlocation = afs_vlocation_lookup(params->cell, params->key,
					 params->volname, params->volnamesz);
	if (IS_ERR(vlocation)) {
		ret = PTR_ERR(vlocation);
		vlocation = NULL;
		goto error;
	}

	/* make the final decision on the type we want */
	ret = -ENOMEDIUM;
	if (params->force && !(vlocation->vldb.vidmask & (1 << params->type)))
		goto error;

	/* collect the union of all servers' volume-type capabilities */
	srvtmask = 0;
	for (loop = 0; loop < vlocation->vldb.nservers; loop++)
		srvtmask |= vlocation->vldb.srvtmask[loop];

	if (params->force) {
		if (!(srvtmask & (1 << params->type)))
			goto error;
	} else if (srvtmask & AFS_VOL_VTM_RO) {
		/* not forced: prefer R/O, fall back to R/W (Rules 2 & 3) */
		params->type = AFSVL_ROVOL;
	} else if (srvtmask & AFS_VOL_VTM_RW) {
		params->type = AFSVL_RWVOL;
	} else {
		goto error;
	}

	down_write(&params->cell->vl_sem);

	/* is the volume already active? */
	if (vlocation->vols[params->type]) {
		/* yes - re-use it */
		volume = vlocation->vols[params->type];
		afs_get_volume(volume);
		goto success;
	}

	/* create a new volume record */
	_debug("creating new volume record");

	ret = -ENOMEM;
	volume = kzalloc(sizeof(struct afs_volume), GFP_KERNEL);
	if (!volume)
		goto error_up;

	atomic_set(&volume->usage, 1);
	volume->type = params->type;
	volume->type_force = params->force;
	volume->cell = params->cell;
	volume->vid = vlocation->vldb.vid[params->type];

	ret = bdi_setup_and_register(&volume->bdi, "afs", BDI_CAP_MAP_COPY);
	if (ret)
		goto error_bdi;

	init_rwsem(&volume->server_sem);

	/* look up all the applicable server records */
	for (loop = 0; loop < 8; loop++) {
		if (vlocation->vldb.srvtmask[loop] & (1 << volume->type)) {
			server = afs_lookup_server(
				volume->cell, &vlocation->vldb.servers[loop]);
			if (IS_ERR(server)) {
				ret = PTR_ERR(server);
				goto error_discard;
			}

			volume->servers[volume->nservers] = server;
			volume->nservers++;
		}
	}

	/* attach the cache and volume location */
#ifdef CONFIG_AFS_FSCACHE
	volume->cache = fscache_acquire_cookie(vlocation->cache,
					       &afs_volume_cache_index_def,
					       volume);
#endif
	afs_get_vlocation(vlocation);
	volume->vlocation = vlocation;

	vlocation->vols[volume->type] = volume;

success:
	_debug("kAFS selected %s volume %08x",
	       afs_voltypes[volume->type], volume->vid);
	up_write(&params->cell->vl_sem);
	afs_put_vlocation(vlocation);
	_leave(" = %p", volume);
	return volume;

	/* clean up */
error_up:
	up_write(&params->cell->vl_sem);
error:
	afs_put_vlocation(vlocation);
	_leave(" = %d", ret);
	return ERR_PTR(ret);

	/* failure after server refs were taken: drop them all */
error_discard:
	bdi_destroy(&volume->bdi);
error_bdi:
	up_write(&params->cell->vl_sem);
	for (loop = volume->nservers - 1; loop >= 0; loop--)
		afs_put_server(volume->servers[loop]);

	kfree(volume);
	goto error;
}
/*
 * Drop a reference on a volume record, destroying the record when the
 * last reference goes away.  A NULL volume is a no-op.
 */
void afs_put_volume(struct afs_volume *volume)
{
	struct afs_vlocation *vlocation;
	int loop;

	if (!volume)
		return;

	_enter("%p", volume);

	ASSERTCMP(atomic_read(&volume->usage), >, 0);

	vlocation = volume->vlocation;

	/* to prevent a race, the decrement and the dequeue must be effectively
	 * atomic */
	down_write(&vlocation->cell->vl_sem);

	if (likely(!atomic_dec_and_test(&volume->usage))) {
		up_write(&vlocation->cell->vl_sem);
		_leave("");
		return;
	}

	vlocation->vols[volume->type] = NULL;

	up_write(&vlocation->cell->vl_sem);

	/* finish cleaning up the volume */
#ifdef CONFIG_AFS_FSCACHE
	fscache_relinquish_cookie(volume->cache, 0);
#endif
	afs_put_vlocation(vlocation);

	/* release the refs taken on the servers at lookup time */
	for (loop = volume->nservers - 1; loop >= 0; loop--)
		afs_put_server(volume->servers[loop]);

	bdi_destroy(&volume->bdi);
	kfree(volume);

	_leave(" [destroyed]");
}
/*
 * pick a server to use to try accessing this volume
 * - returns with an elevated usage count on the server chosen
 * - if every server is down, returns ERR_PTR() with the "most specific"
 *   error seen (net unreachable < host unreachable < refused < remote I/O)
 */
struct afs_server *afs_volume_pick_fileserver(struct afs_vnode *vnode)
{
	struct afs_volume *volume = vnode->volume;
	struct afs_server *server;
	int ret, state, loop;

	_enter("%s", volume->vlocation->vldb.name);

	/* stick with the server we're already using if we can */
	if (vnode->server && vnode->server->fs_state == 0) {
		afs_get_server(vnode->server);
		_leave(" = %p [current]", vnode->server);
		return vnode->server;
	}

	down_read(&volume->server_sem);

	/* handle the no-server case */
	if (volume->nservers == 0) {
		ret = volume->rjservers ? -ENOMEDIUM : -ESTALE;
		up_read(&volume->server_sem);
		_leave(" = %d [no servers]", ret);
		return ERR_PTR(ret);
	}

	/* basically, just search the list for the first live server and use
	 * that */
	ret = 0;
	for (loop = 0; loop < volume->nservers; loop++) {
		server = volume->servers[loop];
		state = server->fs_state;

		_debug("consider %d [%d]", loop, state);

		switch (state) {
			/* found an apparently healthy server */
		case 0:
			afs_get_server(server);
			up_read(&volume->server_sem);
			_leave(" = %p (picked %08x)",
			       server, ntohl(server->addr.s_addr));
			return server;

			/* the cases below rank the error codes so that the
			 * most specific one seen across all dead servers is
			 * the one eventually reported */
		case -ENETUNREACH:
			if (ret == 0)
				ret = state;
			break;

		case -EHOSTUNREACH:
			if (ret == 0 ||
			    ret == -ENETUNREACH)
				ret = state;
			break;

		case -ECONNREFUSED:
			if (ret == 0 ||
			    ret == -ENETUNREACH ||
			    ret == -EHOSTUNREACH)
				ret = state;
			break;

			/* unknown states are ranked like remote I/O errors */
		default:
		case -EREMOTEIO:
			if (ret == 0 ||
			    ret == -ENETUNREACH ||
			    ret == -EHOSTUNREACH ||
			    ret == -ECONNREFUSED)
				ret = state;
			break;
		}
	}

	/* no available servers
	 * - TODO: handle the no active servers case better
	 */
	up_read(&volume->server_sem);
	_leave(" = %d", ret);
	return ERR_PTR(ret);
}
/*
 * release a server after use
 * - releases the ref on the server struct that was acquired by picking
 * - records result of using a particular server to access a volume
 * - return 0 to try again, 1 if okay or to issue error
 * - the caller must release the server struct if result was 0
 */
int afs_volume_release_fileserver(struct afs_vnode *vnode,
				  struct afs_server *server,
				  int result)
{
	struct afs_volume *volume = vnode->volume;
	unsigned loop;

	_enter("%s,%08x,%d",
	       volume->vlocation->vldb.name, ntohl(server->addr.s_addr),
	       result);

	switch (result) {
		/* success */
	case 0:
		server->fs_act_jif = jiffies;
		server->fs_state = 0;
		_leave("");
		return 1;

		/* the fileserver denied all knowledge of the volume */
	case -ENOMEDIUM:
		server->fs_act_jif = jiffies;
		down_write(&volume->server_sem);

		/* firstly, find where the server is in the active list (if it
		 * is) */
		for (loop = 0; loop < volume->nservers; loop++)
			if (volume->servers[loop] == server)
				goto present;

		/* no longer there - may have been discarded by another op */
		goto try_next_server_upw;

	present:
		/* remove the rejecting server from the volume's list; the
		 * first put below drops the list's ref, the second (on the
		 * completely-rejected path) drops the picker's ref */
		volume->nservers--;
		memmove(&volume->servers[loop],
			&volume->servers[loop + 1],
			sizeof(volume->servers[loop]) *
			(volume->nservers - loop));
		volume->servers[volume->nservers] = NULL;
		afs_put_server(server);
		volume->rjservers++;

		if (volume->nservers > 0)
			/* another server might acknowledge its existence */
			goto try_next_server_upw;

		/* handle the case where all the fileservers have rejected the
		 * volume
		 * - TODO: try asking the fileservers for volume information
		 * - TODO: contact the VL server again to see if the volume is
		 *         no longer registered
		 */
		up_write(&volume->server_sem);
		afs_put_server(server);
		_leave(" [completely rejected]");
		return 1;

		/* problem reaching the server */
	case -ENETUNREACH:
	case -EHOSTUNREACH:
	case -ECONNREFUSED:
	case -ETIME:
	case -ETIMEDOUT:
	case -EREMOTEIO:
		/* mark the server as dead
		 * TODO: vary dead timeout depending on error
		 */
		spin_lock(&server->fs_lock);
		if (!server->fs_state) {
			server->fs_dead_jif = jiffies + HZ * 10;
			server->fs_state = result;
			printk("kAFS: SERVER DEAD state=%d\n", result);
		}
		spin_unlock(&server->fs_lock);
		goto try_next_server;

		/* miscellaneous error: record activity, then fall through to
		 * the accept-the-result handling shared with ENOMEM/ENONET */
	default:
		server->fs_act_jif = jiffies;
		/* fall through */
	case -ENOMEM:
	case -ENONET:
		/* tell the caller to accept the result */
		afs_put_server(server);
		_leave(" [local failure]");
		return 1;
	}

	/* tell the caller to loop around and try the next server */
try_next_server_upw:
	up_write(&volume->server_sem);
try_next_server:
	afs_put_server(server);
	_leave(" [try next server]");
	return 0;
}
| gpl-2.0 |
StanislavPodolsky/RT5350 | linux-3.8.7/drivers/parport/ieee1284.c | 12683 | 23096 | /*
* IEEE-1284 implementation for parport.
*
* Authors: Phil Blundell <philb@gnu.org>
* Carsten Gross <carsten@sol.wohnheim.uni-ulm.de>
* Jose Renau <renau@acm.org>
* Tim Waugh <tim@cyberelk.demon.co.uk> (largely rewritten)
*
* This file is responsible for IEEE 1284 negotiation, and for handing
* read/write requests to low-level drivers.
*
* Any part of this program may be used in documents licensed under
* the GNU Free Documentation License, Version 1.1 or any later version
* published by the Free Software Foundation.
*
* Various hacks, Fred Barnes <frmb2@ukc.ac.uk>, 04/2000
*/
#include <linux/module.h>
#include <linux/threads.h>
#include <linux/parport.h>
#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/timer.h>
#include <linux/sched.h>
#undef DEBUG /* undef me for production */
#ifdef CONFIG_LP_CONSOLE
#undef DEBUG /* Don't want a garbled console */
#endif
#ifdef DEBUG
#define DPRINTK(stuff...) printk (stuff)
#else
#define DPRINTK(stuff...)
#endif
/* Make parport_wait_peripheral wake up.
* It will be useful to call this from an interrupt handler. */
/* Release one waiter blocked in parport_wait_event() on this port. */
static void parport_ieee1284_wakeup (struct parport *port)
{
	up (&port->physport->ieee1284.irq);
}
/* Maps timer cookies back to ports (indexed by port number mod PARPORT_MAX). */
static struct parport *port_from_cookie[PARPORT_MAX];

/* Timer callback: wake the sleeper so parport_wait_event() can time out. */
static void timeout_waiting_on_port (unsigned long cookie)
{
	parport_ieee1284_wakeup (port_from_cookie[cookie % PARPORT_MAX]);
}
/**
* parport_wait_event - wait for an event on a parallel port
* @port: port to wait on
* @timeout: time to wait (in jiffies)
*
* This function waits for up to @timeout jiffies for an
* interrupt to occur on a parallel port. If the port timeout is
* set to zero, it returns immediately.
*
* If an interrupt occurs before the timeout period elapses, this
* function returns zero immediately. If it times out, it returns
* one. An error code less than zero indicates an error (most
* likely a pending signal), and the calling code should finish
* what it's doing as soon as it can.
*/
int parport_wait_event (struct parport *port, signed long timeout)
{
	int ret;
	struct timer_list timer;

	if (!port->physport->cad->timeout)
		/* Zero timeout is special, and we can't down() the
		   semaphore. */
		return 1;

	/* Arm a one-shot timer that wakes us if no interrupt arrives. */
	init_timer_on_stack(&timer);
	timer.expires = jiffies + timeout;
	timer.function = timeout_waiting_on_port;
	port_from_cookie[port->number % PARPORT_MAX] = port;
	timer.data = port->number;

	add_timer (&timer);
	ret = down_interruptible (&port->physport->ieee1284.irq);
	/* If the timer was no longer pending and no signal arrived, the
	 * wakeup came from the timer itself: report a timeout. */
	if (!del_timer_sync(&timer) && !ret)
		/* Timed out. */
		ret = 1;

	destroy_timer_on_stack(&timer);

	return ret;
}
/**
* parport_poll_peripheral - poll status lines
* @port: port to watch
* @mask: status lines to watch
* @result: desired values of chosen status lines
* @usec: timeout
*
* This function busy-waits until the masked status lines have
* the desired values, or until the timeout period elapses. The
* @mask and @result parameters are bitmasks, with the bits
* defined by the constants in parport.h: %PARPORT_STATUS_BUSY,
* and so on.
*
* This function does not call schedule(); instead it busy-waits
* using udelay(). It currently has a resolution of 5usec.
*
* If the status lines take on the desired values before the
* timeout period elapses, parport_poll_peripheral() returns zero
* immediately. A return value greater than zero indicates
* a timeout. An error code (less than zero) indicates an error,
* most likely a signal that arrived, and the caller should
* finish what it is doing as soon as possible.
*/
int parport_poll_peripheral(struct parport *port,
			    unsigned char mask,
			    unsigned char result,
			    int usec)
{
	/* Zero return code is success, >0 is timeout. */
	const int polls = usec / 5 + 2;
	int i;

	for (i = 0; i < polls; i++) {
		unsigned char status = parport_read_status (port);

		if ((status & mask) == result)
			return 0;	/* lines reached the desired state */
		if (signal_pending (current))
			return -EINTR;	/* caller should bail out */
		if (need_resched())
			break;		/* stop polling; report timeout */
		if (i >= 2)
			udelay (5);	/* first two probes are back-to-back */
	}

	return 1;
}
/**
* parport_wait_peripheral - wait for status lines to change in 35ms
* @port: port to watch
* @mask: status lines to watch
* @result: desired values of chosen status lines
*
* This function waits until the masked status lines have the
* desired values, or until 35ms have elapsed (see IEEE 1284-1994
* page 24 to 25 for why this value in particular is hardcoded).
* The @mask and @result parameters are bitmasks, with the bits
* defined by the constants in parport.h: %PARPORT_STATUS_BUSY,
* and so on.
*
* The port is polled quickly to start off with, in anticipation
* of a fast response from the peripheral. This fast polling
* time is configurable (using /proc), and defaults to 500usec.
* If the timeout for this port (see parport_set_timeout()) is
* zero, the fast polling time is 35ms, and this function does
* not call schedule().
*
* If the timeout for this port is non-zero, after the fast
* polling fails it uses parport_wait_event() to wait for up to
* 10ms, waking up if an interrupt occurs.
*/
int parport_wait_peripheral(struct parport *port,
			    unsigned char mask,
			    unsigned char result)
{
	int ret;
	int usec;
	unsigned long deadline;
	unsigned char status;

	usec = port->physport->spintime; /* usecs of fast polling */

	if (!port->physport->cad->timeout)
		/* A zero timeout is "special": busy wait for the
		   entire 35ms. */
		usec = 35000;

	/* Fast polling.
	 *
	 * This should be adjustable.
	 * How about making a note (in the device structure) of how long
	 * it takes, so we know for next time?
	 */
	ret = parport_poll_peripheral (port, mask, result, usec);
	if (ret != 1)
		/* Either success (0) or an error (<0): pass it up. */
		return ret;

	if (!port->physport->cad->timeout)
		/* We may be in an interrupt handler, so we can't poll
		 * slowly anyway. */
		return 1;

	/* 40ms of slow polling. */
	deadline = jiffies + msecs_to_jiffies(40);
	while (time_before (jiffies, deadline)) {
		if (signal_pending (current))
			return -EINTR;

		/* Wait for 10ms (or until an interrupt occurs if
		 * the handler is set) */
		if ((ret = parport_wait_event (port, msecs_to_jiffies(10))) < 0)
			return ret;

		status = parport_read_status (port);
		if ((status & mask) == result)
			return 0;

		if (!ret) {
			/* parport_wait_event didn't time out, but the
			 * peripheral wasn't actually ready either.
			 * Wait for another 10ms. */
			schedule_timeout_interruptible(msecs_to_jiffies(10));
		}
	}

	/* Slow polling window expired without the lines changing. */
	return 1;
}
#ifdef CONFIG_PARPORT_1284
/* Terminate a negotiated mode, returning the port to compatibility
 * (forward idle) mode.  The event numbers refer to IEEE 1284-1994. */
static void parport_ieee1284_terminate (struct parport *port)
{
	int r;
	port = port->physport;

	/* EPP terminates differently. */
	switch (port->ieee1284.mode) {
	case IEEE1284_MODE_EPP:
	case IEEE1284_MODE_EPPSL:
	case IEEE1284_MODE_EPPSWE:
		/* Terminate from EPP mode. */

		/* Event 68: Set nInit low */
		parport_frob_control (port, PARPORT_CONTROL_INIT, 0);
		udelay (50);

		/* Event 69: Set nInit high, nSelectIn low */
		parport_frob_control (port,
				      PARPORT_CONTROL_SELECT
				      | PARPORT_CONTROL_INIT,
				      PARPORT_CONTROL_SELECT
				      | PARPORT_CONTROL_INIT);
		break;

	case IEEE1284_MODE_ECP:
	case IEEE1284_MODE_ECPRLE:
	case IEEE1284_MODE_ECPSWE:
		/* In ECP we can only terminate from fwd idle phase. */
		if (port->ieee1284.phase != IEEE1284_PH_FWD_IDLE) {
			/* Event 47: Set nInit high */
			parport_frob_control (port,
					      PARPORT_CONTROL_INIT
					      | PARPORT_CONTROL_AUTOFD,
					      PARPORT_CONTROL_INIT
					      | PARPORT_CONTROL_AUTOFD);

			/* Event 49: PError goes high */
			r = parport_wait_peripheral (port,
						     PARPORT_STATUS_PAPEROUT,
						     PARPORT_STATUS_PAPEROUT);
			if (r)
				DPRINTK (KERN_INFO "%s: Timeout at event 49\n",
					 port->name);

			parport_data_forward (port);
			DPRINTK (KERN_DEBUG "%s: ECP direction: forward\n",
				 port->name);
			port->ieee1284.phase = IEEE1284_PH_FWD_IDLE;
		}

		/* fall through to the common termination sequence */
	default:
		/* Terminate from all other modes. */

		/* Event 22: Set nSelectIn low, nAutoFd high */
		parport_frob_control (port,
				      PARPORT_CONTROL_SELECT
				      | PARPORT_CONTROL_AUTOFD,
				      PARPORT_CONTROL_SELECT);

		/* Event 24: nAck goes low */
		r = parport_wait_peripheral (port, PARPORT_STATUS_ACK, 0);
		if (r)
			DPRINTK (KERN_INFO "%s: Timeout at event 24\n",
				 port->name);

		/* Event 25: Set nAutoFd low */
		parport_frob_control (port,
				      PARPORT_CONTROL_AUTOFD,
				      PARPORT_CONTROL_AUTOFD);

		/* Event 27: nAck goes high */
		r = parport_wait_peripheral (port,
					     PARPORT_STATUS_ACK,
					     PARPORT_STATUS_ACK);
		if (r)
			DPRINTK (KERN_INFO "%s: Timeout at event 27\n",
				 port->name);

		/* Event 29: Set nAutoFd high */
		parport_frob_control (port, PARPORT_CONTROL_AUTOFD, 0);
	}

	port->ieee1284.mode = IEEE1284_MODE_COMPAT;
	port->ieee1284.phase = IEEE1284_PH_FWD_IDLE;

	DPRINTK (KERN_DEBUG "%s: In compatibility (forward idle) mode\n",
		 port->name);
}
#endif /* IEEE1284 support */
/**
* parport_negotiate - negotiate an IEEE 1284 mode
* @port: port to use
* @mode: mode to negotiate to
*
* Use this to negotiate to a particular IEEE 1284 transfer mode.
* The @mode parameter should be one of the constants in
* parport.h starting %IEEE1284_MODE_xxx.
*
* The return value is 0 if the peripheral has accepted the
* negotiation to the mode specified, -1 if the peripheral is not
* IEEE 1284 compliant (or not present), or 1 if the peripheral
* has rejected the negotiation.
*/
int parport_negotiate (struct parport *port, int mode)
{
#ifndef CONFIG_PARPORT_1284
	if (mode == IEEE1284_MODE_COMPAT)
		return 0;
	printk (KERN_ERR "parport: IEEE1284 not supported in this kernel\n");
	return -1;
#else
	int m = mode & ~IEEE1284_ADDR;	/* extensibility byte to present */
	int r;
	unsigned char xflag;

	port = port->physport;

	/* Is there anything to do? */
	if (port->ieee1284.mode == mode)
		return 0;

	/* Is the difference just an address-or-not bit? */
	if ((port->ieee1284.mode & ~IEEE1284_ADDR) == (mode & ~IEEE1284_ADDR)){
		port->ieee1284.mode = mode;
		return 0;
	}

	/* Go to compatibility forward idle mode */
	if (port->ieee1284.mode != IEEE1284_MODE_COMPAT)
		parport_ieee1284_terminate (port);

	if (mode == IEEE1284_MODE_COMPAT)
		/* Compatibility mode: no negotiation. */
		return 0;

	/* Software-emulated / variant modes negotiate using the base
	 * mode's extensibility byte. */
	switch (mode) {
	case IEEE1284_MODE_ECPSWE:
		m = IEEE1284_MODE_ECP;
		break;
	case IEEE1284_MODE_EPPSL:
	case IEEE1284_MODE_EPPSWE:
		m = IEEE1284_MODE_EPP;
		break;
	case IEEE1284_MODE_BECP:
		return -ENOSYS; /* FIXME (implement BECP) */
	}

	if (mode & IEEE1284_EXT_LINK)
		m = 1<<7; /* request extensibility link */

	port->ieee1284.phase = IEEE1284_PH_NEGOTIATION;

	/* Start off with nStrobe and nAutoFd high, and nSelectIn low */
	parport_frob_control (port,
			      PARPORT_CONTROL_STROBE
			      | PARPORT_CONTROL_AUTOFD
			      | PARPORT_CONTROL_SELECT,
			      PARPORT_CONTROL_SELECT);
	udelay(1);

	/* Event 0: Set data */
	parport_data_forward (port);
	parport_write_data (port, m);
	udelay (400); /* Shouldn't need to wait this long. */

	/* Event 1: Set nSelectIn high, nAutoFd low */
	parport_frob_control (port,
			      PARPORT_CONTROL_SELECT
			      | PARPORT_CONTROL_AUTOFD,
			      PARPORT_CONTROL_AUTOFD);

	/* Event 2: PError, Select, nFault go high, nAck goes low */
	if (parport_wait_peripheral (port,
				     PARPORT_STATUS_ERROR
				     | PARPORT_STATUS_SELECT
				     | PARPORT_STATUS_PAPEROUT
				     | PARPORT_STATUS_ACK,
				     PARPORT_STATUS_ERROR
				     | PARPORT_STATUS_SELECT
				     | PARPORT_STATUS_PAPEROUT)) {
		/* Timeout */
		parport_frob_control (port,
				      PARPORT_CONTROL_SELECT
				      | PARPORT_CONTROL_AUTOFD,
				      PARPORT_CONTROL_SELECT);
		DPRINTK (KERN_DEBUG
			 "%s: Peripheral not IEEE1284 compliant (0x%02X)\n",
			 port->name, parport_read_status (port));
		port->ieee1284.phase = IEEE1284_PH_FWD_IDLE;
		return -1; /* Not IEEE1284 compliant */
	}

	/* Event 3: Set nStrobe low */
	parport_frob_control (port,
			      PARPORT_CONTROL_STROBE,
			      PARPORT_CONTROL_STROBE);

	/* Event 4: Set nStrobe and nAutoFd high */
	udelay (5);
	parport_frob_control (port,
			      PARPORT_CONTROL_STROBE
			      | PARPORT_CONTROL_AUTOFD,
			      0);

	/* Event 6: nAck goes high */
	if (parport_wait_peripheral (port,
				     PARPORT_STATUS_ACK,
				     PARPORT_STATUS_ACK)) {
		/* This shouldn't really happen with a compliant device. */
		DPRINTK (KERN_DEBUG
			 "%s: Mode 0x%02x not supported? (0x%02x)\n",
			 port->name, mode, port->ops->read_status (port));
		parport_ieee1284_terminate (port);
		return 1;
	}

	/* Event 5/7: the peripheral reports mode acceptance on XFlag
	 * (the Select status line). */
	xflag = parport_read_status (port) & PARPORT_STATUS_SELECT;

	/* xflag should be high for all modes other than nibble (0). */
	if (mode && !xflag) {
		/* Mode not supported. */
		DPRINTK (KERN_DEBUG "%s: Mode 0x%02x rejected by peripheral\n",
			 port->name, mode);
		parport_ieee1284_terminate (port);
		return 1;
	}

	/* More to do if we've requested extensibility link. */
	if (mode & IEEE1284_EXT_LINK) {
		m = mode & 0x7f;
		udelay (1);
		parport_write_data (port, m);
		udelay (1);

		/* Event 51: Set nStrobe low */
		parport_frob_control (port,
				      PARPORT_CONTROL_STROBE,
				      PARPORT_CONTROL_STROBE);

		/* Event 52: nAck goes low */
		if (parport_wait_peripheral (port, PARPORT_STATUS_ACK, 0)) {
			/* This peripheral is _very_ slow. */
			DPRINTK (KERN_DEBUG
				 "%s: Event 52 didn't happen\n",
				 port->name);
			parport_ieee1284_terminate (port);
			return 1;
		}

		/* Event 53: Set nStrobe high */
		parport_frob_control (port,
				      PARPORT_CONTROL_STROBE,
				      0);

		/* Event 55: nAck goes high */
		if (parport_wait_peripheral (port,
					     PARPORT_STATUS_ACK,
					     PARPORT_STATUS_ACK)) {
			/* This shouldn't really happen with a compliant
			 * device. */
			DPRINTK (KERN_DEBUG
				 "%s: Mode 0x%02x not supported? (0x%02x)\n",
				 port->name, mode,
				 port->ops->read_status (port));
			parport_ieee1284_terminate (port);
			return 1;
		}

		/* Event 54: Peripheral sets XFlag to reflect support */
		xflag = parport_read_status (port) & PARPORT_STATUS_SELECT;

		/* xflag should be high. */
		if (!xflag) {
			/* Extended mode not supported. */
			DPRINTK (KERN_DEBUG "%s: Extended mode 0x%02x not "
				 "supported\n", port->name, mode);
			parport_ieee1284_terminate (port);
			return 1;
		}

		/* Any further setup is left to the caller. */
	}

	/* Mode is supported */
	DPRINTK (KERN_DEBUG "%s: In mode 0x%02x\n", port->name, mode);
	port->ieee1284.mode = mode;

	/* But ECP is special: it needs an explicit transition to the
	 * forward idle phase (events 30/31). */
	if (!(mode & IEEE1284_EXT_LINK) && (m & IEEE1284_MODE_ECP)) {
		port->ieee1284.phase = IEEE1284_PH_ECP_SETUP;

		/* Event 30: Set nAutoFd low */
		parport_frob_control (port,
				      PARPORT_CONTROL_AUTOFD,
				      PARPORT_CONTROL_AUTOFD);

		/* Event 31: PError goes high. */
		r = parport_wait_peripheral (port,
					     PARPORT_STATUS_PAPEROUT,
					     PARPORT_STATUS_PAPEROUT);
		if (r) {
			DPRINTK (KERN_INFO "%s: Timeout at event 31\n",
				 port->name);
		}

		port->ieee1284.phase = IEEE1284_PH_FWD_IDLE;
		DPRINTK (KERN_DEBUG "%s: ECP direction: forward\n",
			 port->name);
	} else switch (mode) {
	case IEEE1284_MODE_NIBBLE:
	case IEEE1284_MODE_BYTE:
		/* Reverse-transfer modes start out in reverse idle. */
		port->ieee1284.phase = IEEE1284_PH_REV_IDLE;
		break;
	default:
		port->ieee1284.phase = IEEE1284_PH_FWD_IDLE;
	}

	return 0;
#endif /* IEEE1284 support */
}
/* Acknowledge that the peripheral has data available.
 * Events 18-20, in order to get from Reverse Idle phase
 * to Host Busy Data Available.
 * This will most likely be called from an interrupt.
 * Returns zero if data was available, -1 if event 18 (nFault going
 * low) did not happen.
 */
#ifdef CONFIG_PARPORT_1284
static int parport_ieee1284_ack_data_avail (struct parport *port)
{
	if (parport_read_status (port) & PARPORT_STATUS_ERROR)
		/* Event 18 didn't happen. */
		return -1;

	/* Event 20: nAutoFd goes high. */
	port->ops->frob_control (port, PARPORT_CONTROL_AUTOFD, 0);
	port->ieee1284.phase = IEEE1284_PH_HBUSY_DAVAIL;
	return 0;
}
#endif /* IEEE1284 support */
/* Handle an interrupt: wake any sleeper in parport_wait_event(), and if
 * we were sitting in reverse idle, acknowledge that reverse-channel data
 * has become available. */
void parport_ieee1284_interrupt (void *handle)
{
	struct parport *port = handle;
	parport_ieee1284_wakeup (port);

#ifdef CONFIG_PARPORT_1284
	if (port->ieee1284.phase == IEEE1284_PH_REV_IDLE) {
		/* An interrupt in this phase means that data
		 * is now available. */
		DPRINTK (KERN_DEBUG "%s: Data available\n", port->name);
		parport_ieee1284_ack_data_avail (port);
	}
#endif /* IEEE1284 support */
}
/**
* parport_write - write a block of data to a parallel port
* @port: port to write to
* @buffer: data buffer (in kernel space)
* @len: number of bytes of data to transfer
*
* This will write up to @len bytes of @buffer to the port
* specified, using the IEEE 1284 transfer mode most recently
* negotiated to (using parport_negotiate()), as long as that
* mode supports forward transfers (host to peripheral).
*
* It is the caller's responsibility to ensure that the first
* @len bytes of @buffer are valid.
*
* This function returns the number of bytes transferred (if zero
* or positive), or else an error code.
*/
ssize_t parport_write (struct parport *port, const void *buffer, size_t len)
{
#ifndef CONFIG_PARPORT_1284
	return port->ops->compat_write_data (port, buffer, len, 0);
#else
	ssize_t retval;
	int mode = port->ieee1284.mode;
	int addr = mode & IEEE1284_ADDR;
	size_t (*fn) (struct parport *, const void *, size_t, int);

	/* Ignore the device-ID-request bit and the address bit. */
	mode &= ~(IEEE1284_DEVICEID | IEEE1284_ADDR);

	/* Use the mode we're in. */
	switch (mode) {
	case IEEE1284_MODE_NIBBLE:
	case IEEE1284_MODE_BYTE:
		/* Reverse-only modes: drop back to compatibility mode for
		 * the forward transfer. */
		parport_negotiate (port, IEEE1284_MODE_COMPAT);
		/* fall through */
	case IEEE1284_MODE_COMPAT:
		DPRINTK (KERN_DEBUG "%s: Using compatibility mode\n",
			 port->name);
		fn = port->ops->compat_write_data;
		break;

	case IEEE1284_MODE_EPP:
		DPRINTK (KERN_DEBUG "%s: Using EPP mode\n", port->name);
		if (addr) {
			fn = port->ops->epp_write_addr;
		} else {
			fn = port->ops->epp_write_data;
		}
		break;
	case IEEE1284_MODE_EPPSWE:
		DPRINTK (KERN_DEBUG "%s: Using software-emulated EPP mode\n",
			 port->name);
		if (addr) {
			fn = parport_ieee1284_epp_write_addr;
		} else {
			fn = parport_ieee1284_epp_write_data;
		}
		break;
	case IEEE1284_MODE_ECP:
	case IEEE1284_MODE_ECPRLE:
		DPRINTK (KERN_DEBUG "%s: Using ECP mode\n", port->name);
		if (addr) {
			fn = port->ops->ecp_write_addr;
		} else {
			fn = port->ops->ecp_write_data;
		}
		break;

	case IEEE1284_MODE_ECPSWE:
		DPRINTK (KERN_DEBUG "%s: Using software-emulated ECP mode\n",
			 port->name);
		/* The caller has specified that it must be emulated,
		 * even if we have ECP hardware! */
		if (addr) {
			fn = parport_ieee1284_ecp_write_addr;
		} else {
			fn = parport_ieee1284_ecp_write_data;
		}
		break;

	default:
		DPRINTK (KERN_DEBUG "%s: Unknown mode 0x%02x\n", port->name,
			 port->ieee1284.mode);
		return -ENOSYS;
	}

	retval = (*fn) (port, buffer, len, 0);
	/* retval is ssize_t and len is size_t, so use %zd/%zu -- "%d"
	 * would be a format-specifier mismatch on 64-bit builds. */
	DPRINTK (KERN_DEBUG "%s: wrote %zd/%zu bytes\n",
		 port->name, retval, len);
	return retval;
#endif /* IEEE1284 support */
}
/**
* parport_read - read a block of data from a parallel port
* @port: port to read from
* @buffer: data buffer (in kernel space)
* @len: number of bytes of data to transfer
*
 * This will read up to @len bytes into @buffer from the port
* specified, using the IEEE 1284 transfer mode most recently
* negotiated to (using parport_negotiate()), as long as that
* mode supports reverse transfers (peripheral to host).
*
* It is the caller's responsibility to ensure that the first
* @len bytes of @buffer are available to write to.
*
* This function returns the number of bytes transferred (if zero
* or positive), or else an error code.
*/
/*
 * Perform a reverse (peripheral -> host) transfer of up to @len bytes,
 * dispatching to the transfer routine for the currently negotiated
 * IEEE 1284 mode.  Returns bytes read, or a negative errno.
 */
ssize_t parport_read (struct parport *port, void *buffer, size_t len)
{
#ifndef CONFIG_PARPORT_1284
	printk (KERN_ERR "parport: IEEE1284 not supported in this kernel\n");
	return -ENODEV;
#else
	int mode = port->physport->ieee1284.mode;
	int addr = mode & IEEE1284_ADDR;
	size_t (*fn) (struct parport *, void *, size_t, int);

	/* Ignore the device-ID-request bit and the address bit. */
	mode &= ~(IEEE1284_DEVICEID | IEEE1284_ADDR);

	/* Use the mode we're in. */
	switch (mode) {
	case IEEE1284_MODE_COMPAT:
		/* Compatibility mode is forward-only, so a reverse channel
		 * must be negotiated first: prefer BYTE, fall back to NIBBLE.
		 *
		 * if we can tri-state use BYTE mode instead of NIBBLE mode,
		 * if that fails, revert to NIBBLE mode -- ought to store somewhere
		 * the device's ability to do BYTE mode reverse transfers, so we don't
		 * end up needlessly calling negotiate(BYTE) repeately.. (fb)
		 */
		if ((port->physport->modes & PARPORT_MODE_TRISTATE) &&
		    !parport_negotiate (port, IEEE1284_MODE_BYTE)) {
			/* got into BYTE mode OK */
			DPRINTK (KERN_DEBUG "%s: Using byte mode\n", port->name);
			fn = port->ops->byte_read_data;
			break;
		}
		if (parport_negotiate (port, IEEE1284_MODE_NIBBLE)) {
			return -EIO;
		}
		/* fall through to NIBBLE */
	case IEEE1284_MODE_NIBBLE:
		DPRINTK (KERN_DEBUG "%s: Using nibble mode\n", port->name);
		fn = port->ops->nibble_read_data;
		break;

	case IEEE1284_MODE_BYTE:
		DPRINTK (KERN_DEBUG "%s: Using byte mode\n", port->name);
		fn = port->ops->byte_read_data;
		break;

	case IEEE1284_MODE_EPP:
		DPRINTK (KERN_DEBUG "%s: Using EPP mode\n", port->name);
		if (addr) {
			fn = port->ops->epp_read_addr;
		} else {
			fn = port->ops->epp_read_data;
		}
		break;

	case IEEE1284_MODE_EPPSWE:
		DPRINTK (KERN_DEBUG "%s: Using software-emulated EPP mode\n",
			 port->name);
		if (addr) {
			fn = parport_ieee1284_epp_read_addr;
		} else {
			fn = parport_ieee1284_epp_read_data;
		}
		break;

	case IEEE1284_MODE_ECP:
	case IEEE1284_MODE_ECPRLE:
		/* NOTE(review): unlike the write path, @addr is ignored here —
		 * reverse ECP address transfers appear to be unsupported. */
		DPRINTK (KERN_DEBUG "%s: Using ECP mode\n", port->name);
		fn = port->ops->ecp_read_data;
		break;

	case IEEE1284_MODE_ECPSWE:
		DPRINTK (KERN_DEBUG "%s: Using software-emulated ECP mode\n",
			 port->name);
		fn = parport_ieee1284_ecp_read_data;
		break;

	default:
		DPRINTK (KERN_DEBUG "%s: Unknown mode 0x%02x\n", port->name,
			 port->physport->ieee1284.mode);
		return -ENOSYS;
	}

	return (*fn) (port, buffer, len, 0);
#endif /* IEEE1284 support */
}
/**
* parport_set_timeout - set the inactivity timeout for a device
* @dev: device on a port
* @inactivity: inactivity timeout (in jiffies)
*
* This sets the inactivity timeout for a particular device on a
* port. This affects functions like parport_wait_peripheral().
* The special value 0 means not to call schedule() while dealing
* with this device.
*
* The return value is the previous inactivity timeout.
*
* Any callers of parport_wait_event() for this device are woken
* up.
*/
/*
 * Install a new inactivity timeout (in jiffies) for @dev and return the
 * previous one.  A value of 0 means "never schedule() for this device".
 */
long parport_set_timeout (struct pardevice *dev, long inactivity)
{
	long previous = dev->timeout;

	dev->timeout = inactivity;

	/* If this device currently owns the port, wake any waiters so
	 * they notice the new timeout immediately. */
	if (dev == dev->port->physport->cad)
		parport_ieee1284_wakeup (dev->port);

	return previous;
}
/* Exported symbols for modules: the public IEEE 1284 transfer API. */
EXPORT_SYMBOL(parport_negotiate);
EXPORT_SYMBOL(parport_write);
EXPORT_SYMBOL(parport_read);
EXPORT_SYMBOL(parport_wait_peripheral);
EXPORT_SYMBOL(parport_wait_event);
EXPORT_SYMBOL(parport_set_timeout);
EXPORT_SYMBOL(parport_ieee1284_interrupt);
| gpl-2.0 |
rukin5197/android_kernel_lge_m3s | arch/microblaze/kernel/asm-offsets.c | 13451 | 5153 | /*
* Copyright (C) 2007-2009 Michal Simek <monstr@monstr.eu>
* Copyright (C) 2007-2009 PetaLogix
* Copyright (C) 2006 Atmark Techno, Inc.
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*/
#include <linux/init.h>
#include <linux/stddef.h>
#include <linux/sched.h>
#include <linux/kernel_stat.h>
#include <linux/ptrace.h>
#include <linux/hardirq.h>
#include <linux/thread_info.h>
#include <linux/kbuild.h>
#include <asm/cpuinfo.h>
/*
 * asm-offsets generator: each DEFINE() emits a line that kbuild turns into
 * a #define in asm-offsets.h, so assembly code can use C struct offsets.
 * This program is compiled and parsed at build time, never run on target.
 */
int main(int argc, char *argv[])
{
	/* struct pt_regs */
	DEFINE(PT_SIZE, sizeof(struct pt_regs));
	DEFINE(PT_MSR, offsetof(struct pt_regs, msr));
	DEFINE(PT_EAR, offsetof(struct pt_regs, ear));
	DEFINE(PT_ESR, offsetof(struct pt_regs, esr));
	DEFINE(PT_FSR, offsetof(struct pt_regs, fsr));
	DEFINE(PT_PC, offsetof(struct pt_regs, pc));
	DEFINE(PT_R0, offsetof(struct pt_regs, r0));
	DEFINE(PT_R1, offsetof(struct pt_regs, r1));
	DEFINE(PT_R2, offsetof(struct pt_regs, r2));
	DEFINE(PT_R3, offsetof(struct pt_regs, r3));
	DEFINE(PT_R4, offsetof(struct pt_regs, r4));
	DEFINE(PT_R5, offsetof(struct pt_regs, r5));
	DEFINE(PT_R6, offsetof(struct pt_regs, r6));
	DEFINE(PT_R7, offsetof(struct pt_regs, r7));
	DEFINE(PT_R8, offsetof(struct pt_regs, r8));
	DEFINE(PT_R9, offsetof(struct pt_regs, r9));
	DEFINE(PT_R10, offsetof(struct pt_regs, r10));
	DEFINE(PT_R11, offsetof(struct pt_regs, r11));
	DEFINE(PT_R12, offsetof(struct pt_regs, r12));
	DEFINE(PT_R13, offsetof(struct pt_regs, r13));
	DEFINE(PT_R14, offsetof(struct pt_regs, r14));
	DEFINE(PT_R15, offsetof(struct pt_regs, r15));
	DEFINE(PT_R16, offsetof(struct pt_regs, r16));
	DEFINE(PT_R17, offsetof(struct pt_regs, r17));
	DEFINE(PT_R18, offsetof(struct pt_regs, r18));
	DEFINE(PT_R19, offsetof(struct pt_regs, r19));
	DEFINE(PT_R20, offsetof(struct pt_regs, r20));
	DEFINE(PT_R21, offsetof(struct pt_regs, r21));
	DEFINE(PT_R22, offsetof(struct pt_regs, r22));
	DEFINE(PT_R23, offsetof(struct pt_regs, r23));
	DEFINE(PT_R24, offsetof(struct pt_regs, r24));
	DEFINE(PT_R25, offsetof(struct pt_regs, r25));
	DEFINE(PT_R26, offsetof(struct pt_regs, r26));
	DEFINE(PT_R27, offsetof(struct pt_regs, r27));
	DEFINE(PT_R28, offsetof(struct pt_regs, r28));
	DEFINE(PT_R29, offsetof(struct pt_regs, r29));
	DEFINE(PT_R30, offsetof(struct pt_regs, r30));
	DEFINE(PT_R31, offsetof(struct pt_regs, r31));
	DEFINE(PT_MODE, offsetof(struct pt_regs, pt_mode));
	BLANK();

	/* Magic offsets for PTRACE PEEK/POKE etc */
	/* These are synthetic addresses just past pt_regs, not real fields. */
	DEFINE(PT_TEXT_ADDR, sizeof(struct pt_regs) + 1);
	DEFINE(PT_TEXT_LEN, sizeof(struct pt_regs) + 2);
	DEFINE(PT_DATA_ADDR, sizeof(struct pt_regs) + 3);
	BLANK();

	/* struct task_struct */
	DEFINE(TS_THREAD_INFO, offsetof(struct task_struct, stack));
#ifdef CONFIG_MMU
	DEFINE(TASK_STATE, offsetof(struct task_struct, state));
	DEFINE(TASK_FLAGS, offsetof(struct task_struct, flags));
	DEFINE(TASK_PTRACE, offsetof(struct task_struct, ptrace));
	DEFINE(TASK_BLOCKED, offsetof(struct task_struct, blocked));
	DEFINE(TASK_MM, offsetof(struct task_struct, mm));
	DEFINE(TASK_ACTIVE_MM, offsetof(struct task_struct, active_mm));
	DEFINE(TASK_PID, offsetof(struct task_struct, pid));
	DEFINE(TASK_THREAD, offsetof(struct task_struct, thread));
	DEFINE(THREAD_KSP, offsetof(struct thread_struct, ksp));
	BLANK();

	DEFINE(PGDIR, offsetof(struct thread_struct, pgdir));
	BLANK();
#endif

	/* struct thread_info */
	DEFINE(TI_TASK, offsetof(struct thread_info, task));
	DEFINE(TI_FLAGS, offsetof(struct thread_info, flags));
	DEFINE(TI_ADDR_LIMIT, offsetof(struct thread_info, addr_limit));
	DEFINE(TI_CPU_CONTEXT, offsetof(struct thread_info, cpu_context));
	DEFINE(TI_PREEMPT_COUNT, offsetof(struct thread_info, preempt_count));
	BLANK();

	/* struct cpu_context */
	DEFINE(CC_R1, offsetof(struct cpu_context, r1)); /* r1 */
	DEFINE(CC_R2, offsetof(struct cpu_context, r2));
	/* dedicated registers */
	DEFINE(CC_R13, offsetof(struct cpu_context, r13));
	DEFINE(CC_R14, offsetof(struct cpu_context, r14));
	DEFINE(CC_R15, offsetof(struct cpu_context, r15));
	DEFINE(CC_R16, offsetof(struct cpu_context, r16));
	DEFINE(CC_R17, offsetof(struct cpu_context, r17));
	DEFINE(CC_R18, offsetof(struct cpu_context, r18));
	/* non-volatile registers */
	DEFINE(CC_R19, offsetof(struct cpu_context, r19));
	DEFINE(CC_R20, offsetof(struct cpu_context, r20));
	DEFINE(CC_R21, offsetof(struct cpu_context, r21));
	DEFINE(CC_R22, offsetof(struct cpu_context, r22));
	DEFINE(CC_R23, offsetof(struct cpu_context, r23));
	DEFINE(CC_R24, offsetof(struct cpu_context, r24));
	DEFINE(CC_R25, offsetof(struct cpu_context, r25));
	DEFINE(CC_R26, offsetof(struct cpu_context, r26));
	DEFINE(CC_R27, offsetof(struct cpu_context, r27));
	DEFINE(CC_R28, offsetof(struct cpu_context, r28));
	DEFINE(CC_R29, offsetof(struct cpu_context, r29));
	DEFINE(CC_R30, offsetof(struct cpu_context, r30));
	/* special purpose registers */
	DEFINE(CC_MSR, offsetof(struct cpu_context, msr));
	DEFINE(CC_EAR, offsetof(struct cpu_context, ear));
	DEFINE(CC_ESR, offsetof(struct cpu_context, esr));
	DEFINE(CC_FSR, offsetof(struct cpu_context, fsr));
	BLANK();

	return 0;
}
| gpl-2.0 |
Klozz/Stratos-kernel-GB | drivers/staging/wlan-ng/prism2usb.c | 140 | 8924 | #include "hfa384x_usb.c"
#include "prism2mgmt.c"
#include "prism2mib.c"
#include "prism2sta.c"
/* Build a usb_device_id entry: match on vendor/product ID and stash the
 * human-readable adapter name in .driver_info. */
#define PRISM_USB_DEVICE(vid, pid, name) \
	USB_DEVICE(vid, pid), \
	.driver_info = (unsigned long) name
/* USB IDs this driver binds to; the name string is informational only.
 * NOTE(review): 0x2821:0x3300 is listed twice (ASUS-WL140 and Hawking
 * HighDB) — the second entry can never match; confirm which name is
 * intended before removing one. */
static struct usb_device_id usb_prism_tbl[] = {
	{PRISM_USB_DEVICE(0x04bb, 0x0922, "IOData AirPort WN-B11/USBS")},
	{PRISM_USB_DEVICE(0x07aa, 0x0012, "Corega Wireless LAN USB Stick-11")},
	{PRISM_USB_DEVICE(0x09aa, 0x3642, "Prism2.x 11Mbps WLAN USB Adapter")},
	{PRISM_USB_DEVICE(0x1668, 0x0408, "Actiontec Prism2.5 11Mbps WLAN USB Adapter")},
	{PRISM_USB_DEVICE(0x1668, 0x0421, "Actiontec Prism2.5 11Mbps WLAN USB Adapter")},
	{PRISM_USB_DEVICE(0x1915, 0x2236, "Linksys WUSB11v3.0 11Mbps WLAN USB Adapter")},
	{PRISM_USB_DEVICE(0x066b, 0x2212, "Linksys WUSB11v2.5 11Mbps WLAN USB Adapter")},
	{PRISM_USB_DEVICE(0x066b, 0x2213, "Linksys WUSB12v1.1 11Mbps WLAN USB Adapter")},
	{PRISM_USB_DEVICE(0x067c, 0x1022, "Siemens SpeedStream 1022 11Mbps WLAN USB Adapter")},
	{PRISM_USB_DEVICE(0x049f, 0x0033, "Compaq/Intel W100 PRO/Wireless 11Mbps multiport WLAN Adapter")},
	{PRISM_USB_DEVICE(0x0411, 0x0016, "Melco WLI-USB-S11 11Mbps WLAN Adapter")},
	{PRISM_USB_DEVICE(0x08de, 0x7a01, "PRISM25 IEEE 802.11 Mini USB Adapter")},
	{PRISM_USB_DEVICE(0x8086, 0x1111, "Intel PRO/Wireless 2011B LAN USB Adapter")},
	{PRISM_USB_DEVICE(0x0d8e, 0x7a01, "PRISM25 IEEE 802.11 Mini USB Adapter")},
	{PRISM_USB_DEVICE(0x045e, 0x006e, "Microsoft MN510 Wireless USB Adapter")},
	{PRISM_USB_DEVICE(0x0967, 0x0204, "Acer Warplink USB Adapter")},
	{PRISM_USB_DEVICE(0x0cde, 0x0002, "Z-Com 725/726 Prism2.5 USB/USB Integrated")},
	{PRISM_USB_DEVICE(0x0cde, 0x0005, "Z-Com Xl735 Wireless 802.11b USB Adapter")},
	{PRISM_USB_DEVICE(0x413c, 0x8100, "Dell TrueMobile 1180 Wireless USB Adapter")},
	{PRISM_USB_DEVICE(0x0b3b, 0x1601, "ALLNET 0193 11Mbps WLAN USB Adapter")},
	{PRISM_USB_DEVICE(0x0b3b, 0x1602, "ZyXEL ZyAIR B200 Wireless USB Adapter")},
	{PRISM_USB_DEVICE(0x0baf, 0x00eb, "USRobotics USR1120 Wireless USB Adapter")},
	{PRISM_USB_DEVICE(0x0411, 0x0027, "Melco WLI-USB-KS11G 11Mbps WLAN Adapter")},
	{PRISM_USB_DEVICE(0x04f1, 0x3009, "JVC MP-XP7250 Builtin USB WLAN Adapter")},
	{PRISM_USB_DEVICE(0x0846, 0x4110, "NetGear MA111")},
	{PRISM_USB_DEVICE(0x03f3, 0x0020, "Adaptec AWN-8020 USB WLAN Adapter")},
//	{PRISM_USB_DEVICE(0x0ace, 0x1201, "ZyDAS ZD1201 Wireless USB Adapter")},
	{PRISM_USB_DEVICE(0x2821, 0x3300, "ASUS-WL140 Wireless USB Adapter")},
	{PRISM_USB_DEVICE(0x2001, 0x3700, "DWL-122 Wireless USB Adapter")},
	{PRISM_USB_DEVICE(0x2001, 0x3702, "DWL-120 Rev F Wireless USB Adapter")},
	{PRISM_USB_DEVICE(0x50c2, 0x4013, "Averatec USB WLAN Adapter")},
	{PRISM_USB_DEVICE(0x2c02, 0x14ea, "Planex GW-US11H WLAN USB Adapter")},
	{PRISM_USB_DEVICE(0x124a, 0x168b, "Airvast PRISM3 WLAN USB Adapter")},
	{PRISM_USB_DEVICE(0x083a, 0x3503, "T-Sinus 111 USB WLAN Adapter")},
	{PRISM_USB_DEVICE(0x2821, 0x3300, "Hawking HighDB USB Adapter")},
	{PRISM_USB_DEVICE(0x0411, 0x0044, "Melco WLI-USB-KB11 11Mbps WLAN Adapter")},
	{PRISM_USB_DEVICE(0x1668, 0x6106, "ROPEX FreeLan 802.11b USB Adapter")},
	{PRISM_USB_DEVICE(0x124a, 0x4017, "Pheenet WL-503IA 802.11b USB Adapter")},
	{PRISM_USB_DEVICE(0x0bb2, 0x0302, "Ambit Microsystems Corp.")},
	{PRISM_USB_DEVICE(0x9016, 0x182d, "Sitecom WL-022 802.11b USB Adapter")},
	{PRISM_USB_DEVICE(0x0543, 0x0f01, "ViewSonic Airsync USB Adapter 11Mbps (Prism2.5)")},
	{ /* terminator */ }
};

MODULE_DEVICE_TABLE(usb, usb_prism_tbl);
/*----------------------------------------------------------------
* prism2sta_probe_usb
*
* Probe routine called by the USB subsystem.
*
* Arguments:
* dev ptr to the usb_device struct
* ifnum interface number being offered
*
* Returns:
* NULL - we're not claiming the device+interface
* non-NULL - we are claiming the device+interface and
* this is a ptr to the data we want back
* when disconnect is called.
*
* Side effects:
*
* Call context:
* I'm not sure, assume it's interrupt.
*
----------------------------------------------------------------*/
/*
 * USB probe: allocate the wlan device, reset the MAC if requested,
 * register the net device and enable the card.
 *
 * Fixes over the original error handling:
 *  - the corereset-failure path no longer calls unregister_wlandev()
 *    (the device had not been registered at that point);
 *  - the register-failure path now drops the usb_get_dev() reference
 *    and destroys the hfa384x state instead of leaking both;
 *  - redundant "if (p) kfree(p)" NULL checks removed (kfree(NULL) is a
 *    no-op).
 *
 * Returns 0 on success or a negative errno; on failure intfdata is set
 * to NULL so disconnect sees nothing to tear down.
 */
static int prism2sta_probe_usb(
	struct usb_interface *interface,
	const struct usb_device_id *id)
{
	struct usb_device *dev;
	wlandevice_t *wlandev = NULL;
	hfa384x_t *hw = NULL;
	int result = 0;

	DBFENTER;

	dev = interface_to_usbdev(interface);

	wlandev = create_wlan();
	if (wlandev == NULL) {
		WLAN_LOG_ERROR("%s: Memory allocation failure.\n", dev_info);
		result = -EIO;
		goto failed;
	}
	hw = wlandev->priv;

	if (wlan_setup(wlandev) != 0) {
		WLAN_LOG_ERROR("%s: wlan_setup() failed.\n", dev_info);
		result = -EIO;
		goto failed;
	}

	/* Initialize the hw data */
	hfa384x_create(hw, dev);
	hw->wlandev = wlandev;

	/* Register the wlandev, this gets us a name and registers the
	 * linux netdevice.
	 */
	SET_NETDEV_DEV(wlandev->netdev, &(interface->dev));

	/* Do a chip-level reset on the MAC */
	if (prism2_doreset) {
		result = hfa384x_corereset(hw,
					   prism2_reset_holdtime,
					   prism2_reset_settletime, 0);
		if (result != 0) {
			result = -EIO;
			WLAN_LOG_ERROR(
				"%s: hfa384x_corereset() failed.\n",
				dev_info);
			goto failed_reset;
		}
	}

	usb_get_dev(dev);

	wlandev->msdstate = WLAN_MSD_HWPRESENT;

	if (register_wlandev(wlandev) != 0) {
		WLAN_LOG_ERROR("%s: register_wlandev() failed.\n", dev_info);
		result = -EIO;
		goto failed_register;
	}

	/* enable the card */
	prism2sta_ifstate(wlandev, P80211ENUM_ifstate_enable);

	goto done;

failed_register:
	/* Undo the usb_get_dev() taken above. */
	usb_put_dev(dev);
failed_reset:
	/* NOTE(review): wlan_setup() is not undone here, matching the
	 * original behavior — confirm whether wlan_unsetup() is needed. */
	hfa384x_destroy(hw);
failed:
	kfree(wlandev);
	kfree(hw);
	wlandev = NULL;

done:
	DBFEXIT;
	/* NULL on failure tells disconnect there is nothing to clean up. */
	usb_set_intfdata(interface, wlandev);
	return result;
}
/*----------------------------------------------------------------
* prism2sta_disconnect_usb
*
* Called when a device previously claimed by probe is removed
* from the USB.
*
* Arguments:
* dev ptr to the usb_device struct
* ptr ptr returned by probe() when the device
* was claimed.
*
* Returns:
* Nothing
*
* Side effects:
*
* Call context:
* process
----------------------------------------------------------------*/
/*
 * USB disconnect: unhook the hardware from the stack, cancel all
 * outstanding CTLX transactions, and free everything probe allocated.
 * The teardown order (hwremoved -> splice queues -> disable -> timers ->
 * URBs -> tasklets -> complete waiters -> free) is deliberate; do not
 * reorder.
 */
static void
prism2sta_disconnect_usb(struct usb_interface *interface)
{
	wlandevice_t *wlandev;

	DBFENTER;

	wlandev = (wlandevice_t *) usb_get_intfdata(interface);
	if ( wlandev != NULL ) {
		LIST_HEAD(cleanlist);
		struct list_head *entry;
		struct list_head *temp;
		unsigned long flags;

		hfa384x_t *hw = wlandev->priv;

		if (!hw)
			goto exit;

		/* Atomically mark the hardware gone and capture every queued
		 * CTLX so no new work can be added behind our back. */
		spin_lock_irqsave(&hw->ctlxq.lock, flags);

		p80211netdev_hwremoved(wlandev);

		list_splice_init(&hw->ctlxq.reapable, &cleanlist);
		list_splice_init(&hw->ctlxq.completing, &cleanlist);
		list_splice_init(&hw->ctlxq.pending, &cleanlist);
		list_splice_init(&hw->ctlxq.active, &cleanlist);

		spin_unlock_irqrestore(&hw->ctlxq.lock, flags);

		/* There's no hardware to shutdown, but the driver
		 * might have some tasks or tasklets that must be
		 * stopped before we can tear everything down.
		 */
		prism2sta_ifstate(wlandev, P80211ENUM_ifstate_disable);

		del_singleshot_timer_sync(&hw->throttle);
		del_singleshot_timer_sync(&hw->reqtimer);
		del_singleshot_timer_sync(&hw->resptimer);

		/* Unlink all the URBs. This "removes the wheels"
		 * from the entire CTLX handling mechanism.
		 */
		usb_kill_urb(&hw->rx_urb);
		usb_kill_urb(&hw->tx_urb);
		usb_kill_urb(&hw->ctlx_urb);

		tasklet_kill(&hw->completion_bh);
		tasklet_kill(&hw->reaper_bh);

		flush_scheduled_work();

		/* Now we complete any outstanding commands
		 * and tell everyone who is waiting for their
		 * responses that we have shut down.
		 */
		list_for_each(entry, &cleanlist) {
			hfa384x_usbctlx_t *ctlx;

			ctlx = list_entry(entry, hfa384x_usbctlx_t, list);
			complete(&ctlx->done);
		}

		/* Give any outstanding synchronous commands
		 * a chance to complete. All they need to do
		 * is "wake up", so that's easy.
		 * (I'd like a better way to do this, really.)
		 */
		msleep(100);

		/* Now delete the CTLXs, because no-one else can now. */
		list_for_each_safe(entry, temp, &cleanlist) {
			hfa384x_usbctlx_t *ctlx;

			ctlx = list_entry(entry, hfa384x_usbctlx_t, list);
			kfree(ctlx);
		}

		/* Unhook the wlandev */
		unregister_wlandev(wlandev);
		wlan_unsetup(wlandev);

		/* Drop the reference taken by probe's usb_get_dev(). */
		usb_put_dev(hw->usb);

		hfa384x_destroy(hw);
		kfree(hw);

		kfree(wlandev);
	}

exit:
	usb_set_intfdata(interface, NULL);
	DBFEXIT;
}
/* USB driver glue: ties the ID table to the probe/disconnect callbacks. */
static struct usb_driver prism2_usb_driver = {
	.name = "prism2_usb",
	.probe = prism2sta_probe_usb,
	.disconnect = prism2sta_disconnect_usb,
	.id_table = usb_prism_tbl,
	/* fops, minor? */
};
/*
 * Module init: register the USB driver with the core.
 *
 * Fixed: the original wrote "return usb_register(...); DBFEXIT;", leaving
 * DBFEXIT unreachable, and terminated the function with a stray ';' after
 * the closing brace (not valid ISO C at file scope).
 */
static int __init prism2usb_init(void)
{
	int result;

	DBFENTER;

	/* This call will result in calls to prism2sta_probe_usb. */
	result = usb_register(&prism2_usb_driver);

	DBFEXIT;
	return result;
}
/*
 * Module exit: deregister the USB driver; the core invokes
 * prism2sta_disconnect_usb() for any still-bound interfaces.
 *
 * Fixed: removed the stray ';' after the closing brace (not valid
 * ISO C at file scope).
 */
static void __exit prism2usb_cleanup(void)
{
	DBFENTER;

	usb_deregister(&prism2_usb_driver);

	DBFEXIT;
}

module_init(prism2usb_init);
module_exit(prism2usb_cleanup);
| gpl-2.0 |
slade87/semc_mimmi_kernel_3.0.1.G.0.75 | drivers/staging/meilhaus/medlist.c | 140 | 2634 | /**
* @file me_dlist.c
*
* @brief Implements the device list class.
* @note Copyright (C) 2007 Meilhaus Electronic GmbH (support@meilhaus.de)
* @author Guenter Gebhardt
*/
/*
* Copyright (C) 2007 Meilhaus Electronic GmbH (support@meilhaus.de)
*
* This file is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include "meerror.h"
#include "medefines.h"
#include "medlist.h"
#include "medebug.h"
/* Store the number of devices currently in @dlist into *@number.
 * Always succeeds. */
int me_dlist_query_number_devices(struct me_dlist *dlist, int *number)
{
	int count;

	PDEBUG_LOCKS("called.\n");

	count = dlist->n;
	*number = count;

	return ME_ERRNO_SUCCESS;
}
/* Return the cached count of devices held in @dlist. */
unsigned int me_dlist_get_number_devices(struct me_dlist *dlist)
{
	unsigned int count;

	PDEBUG_LOCKS("called.\n");

	count = dlist->n;
	return count;
}
/* Return the device at position @index in @dlist, or NULL if @index is
 * out of range.  Linear walk of the list. */
me_device_t *me_dlist_get_device(struct me_dlist * dlist, unsigned int index)
{
	struct list_head *cursor;
	unsigned int position = 0;

	PDEBUG_LOCKS("called.\n");

	if (index >= dlist->n) {
		PERROR("Index out of range.\n");
		return NULL;
	}

	list_for_each(cursor, &dlist->head) {
		if (position == index)
			return list_entry(cursor, me_device_t, list);
		++position;
	}

	/* Unreachable while dlist->n matches the list length. */
	return NULL;
}
/* Append @device at the tail of @dlist and bump the element count. */
void me_dlist_add_device_tail(struct me_dlist *dlist, me_device_t * device)
{
	PDEBUG_LOCKS("called.\n");

	++dlist->n;
	list_add_tail(&device->list, &dlist->head);
}
me_device_t *me_dlist_del_device_tail(struct me_dlist *dlist)
{
struct list_head *last;
me_device_t *device;
PDEBUG_LOCKS("called.\n");
if (list_empty(&dlist->head))
return NULL;
last = dlist->head.prev;
device = list_entry(last, me_device_t, list);
list_del(last);
--dlist->n;
return device;
}
/* Initialize @dlist to an empty device list.  Always returns 0. */
int me_dlist_init(me_dlist_t * dlist)
{
	PDEBUG_LOCKS("called.\n");

	dlist->n = 0;
	INIT_LIST_HEAD(&dlist->head);
	return 0;
}
/* Drain @dlist, invoking each device's destructor, and reset the count.
 * Entries are popped from the front until the list is empty. */
void me_dlist_deinit(me_dlist_t * dlist)
{
	PDEBUG_LOCKS("called.\n");

	while (!list_empty(&dlist->head)) {
		struct list_head *entry = dlist->head.next;
		me_device_t *device = list_entry(entry, me_device_t, list);

		list_del(entry);
		device->me_device_destructor(device);
	}

	dlist->n = 0;
}
| gpl-2.0 |
wesgarner/wg-android-kernel | net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c | 140 | 10069 | /* ip_conntrack proc compat - based on ip_conntrack_standalone.c
*
* (C) 1999-2001 Paul `Rusty' Russell
* (C) 2002-2006 Netfilter Core Team <coreteam@netfilter.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/types.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/percpu.h>
#include <net/net_namespace.h>
#include <linux/netfilter.h>
#include <net/netfilter/nf_conntrack_core.h>
#include <net/netfilter/nf_conntrack_l3proto.h>
#include <net/netfilter/nf_conntrack_l4proto.h>
#include <net/netfilter/nf_conntrack_expect.h>
#include <net/netfilter/nf_conntrack_acct.h>
/* Per-open seq_file cursor: tracks which conntrack hash bucket the
 * iteration is currently in. */
struct ct_iter_state {
	struct seq_net_private p;
	unsigned int bucket;
};

/* Return the first entry of the conntrack hash, scanning buckets from 0;
 * NULL if the table is empty.  Caller must hold rcu_read_lock(). */
static struct hlist_node *ct_get_first(struct seq_file *seq)
{
	struct net *net = seq_file_net(seq);
	struct ct_iter_state *st = seq->private;
	struct hlist_node *n;

	for (st->bucket = 0;
	     st->bucket < nf_conntrack_htable_size;
	     st->bucket++) {
		n = rcu_dereference(net->ct.hash[st->bucket].first);
		if (n)
			return n;
	}
	return NULL;
}

/* Advance to the next entry, moving to subsequent buckets when the
 * current chain ends; NULL at end of table.  RCU read side required. */
static struct hlist_node *ct_get_next(struct seq_file *seq,
				      struct hlist_node *head)
{
	struct net *net = seq_file_net(seq);
	struct ct_iter_state *st = seq->private;

	head = rcu_dereference(head->next);
	while (head == NULL) {
		if (++st->bucket >= nf_conntrack_htable_size)
			return NULL;
		head = rcu_dereference(net->ct.hash[st->bucket].first);
	}
	return head;
}
/* Return the entry at ordinal position @pos in the conntrack table,
 * or NULL if the table has fewer entries. */
static struct hlist_node *ct_get_idx(struct seq_file *seq, loff_t pos)
{
	struct hlist_node *node = ct_get_first(seq);

	while (node != NULL && pos > 0) {
		node = ct_get_next(seq, node);
		pos--;
	}
	return pos ? NULL : node;
}
/* seq_file start: take the RCU read lock (released in ct_seq_stop) and
 * position the cursor at *pos. */
static void *ct_seq_start(struct seq_file *seq, loff_t *pos)
	__acquires(RCU)
{
	rcu_read_lock();
	return ct_get_idx(seq, *pos);
}

/* seq_file next: advance the cursor one conntrack entry. */
static void *ct_seq_next(struct seq_file *s, void *v, loff_t *pos)
{
	(*pos)++;
	return ct_get_next(s, v);
}

/* seq_file stop: drop the RCU read lock taken in ct_seq_start. */
static void ct_seq_stop(struct seq_file *s, void *v)
	__releases(RCU)
{
	rcu_read_unlock();
}
/* Format one conntrack entry as a /proc/net/ip_conntrack line.
 * seq_printf() here returns non-zero when the seq buffer is full (old
 * seq_file API), hence each call is checked and -ENOSPC returned so the
 * core retries with a larger buffer. */
static int ct_seq_show(struct seq_file *s, void *v)
{
	const struct nf_conntrack_tuple_hash *hash = v;
	const struct nf_conn *ct = nf_ct_tuplehash_to_ctrack(hash);
	const struct nf_conntrack_l3proto *l3proto;
	const struct nf_conntrack_l4proto *l4proto;

	NF_CT_ASSERT(ct);

	/* we only want to print DIR_ORIGINAL */
	if (NF_CT_DIRECTION(hash))
		return 0;
	/* This is the IPv4 compat view: skip non-IPv4 entries. */
	if (nf_ct_l3num(ct) != AF_INET)
		return 0;

	l3proto = __nf_ct_l3proto_find(nf_ct_l3num(ct));
	NF_CT_ASSERT(l3proto);
	l4proto = __nf_ct_l4proto_find(nf_ct_l3num(ct), nf_ct_protonum(ct));
	NF_CT_ASSERT(l4proto);

	/* Protocol name/number and seconds until the entry expires. */
	if (seq_printf(s, "%-8s %u %ld ",
		       l4proto->name, nf_ct_protonum(ct),
		       timer_pending(&ct->timeout)
		       ? (long)(ct->timeout.expires - jiffies)/HZ : 0) != 0)
		return -ENOSPC;

	if (l4proto->print_conntrack && l4proto->print_conntrack(s, ct))
		return -ENOSPC;

	if (print_tuple(s, &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple,
			l3proto, l4proto))
		return -ENOSPC;

	if (seq_print_acct(s, ct, IP_CT_DIR_ORIGINAL))
		return -ENOSPC;

	if (!(test_bit(IPS_SEEN_REPLY_BIT, &ct->status)))
		if (seq_printf(s, "[UNREPLIED] "))
			return -ENOSPC;

	if (print_tuple(s, &ct->tuplehash[IP_CT_DIR_REPLY].tuple,
			l3proto, l4proto))
		return -ENOSPC;

	if (seq_print_acct(s, ct, IP_CT_DIR_REPLY))
		return -ENOSPC;

	if (test_bit(IPS_ASSURED_BIT, &ct->status))
		if (seq_printf(s, "[ASSURED] "))
			return -ENOSPC;

#ifdef CONFIG_NF_CONNTRACK_MARK
	if (seq_printf(s, "mark=%u ", ct->mark))
		return -ENOSPC;
#endif

#ifdef CONFIG_NF_CONNTRACK_SECMARK
	if (seq_printf(s, "secmark=%u ", ct->secmark))
		return -ENOSPC;
#endif

	if (seq_printf(s, "use=%u\n", atomic_read(&ct->ct_general.use)))
		return -ENOSPC;

	return 0;
}
/* seq_file iterator for /proc/net/ip_conntrack. */
static const struct seq_operations ct_seq_ops = {
	.start = ct_seq_start,
	.next  = ct_seq_next,
	.stop  = ct_seq_stop,
	.show  = ct_seq_show
};

/* open(): allocate per-net iterator state for the conntrack listing. */
static int ct_open(struct inode *inode, struct file *file)
{
	return seq_open_net(inode, file, &ct_seq_ops,
			    sizeof(struct ct_iter_state));
}

/* file_operations for /proc/net/ip_conntrack. */
static const struct file_operations ct_file_ops = {
	.owner   = THIS_MODULE,
	.open    = ct_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release_net,
};
/* expects */
/* Per-open seq_file cursor for the expectation table. */
struct ct_expect_iter_state {
	struct seq_net_private p;
	unsigned int bucket;
};

/* First entry of the expectation hash, scanning buckets from 0; NULL if
 * empty.  Caller must hold rcu_read_lock(). */
static struct hlist_node *ct_expect_get_first(struct seq_file *seq)
{
	struct net *net = seq_file_net(seq);
	struct ct_expect_iter_state *st = seq->private;
	struct hlist_node *n;

	for (st->bucket = 0; st->bucket < nf_ct_expect_hsize; st->bucket++) {
		n = rcu_dereference(net->ct.expect_hash[st->bucket].first);
		if (n)
			return n;
	}
	return NULL;
}

/* Next expectation entry, crossing into later buckets as chains end;
 * NULL at end of table.  RCU read side required. */
static struct hlist_node *ct_expect_get_next(struct seq_file *seq,
					     struct hlist_node *head)
{
	struct net *net = seq_file_net(seq);
	struct ct_expect_iter_state *st = seq->private;

	head = rcu_dereference(head->next);
	while (head == NULL) {
		if (++st->bucket >= nf_ct_expect_hsize)
			return NULL;
		head = rcu_dereference(net->ct.expect_hash[st->bucket].first);
	}
	return head;
}
/* Return the expectation entry at ordinal position @pos, or NULL if the
 * table has fewer entries. */
static struct hlist_node *ct_expect_get_idx(struct seq_file *seq, loff_t pos)
{
	struct hlist_node *node = ct_expect_get_first(seq);

	while (node != NULL && pos > 0) {
		node = ct_expect_get_next(seq, node);
		pos--;
	}
	return pos ? NULL : node;
}
/* seq_file start for the expectation listing: RCU lock + position. */
static void *exp_seq_start(struct seq_file *seq, loff_t *pos)
	__acquires(RCU)
{
	rcu_read_lock();
	return ct_expect_get_idx(seq, *pos);
}

/* seq_file next: advance one expectation entry. */
static void *exp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	(*pos)++;
	return ct_expect_get_next(seq, v);
}

/* seq_file stop: release the RCU read lock from exp_seq_start. */
static void exp_seq_stop(struct seq_file *seq, void *v)
	__releases(RCU)
{
	rcu_read_unlock();
}
/* Format one expectation as a /proc/net/ip_conntrack_expect line:
 * timeout (or "-"), protocol number, then the expected tuple. */
static int exp_seq_show(struct seq_file *s, void *v)
{
	struct nf_conntrack_expect *exp;
	const struct hlist_node *n = v;

	exp = hlist_entry(n, struct nf_conntrack_expect, hnode);

	/* IPv4 compat view only. */
	if (exp->tuple.src.l3num != AF_INET)
		return 0;

	if (exp->timeout.function)
		seq_printf(s, "%ld ", timer_pending(&exp->timeout)
			   ? (long)(exp->timeout.expires - jiffies)/HZ : 0);
	else
		seq_printf(s, "- ");

	seq_printf(s, "proto=%u ", exp->tuple.dst.protonum);

	print_tuple(s, &exp->tuple,
		    __nf_ct_l3proto_find(exp->tuple.src.l3num),
		    __nf_ct_l4proto_find(exp->tuple.src.l3num,
					 exp->tuple.dst.protonum));
	/* seq_putc's return doubles as this callback's status. */
	return seq_putc(s, '\n');
}
/* seq_file iterator for /proc/net/ip_conntrack_expect. */
static const struct seq_operations exp_seq_ops = {
	.start = exp_seq_start,
	.next = exp_seq_next,
	.stop = exp_seq_stop,
	.show = exp_seq_show
};

/* open(): allocate per-net iterator state for the expectation listing. */
static int exp_open(struct inode *inode, struct file *file)
{
	return seq_open_net(inode, file, &exp_seq_ops,
			    sizeof(struct ct_expect_iter_state));
}

/* file_operations for /proc/net/ip_conntrack_expect. */
static const struct file_operations ip_exp_file_ops = {
	.owner   = THIS_MODULE,
	.open    = exp_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release_net,
};
/* seq_file start for per-CPU stats: position 0 yields the header token;
 * positions 1..N map to possible CPUs (hence the *pos-1 below). */
static void *ct_cpu_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct net *net = seq_file_net(seq);
	int cpu;

	if (*pos == 0)
		return SEQ_START_TOKEN;

	for (cpu = *pos-1; cpu < nr_cpu_ids; ++cpu) {
		if (!cpu_possible(cpu))
			continue;
		*pos = cpu+1;
		return per_cpu_ptr(net->ct.stat, cpu);
	}

	return NULL;
}

/* seq_file next: advance to the next possible CPU's stats block. */
static void *ct_cpu_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct net *net = seq_file_net(seq);
	int cpu;

	for (cpu = *pos; cpu < nr_cpu_ids; ++cpu) {
		if (!cpu_possible(cpu))
			continue;
		*pos = cpu+1;
		return per_cpu_ptr(net->ct.stat, cpu);
	}

	return NULL;
}

/* seq_file stop: nothing to release; no lock is held across iteration. */
static void ct_cpu_seq_stop(struct seq_file *seq, void *v)
{
}
/* Emit one row of /proc/net/stat/ip_conntrack: the column header for the
 * start token, otherwise one CPU's counters (first column is the global
 * entry count, repeated on every row). */
static int ct_cpu_seq_show(struct seq_file *seq, void *v)
{
	struct net *net = seq_file_net(seq);
	unsigned int nr_conntracks = atomic_read(&net->ct.count);
	const struct ip_conntrack_stat *st = v;

	if (v == SEQ_START_TOKEN) {
		seq_printf(seq, "entries searched found new invalid ignore delete delete_list insert insert_failed drop early_drop icmp_error expect_new expect_create expect_delete\n");
		return 0;
	}

	seq_printf(seq, "%08x %08x %08x %08x %08x %08x %08x %08x "
			"%08x %08x %08x %08x %08x %08x %08x %08x \n",
		   nr_conntracks,
		   st->searched,
		   st->found,
		   st->new,
		   st->invalid,
		   st->ignore,
		   st->delete,
		   st->delete_list,
		   st->insert,
		   st->insert_failed,
		   st->drop,
		   st->early_drop,
		   st->error,
		   st->expect_new,
		   st->expect_create,
		   st->expect_delete
		);
	return 0;
}
/* seq_file iterator for /proc/net/stat/ip_conntrack. */
static const struct seq_operations ct_cpu_seq_ops = {
	.start	= ct_cpu_seq_start,
	.next	= ct_cpu_seq_next,
	.stop	= ct_cpu_seq_stop,
	.show	= ct_cpu_seq_show,
};

/* open(): only the generic per-net private data is needed here. */
static int ct_cpu_seq_open(struct inode *inode, struct file *file)
{
	return seq_open_net(inode, file, &ct_cpu_seq_ops,
			    sizeof(struct seq_net_private));
}

/* file_operations for /proc/net/stat/ip_conntrack. */
static const struct file_operations ct_cpu_seq_fops = {
	.owner	 = THIS_MODULE,
	.open	 = ct_cpu_seq_open,
	.read	 = seq_read,
	.llseek	 = seq_lseek,
	.release = seq_release_net,
};
/* Per-net init: create the three compat proc entries.  On failure,
 * unwind via the labelled goto chain and report -ENOMEM. */
static int __net_init ip_conntrack_net_init(struct net *net)
{
	struct proc_dir_entry *proc, *proc_exp, *proc_stat;

	proc = proc_net_fops_create(net, "ip_conntrack", 0440, &ct_file_ops);
	if (!proc)
		goto err1;

	proc_exp = proc_net_fops_create(net, "ip_conntrack_expect", 0440,
					&ip_exp_file_ops);
	if (!proc_exp)
		goto err2;

	/* Same name as the entry above, but under /proc/net/stat/. */
	proc_stat = proc_create("ip_conntrack", S_IRUGO,
				net->proc_net_stat, &ct_cpu_seq_fops);
	if (!proc_stat)
		goto err3;

	return 0;

err3:
	proc_net_remove(net, "ip_conntrack_expect");
err2:
	proc_net_remove(net, "ip_conntrack");
err1:
	return -ENOMEM;
}
/* Per-net exit: remove the three proc entries in reverse creation order. */
static void __net_exit ip_conntrack_net_exit(struct net *net)
{
	remove_proc_entry("ip_conntrack", net->proc_net_stat);
	proc_net_remove(net, "ip_conntrack_expect");
	proc_net_remove(net, "ip_conntrack");
}

/* Hooks this compat layer into network-namespace creation/teardown. */
static struct pernet_operations ip_conntrack_net_ops = {
	.init = ip_conntrack_net_init,
	.exit = ip_conntrack_net_exit,
};

/* Module-level entry point: register the per-net operations. */
int __init nf_conntrack_ipv4_compat_init(void)
{
	return register_pernet_subsys(&ip_conntrack_net_ops);
}

/* Module-level exit: unregister the per-net operations. */
void __exit nf_conntrack_ipv4_compat_fini(void)
{
	unregister_pernet_subsys(&ip_conntrack_net_ops);
}
| gpl-2.0 |
mirror-androidarmv6/android_kernel_semc_msm7x27 | drivers/ide/scc_pata.c | 140 | 24877 | /*
* Support for IDE interfaces on Celleb platform
*
* (C) Copyright 2006 TOSHIBA CORPORATION
*
* This code is based on drivers/ide/pci/siimage.c:
* Copyright (C) 2001-2002 Andre Hedrick <andre@linux-ide.org>
* Copyright (C) 2003 Red Hat
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*/
#include <linux/types.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/ide.h>
#include <linux/init.h>
#define PCI_DEVICE_ID_TOSHIBA_SCC_ATA            0x01b4

#define SCC_PATA_NAME           "scc IDE"

/* TDVHSEL register: per-device timing select bits */
#define TDVHSEL_MASTER          0x00000001
#define TDVHSEL_SLAVE           0x00000004

#define MODE_JCUSFEN            0x00000080

/* CCKCTRL register: clock / reset control bits */
#define CCKCTRL_ATARESET        0x00040000
#define CCKCTRL_BUFCNT          0x00020000
#define CCKCTRL_CRST            0x00010000
#define CCKCTRL_OCLKEN          0x00000100
#define CCKCTRL_ATACLKOEN       0x00000002
#define CCKCTRL_LCLKEN          0x00000001

#define QCHCD_IOS_SS		0x00000001

#define QCHSD_STPDIAG		0x00020000

/* interrupt mask / status bits (INTSTS register at dma_base + 0x014) */
#define INTMASK_MSK             0xD1000012
#define INTSTS_SERROR           0x80000000
#define INTSTS_PRERR            0x40000000
#define INTSTS_RERR             0x10000000
#define INTSTS_ICERR            0x01000000
#define INTSTS_BMSINT           0x00000010
#define INTSTS_BMHE             0x00000008
#define INTSTS_IOIRQS           0x00000004
#define INTSTS_INTRQ            0x00000002
#define INTSTS_ACTEINT          0x00000001

#define ECMODE_VALUE 0x01
/* Per-interface state: MMIO bases of the CTRL (ctl) and BMID (dma)
 * PCI BARs, plus the ide_host handle kept for removal. */
static struct scc_ports {
	unsigned long ctl, dma;
	struct ide_host *host;	/* for removing port from system */
} scc_ports[MAX_HWIFS];
/* Timing tables below are indexed as [clock][mode]:
 * clock 0 = 100MHz, clock 1 = 133MHz; mode = PIO mode or UDMA mode
 * number (see scc_set_pio_mode()/scc_set_dma_mode()). */

/* PIO transfer mode table */
/* JCHST */
static unsigned long JCHSTtbl[2][7] = {
	{0x0E, 0x05, 0x02, 0x03, 0x02, 0x00, 0x00},   /* 100MHz */
	{0x13, 0x07, 0x04, 0x04, 0x03, 0x00, 0x00}    /* 133MHz */
};

/* JCHHT */
static unsigned long JCHHTtbl[2][7] = {
	{0x0E, 0x02, 0x02, 0x02, 0x02, 0x00, 0x00},   /* 100MHz */
	{0x13, 0x03, 0x03, 0x03, 0x03, 0x00, 0x00}    /* 133MHz */
};

/* JCHCT */
static unsigned long JCHCTtbl[2][7] = {
	{0x1D, 0x1D, 0x1C, 0x0B, 0x06, 0x00, 0x00},   /* 100MHz */
	{0x27, 0x26, 0x26, 0x0E, 0x09, 0x00, 0x00}    /* 133MHz */
};

/* DMA transfer mode table */
/* JCHDCTM/JCHDCTS */
static unsigned long JCHDCTxtbl[2][7] = {
	{0x0A, 0x06, 0x04, 0x03, 0x01, 0x00, 0x00},   /* 100MHz */
	{0x0E, 0x09, 0x06, 0x04, 0x02, 0x01, 0x00}    /* 133MHz */
};

/* JCSTWTM/JCSTWTS */
static unsigned long JCSTWTxtbl[2][7] = {
	{0x06, 0x04, 0x03, 0x02, 0x02, 0x02, 0x00},   /* 100MHz */
	{0x09, 0x06, 0x04, 0x02, 0x02, 0x02, 0x02}    /* 133MHz */
};

/* JCTSS */
static unsigned long JCTSStbl[2][7] = {
	{0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x00},   /* 100MHz */
	{0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05}    /* 133MHz */
};

/* JCENVT */
static unsigned long JCENVTtbl[2][7] = {
	{0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x00},   /* 100MHz */
	{0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02}    /* 133MHz */
};

/* JCACTSELS/JCACTSELM */
static unsigned long JCACTSELtbl[2][7] = {
	{0x00, 0x00, 0x00, 0x00, 0x01, 0x01, 0x00},   /* 100MHz */
	{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01}    /* 133MHz */
};
static u8 scc_ide_inb(unsigned long port)
{
u32 data = in_be32((void*)port);
return (u8)data;
}
/* Issue an ATA command. The dummy read from BMID offset 0x01c posts the
 * preceding write to the device before we return; eieio() keeps the two
 * MMIO accesses ordered on PowerPC. */
static void scc_exec_command(ide_hwif_t *hwif, u8 cmd)
{
	out_be32((void *)hwif->io_ports.command_addr, cmd);
	eieio();
	in_be32((void *)(hwif->dma_base + 0x01c));
	eieio();
}
/* Return the low byte of the 32-bit mapped ATA status register. */
static u8 scc_read_status(ide_hwif_t *hwif)
{
	u32 val = in_be32((void *)hwif->io_ports.status_addr);

	return (u8)val;
}
/* Return the low byte of the 32-bit mapped ATA alternate-status register. */
static u8 scc_read_altstatus(ide_hwif_t *hwif)
{
	u32 val = in_be32((void *)hwif->io_ports.ctl_addr);

	return (u8)val;
}
/* Return the low byte of the bus-master DMA status register (BMID + 4). */
static u8 scc_dma_sff_read_status(ide_hwif_t *hwif)
{
	u32 val = in_be32((void *)(hwif->dma_base + 4));

	return (u8)val;
}
/* Program the device-control register: enable (on != 0) or mask (on == 0,
 * sets nIEN bit 1) the drive interrupt. on == 4 is a hack used by the core
 * to assert SRST (bit 2). The trailing dummy read posts the write. */
static void scc_set_irq(ide_hwif_t *hwif, int on)
{
	u8 ctl = ATA_DEVCTL_OBS;

	if (on == 4) { /* hack for SRST */
		ctl |= 4;
		on &= ~4;
	}
	ctl |= on ? 0 : 2;

	out_be32((void *)hwif->io_ports.ctl_addr, ctl);
	eieio();
	in_be32((void *)(hwif->dma_base + 0x01c));
	eieio();
}
/* PIO data-in: read 'count' 16-bit words from the data port into addr.
 * Each 32-bit MMIO read yields one little-endian 16-bit data word. */
static void scc_ide_insw(unsigned long port, void *addr, u32 count)
{
	u16 *buf = addr;
	u32 i;

	for (i = 0; i < count; i++)
		buf[i] = le16_to_cpu(in_be32((void *)port));
}
/* PIO data-in, 32-bit flavour: each "long" is transferred as two
 * consecutive 16-bit reads from the data port. */
static void scc_ide_insl(unsigned long port, void *addr, u32 count)
{
	u16 *buf = addr;

	while (count-- > 0) {
		buf[0] = le16_to_cpu(in_be32((void *)port));
		buf[1] = le16_to_cpu(in_be32((void *)port));
		buf += 2;
	}
}
/* Write one byte to a register mapped as a 32-bit big-endian MMIO word.
 * (Argument order mirrors the legacy outb(value, port) convention.) */
static void scc_ide_outb(u8 addr, unsigned long port)
{
	out_be32((void*)port, addr);
}
/* PIO data-out: write 'count' 16-bit words from addr to the data port,
 * one little-endian word per 32-bit MMIO write. */
static void
scc_ide_outsw(unsigned long port, void *addr, u32 count)
{
	u16 *buf = addr;
	u32 i;

	for (i = 0; i < count; i++)
		out_be32((void *)port, cpu_to_le16(buf[i]));
}
/* PIO data-out, 32-bit flavour: each "long" is transferred as two
 * consecutive 16-bit writes to the data port. */
static void
scc_ide_outsl(unsigned long port, void *addr, u32 count)
{
	u16 *buf = addr;

	while (count-- > 0) {
		out_be32((void *)port, cpu_to_le16(buf[0]));
		out_be32((void *)port, cpu_to_le16(buf[1]));
		buf += 2;
	}
}
/**
 * scc_set_pio_mode - set host controller for PIO mode
 * @drive: drive
 * @pio: PIO mode number (0-4, used as index into the timing tables)
 *
 * Load the timing settings for this device mode into the
 * controller.
 */

static void scc_set_pio_mode(ide_drive_t *drive, const u8 pio)
{
	ide_hwif_t *hwif = drive->hwif;
	struct scc_ports *ports = ide_get_hwifdata(hwif);
	unsigned long ctl_base = ports->ctl;
	unsigned long cckctrl_port = ctl_base + 0xff0;
	unsigned long piosht_port = ctl_base + 0x000;
	unsigned long pioct_port = ctl_base + 0x004;
	unsigned long reg;
	int offset;

	/* ATACLKOEN set means the controller runs at 133MHz; pick the
	 * matching row of the timing tables. */
	reg = in_be32((void __iomem *)cckctrl_port);
	if (reg & CCKCTRL_ATACLKOEN) {
		offset = 1; /* 133MHz */
	} else {
		offset = 0; /* 100MHz */
	}
	/* PIOSHT takes JCHST in the high halfword, JCHHT in the low one */
	reg = JCHSTtbl[offset][pio] << 16 | JCHHTtbl[offset][pio];
	out_be32((void __iomem *)piosht_port, reg);
	reg = JCHCTtbl[offset][pio];
	out_be32((void __iomem *)pioct_port, reg);
}
/**
 * scc_set_dma_mode - set host controller for DMA mode
 * @drive: drive
 * @speed: DMA mode (XFER_UDMA_0 .. XFER_UDMA_6; the timing tables only
 *         cover UDMA - assumes the core never passes MW/SW DMA modes here,
 *         consistent with scc_dma_ops only advertising UDMA masks)
 *
 * Load the timing settings for this device mode into the
 * controller.
 */

static void scc_set_dma_mode(ide_drive_t *drive, const u8 speed)
{
	ide_hwif_t *hwif = drive->hwif;
	struct scc_ports *ports = ide_get_hwifdata(hwif);
	unsigned long ctl_base = ports->ctl;
	unsigned long cckctrl_port = ctl_base + 0xff0;
	unsigned long mdmact_port = ctl_base + 0x008;
	unsigned long mcrcst_port = ctl_base + 0x00c;
	unsigned long sdmact_port = ctl_base + 0x010;
	unsigned long scrcst_port = ctl_base + 0x014;
	unsigned long udenvt_port = ctl_base + 0x018;
	unsigned long tdvhsel_port   = ctl_base + 0x020;
	int is_slave = drive->dn & 1;
	int offset, idx;
	unsigned long reg;
	unsigned long jcactsel;

	/* 133MHz vs 100MHz row selection, as in scc_set_pio_mode() */
	reg = in_be32((void __iomem *)cckctrl_port);
	if (reg & CCKCTRL_ATACLKOEN) {
		offset = 1; /* 133MHz */
	} else {
		offset = 0; /* 100MHz */
	}

	idx = speed - XFER_UDMA_0;

	jcactsel = JCACTSELtbl[offset][idx];
	if (is_slave) {
		/* slave device: program the S* timing registers and the
		 * slave bits of TDVHSEL (read-modify-write) */
		out_be32((void __iomem *)sdmact_port, JCHDCTxtbl[offset][idx]);
		out_be32((void __iomem *)scrcst_port, JCSTWTxtbl[offset][idx]);
		jcactsel = jcactsel << 2;
		out_be32((void __iomem *)tdvhsel_port, (in_be32((void __iomem *)tdvhsel_port) & ~TDVHSEL_SLAVE) | jcactsel);
	} else {
		/* master device: M* timing registers and master TDVHSEL bit */
		out_be32((void __iomem *)mdmact_port, JCHDCTxtbl[offset][idx]);
		out_be32((void __iomem *)mcrcst_port, JCSTWTxtbl[offset][idx]);
		out_be32((void __iomem *)tdvhsel_port, (in_be32((void __iomem *)tdvhsel_port) & ~TDVHSEL_MASTER) | jcactsel);
	}
	/* UDENVT takes JCTSS in the high halfword, JCENVT in the low one */
	reg = JCTSStbl[offset][idx] << 16 | JCENVTtbl[offset][idx];
	out_be32((void __iomem *)udenvt_port, reg);
}
/* Enable/disable host-side DMA for one drive by toggling its
 * "DMA capable" flag (bit 5 for master, bit 6 for slave) in the
 * bus-master DMA status register. */
static void scc_dma_host_set(ide_drive_t *drive, int on)
{
	ide_hwif_t *hwif = drive->hwif;
	u8 unit = drive->dn & 1;
	u8 dma_stat = scc_dma_sff_read_status(hwif);

	if (on)
		dma_stat |= (1 << (5 + unit));
	else
		dma_stat &= ~(1 << (5 + unit));

	scc_ide_outb(dma_stat, hwif->dma_base + 4);
}
/**
 * scc_dma_setup - begin a DMA phase
 * @drive: target device
 *
 * Build an IDE DMA PRD (IDE speak for scatter gather table)
 * and then set up the DMA transfer registers.
 *
 * Returns 0 on success. If a PIO fallback is required then 1
 * is returned.
 */

static int scc_dma_setup(ide_drive_t *drive)
{
	ide_hwif_t *hwif = drive->hwif;
	struct request *rq = hwif->rq;
	unsigned int reading;
	u8 dma_stat;

	/* bit 3 of the DMA command register selects the direction:
	 * set = device-to-memory (read), clear = memory-to-device */
	if (rq_data_dir(rq))
		reading = 0;
	else
		reading = 1 << 3;

	/* fall back to pio! */
	if (!ide_build_dmatable(drive, rq)) {
		ide_map_sg(drive, rq);
		return 1;
	}

	/* PRD table */
	out_be32((void __iomem *)(hwif->dma_base + 8), hwif->dmatable_dma);

	/* specify r/w */
	out_be32((void __iomem *)hwif->dma_base, reading);

	/* read DMA status for INTR & ERROR flags */
	dma_stat = scc_dma_sff_read_status(hwif);

	/* clear INTR & ERROR flags */
	out_be32((void __iomem *)(hwif->dma_base + 4), dma_stat | 6);
	drive->waiting_for_dma = 1;

	return 0;
}
/* Kick off the transfer programmed by scc_dma_setup() by setting the
 * start/stop bit (bit 0) of the DMA command register. */
static void scc_dma_start(ide_drive_t *drive)
{
	ide_hwif_t *hwif = drive->hwif;
	u8 dma_cmd = scc_ide_inb(hwif->dma_base);

	/* start DMA */
	scc_ide_outb(dma_cmd | 1, hwif->dma_base);
	wmb();
}
/* Common tail of a DMA transfer: stop the engine, acknowledge INTR/ERROR,
 * free the PRD table. Returns 0 when status is exactly "interrupt, not
 * active, no error" (== 4), otherwise 0x10 | status as an error code. */
static int __scc_dma_end(ide_drive_t *drive)
{
	ide_hwif_t *hwif = drive->hwif;
	u8 dma_stat, dma_cmd;

	drive->waiting_for_dma = 0;
	/* get DMA command mode */
	dma_cmd = scc_ide_inb(hwif->dma_base);
	/* stop DMA */
	scc_ide_outb(dma_cmd & ~1, hwif->dma_base);
	/* get DMA status */
	dma_stat = scc_dma_sff_read_status(hwif);
	/* clear the INTR & ERROR bits */
	scc_ide_outb(dma_stat | 6, hwif->dma_base + 4);
	/* purge DMA mappings */
	ide_destroy_dmatable(drive);
	/* verify good DMA status */
	wmb();
	return (dma_stat & 7) != 4 ? (0x10 | dma_stat) : 0;
}
/**
 * scc_dma_end - Stop DMA
 * @drive: IDE drive
 *
 * Check and clear INT Status register.
 * Then call __scc_dma_end().
 */

static int scc_dma_end(ide_drive_t *drive)
{
	ide_hwif_t *hwif = drive->hwif;
	void __iomem *dma_base = (void __iomem *)hwif->dma_base;
	unsigned long intsts_port = hwif->dma_base + 0x014;
	u32 reg;
	int dma_stat, data_loss = 0;
	/* NOTE(review): 'retry' is static, so it is shared across all
	 * ports/drives and never reset - confirm this is intentional. */
	static int retry = 0;

	/* errata A308 workaround: Step5 (check data loss) */
	/* We don't check non ide_disk because it is limited to UDMA4 */
	if (!(in_be32((void __iomem *)hwif->io_ports.ctl_addr)
	      & ATA_ERR) &&
	    drive->media == ide_disk && drive->current_speed > XFER_UDMA_4) {
		reg = in_be32((void __iomem *)intsts_port);
		if (!(reg & INTSTS_ACTEINT)) {
			printk(KERN_WARNING "%s: operation failed (transfer data loss)\n",
			       drive->name);
			data_loss = 1;
			if (retry++) {
				struct request *rq = hwif->rq;
				/* deliberately shadows the parameter for the
				 * per-device iteration below */
				ide_drive_t *drive;
				int i;

				/* ERROR_RESET and drive->crc_count are needed
				 * to reduce DMA transfer mode in retry process.
				 */
				if (rq)
					rq->errors |= ERROR_RESET;

				ide_port_for_each_dev(i, drive, hwif)
					drive->crc_count++;
			}
		}
	}

	/* Drain every pending interrupt-status condition, acknowledging
	 * each one and rescanning until no bit we handle remains set.
	 * NOTE(review): the loop has no iteration bound; it relies on the
	 * hardware eventually deasserting these bits. */
	while (1) {
		reg = in_be32((void __iomem *)intsts_port);

		if (reg & INTSTS_SERROR) {
			printk(KERN_WARNING "%s: SERROR\n", SCC_PATA_NAME);
			out_be32((void __iomem *)intsts_port, INTSTS_SERROR|INTSTS_BMSINT);

			out_be32(dma_base, in_be32(dma_base) & ~QCHCD_IOS_SS);
			continue;
		}

		if (reg & INTSTS_PRERR) {
			u32 maea0, maec0;
			unsigned long ctl_base = hwif->config_data;

			maea0 = in_be32((void __iomem *)(ctl_base + 0xF50));
			maec0 = in_be32((void __iomem *)(ctl_base + 0xF54));

			printk(KERN_WARNING "%s: PRERR [addr:%x cmd:%x]\n", SCC_PATA_NAME, maea0, maec0);

			out_be32((void __iomem *)intsts_port, INTSTS_PRERR|INTSTS_BMSINT);

			out_be32(dma_base, in_be32(dma_base) & ~QCHCD_IOS_SS);
			continue;
		}

		if (reg & INTSTS_RERR) {
			printk(KERN_WARNING "%s: Response Error\n", SCC_PATA_NAME);
			out_be32((void __iomem *)intsts_port, INTSTS_RERR|INTSTS_BMSINT);

			out_be32(dma_base, in_be32(dma_base) & ~QCHCD_IOS_SS);
			continue;
		}

		if (reg & INTSTS_ICERR) {
			out_be32(dma_base, in_be32(dma_base) & ~QCHCD_IOS_SS);

			printk(KERN_WARNING "%s: Illegal Configuration\n", SCC_PATA_NAME);
			out_be32((void __iomem *)intsts_port, INTSTS_ICERR|INTSTS_BMSINT);
			continue;
		}

		if (reg & INTSTS_BMSINT) {
			printk(KERN_WARNING "%s: Internal Bus Error\n", SCC_PATA_NAME);
			out_be32((void __iomem *)intsts_port, INTSTS_BMSINT);

			ide_do_reset(drive);
			continue;
		}

		if (reg & INTSTS_BMHE) {
			out_be32((void __iomem *)intsts_port, INTSTS_BMHE);
			continue;
		}

		if (reg & INTSTS_ACTEINT) {
			out_be32((void __iomem *)intsts_port, INTSTS_ACTEINT);
			continue;
		}

		if (reg & INTSTS_IOIRQS) {
			out_be32((void __iomem *)intsts_port, INTSTS_IOIRQS);
			continue;
		}
		break;
	}

	dma_stat = __scc_dma_end(drive);
	if (data_loss)
		dma_stat |= 2; /* emulate DMA error (to retry command) */
	return dma_stat;
}
/* returns 1 if dma irq issued, 0 otherwise */
static int scc_dma_test_irq(ide_drive_t *drive)
{
	ide_hwif_t *hwif = drive->hwif;
	u32 int_stat = in_be32((void __iomem *)hwif->dma_base + 0x014);

	/* SCC errata A252,A308 workaround: Step4 -
	 * only accept INTRQ if the device also reports an error */
	if ((in_be32((void __iomem *)hwif->io_ports.ctl_addr)
	     & ATA_ERR) &&
	    (int_stat & INTSTS_INTRQ))
		return 1;

	/* SCC errata A308 workaround: Step5 (polling IOIRQS) */
	if (int_stat & INTSTS_IOIRQS)
		return 1;

	return 0;
}
/* Restrict the advertised UDMA mask for non-disk devices (errata A308):
 * anything other than an ide_disk is capped at UDMA4 when the host
 * mask offers UDMA5 or above. */
static u8 scc_udma_filter(ide_drive_t *drive)
{
	ide_hwif_t *hwif = drive->hwif;
	u8 mask = hwif->ultra_mask;

	if (drive->media == ide_disk || !(mask & 0xE0))
		return mask;

	printk(KERN_INFO "%s: limit %s to UDMA4\n",
	       SCC_PATA_NAME, drive->name);
	return ATA_UDMA4;
}
/**
 * setup_mmio_scc - map CTRL/BMID region
 * @dev: PCI device we are configuring
 * @name: device name
 *
 * Finds a free slot in scc_ports[], reserves PCI BARs 0 and 1 and
 * ioremaps them (BAR0 = CTRL block, BAR1 = BMID block), enables bus
 * mastering and stashes the mapping in dev's drvdata.
 *
 * Returns 1 on success, a negative errno on failure.
 */

static int setup_mmio_scc (struct pci_dev *dev, const char *name)
{
	void __iomem *ctl_addr;
	void __iomem *dma_addr;
	int i, ret;

	/* find an unused slot */
	for (i = 0; i < MAX_HWIFS; i++) {
		if (scc_ports[i].ctl == 0)
			break;
	}
	if (i >= MAX_HWIFS)
		return -ENOMEM;

	ret = pci_request_selected_regions(dev, (1 << 2) - 1, name);
	if (ret < 0) {
		printk(KERN_ERR "%s: can't reserve resources\n", name);
		return ret;
	}

	ctl_addr = pci_ioremap_bar(dev, 0);
	if (!ctl_addr)
		goto fail_0;

	dma_addr = pci_ioremap_bar(dev, 1);
	if (!dma_addr)
		goto fail_1;

	pci_set_master(dev);
	scc_ports[i].ctl = (unsigned long)ctl_addr;
	scc_ports[i].dma = (unsigned long)dma_addr;
	pci_set_drvdata(dev, (void *) &scc_ports[i]);

	return 1;

 fail_1:
	iounmap(ctl_addr);
 fail_0:
	/* bug fix: the regions requested above were leaked on ioremap
	 * failure; release them so the BARs can be claimed again */
	pci_release_selected_regions(dev, (1 << 2) - 1);
	return -ENOMEM;
}
/* Register the port with the IDE core. The nine taskfile registers
 * (data..ctl, io_ports_array[0..8]) all live in the BMID block at
 * dma_base + 0x20, spaced 4 bytes apart. Returns 0 or a negative errno. */
static int scc_ide_setup_pci_device(struct pci_dev *dev,
				    const struct ide_port_info *d)
{
	struct scc_ports *ports = pci_get_drvdata(dev);
	struct ide_host *host;
	hw_regs_t hw, *hws[] = { &hw, NULL, NULL, NULL };
	int i, rc;

	memset(&hw, 0, sizeof(hw));
	for (i = 0; i <= 8; i++)
		hw.io_ports_array[i] = ports->dma + 0x20 + i * 4;
	hw.irq = dev->irq;
	hw.dev = &dev->dev;
	hw.chipset = ide_pci;

	rc = ide_host_add(d, hws, &host);
	if (rc)
		return rc;

	ports->host = host;

	return 0;
}
/**
 * init_setup_scc - set up an SCC PATA Controller
 * @dev: PCI device
 * @d: IDE port info
 *
 * Perform the initial set up for this device: enable the PCI device,
 * map its BARs, bring the clocks out of reset, program the ECMODE/MODE
 * registers and the interrupt mask, then register with the IDE core.
 */

static int __devinit init_setup_scc(struct pci_dev *dev,
				    const struct ide_port_info *d)
{
	unsigned long ctl_base;
	unsigned long dma_base;
	unsigned long cckctrl_port;
	unsigned long intmask_port;
	unsigned long mode_port;
	unsigned long ecmode_port;
	u32 reg = 0;
	struct scc_ports *ports;
	int rc;

	rc = pci_enable_device(dev);
	if (rc)
		goto end;

	rc = setup_mmio_scc(dev, d->name);
	if (rc < 0)
		goto end;

	ports = pci_get_drvdata(dev);
	ctl_base = ports->ctl;
	dma_base = ports->dma;
	cckctrl_port = ctl_base + 0xff0;
	intmask_port = dma_base + 0x010;
	mode_port = ctl_base + 0x024;
	ecmode_port = ctl_base + 0xf00;

	/* controller initialization: enable the clocks step by step,
	 * then deassert the core reset (CRST) and the ATA reset */
	reg = 0;
	out_be32((void*)cckctrl_port, reg);
	reg |= CCKCTRL_ATACLKOEN;
	out_be32((void*)cckctrl_port, reg);
	reg |= CCKCTRL_LCLKEN | CCKCTRL_OCLKEN;
	out_be32((void*)cckctrl_port, reg);
	reg |= CCKCTRL_CRST;
	out_be32((void*)cckctrl_port, reg);

	/* NOTE(review): unbounded poll - hangs if CRST never reads back;
	 * udelay(5000) also exceeds the usual 1ms udelay guideline */
	for (;;) {
		reg = in_be32((void*)cckctrl_port);
		if (reg & CCKCTRL_CRST)
			break;
		udelay(5000);
	}

	reg |= CCKCTRL_ATARESET;
	out_be32((void*)cckctrl_port, reg);

	out_be32((void*)ecmode_port, ECMODE_VALUE);
	out_be32((void*)mode_port, MODE_JCUSFEN);
	out_be32((void*)intmask_port, INTMASK_MSK);

	rc = scc_ide_setup_pci_device(dev, d);

 end:
	return rc;
}
/* Write the requested taskfile registers to the device. HIHI masks the
 * obsolete head bits out of the device register for LBA48 commands;
 * flagged (raw) taskfiles write the device byte unmasked. */
static void scc_tf_load(ide_drive_t *drive, ide_task_t *task)
{
	struct ide_io_ports *io_ports = &drive->hwif->io_ports;
	struct ide_taskfile *tf = &task->tf;
	u8 HIHI = (task->tf_flags & IDE_TFLAG_LBA48) ? 0xE0 : 0xEF;

	if (task->tf_flags & IDE_TFLAG_FLAGGED)
		HIHI = 0xFF;

	/* data register carries hob_data in the high byte, data in the low */
	if (task->tf_flags & IDE_TFLAG_OUT_DATA)
		out_be32((void *)io_ports->data_addr,
			 (tf->hob_data << 8) | tf->data);

	/* HOB (high-order byte) registers first, then the low-order set */
	if (task->tf_flags & IDE_TFLAG_OUT_HOB_FEATURE)
		scc_ide_outb(tf->hob_feature, io_ports->feature_addr);
	if (task->tf_flags & IDE_TFLAG_OUT_HOB_NSECT)
		scc_ide_outb(tf->hob_nsect, io_ports->nsect_addr);
	if (task->tf_flags & IDE_TFLAG_OUT_HOB_LBAL)
		scc_ide_outb(tf->hob_lbal, io_ports->lbal_addr);
	if (task->tf_flags & IDE_TFLAG_OUT_HOB_LBAM)
		scc_ide_outb(tf->hob_lbam, io_ports->lbam_addr);
	if (task->tf_flags & IDE_TFLAG_OUT_HOB_LBAH)
		scc_ide_outb(tf->hob_lbah, io_ports->lbah_addr);

	if (task->tf_flags & IDE_TFLAG_OUT_FEATURE)
		scc_ide_outb(tf->feature, io_ports->feature_addr);
	if (task->tf_flags & IDE_TFLAG_OUT_NSECT)
		scc_ide_outb(tf->nsect, io_ports->nsect_addr);
	if (task->tf_flags & IDE_TFLAG_OUT_LBAL)
		scc_ide_outb(tf->lbal, io_ports->lbal_addr);
	if (task->tf_flags & IDE_TFLAG_OUT_LBAM)
		scc_ide_outb(tf->lbam, io_ports->lbam_addr);
	if (task->tf_flags & IDE_TFLAG_OUT_LBAH)
		scc_ide_outb(tf->lbah, io_ports->lbah_addr);

	if (task->tf_flags & IDE_TFLAG_OUT_DEVICE)
		scc_ide_outb((tf->device & HIHI) | drive->select,
			     io_ports->device_addr);
}
/* Read the requested taskfile registers from the device. The HOB bit
 * (0x80) of the device-control register selects which half of the
 * LBA48 shadow registers is visible. */
static void scc_tf_read(ide_drive_t *drive, ide_task_t *task)
{
	struct ide_io_ports *io_ports = &drive->hwif->io_ports;
	struct ide_taskfile *tf = &task->tf;

	if (task->tf_flags & IDE_TFLAG_IN_DATA) {
		u16 data = (u16)in_be32((void *)io_ports->data_addr);

		tf->data = data & 0xff;
		tf->hob_data = (data >> 8) & 0xff;
	}

	/* be sure we're looking at the low order bits */
	scc_ide_outb(ATA_DEVCTL_OBS & ~0x80, io_ports->ctl_addr);

	if (task->tf_flags & IDE_TFLAG_IN_FEATURE)
		tf->feature = scc_ide_inb(io_ports->feature_addr);
	if (task->tf_flags & IDE_TFLAG_IN_NSECT)
		tf->nsect  = scc_ide_inb(io_ports->nsect_addr);
	if (task->tf_flags & IDE_TFLAG_IN_LBAL)
		tf->lbal   = scc_ide_inb(io_ports->lbal_addr);
	if (task->tf_flags & IDE_TFLAG_IN_LBAM)
		tf->lbam   = scc_ide_inb(io_ports->lbam_addr);
	if (task->tf_flags & IDE_TFLAG_IN_LBAH)
		tf->lbah   = scc_ide_inb(io_ports->lbah_addr);
	if (task->tf_flags & IDE_TFLAG_IN_DEVICE)
		tf->device = scc_ide_inb(io_ports->device_addr);

	if (task->tf_flags & IDE_TFLAG_LBA48) {
		/* switch to the high-order (HOB) register set */
		scc_ide_outb(ATA_DEVCTL_OBS | 0x80, io_ports->ctl_addr);

		if (task->tf_flags & IDE_TFLAG_IN_HOB_FEATURE)
			tf->hob_feature = scc_ide_inb(io_ports->feature_addr);
		if (task->tf_flags & IDE_TFLAG_IN_HOB_NSECT)
			tf->hob_nsect   = scc_ide_inb(io_ports->nsect_addr);
		if (task->tf_flags & IDE_TFLAG_IN_HOB_LBAL)
			tf->hob_lbal    = scc_ide_inb(io_ports->lbal_addr);
		if (task->tf_flags & IDE_TFLAG_IN_HOB_LBAM)
			tf->hob_lbam    = scc_ide_inb(io_ports->lbam_addr);
		if (task->tf_flags & IDE_TFLAG_IN_HOB_LBAH)
			tf->hob_lbah    = scc_ide_inb(io_ports->lbah_addr);
	}
}
/* PIO input of 'len' bytes. len++ rounds an odd byte count up so the
 * final word is still transferred; in 32-bit mode a trailing half-word
 * remainder is fetched with a single 16-bit read. */
static void scc_input_data(ide_drive_t *drive, struct request *rq,
			   void *buf, unsigned int len)
{
	unsigned long data_addr = drive->hwif->io_ports.data_addr;

	len++;

	if (drive->io_32bit) {
		scc_ide_insl(data_addr, buf, len / 4);

		if ((len & 3) >= 2)
			scc_ide_insw(data_addr, (u8 *)buf + (len & ~3), 1);
	} else
		scc_ide_insw(data_addr, buf, len / 2);
}
/* PIO output of 'len' bytes; mirror image of scc_input_data() (same
 * odd-length round-up and 32-bit/16-bit split). */
static void scc_output_data(ide_drive_t *drive,  struct request *rq,
			    void *buf, unsigned int len)
{
	unsigned long data_addr = drive->hwif->io_ports.data_addr;

	len++;

	if (drive->io_32bit) {
		scc_ide_outsl(data_addr, buf, len / 4);

		if ((len & 3) >= 2)
			scc_ide_outsw(data_addr, (u8 *)buf + (len & ~3), 1);
	} else
		scc_ide_outsw(data_addr, buf, len / 2);
}
/**
 * init_mmio_iops_scc - set up the iops for MMIO
 * @hwif: interface to set up
 *
 * Point the hwif at the BMID block (dma_base) and the CTRL block
 * (config_data) that setup_mmio_scc() mapped earlier.
 */

static void __devinit init_mmio_iops_scc(ide_hwif_t *hwif)
{
	struct pci_dev *dev = to_pci_dev(hwif->dev);
	struct scc_ports *ports = pci_get_drvdata(dev);
	unsigned long dma_base = ports->dma;

	ide_set_hwifdata(hwif, ports);

	hwif->dma_base = dma_base;
	hwif->config_data = ports->ctl;
}
/**
 * init_iops_scc - set up iops
 * @hwif: interface to set up
 *
 * Do the basic setup for the SCC hardware interface
 * and then do the MMIO setup; skipped entirely when the device
 * has no drvdata (i.e. setup_mmio_scc() never ran for it).
 */

static void __devinit init_iops_scc(ide_hwif_t *hwif)
{
	struct pci_dev *dev = to_pci_dev(hwif->dev);

	hwif->hwif_data = NULL;
	if (pci_get_drvdata(dev) == NULL)
		return;
	init_mmio_iops_scc(hwif);
}
/* Allocate the generic IDE DMA engine (PRD table) for this interface. */
static int __devinit scc_init_dma(ide_hwif_t *hwif,
				  const struct ide_port_info *d)
{
	return ide_allocate_dma_engine(hwif);
}
/* The Celleb board always uses an 80-wire cable, so report it
 * unconditionally (enables UDMA > 2). */
static u8 scc_cable_detect(ide_hwif_t *hwif)
{
	return ATA_CBL_PATA80;
}
/**
 * init_hwif_scc - set up hwif
 * @hwif: interface to set up
 *
 * We do the basic set up of the interface structure. The SCC
 * requires several custom handlers so we override the default
 * ide DMA handlers appropriately.
 */

static void __devinit init_hwif_scc(ide_hwif_t *hwif)
{
	/* PTERADD: tell the controller where the PRD table lives */
	out_be32((void __iomem *)(hwif->dma_base + 0x018), hwif->dmatable_dma);

	/* advertise UDMA6 only when the controller clock is 133MHz */
	if (in_be32((void __iomem *)(hwif->config_data + 0xff0)) & CCKCTRL_ATACLKOEN)
		hwif->ultra_mask = ATA_UDMA6; /* 133MHz */
	else
		hwif->ultra_mask = ATA_UDMA5; /* 100MHz */
}
/* Taskfile-port operations: all accessors go through the 32-bit
 * big-endian MMIO wrappers above. */
static const struct ide_tp_ops scc_tp_ops = {
	.exec_command		= scc_exec_command,
	.read_status		= scc_read_status,
	.read_altstatus		= scc_read_altstatus,
	.set_irq		= scc_set_irq,

	.tf_load		= scc_tf_load,
	.tf_read		= scc_tf_read,

	.input_data		= scc_input_data,
	.output_data		= scc_output_data,
};

/* Per-port tuning hooks (PIO/UDMA timing, errata UDMA filter, cable). */
static const struct ide_port_ops scc_port_ops = {
	.set_pio_mode		= scc_set_pio_mode,
	.set_dma_mode		= scc_set_dma_mode,
	.udma_filter		= scc_udma_filter,
	.cable_detect		= scc_cable_detect,
};

/* Bus-master DMA hooks; generic helpers fill the gaps the SCC
 * does not need to override. */
static const struct ide_dma_ops scc_dma_ops = {
	.dma_host_set		= scc_dma_host_set,
	.dma_setup		= scc_dma_setup,
	.dma_exec_cmd		= ide_dma_exec_cmd,
	.dma_start		= scc_dma_start,
	.dma_end		= scc_dma_end,
	.dma_test_irq		= scc_dma_test_irq,
	.dma_lost_irq		= ide_dma_lost_irq,
	.dma_timeout		= ide_dma_timeout,
	.dma_sff_read_status	= scc_dma_sff_read_status,
};
/* Template for an SCC port-info entry; IDE_HFLAG_SINGLE because the
 * chip provides a single channel. */
#define DECLARE_SCC_DEV(name_str)			\
	{						\
		.name		= name_str,		\
		.init_iops	= init_iops_scc,	\
		.init_dma	= scc_init_dma,		\
		.init_hwif	= init_hwif_scc,	\
		.tp_ops		= &scc_tp_ops,		\
		.port_ops	= &scc_port_ops,	\
		.dma_ops	= &scc_dma_ops,		\
		.host_flags	= IDE_HFLAG_SINGLE,	\
		.pio_mask	= ATA_PIO4,		\
	}

static const struct ide_port_info scc_chipsets[] __devinitdata = {
	/* 0 */ DECLARE_SCC_DEV("sccIDE"),
};
/**
 * scc_init_one - pci layer discovery entry
 * @dev: PCI device
 * @id: ident table entry (driver_data indexes scc_chipsets[])
 *
 * Called by the PCI code when it finds an SCC PATA controller.
 * We then use the IDE PCI generic helper to do most of the work.
 */

static int __devinit scc_init_one(struct pci_dev *dev, const struct pci_device_id *id)
{
	return init_setup_scc(dev, &scc_chipsets[id->driver_data]);
}
/**
 * scc_remove - pci layer remove entry
 * @dev: PCI device
 *
 * Called by the PCI code when it removes an SCC PATA controller.
 * Unregisters the host, unmaps both BARs, releases the PCI regions
 * and zeroes the scc_ports[] slot so it can be reused.
 */

static void __devexit scc_remove(struct pci_dev *dev)
{
	struct scc_ports *ports = pci_get_drvdata(dev);
	struct ide_host *host = ports->host;

	ide_host_remove(host);

	iounmap((void*)ports->dma);
	iounmap((void*)ports->ctl);
	pci_release_selected_regions(dev, (1 << 2) - 1);
	memset(ports, 0, sizeof(*ports));
}
/* PCI ids this driver binds to (single Toshiba SCC ATA function). */
static const struct pci_device_id scc_pci_tbl[] = {
	{ PCI_VDEVICE(TOSHIBA_2, PCI_DEVICE_ID_TOSHIBA_SCC_ATA), 0 },
	{ 0, },
};
MODULE_DEVICE_TABLE(pci, scc_pci_tbl);

static struct pci_driver scc_pci_driver = {
	.name		= "SCC IDE",
	.id_table	= scc_pci_tbl,
	.probe		= scc_init_one,
	.remove		= __devexit_p(scc_remove),
};
/* Module entry: register with the IDE-PCI layer. */
static int scc_ide_init(void)
{
	return ide_pci_register_driver(&scc_pci_driver);
}

module_init(scc_ide_init);

/* -- No exit code?
static void scc_ide_exit(void)
{
	ide_pci_unregister_driver(&scc_pci_driver);
}
module_exit(scc_ide_exit);
 */

MODULE_DESCRIPTION("PCI driver module for Toshiba SCC IDE");
MODULE_LICENSE("GPL");
| gpl-2.0 |
javilonas/Enki-GT-I9300 | drivers/usb/serial/garmin_gps.c | 652 | 40777 | /*
* Garmin GPS driver
*
* Copyright (C) 2006-2011 Hermann Kneissel herkne@gmx.de
*
* The latest version of the driver can be found at
* http://sourceforge.net/projects/garmin-gps/
*
* This driver has been derived from v2.1 of the visor driver.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111 USA
*/
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <linux/tty.h>
#include <linux/tty_driver.h>
#include <linux/tty_flip.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/uaccess.h>
#include <asm/atomic.h>
#include <linux/usb.h>
#include <linux/usb/serial.h>
/* the mode to be set when the port ist opened */
static int initial_mode = 1;

/* debug flag */
static int debug;

#define GARMIN_VENDOR_ID             0x091E

/*
 * Version Information
 */

#define VERSION_MAJOR	0
#define VERSION_MINOR	36

#define _STR(s) #s
#define _DRIVER_VERSION(a, b) "v" _STR(a) "." _STR(b)
#define DRIVER_VERSION _DRIVER_VERSION(VERSION_MAJOR, VERSION_MINOR)
#define DRIVER_AUTHOR "hermann kneissel"
#define DRIVER_DESC "garmin gps driver"

/* error codes returned by the driver */
#define EINVPKT	1000	/* invalid packet structure */

/* size of the header of a packet using the usb protocol */
#define GARMIN_PKTHDR_LENGTH	12

/* max. possible size of a packet using the serial protocol */
#define MAX_SERIAL_PKT_SIZ (3 + 255 + 3)

/* max. possible size of a packet with worst case stuffing */
#define MAX_SERIAL_PKT_SIZ_STUFFED (MAX_SERIAL_PKT_SIZ + 256)

/* size of a buffer able to hold a complete (no stuffing) packet
 * (the document protocol does not contain packets with a larger
 *  size, but in theory a packet may be 64k+12 bytes - if in
 *  later protocol versions larger packet sizes occur, this value
 *  should be increased accordingly, so the input buffer is always
 *  large enough the store a complete packet inclusive header) */
#define GPS_IN_BUFSIZ  (GARMIN_PKTHDR_LENGTH+MAX_SERIAL_PKT_SIZ)

/* size of a buffer able to hold a complete (incl. stuffing) packet */
#define GPS_OUT_BUFSIZ (GARMIN_PKTHDR_LENGTH+MAX_SERIAL_PKT_SIZ_STUFFED)

/* where to place the packet id of a serial packet, so we can
 * prepend the usb-packet header without the need to move the
 * packets data */
#define GSP_INITIAL_OFFSET (GARMIN_PKTHDR_LENGTH-2)

/* max. size of incoming private packets (header+1 param) */
#define PRIVPKTSIZ (GARMIN_PKTHDR_LENGTH+4)

/* layer ids used in the USB packet header */
#define GARMIN_LAYERID_TRANSPORT  0
#define GARMIN_LAYERID_APPL      20
/* our own layer-id to use for some control mechanisms */
#define GARMIN_LAYERID_PRIVATE	0x01106E4B

#define GARMIN_PKTID_PVT_DATA	51
#define GARMIN_PKTID_L001_COMMAND_DATA 10

#define CMND_ABORT_TRANSFER 0

/* packet ids used in private layer */
#define PRIV_PKTID_SET_DEBUG	1
#define PRIV_PKTID_SET_MODE	2
#define PRIV_PKTID_INFO_REQ	3
#define PRIV_PKTID_INFO_RESP	4
#define PRIV_PKTID_RESET_REQ	5
#define PRIV_PKTID_SET_DEF_MODE	6

/* Garmin serial-protocol framing bytes */
#define ETX	0x03
#define DLE	0x10
#define ACK	0x06
#define NAK	0x15
/* structure used to queue incoming packets */
struct garmin_packet {
	struct list_head list;
	int seq;
	/* the real size of the data array, always > 0 */
	int size;
	__u8 data[1];	/* old-style variable-length tail, allocated oversized */
};
/* structure used to keep the current state of the driver */
struct garmin_data {
	__u8   state;		/* STATE_* below */
	__u16  flags;		/* FLAGS_* bitmask below, guarded by 'lock' */
	__u8   mode;		/* MODE_NATIVE or MODE_GARMIN_SERIAL */
	__u8   count;
	__u8   pkt_id;
	__u32  serial_num;
	struct timer_list timer;
	struct usb_serial_port *port;
	int    seq_counter;
	int    insize;		/* bytes currently in inbuffer */
	int    outsize;		/* bytes currently in outbuffer */
	__u8   inbuffer [GPS_IN_BUFSIZ];  /* tty -> usb */
	__u8   outbuffer[GPS_OUT_BUFSIZ]; /* usb -> tty */
	__u8   privpkt[4*6];
	spinlock_t lock;
	struct list_head pktlist;	/* queued struct garmin_packet */
};

/* driver states (garmin_data.state) */
#define STATE_NEW            0
#define STATE_INITIAL_DELAY  1
#define STATE_TIMEOUT        2
#define STATE_SESSION_REQ1   3
#define STATE_SESSION_REQ2   4
#define STATE_ACTIVE         5

#define STATE_RESET	     8
#define STATE_DISCONNECTED   9
#define STATE_WAIT_TTY_ACK  10
#define STATE_GSP_WAIT_DATA 11

#define MODE_NATIVE          0
#define MODE_GARMIN_SERIAL   1

/* Flags used in garmin_data.flags: */
#define FLAGS_SESSION_REPLY_MASK  0x00C0
#define FLAGS_SESSION_REPLY1_SEEN 0x0080
#define FLAGS_SESSION_REPLY2_SEEN 0x0040

#define FLAGS_BULK_IN_ACTIVE      0x0020
#define FLAGS_BULK_IN_RESTART     0x0010
#define FLAGS_THROTTLED           0x0008
#define APP_REQ_SEEN              0x0004
#define APP_RESP_SEEN             0x0002
#define CLEAR_HALT_REQUIRED       0x0001

#define FLAGS_QUEUING             0x0100
#define FLAGS_DROP_DATA           0x0800

#define FLAGS_GSP_SKIP            0x1000
#define FLAGS_GSP_DLESEEN         0x2000
/* function prototypes */
static int gsp_next_packet(struct garmin_data *garmin_data_p);
static int garmin_write_bulk(struct usb_serial_port *port,
			     const unsigned char *buf, int count,
			     int dismiss_ack);

/* some special packets to be send or received */
static unsigned char const GARMIN_START_SESSION_REQ[]
	= { 0, 0, 0, 0,  5, 0, 0, 0, 0, 0, 0, 0 };
static unsigned char const GARMIN_START_SESSION_REPLY[]
	= { 0, 0, 0, 0,  6, 0, 0, 0, 4, 0, 0, 0 };
static unsigned char const GARMIN_BULK_IN_AVAIL_REPLY[]
	= { 0, 0, 0, 0,  2, 0, 0, 0, 0, 0, 0, 0 };
static unsigned char const GARMIN_APP_LAYER_REPLY[]
	= { 0x14, 0, 0, 0 };
static unsigned char const GARMIN_START_PVT_REQ[]
	= { 20, 0, 0, 0,  10, 0, 0, 0, 2, 0, 0, 0, 49, 0 };
static unsigned char const GARMIN_STOP_PVT_REQ[]
	= { 20, 0, 0, 0,  10, 0, 0, 0, 2, 0, 0, 0, 50, 0 };
static unsigned char const GARMIN_STOP_TRANSFER_REQ[]
	= { 20, 0, 0, 0,  10, 0, 0, 0, 2, 0, 0, 0, 0, 0 };
static unsigned char const GARMIN_STOP_TRANSFER_REQ_V2[]
	= { 20, 0, 0, 0,  10, 0, 0, 0, 1, 0, 0, 0, 0 };
static unsigned char const PRIVATE_REQ[]
	=    { 0x4B, 0x6E, 0x10, 0x01,  0xFF, 0, 0, 0, 0xFF, 0, 0, 0 };

static const struct usb_device_id id_table[] = {
	/* the same device id seems to be used by all
	   usb enabled GPS devices */
	{ USB_DEVICE(GARMIN_VENDOR_ID, 3) },
	{ }					/* Terminating entry */
};

MODULE_DEVICE_TABLE(usb, id_table);

static struct usb_driver garmin_driver = {
	.name		= "garmin_gps",
	.probe		= usb_serial_probe,
	.disconnect	= usb_serial_disconnect,
	.id_table	= id_table,
	.no_dynamic_id	= 1,
};
/* first le32 of a USB packet header: the layer id */
static inline int getLayerId(const __u8 *usbPacket)
{
	return __le32_to_cpup((__le32 *)(usbPacket));
}
/* second le32 of a USB packet header: the packet id */
static inline int getPacketId(const __u8 *usbPacket)
{
	return __le32_to_cpup((__le32 *)(usbPacket+4));
}
/* third le32 of a USB packet header: the payload length */
static inline int getDataLength(const __u8 *usbPacket)
{
	return __le32_to_cpup((__le32 *)(usbPacket+8));
}
/*
 * check if the usb-packet in buf contains an abort-transfer command.
 * (if yes, all queued data will be dropped)
 */
static inline int isAbortTrfCmnd(const unsigned char *buf)
{
	return memcmp(buf, GARMIN_STOP_TRANSFER_REQ,
		      sizeof(GARMIN_STOP_TRANSFER_REQ)) == 0 ||
	       memcmp(buf, GARMIN_STOP_TRANSFER_REQ_V2,
		      sizeof(GARMIN_STOP_TRANSFER_REQ_V2)) == 0;
}
/* Push 'actual_length' bytes up to the tty layer (no-op when the tty is
 * gone or the buffer is empty); always drops the tty reference taken. */
static void send_to_tty(struct usb_serial_port *port,
			char *data, unsigned int actual_length)
{
	struct tty_struct *tty = tty_port_tty_get(&port->port);

	if (tty && actual_length) {

		usb_serial_debug_data(debug, &port->dev,
					__func__, actual_length, data);

		tty_insert_flip_string(tty, data, actual_length);
		tty_flip_buffer_push(tty);
	}
	tty_kref_put(tty);
}
/******************************************************************************
 * packet queue handling
 ******************************************************************************/

/*
 * queue a received (usb-)packet for later processing
 *
 * Returns nonzero when the queue was empty before the add (i.e. this
 * packet is the new head); returns 0 for empty input or on allocation
 * failure.
 */
static int pkt_add(struct garmin_data *garmin_data_p,
		   unsigned char *data, unsigned int data_length)
{
	int state = 0;
	int result = 0;
	unsigned long flags;
	struct garmin_packet *pkt;

	/* process only packets containg data ... */
	if (data_length) {
		pkt = kmalloc(sizeof(struct garmin_packet)+data_length,
								GFP_ATOMIC);
		if (pkt == NULL) {
			dev_err(&garmin_data_p->port->dev, "out of memory\n");
			return 0;
		}
		pkt->size = data_length;
		memcpy(pkt->data, data, data_length);

		spin_lock_irqsave(&garmin_data_p->lock, flags);
		garmin_data_p->flags |= FLAGS_QUEUING;
		result = list_empty(&garmin_data_p->pktlist);
		pkt->seq = garmin_data_p->seq_counter++;
		list_add_tail(&pkt->list, &garmin_data_p->pktlist);
		state = garmin_data_p->state;
		spin_unlock_irqrestore(&garmin_data_p->lock, flags);

		dbg("%s - added: pkt: %d - %d bytes",
			__func__, pkt->seq, data_length);

		/* in serial mode, if someone is waiting for data from
		   the device, convert and send the next packet to tty. */
		if (result && (state == STATE_GSP_WAIT_DATA))
			gsp_next_packet(garmin_data_p);
	}
	return result;
}
/* get the next pending packet */
/* Dequeues the list head under the lock; returns NULL when the queue is
 * empty. The caller owns (and must kfree) the returned packet. */
static struct garmin_packet *pkt_pop(struct garmin_data *garmin_data_p)
{
	unsigned long flags;
	struct garmin_packet *result = NULL;

	spin_lock_irqsave(&garmin_data_p->lock, flags);
	if (!list_empty(&garmin_data_p->pktlist)) {
		result = (struct garmin_packet *)garmin_data_p->pktlist.next;
		list_del(&result->list);
	}
	spin_unlock_irqrestore(&garmin_data_p->lock, flags);
	return result;
}
/* free up all queued data */
static void pkt_clear(struct garmin_data *garmin_data_p)
{
	unsigned long flags;
	struct garmin_packet *result = NULL;

	dbg("%s", __func__);

	spin_lock_irqsave(&garmin_data_p->lock, flags);
	while (!list_empty(&garmin_data_p->pktlist)) {
		result = (struct garmin_packet *)garmin_data_p->pktlist.next;
		list_del(&result->list);
		kfree(result);
	}
	spin_unlock_irqrestore(&garmin_data_p->lock, flags);
}
/******************************************************************************
 * garmin serial protocol handling handling
 ******************************************************************************/

/* send an ack packet back to the tty
 *
 * Builds a serial-protocol ACK frame:
 *   DLE ACK <size=2> <pkt_id> [DLE if pkt_id==DLE] 0 <cksum> DLE ETX
 * The checksum is the two's complement of the sum of the payload bytes.
 * NOTE(review): the checksum byte is not DLE-stuffed here; if it ever
 * equals DLE the receiver could mis-frame - confirm against the Garmin
 * serial protocol spec. */
static int gsp_send_ack(struct garmin_data *garmin_data_p, __u8 pkt_id)
{
	__u8 pkt[10];
	__u8 cksum = 0;
	__u8 *ptr = pkt;
	unsigned  l = 0;

	dbg("%s - pkt-id: 0x%X.", __func__, 0xFF & pkt_id);

	*ptr++ = DLE;
	*ptr++ = ACK;
	cksum += ACK;

	*ptr++ = 2;
	cksum += 2;

	*ptr++ = pkt_id;
	cksum += pkt_id;

	/* DLE inside the payload must be escaped by doubling it */
	if (pkt_id == DLE)
		*ptr++ = DLE;

	*ptr++ = 0;
	*ptr++ = 0xFF & (-cksum);
	*ptr++ = DLE;
	*ptr++ = ETX;

	l = ptr-pkt;

	send_to_tty(garmin_data_p->port, pkt, l);
	return 0;
}
/*
 * called for a complete packet received from tty layer
 *
 * the complete packet (pktid ... cksum) is in garmin_data_p->inbuf starting
 * at GSP_INITIAL_OFFSET.
 *
 * count - number of bytes in the input buffer including space reserved for
 *         the usb header: GSP_INITIAL_OFFSET + number of bytes in packet
 *         (including pkt-id, data-length a. cksum)
 *
 * Validates length and checksum, converts the serial frame into a Garmin
 * USB packet in-place (the USB header overwrites the reserved prefix of
 * inbuffer) and forwards it to the bulk-out pipe.  Returns 'count' on
 * success or -EINVPKT on a malformed frame.
 */
static int gsp_rec_packet(struct garmin_data *garmin_data_p, int count)
{
	unsigned long flags;
	const __u8 *recpkt = garmin_data_p->inbuffer+GSP_INITIAL_OFFSET;
	__le32 *usbdata = (__le32 *) garmin_data_p->inbuffer;
	int cksum = 0;
	int n = 0;
	int pktid = recpkt[0];
	int size = recpkt[1];

	usb_serial_debug_data(debug, &garmin_data_p->port->dev,
			      __func__, count-GSP_INITIAL_OFFSET, recpkt);

	/* the length byte inside the frame must match the number of
	   payload bytes actually received (minus pktid, len and cksum) */
	if (size != (count-GSP_INITIAL_OFFSET-3)) {
		dbg("%s - invalid size, expected %d bytes, got %d",
		    __func__, size, (count-GSP_INITIAL_OFFSET-3));
		return -EINVPKT;
	}

	cksum += *recpkt++;
	cksum += *recpkt++;

	/* sanity check, remove after test ... */
	if ((__u8 *)&(usbdata[3]) != recpkt) {
		/* BUGFIX: report the pointer that was actually compared
		   (usbdata[3]); the old message printed usbdata[4] and so
		   could never match the condition above. */
		dbg("%s - ptr mismatch %p - %p",
		    __func__, &(usbdata[3]), recpkt);
		return -EINVPKT;
	}

	while (n < size) {
		cksum += *recpkt++;
		n++;
	}

	/* checksum byte must bring the running sum to 0 mod 256 */
	if ((0xff & (cksum + *recpkt)) != 0) {
		dbg("%s - invalid checksum, expected %02x, got %02x",
		    __func__, 0xff & -cksum, 0xff & *recpkt);
		return -EINVPKT;
	}

	/* build the USB packet header in the reserved prefix of inbuffer */
	usbdata[0] = __cpu_to_le32(GARMIN_LAYERID_APPL);
	usbdata[1] = __cpu_to_le32(pktid);
	usbdata[2] = __cpu_to_le32(size);

	garmin_write_bulk(garmin_data_p->port, garmin_data_p->inbuffer,
			  GARMIN_PKTHDR_LENGTH+size, 0);

	/* if this was an abort-transfer command, flush all
	   queued data. */
	if (isAbortTrfCmnd(garmin_data_p->inbuffer)) {
		spin_lock_irqsave(&garmin_data_p->lock, flags);
		garmin_data_p->flags |= FLAGS_DROP_DATA;
		spin_unlock_irqrestore(&garmin_data_p->lock, flags);
		pkt_clear(garmin_data_p);
	}

	return count;
}
/*
* Called for data received from tty
*
* buf contains the data read, it may span more than one packet or even
* incomplete packets
*
* input record should be a serial-record, but it may not be complete.
* Copy it into our local buffer, until an etx is seen (or an error
* occurs).
* Once the record is complete, convert into a usb packet and send it
* to the bulk pipe, send an ack back to the tty.
*
* If the input is an ack, just send the last queued packet to the
* tty layer.
*
* if the input is an abort command, drop all queued data.
*/
static int gsp_receive(struct garmin_data *garmin_data_p,
		       const unsigned char *buf, int count)
{
	unsigned long flags;
	int offs = 0;
	int ack_or_nak_seen = 0;
	__u8 *dest;
	int size;
	/* dleSeen: set if last byte read was a DLE */
	int dleSeen;
	/* skip: if set, skip incoming data until possible start of
	 * new packet
	 */
	int skip;
	__u8 data;

	/* snapshot the persistent parser state under the lock; the byte
	   loop below works on these local copies only */
	spin_lock_irqsave(&garmin_data_p->lock, flags);
	dest = garmin_data_p->inbuffer;
	size = garmin_data_p->insize;
	dleSeen = garmin_data_p->flags & FLAGS_GSP_DLESEEN;
	skip = garmin_data_p->flags & FLAGS_GSP_SKIP;
	spin_unlock_irqrestore(&garmin_data_p->lock, flags);
	/* dbg("%s - dle=%d skip=%d size=%d count=%d",
		__func__, dleSeen, skip, size, count); */

	/* leave room at the front of inbuffer for the USB packet header
	   that gsp_rec_packet() writes in-place */
	if (size == 0)
		size = GSP_INITIAL_OFFSET;

	while (offs < count) {
		data = *(buf+offs);
		offs++;
		if (data == DLE) {
			if (skip) { /* start of a new pkt */
				skip = 0;
				size = GSP_INITIAL_OFFSET;
				dleSeen = 1;
			} else if (dleSeen) {
				/* DLE DLE is a stuffed (escaped) data byte */
				dest[size++] = data;
				dleSeen = 0;
			} else {
				dleSeen = 1;
			}
		} else if (data == ETX) {
			if (dleSeen) {
				/* packet complete */
				data = dest[GSP_INITIAL_OFFSET];
				if (data == ACK) {
					ack_or_nak_seen = ACK;
					dbg("ACK packet complete.");
				} else if (data == NAK) {
					ack_or_nak_seen = NAK;
					dbg("NAK packet complete.");
				} else {
					dbg("packet complete - id=0x%X.",
					    0xFF & data);
					gsp_rec_packet(garmin_data_p, size);
				}
				skip = 1;
				size = GSP_INITIAL_OFFSET;
				dleSeen = 0;
			} else {
				/* bare ETX is ordinary payload */
				dest[size++] = data;
			}
		} else if (!skip) {
			if (dleSeen) {
				/* DLE followed by a non-ETX byte starts a
				   new frame */
				size = GSP_INITIAL_OFFSET;
				dleSeen = 0;
			}
			dest[size++] = data;
		}

		/* overflow protection: discard and resynchronize */
		if (size >= GPS_IN_BUFSIZ) {
			dbg("%s - packet too large.", __func__);
			skip = 1;
			size = GSP_INITIAL_OFFSET;
			dleSeen = 0;
		}
	}

	/* write the parser state back for the next chunk of tty data */
	spin_lock_irqsave(&garmin_data_p->lock, flags);
	garmin_data_p->insize = size;
	/* copy flags back to structure */
	if (skip)
		garmin_data_p->flags |= FLAGS_GSP_SKIP;
	else
		garmin_data_p->flags &= ~FLAGS_GSP_SKIP;
	if (dleSeen)
		garmin_data_p->flags |= FLAGS_GSP_DLESEEN;
	else
		garmin_data_p->flags &= ~FLAGS_GSP_DLESEEN;
	spin_unlock_irqrestore(&garmin_data_p->lock, flags);

	/* an ACK/NAK from the tty releases the next queued packet */
	if (ack_or_nak_seen) {
		if (gsp_next_packet(garmin_data_p) > 0)
			garmin_data_p->state = STATE_ACTIVE;
		else
			garmin_data_p->state = STATE_GSP_WAIT_DATA;
	}
	return count;
}
/*
* Sends a usb packet to the tty
*
* Assumes, that all packages and at an usb-packet boundary.
*
* return <0 on error, 0 if packet is incomplete or > 0 if packet was sent
*/
static int gsp_send(struct garmin_data *garmin_data_p,
		    const unsigned char *buf, int count)
{
	const unsigned char *src;
	unsigned char *dst;
	int pktid = 0;
	int datalen = 0;
	int cksum = 0;
	int i = 0;
	int k;

	dbg("%s - state %d - %d bytes.", __func__,
	    garmin_data_p->state, count);

	/* accumulate into outbuffer until a whole USB packet is present */
	k = garmin_data_p->outsize;
	if ((k+count) > GPS_OUT_BUFSIZ) {
		dbg("packet too large");
		garmin_data_p->outsize = 0;
		return -4;
	}

	memcpy(garmin_data_p->outbuffer+k, buf, count);
	k += count;
	garmin_data_p->outsize = k;

	if (k >= GARMIN_PKTHDR_LENGTH) {
		pktid = getPacketId(garmin_data_p->outbuffer);
		datalen = getDataLength(garmin_data_p->outbuffer);
		i = GARMIN_PKTHDR_LENGTH + datalen;
		if (k < i)
			return 0;	/* packet still incomplete */
	} else {
		return 0;		/* not even a full header yet */
	}

	dbg("%s - %d bytes in buffer, %d bytes in pkt.", __func__, k, i);

	/* garmin_data_p->outbuffer now contains a complete packet */

	usb_serial_debug_data(debug, &garmin_data_p->port->dev,
			      __func__, k, garmin_data_p->outbuffer);

	garmin_data_p->outsize = 0;

	if (GARMIN_LAYERID_APPL != getLayerId(garmin_data_p->outbuffer)) {
		dbg("not an application packet (%d)",
		    getLayerId(garmin_data_p->outbuffer));
		return -1;
	}

	/* the serial frame stores id and length in single bytes */
	if (pktid > 255) {
		dbg("packet-id %d too large", pktid);
		return -2;
	}

	if (datalen > 255) {
		dbg("packet-size %d too large", datalen);
		return -3;
	}

	/* the serial protocol should be able to handle this packet */

	/* count DLE bytes in the payload - each will need a stuffing byte */
	k = 0;
	src = garmin_data_p->outbuffer+GARMIN_PKTHDR_LENGTH;
	for (i = 0; i < datalen; i++) {
		if (*src++ == DLE)
			k++;
	}

	src = garmin_data_p->outbuffer+GARMIN_PKTHDR_LENGTH;
	if (k > (GARMIN_PKTHDR_LENGTH-2)) {
		/* can't add stuffing DLEs in place, move data to end
		   of buffer ... */
		dst = garmin_data_p->outbuffer+GPS_OUT_BUFSIZ-datalen;
		memcpy(dst, src, datalen);
		src = dst;
	}

	/* rewrite outbuffer from the front as a DLE-framed serial packet:
	   DLE id len payload cksum DLE ETX, stuffing every DLE value */
	dst = garmin_data_p->outbuffer;
	*dst++ = DLE;
	*dst++ = pktid;
	cksum += pktid;
	*dst++ = datalen;
	cksum += datalen;
	if (datalen == DLE)
		*dst++ = DLE;
	for (i = 0; i < datalen; i++) {
		__u8 c = *src++;
		*dst++ = c;
		cksum += c;
		if (c == DLE)
			*dst++ = DLE;
	}

	/* checksum is the two's complement of the byte sum, DLE-stuffed too */
	cksum = 0xFF & -cksum;
	*dst++ = cksum;
	if (cksum == DLE)
		*dst++ = DLE;
	*dst++ = DLE;
	*dst++ = ETX;

	i = dst-garmin_data_p->outbuffer;

	send_to_tty(garmin_data_p->port, garmin_data_p->outbuffer, i);

	/* remember the id so the tty's ACK/NAK can be matched */
	garmin_data_p->pkt_id = pktid;
	garmin_data_p->state = STATE_WAIT_TTY_ACK;

	return i;
}
/*
 * Process the next pending data packet - if there is one
 *
 * Pops queued packets and hands them to gsp_send() until one is actually
 * transmitted (return > 0) or the queue runs dry.  Returns the last
 * gsp_send() result, or 0 if the queue was empty.
 */
static int gsp_next_packet(struct garmin_data *garmin_data_p)
{
	struct garmin_packet *pkt;
	int result = 0;

	while ((pkt = pkt_pop(garmin_data_p)) != NULL) {
		dbg("%s - next pkt: %d", __func__, pkt->seq);
		result = gsp_send(garmin_data_p, pkt->data, pkt->size);
		kfree(pkt);
		if (result > 0)
			break;
	}
	return result;
}
/******************************************************************************
* garmin native mode
******************************************************************************/
/*
* Called for data received from tty
*
* The input data is expected to be in garmin usb-packet format.
*
* buf contains the data read, it may span more than one packet
* or even incomplete packets
*/
static int nat_receive(struct garmin_data *garmin_data_p,
		       const unsigned char *buf, int count)
{
	unsigned long flags;
	__u8 *dest;
	int offs = 0;
	int result = count;
	int len;

	while (offs < count) {
		/* if buffer contains header, copy rest of data */
		if (garmin_data_p->insize >= GARMIN_PKTHDR_LENGTH)
			len = GARMIN_PKTHDR_LENGTH
				+getDataLength(garmin_data_p->inbuffer);
		else
			len = GARMIN_PKTHDR_LENGTH;

		if (len >= GPS_IN_BUFSIZ) {
			/* seems to be an invalid packet, ignore rest
			   of input */
			dbg("%s - packet size too large: %d", __func__, len);
			garmin_data_p->insize = 0;
			count = 0;	/* forces the loop to terminate */
			result = -EINVPKT;
		} else {
			/* copy only as many bytes as are still missing
			   for the current packet and are available */
			len -= garmin_data_p->insize;
			if (len > (count-offs))
				len = (count-offs);
			if (len > 0) {
				dest = garmin_data_p->inbuffer
					+ garmin_data_p->insize;
				memcpy(dest, buf+offs, len);
				garmin_data_p->insize += len;
				offs += len;
			}
		}

		/* do we have a complete packet ? */
		if (garmin_data_p->insize >= GARMIN_PKTHDR_LENGTH) {
			len = GARMIN_PKTHDR_LENGTH+
				getDataLength(garmin_data_p->inbuffer);
			if (garmin_data_p->insize >= len) {
				/* forward the assembled packet and reset
				   for the next one */
				garmin_write_bulk(garmin_data_p->port,
						  garmin_data_p->inbuffer,
						  len, 0);
				garmin_data_p->insize = 0;

				/* if this was an abort-transfer command,
				   flush all queued data. */
				if (isAbortTrfCmnd(garmin_data_p->inbuffer)) {
					spin_lock_irqsave(&garmin_data_p->lock,
							  flags);
					garmin_data_p->flags |= FLAGS_DROP_DATA;
					spin_unlock_irqrestore(
						&garmin_data_p->lock, flags);
					pkt_clear(garmin_data_p);
				}
			}
		}
	}
	return result;
}
/******************************************************************************
* private packets
******************************************************************************/
/*
 * Answer a PRIV_PKTID_INFO_REQ: send a private info-response packet
 * (driver version, current mode, device serial number) to the tty.
 */
static void priv_status_resp(struct usb_serial_port *port)
{
	struct garmin_data *garmin_data_p = usb_get_serial_port_data(port);
	__le32 *pkt = (__le32 *)garmin_data_p->privpkt;
	unsigned int n = 0;

	pkt[n++] = __cpu_to_le32(GARMIN_LAYERID_PRIVATE);
	pkt[n++] = __cpu_to_le32(PRIV_PKTID_INFO_RESP);
	pkt[n++] = __cpu_to_le32(12);		/* payload: 3 * 4 bytes */
	pkt[n++] = __cpu_to_le32(VERSION_MAJOR << 16 | VERSION_MINOR);
	pkt[n++] = __cpu_to_le32(garmin_data_p->mode);
	pkt[n++] = __cpu_to_le32(garmin_data_p->serial_num);

	send_to_tty(port, (__u8 *)pkt, n * 4);
}
/******************************************************************************
* Garmin specific driver functions
******************************************************************************/
/*
 * Handle PRIV_PKTID_RESET_REQ: mark the port as being reset, stop the
 * interrupt-in URB and ask usbcore to reset the device.  Returns the
 * usb_reset_device() status (0 on success).
 */
static int process_resetdev_request(struct usb_serial_port *port)
{
	unsigned long flags;
	int status;
	struct garmin_data *garmin_data_p = usb_get_serial_port_data(port);

	spin_lock_irqsave(&garmin_data_p->lock, flags);
	garmin_data_p->flags &= ~(CLEAR_HALT_REQUIRED);
	garmin_data_p->state = STATE_RESET;
	garmin_data_p->serial_num = 0;	/* re-learned on session start */
	spin_unlock_irqrestore(&garmin_data_p->lock, flags);

	/* stop the interrupt endpoint before resetting the device */
	usb_kill_urb(port->interrupt_in_urb);

	dbg("%s - usb_reset_device", __func__);
	status = usb_reset_device(port->serial->dev);
	if (status)
		dbg("%s - usb_reset_device failed: %d",
		    __func__, status);
	return status;
}
/*
 * clear all cached data
 *
 * Drops every queued packet and resets the partial input and output
 * buffers.  Always returns 0.
 */
static int garmin_clear(struct garmin_data *garmin_data_p)
{
	unsigned long flags;
	int status = 0;

	/* flush all queued data */
	pkt_clear(garmin_data_p);

	spin_lock_irqsave(&garmin_data_p->lock, flags);
	garmin_data_p->insize = 0;
	garmin_data_p->outsize = 0;
	spin_unlock_irqrestore(&garmin_data_p->lock, flags);

	return status;
}
/*
 * Start a new session with the device: (re)arm the interrupt-in URB and
 * send the start-session request three times, as the protocol requires.
 *
 * Returns 0 on success or a negative errno from URB submission /
 * garmin_write_bulk().
 */
static int garmin_init_session(struct usb_serial_port *port)
{
	struct usb_serial *serial = port->serial;
	struct garmin_data *garmin_data_p = usb_get_serial_port_data(port);
	int status;
	int i;

	/* CLEANUP: the old 'if (status == 0)' guards tested a variable
	   that was just initialized to 0 (first guard) and are replaced
	   by early returns; behavior is unchanged. */
	usb_kill_urb(port->interrupt_in_urb);

	dbg("%s - adding interrupt input", __func__);
	port->interrupt_in_urb->dev = serial->dev;
	status = usb_submit_urb(port->interrupt_in_urb, GFP_KERNEL);
	if (status) {
		dev_err(&serial->dev->dev,
			"%s - failed submitting interrupt urb, error %d\n",
			__func__, status);
		return status;
	}

	/*
	 * using the initialization method from gpsbabel. See comments in
	 * gpsbabel/jeeps/gpslibusb.c gusb_reset_toggles()
	 */
	dbg("%s - starting session ...", __func__);
	garmin_data_p->state = STATE_ACTIVE;
	for (i = 0; i < 3; i++) {
		status = garmin_write_bulk(port,
					   GARMIN_START_SESSION_REQ,
					   sizeof(GARMIN_START_SESSION_REQ), 0);
		if (status < 0)
			return status;
	}
	/* garmin_write_bulk() returns the byte count on success */
	if (status > 0)
		status = 0;

	return status;
}
/*
 * Open callback: reset per-port state to the configured initial mode and,
 * if a device reset is pending, start a fresh session.
 */
static int garmin_open(struct tty_struct *tty, struct usb_serial_port *port)
{
	unsigned long flags;
	int status = 0;
	struct garmin_data *garmin_data_p = usb_get_serial_port_data(port);

	dbg("%s - port %d", __func__, port->number);

	spin_lock_irqsave(&garmin_data_p->lock, flags);
	garmin_data_p->mode = initial_mode;
	garmin_data_p->count = 0;
	/* NOTE(review): '&=' (not '&= ~') keeps ONLY the
	   SESSION_REPLY1_SEEN bit and clears all other flags - this
	   looks intentional (preserve session state across open), but
	   confirm against FLAGS_* definitions */
	garmin_data_p->flags &= FLAGS_SESSION_REPLY1_SEEN;
	spin_unlock_irqrestore(&garmin_data_p->lock, flags);

	/* shutdown any bulk reads that might be going on */
	usb_kill_urb(port->write_urb);
	usb_kill_urb(port->read_urb);

	if (garmin_data_p->state == STATE_RESET)
		status = garmin_init_session(port);

	garmin_data_p->state = STATE_ACTIVE;
	return status;
}
/*
 * Close callback: drop cached data, stop the bulk URBs and remember
 * whether a new session must be started on the next open.
 */
static void garmin_close(struct usb_serial_port *port)
{
	struct usb_serial *serial = port->serial;
	struct garmin_data *garmin_data_p = usb_get_serial_port_data(port);

	dbg("%s - port %d - mode=%d state=%d flags=0x%X", __func__,
	    port->number, garmin_data_p->mode,
	    garmin_data_p->state, garmin_data_p->flags);

	if (!serial)
		return;

	garmin_clear(garmin_data_p);

	/* shutdown our urbs */
	usb_kill_urb(port->read_urb);
	usb_kill_urb(port->write_urb);

	/* keep reset state so we know that we must start a new session */
	if (garmin_data_p->state != STATE_RESET)
		garmin_data_p->state = STATE_DISCONNECTED;
}
/*
 * Completion handler for bulk-out URBs submitted by garmin_write_bulk().
 *
 * urb->context is the port, or NULL when the write was submitted with
 * dismiss_ack=1 - in that case only the transfer buffer is freed.  In
 * serial mode an application-layer packet is acknowledged to the tty
 * once the device has accepted it.
 */
static void garmin_write_bulk_callback(struct urb *urb)
{
	struct usb_serial_port *port = urb->context;

	if (port) {
		struct garmin_data *garmin_data_p =
			usb_get_serial_port_data(port);

		dbg("%s - port %d", __func__, port->number);

		if (GARMIN_LAYERID_APPL == getLayerId(urb->transfer_buffer)) {
			if (garmin_data_p->mode == MODE_GARMIN_SERIAL) {
				/* byte 4 of the USB header is the pkt id */
				gsp_send_ack(garmin_data_p,
					     ((__u8 *)urb->transfer_buffer)[4]);
			}
		}
		usb_serial_port_softint(port);
	}

	/* Ignore errors that resulted from garmin_write_bulk with
	   dismiss_ack = 1 */

	/* free up the transfer buffer, as usb_free_urb() does not do this */
	kfree(urb->transfer_buffer);
}
/*
 * Copy 'count' bytes from 'buf' into a freshly allocated buffer and
 * submit them on the bulk-out pipe.
 *
 * dismiss_ack: when non-zero the completion callback gets a NULL context
 * and will neither ack to the tty nor wake writers.
 *
 * Returns 'count' on success or a negative errno.  The transfer buffer
 * is freed by garmin_write_bulk_callback(); the URB is dropped here via
 * usb_free_urb() and released by usbcore when the transfer finishes.
 */
static int garmin_write_bulk(struct usb_serial_port *port,
			     const unsigned char *buf, int count,
			     int dismiss_ack)
{
	unsigned long flags;
	struct usb_serial *serial = port->serial;
	struct garmin_data *garmin_data_p = usb_get_serial_port_data(port);
	struct urb *urb;
	unsigned char *buffer;
	int status;

	dbg("%s - port %d, state %d", __func__, port->number,
	    garmin_data_p->state);

	/* a new request cancels any pending abort-transfer drop */
	spin_lock_irqsave(&garmin_data_p->lock, flags);
	garmin_data_p->flags &= ~FLAGS_DROP_DATA;
	spin_unlock_irqrestore(&garmin_data_p->lock, flags);

	buffer = kmalloc(count, GFP_ATOMIC);
	if (!buffer) {
		dev_err(&port->dev, "out of memory\n");
		return -ENOMEM;
	}

	urb = usb_alloc_urb(0, GFP_ATOMIC);
	if (!urb) {
		dev_err(&port->dev, "no more free urbs\n");
		kfree(buffer);
		return -ENOMEM;
	}

	memcpy(buffer, buf, count);

	usb_serial_debug_data(debug, &port->dev, __func__, count, buffer);

	usb_fill_bulk_urb(urb, serial->dev,
			  usb_sndbulkpipe(serial->dev,
					  port->bulk_out_endpointAddress),
			  buffer, count,
			  garmin_write_bulk_callback,
			  dismiss_ack ? NULL : port);
	urb->transfer_flags |= URB_ZERO_PACKET;

	if (GARMIN_LAYERID_APPL == getLayerId(buffer)) {
		spin_lock_irqsave(&garmin_data_p->lock, flags);
		garmin_data_p->flags |= APP_REQ_SEEN;
		spin_unlock_irqrestore(&garmin_data_p->lock, flags);
		if (garmin_data_p->mode == MODE_GARMIN_SERIAL) {
			/* responses will be queued until the tty asks */
			pkt_clear(garmin_data_p);
			garmin_data_p->state = STATE_GSP_WAIT_DATA;
		}
	}

	/* send it down the pipe */
	status = usb_submit_urb(urb, GFP_ATOMIC);
	if (status) {
		dev_err(&port->dev,
			"%s - usb_submit_urb(write bulk) failed with status = %d\n",
			__func__, status);
		count = status;
	}

	/* we are done with this urb, so let the host driver
	 * really free it when it is finished with it */
	usb_free_urb(urb);

	return count;
}
/*
 * Write callback: data coming from the tty.
 *
 * A private driver-control packet (layer GARMIN_LAYERID_PRIVATE) is
 * intercepted and handled here; everything else is handed to the
 * serial-protocol or native-mode receiver depending on the current mode.
 *
 * Returns the number of bytes consumed or a negative errno.
 */
static int garmin_write(struct tty_struct *tty, struct usb_serial_port *port,
			const unsigned char *buf, int count)
{
	int pktid, pktsiz, len;
	struct garmin_data *garmin_data_p = usb_get_serial_port_data(port);
	__le32 *privpkt = (__le32 *)garmin_data_p->privpkt;

	usb_serial_debug_data(debug, &port->dev, __func__, count, buf);

	if (garmin_data_p->state == STATE_RESET)
		return -EIO;

	/* check for our private packets */
	if (count >= GARMIN_PKTHDR_LENGTH) {
		len = PRIVPKTSIZ;
		if (count < len)
			len = count;

		memcpy(garmin_data_p->privpkt, buf, len);

		pktsiz = getDataLength(garmin_data_p->privpkt);
		pktid = getPacketId(garmin_data_p->privpkt);

		if (count == (GARMIN_PKTHDR_LENGTH+pktsiz)
		    && GARMIN_LAYERID_PRIVATE ==
		       getLayerId(garmin_data_p->privpkt)) {

			dbg("%s - processing private request %d",
			    __func__, pktid);

			/* drop all unfinished transfers */
			garmin_clear(garmin_data_p);

			switch (pktid) {
			case PRIV_PKTID_SET_DEBUG:
				if (pktsiz != 4)
					return -EINVPKT;
				debug = __le32_to_cpu(privpkt[3]);
				dbg("%s - debug level set to 0x%X",
				    __func__, debug);
				break;
			case PRIV_PKTID_SET_MODE:
				if (pktsiz != 4)
					return -EINVPKT;
				garmin_data_p->mode = __le32_to_cpu(privpkt[3]);
				dbg("%s - mode set to %d",
				    __func__, garmin_data_p->mode);
				break;
			case PRIV_PKTID_INFO_REQ:
				priv_status_resp(port);
				break;
			case PRIV_PKTID_RESET_REQ:
				process_resetdev_request(port);
				break;
			case PRIV_PKTID_SET_DEF_MODE:
				if (pktsiz != 4)
					return -EINVPKT;
				initial_mode = __le32_to_cpu(privpkt[3]);
				/* BUGFIX: log the value just assigned
				   (initial_mode); the old code printed the
				   unrelated current mode */
				dbg("%s - initial_mode set to %d",
				    __func__, initial_mode);
				break;
			}
			return count;
		}
	}

	if (garmin_data_p->mode == MODE_GARMIN_SERIAL) {
		return gsp_receive(garmin_data_p, buf, count);
	} else {	/* MODE_NATIVE */
		return nat_receive(garmin_data_p, buf, count);
	}
}
/* Report how many bytes the output buffer can still accept. */
static int garmin_write_room(struct tty_struct *tty)
{
	struct usb_serial_port *port = tty->driver_data;
	struct garmin_data *garmin_data_p = usb_get_serial_port_data(port);

	return GPS_OUT_BUFSIZ - garmin_data_p->outsize;
}
/*
 * Dispatch a packet received from the device (bulk or interrupt pipe):
 * drop it, queue it, or push it straight to the tty depending on the
 * current state, flags and mode.
 */
static void garmin_read_process(struct garmin_data *garmin_data_p,
				unsigned char *data, unsigned data_length,
				int bulk_data)
{
	unsigned long flags;

	if (garmin_data_p->flags & FLAGS_DROP_DATA) {
		/* abort-transfer cmd is active */
		dbg("%s - pkt dropped", __func__);
	} else if (garmin_data_p->state != STATE_DISCONNECTED &&
		   garmin_data_p->state != STATE_RESET) {

		/* if throttling is active or postprocessing is required
		   put the received data in the input queue, otherwise
		   send it directly to the tty port */
		if (garmin_data_p->flags & FLAGS_QUEUING) {
			pkt_add(garmin_data_p, data, data_length);
		} else if (bulk_data ||
			   getLayerId(data) == GARMIN_LAYERID_APPL) {

			spin_lock_irqsave(&garmin_data_p->lock, flags);
			garmin_data_p->flags |= APP_RESP_SEEN;
			spin_unlock_irqrestore(&garmin_data_p->lock, flags);

			if (garmin_data_p->mode == MODE_GARMIN_SERIAL) {
				/* serial mode needs re-framing, so queue */
				pkt_add(garmin_data_p, data, data_length);
			} else {
				send_to_tty(garmin_data_p->port, data,
					    data_length);
			}
		}
		/* ignore system layer packets ... */
	}
}
/*
 * Completion handler for the bulk-in URB: forward the data and resubmit
 * the URB until the device sends a zero-length packet ("end of bulk
 * data"), honoring throttling and a pending restart request.
 */
static void garmin_read_bulk_callback(struct urb *urb)
{
	unsigned long flags;
	struct usb_serial_port *port = urb->context;
	struct usb_serial *serial = port->serial;
	struct garmin_data *garmin_data_p = usb_get_serial_port_data(port);
	unsigned char *data = urb->transfer_buffer;
	int status = urb->status;
	int retval;

	dbg("%s - port %d", __func__, port->number);

	if (!serial) {
		dbg("%s - bad serial pointer, exiting", __func__);
		return;
	}

	if (status) {
		dbg("%s - nonzero read bulk status received: %d",
		    __func__, status);
		return;
	}

	usb_serial_debug_data(debug, &port->dev,
			      __func__, urb->actual_length, data);

	garmin_read_process(garmin_data_p, data, urb->actual_length, 1);

	if (urb->actual_length == 0 &&
	    0 != (garmin_data_p->flags & FLAGS_BULK_IN_RESTART)) {
		/* the interrupt handler announced more bulk data while
		   this transfer was still running - start the next one */
		spin_lock_irqsave(&garmin_data_p->lock, flags);
		garmin_data_p->flags &= ~FLAGS_BULK_IN_RESTART;
		spin_unlock_irqrestore(&garmin_data_p->lock, flags);
		retval = usb_submit_urb(port->read_urb, GFP_ATOMIC);
		if (retval)
			dev_err(&port->dev,
				"%s - failed resubmitting read urb, error %d\n",
				__func__, retval);
	} else if (urb->actual_length > 0) {
		/* Continue trying to read until nothing more is received */
		if (0 == (garmin_data_p->flags & FLAGS_THROTTLED)) {
			retval = usb_submit_urb(port->read_urb, GFP_ATOMIC);
			if (retval)
				dev_err(&port->dev,
					"%s - failed resubmitting read urb, "
					"error %d\n", __func__, retval);
		}
	} else {
		dbg("%s - end of bulk data", __func__);
		spin_lock_irqsave(&garmin_data_p->lock, flags);
		garmin_data_p->flags &= ~FLAGS_BULK_IN_ACTIVE;
		spin_unlock_irqrestore(&garmin_data_p->lock, flags);
	}
}
/*
 * Completion handler for the interrupt-in URB.  Recognizes two device
 * notifications: "bulk data available" (start or flag a bulk-in read)
 * and "start of session" (record the device serial number).  All
 * interrupt data is also fed through garmin_read_process(), and the
 * URB is resubmitted at the end.
 */
static void garmin_read_int_callback(struct urb *urb)
{
	unsigned long flags;
	int retval;
	struct usb_serial_port *port = urb->context;
	struct usb_serial *serial = port->serial;
	struct garmin_data *garmin_data_p = usb_get_serial_port_data(port);
	unsigned char *data = urb->transfer_buffer;
	int status = urb->status;

	switch (status) {
	case 0:
		/* success */
		break;
	case -ECONNRESET:
	case -ENOENT:
	case -ESHUTDOWN:
		/* this urb is terminated, clean up */
		dbg("%s - urb shutting down with status: %d",
		    __func__, status);
		return;
	default:
		dbg("%s - nonzero urb status received: %d",
		    __func__, status);
		return;
	}

	usb_serial_debug_data(debug, &port->dev, __func__,
			      urb->actual_length, urb->transfer_buffer);

	if (urb->actual_length == sizeof(GARMIN_BULK_IN_AVAIL_REPLY) &&
	    0 == memcmp(data, GARMIN_BULK_IN_AVAIL_REPLY,
			sizeof(GARMIN_BULK_IN_AVAIL_REPLY))) {

		dbg("%s - bulk data available.", __func__);

		if (0 == (garmin_data_p->flags & FLAGS_BULK_IN_ACTIVE)) {

			/* bulk data available */
			usb_fill_bulk_urb(port->read_urb, serial->dev,
					  usb_rcvbulkpipe(serial->dev,
							  port->bulk_in_endpointAddress),
					  port->read_urb->transfer_buffer,
					  port->read_urb->transfer_buffer_length,
					  garmin_read_bulk_callback, port);
			retval = usb_submit_urb(port->read_urb, GFP_ATOMIC);
			if (retval) {
				dev_err(&port->dev,
					"%s - failed submitting read urb, error %d\n",
					__func__, retval);
			} else {
				spin_lock_irqsave(&garmin_data_p->lock, flags);
				garmin_data_p->flags |= FLAGS_BULK_IN_ACTIVE;
				spin_unlock_irqrestore(&garmin_data_p->lock,
						       flags);
			}
		} else {
			/* bulk-in transfer still active; the bulk
			   completion handler will restart it */
			spin_lock_irqsave(&garmin_data_p->lock, flags);
			garmin_data_p->flags |= FLAGS_BULK_IN_RESTART;
			spin_unlock_irqrestore(&garmin_data_p->lock, flags);
		}

	} else if (urb->actual_length == (4+sizeof(GARMIN_START_SESSION_REPLY))
		   && 0 == memcmp(data, GARMIN_START_SESSION_REPLY,
				  sizeof(GARMIN_START_SESSION_REPLY))) {
		spin_lock_irqsave(&garmin_data_p->lock, flags);
		garmin_data_p->flags |= FLAGS_SESSION_REPLY1_SEEN;
		spin_unlock_irqrestore(&garmin_data_p->lock, flags);

		/* save the serial number */
		garmin_data_p->serial_num = __le32_to_cpup(
			(__le32 *)(data+GARMIN_PKTHDR_LENGTH));

		dbg("%s - start-of-session reply seen - serial %u.",
		    __func__, garmin_data_p->serial_num);
	}

	garmin_read_process(garmin_data_p, data, urb->actual_length, 0);

	/* keep the interrupt pipe running */
	port->interrupt_in_urb->dev = port->serial->dev;
	retval = usb_submit_urb(urb, GFP_ATOMIC);
	if (retval)
		dev_err(&urb->dev->dev,
			"%s - Error %d submitting interrupt urb\n",
			__func__, retval);
}
/*
 * Sends the next queued packet to the tty port (garmin native mode only)
 * and then sets a timer to call itself again until all queued data
 * is sent.
 */
static int garmin_flush_queue(struct garmin_data *garmin_data_p)
{
	unsigned long flags;
	struct garmin_packet *pkt;

	if ((garmin_data_p->flags & FLAGS_THROTTLED) == 0) {
		pkt = pkt_pop(garmin_data_p);
		if (pkt != NULL) {
			send_to_tty(garmin_data_p->port, pkt->data, pkt->size);
			kfree(pkt);
			/* re-fire almost immediately (one jiffy) for
			   the next queued packet */
			mod_timer(&garmin_data_p->timer, (1)+jiffies);
		} else {
			/* queue drained - stop queuing new data */
			spin_lock_irqsave(&garmin_data_p->lock, flags);
			garmin_data_p->flags &= ~FLAGS_QUEUING;
			spin_unlock_irqrestore(&garmin_data_p->lock, flags);
		}
	}
	return 0;
}
/* Throttle callback: from now on, queue received data for later. */
static void garmin_throttle(struct tty_struct *tty)
{
	struct usb_serial_port *port = tty->driver_data;
	struct garmin_data *garmin_data_p = usb_get_serial_port_data(port);

	dbg("%s - port %d", __func__, port->number);

	/* set flag, data received will be put into a queue
	   for later processing */
	spin_lock_irq(&garmin_data_p->lock);
	garmin_data_p->flags |= FLAGS_QUEUING | FLAGS_THROTTLED;
	spin_unlock_irq(&garmin_data_p->lock);
}
/*
 * Unthrottle callback: clear the throttle flag, flush queued data to the
 * tty (native mode) and restart a bulk-in read that was paused while
 * throttled.
 */
static void garmin_unthrottle(struct tty_struct *tty)
{
	struct usb_serial_port *port = tty->driver_data;
	struct garmin_data *garmin_data_p = usb_get_serial_port_data(port);
	int status;

	dbg("%s - port %d", __func__, port->number);
	spin_lock_irq(&garmin_data_p->lock);
	garmin_data_p->flags &= ~FLAGS_THROTTLED;
	spin_unlock_irq(&garmin_data_p->lock);

	/* in native mode send queued data to tty, in
	   serial mode nothing needs to be done here */
	if (garmin_data_p->mode == MODE_NATIVE)
		garmin_flush_queue(garmin_data_p);

	if (0 != (garmin_data_p->flags & FLAGS_BULK_IN_ACTIVE)) {
		status = usb_submit_urb(port->read_urb, GFP_KERNEL);
		if (status)
			dev_err(&port->dev,
				"%s - failed resubmitting read urb, error %d\n",
				__func__, status);
	}
}
/*
 * The timer is currently only used to send queued packets to
 * the tty in cases where the protocol provides no own handshaking
 * to initiate the transfer.
 */
static void timeout_handler(unsigned long data)
{
	struct garmin_data *garmin_data_p = (struct garmin_data *) data;

	/* send the next queued packet to the tty port (native mode with
	   data still queued, only) */
	if (garmin_data_p->mode == MODE_NATIVE &&
	    (garmin_data_p->flags & FLAGS_QUEUING))
		garmin_flush_queue(garmin_data_p);
}
/*
 * Attach callback: allocate and initialize the per-port private data
 * (timer, lock, packet queue) and kick off the first device session.
 */
static int garmin_attach(struct usb_serial *serial)
{
	int status = 0;
	struct usb_serial_port *port = serial->port[0];
	struct garmin_data *garmin_data_p = NULL;

	dbg("%s", __func__);

	garmin_data_p = kzalloc(sizeof(struct garmin_data), GFP_KERNEL);
	if (garmin_data_p == NULL) {
		dev_err(&port->dev, "%s - Out of memory\n", __func__);
		return -ENOMEM;
	}
	init_timer(&garmin_data_p->timer);
	spin_lock_init(&garmin_data_p->lock);
	INIT_LIST_HEAD(&garmin_data_p->pktlist);
	/* garmin_data_p->timer.expires = jiffies + session_timeout; */
	/* timer is armed later by garmin_flush_queue() via mod_timer() */
	garmin_data_p->timer.data = (unsigned long)garmin_data_p;
	garmin_data_p->timer.function = timeout_handler;
	garmin_data_p->port = port;
	garmin_data_p->state = 0;
	garmin_data_p->flags = 0;
	garmin_data_p->count = 0;
	usb_set_serial_port_data(port, garmin_data_p);

	status = garmin_init_session(port);

	return status;
}
/*
 * Disconnect callback: stop the interrupt URB and make sure the flush
 * timer can no longer fire before the private data is released.
 */
static void garmin_disconnect(struct usb_serial *serial)
{
	struct usb_serial_port *port = serial->port[0];
	struct garmin_data *garmin_data_p = usb_get_serial_port_data(port);

	dbg("%s", __func__);

	usb_kill_urb(port->interrupt_in_urb);
	del_timer_sync(&garmin_data_p->timer);
}
/* Release callback: free the private data allocated in garmin_attach(). */
static void garmin_release(struct usb_serial *serial)
{
	struct usb_serial_port *port = serial->port[0];

	dbg("%s", __func__);

	kfree(usb_get_serial_port_data(port));
}
/* All of the device info needed */
/* usb-serial driver descriptor wiring the callbacks defined above */
static struct usb_serial_driver garmin_device = {
	.driver = {
		.owner =	THIS_MODULE,
		.name =		"garmin_gps",
	},
	.description =		"Garmin GPS usb/tty",
	.usb_driver =		&garmin_driver,
	.id_table =		id_table,
	.num_ports =		1,
	.open =			garmin_open,
	.close =		garmin_close,
	.throttle =		garmin_throttle,
	.unthrottle =		garmin_unthrottle,
	.attach =		garmin_attach,
	.disconnect =		garmin_disconnect,
	.release =		garmin_release,
	.write =		garmin_write,
	.write_room =		garmin_write_room,
	.write_bulk_callback =	garmin_write_bulk_callback,
	.read_bulk_callback =	garmin_read_bulk_callback,
	.read_int_callback =	garmin_read_int_callback,
};
/*
 * Module init: register with the usb-serial core first, then with
 * usbcore; unwind in reverse order on failure.
 */
static int __init garmin_init(void)
{
	int retval;

	retval = usb_serial_register(&garmin_device);
	if (retval)
		goto failed_garmin_register;
	retval = usb_register(&garmin_driver);
	if (retval)
		goto failed_usb_register;
	printk(KERN_INFO KBUILD_MODNAME ": " DRIVER_VERSION ":"
	       DRIVER_DESC "\n");
	return 0;
failed_usb_register:
	usb_serial_deregister(&garmin_device);
failed_garmin_register:
	return retval;
}
/* Module exit: unregister in the reverse order of garmin_init(). */
static void __exit garmin_exit(void)
{
	usb_deregister(&garmin_driver);
	usb_serial_deregister(&garmin_device);
}
module_init(garmin_init);
module_exit(garmin_exit);
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");
module_param(debug, bool, S_IWUSR | S_IRUGO);
MODULE_PARM_DESC(debug, "Debug enabled or not");
module_param(initial_mode, int, S_IRUGO);
MODULE_PARM_DESC(initial_mode, "Initial mode");
| gpl-2.0 |
gdyuldin/huawei_u8850_kernel_ics | arch/mips/loongson/common/machtype.c | 908 | 1725 | /*
* Copyright (C) 2009 Lemote Inc.
* Author: Wu Zhangjin, wuzhangjin@gmail.com
*
* Copyright (c) 2009 Zhang Le <r0bertz@gentoo.org>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*/
#include <linux/errno.h>
#include <asm/bootinfo.h>
#include <loongson.h>
#include <machine.h>
/* please ensure the length of the machtype string is less than 50 */
#define MACHTYPE_LEN 50

/* machtype -> human-readable board name, indexed by MACH_* constants;
   terminated by a NULL entry at MACH_LOONGSON_END */
static const char *system_types[] = {
	[MACH_LOONGSON_UNKNOWN]         "unknown loongson machine",
	[MACH_LEMOTE_FL2E]              "lemote-fuloong-2e-box",
	[MACH_LEMOTE_FL2F]              "lemote-fuloong-2f-box",
	[MACH_LEMOTE_ML2F7]             "lemote-mengloong-2f-7inches",
	[MACH_LEMOTE_YL2F89]            "lemote-yeeloong-2f-8.9inches",
	[MACH_DEXXON_GDIUM2F10]         "dexxon-gdium-2f",
	[MACH_LEMOTE_NAS]               "lemote-nas-2f",
	[MACH_LEMOTE_LL2F]              "lemote-lynloong-2f",
	[MACH_LOONGSON_END]             NULL,
};
/* Return the board name for the detected machine type. */
const char *get_system_type(void)
{
	return system_types[mips_machtype];
}
/* Weak default - boards may override to detect their machtype. */
void __weak __init mach_prom_init_machtype(void)
{
}
/*
 * Determine mips_machtype from the "machtype=" kernel command-line
 * argument; fall back to the board hook (or the compile-time default
 * LOONGSON_MACHTYPE) when the argument is absent or unrecognized.
 */
void __init prom_init_machtype(void)
{
	char *p, str[MACHTYPE_LEN + 1];
	int machtype = MACH_LEMOTE_FL2E;

	mips_machtype = LOONGSON_MACHTYPE;

	p = strstr(arcs_cmdline, "machtype=");
	if (!p) {
		mach_prom_init_machtype();
		return;
	}
	p += strlen("machtype=");
	strncpy(str, p, MACHTYPE_LEN);
	/* BUGFIX: strncpy() does not NUL-terminate when the source is at
	   least MACHTYPE_LEN bytes long; terminate explicitly so the
	   strstr() below cannot read past the buffer. */
	str[MACHTYPE_LEN] = '\0';
	p = strstr(str, " ");
	if (p)
		*p = '\0';

	/* entry 0 ("unknown") is deliberately skipped */
	for (; system_types[machtype]; machtype++)
		if (strstr(system_types[machtype], str)) {
			mips_machtype = machtype;
			break;
		}
}
| gpl-2.0 |
blueskycoco/d2 | arch/arm/mach-clps711x/ceiva.c | 1676 | 1824 | /*
* linux/arch/arm/mach-clps711x/arch-ceiva.c
*
* Copyright (C) 2002, Rob Scott <rscott@mtrob.fdns.net>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/init.h>
#include <linux/types.h>
#include <linux/string.h>
#include <asm/setup.h>
#include <asm/mach-types.h>
#include <asm/mach/arch.h>
#include <linux/kernel.h>
#include <mach/hardware.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/sizes.h>
#include <asm/mach/map.h>
#include "common.h"
/* static I/O mapping installed at boot by ceiva_map_io() */
static struct map_desc ceiva_io_desc[] __initdata = {
	/* SED1355 controlled video RAM & registers */
	{
		.virtual	= CEIVA_VIRT_SED1355,
		.pfn		= __phys_to_pfn(CEIVA_PHYS_SED1355),
		.length		= SZ_2M,
		.type		= MT_DEVICE
	}
};
/* Set up the common CLPS711x mappings, then the board-specific ones. */
static void __init ceiva_map_io(void)
{
	clps711x_map_io();
	iotable_init(ceiva_io_desc, ARRAY_SIZE(ceiva_io_desc));
}
/* Machine descriptor - ties the board hooks into the ARM boot code. */
MACHINE_START(CEIVA, "CEIVA/Polaroid Photo MAX Digital Picture Frame")
	/* Maintainer: Rob Scott */
	.phys_io	= 0x80000000,
	.io_pg_offst	= ((0xff000000) >> 18) & 0xfffc,
	.boot_params	= 0xc0000100,
	.map_io		= ceiva_map_io,
	.init_irq	= clps711x_init_irq,
	.timer		= &clps711x_timer,
MACHINE_END
| gpl-2.0 |
jaej-dev/Kernel_Unico | arch/blackfin/kernel/bfin_gpio.c | 2188 | 30914 | /*
* GPIO Abstraction Layer
*
* Copyright 2006-2010 Analog Devices Inc.
*
* Licensed under the GPL-2 or later
*/
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/err.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <asm/blackfin.h>
#include <asm/gpio.h>
#include <asm/portmux.h>
#include <linux/irq.h>
#include <asm/irq_handler.h>
#if ANOMALY_05000311 || ANOMALY_05000323
/* For the affected silicon anomalies, each GPIO MMR write must be
   followed by a dummy read from an unrelated system register; this enum
   maps each GPIO register name to the register used for that read. */
enum {
	AWA_data = SYSCR,
	AWA_data_clear = SYSCR,
	AWA_data_set = SYSCR,
	AWA_toggle = SYSCR,
	AWA_maska = BFIN_UART_SCR,
	AWA_maska_clear = BFIN_UART_SCR,
	AWA_maska_set = BFIN_UART_SCR,
	AWA_maska_toggle = BFIN_UART_SCR,
	AWA_maskb = BFIN_UART_GCTL,
	AWA_maskb_clear = BFIN_UART_GCTL,
	AWA_maskb_set = BFIN_UART_GCTL,
	AWA_maskb_toggle = BFIN_UART_GCTL,
	AWA_dir = SPORT1_STAT,
	AWA_polar = SPORT1_STAT,
	AWA_edge = SPORT1_STAT,
	AWA_both = SPORT1_STAT,
#if ANOMALY_05000311
	AWA_inen = TIMER_ENABLE,
#elif ANOMALY_05000323
	AWA_inen = DMA1_1_CONFIG,
#endif
};

/* Anomaly Workaround */
#define AWA_DUMMY_READ(name) bfin_read16(AWA_ ## name)
#else
/* no anomaly: the dummy read compiles away entirely */
#define AWA_DUMMY_READ(...)  do { } while (0)
#endif
static struct gpio_port_t * const gpio_array[] = {
#if defined(BF533_FAMILY) || defined(BF538_FAMILY)
(struct gpio_port_t *) FIO_FLAG_D,
#elif defined(CONFIG_BF52x) || defined(BF537_FAMILY) || defined(CONFIG_BF51x)
(struct gpio_port_t *) PORTFIO,
(struct gpio_port_t *) PORTGIO,
(struct gpio_port_t *) PORTHIO,
#elif defined(BF561_FAMILY)
(struct gpio_port_t *) FIO0_FLAG_D,
(struct gpio_port_t *) FIO1_FLAG_D,
(struct gpio_port_t *) FIO2_FLAG_D,
#elif defined(CONFIG_BF54x)
(struct gpio_port_t *)PORTA_FER,
(struct gpio_port_t *)PORTB_FER,
(struct gpio_port_t *)PORTC_FER,
(struct gpio_port_t *)PORTD_FER,
(struct gpio_port_t *)PORTE_FER,
(struct gpio_port_t *)PORTF_FER,
(struct gpio_port_t *)PORTG_FER,
(struct gpio_port_t *)PORTH_FER,
(struct gpio_port_t *)PORTI_FER,
(struct gpio_port_t *)PORTJ_FER,
#else
# error no gpio arrays defined
#endif
};
#if defined(CONFIG_BF52x) || defined(BF537_FAMILY) || defined(CONFIG_BF51x)
static unsigned short * const port_fer[] = {
(unsigned short *) PORTF_FER,
(unsigned short *) PORTG_FER,
(unsigned short *) PORTH_FER,
};
# if !defined(BF537_FAMILY)
static unsigned short * const port_mux[] = {
(unsigned short *) PORTF_MUX,
(unsigned short *) PORTG_MUX,
(unsigned short *) PORTH_MUX,
};
static const
u8 pmux_offset[][16] = {
# if defined(CONFIG_BF52x)
{ 0, 0, 0, 0, 0, 0, 0, 0, 2, 2, 4, 6, 8, 8, 10, 10 }, /* PORTF */
{ 0, 0, 0, 0, 0, 2, 2, 4, 4, 6, 8, 10, 10, 10, 12, 12 }, /* PORTG */
{ 0, 0, 0, 0, 0, 0, 0, 0, 2, 4, 4, 4, 4, 4, 4, 4 }, /* PORTH */
# elif defined(CONFIG_BF51x)
{ 0, 2, 2, 2, 2, 2, 2, 4, 6, 6, 6, 8, 8, 8, 8, 10 }, /* PORTF */
{ 0, 0, 0, 2, 4, 6, 6, 6, 8, 10, 10, 12, 14, 14, 14, 14 }, /* PORTG */
{ 0, 0, 0, 0, 2, 2, 4, 6, 10, 10, 10, 10, 10, 10, 10, 10 }, /* PORTH */
# endif
};
# endif
#elif defined(BF538_FAMILY)
static unsigned short * const port_fer[] = {
(unsigned short *) PORTCIO_FER,
(unsigned short *) PORTDIO_FER,
(unsigned short *) PORTEIO_FER,
};
#endif
#define RESOURCE_LABEL_SIZE 16
static struct str_ident {
char name[RESOURCE_LABEL_SIZE];
} str_ident[MAX_RESOURCES];
#if defined(CONFIG_PM)
static struct gpio_port_s gpio_bank_saved[GPIO_BANK_NUM];
#endif
/* Log a driver bug: @bad_gpio was used without being requested first. */
static void gpio_error(unsigned bad_gpio)
{
	printk(KERN_ERR "bfin-gpio: GPIO %d wasn't requested!\n", bad_gpio);
}
/*
 * Record @label as the owner of resource @ident.  The label is copied
 * into the fixed-size str_ident table and always NUL-terminated; a NULL
 * label leaves the existing entry untouched.
 */
static void set_label(unsigned short ident, const char *label)
{
	char *dst = str_ident[ident].name;

	if (!label)
		return;

	strncpy(dst, label, RESOURCE_LABEL_SIZE);
	dst[RESOURCE_LABEL_SIZE - 1] = 0;
}
/* Return the owner label stored for @ident, or "UNKNOWN" if none is set. */
static char *get_label(unsigned short ident)
{
	char *name = str_ident[ident].name;

	return name[0] ? name : "UNKNOWN";
}
/*
 * Compare the stored owner label of resource @ident against @label.
 *
 * Returns the strcmp() result (0 on match), or -EINVAL when @label is
 * NULL (a caller bug, reported with a stack dump).
 *
 * Fixes: the original tested @label twice and the error message read
 * "none-null" instead of "non-null".
 */
static int cmp_label(unsigned short ident, const char *label)
{
	if (label == NULL) {
		dump_stack();
		printk(KERN_ERR "Please provide non-null label\n");
		return -EINVAL;
	}

	return strcmp(str_ident[ident].name, label);
}
#define map_entry(m, i) reserved_##m##_map[gpio_bank(i)]
#define is_reserved(m, i, e) (map_entry(m, i) & gpio_bit(i))
#define reserve(m, i) (map_entry(m, i) |= gpio_bit(i))
#define unreserve(m, i) (map_entry(m, i) &= ~gpio_bit(i))
#define DECLARE_RESERVED_MAP(m, c) static unsigned short reserved_##m##_map[c]
DECLARE_RESERVED_MAP(gpio, GPIO_BANK_NUM);
DECLARE_RESERVED_MAP(peri, DIV_ROUND_UP(MAX_RESOURCES, GPIO_BANKSIZE));
DECLARE_RESERVED_MAP(gpio_irq, GPIO_BANK_NUM);
/*
 * Validate a GPIO number: it must be below MAX_BLACKFIN_GPIOS and, on
 * BF54x, must not be one of the pins that do not exist on that part.
 * Returns 0 if usable, -EINVAL otherwise.
 */
inline int check_gpio(unsigned gpio)
{
	if (gpio >= MAX_BLACKFIN_GPIOS)
		return -EINVAL;

#if defined(CONFIG_BF54x)
	switch (gpio) {
	case GPIO_PB15:
	case GPIO_PC14:
	case GPIO_PC15:
	case GPIO_PH14:
	case GPIO_PH15:
	case GPIO_PJ14:
	case GPIO_PJ15:
		return -EINVAL;
	default:
		break;
	}
#endif

	return 0;
}
/*
 * port_setup - switch a pin's Function Enable (FER) setting between
 * GPIO and peripheral use.
 * @gpio: pin identifier
 * @usage: GPIO_USAGE or PERIPHERAL_USAGE
 *
 * The FER register layout and polarity differ per Blackfin family,
 * hence the preprocessor variants below.
 */
static void port_setup(unsigned gpio, unsigned short usage)
{
#if defined(BF538_FAMILY)
/*
* BF538/9 Port C,D and E are special.
* Inverted PORT_FER polarity on CDE and no PORF_FER on F
* Regular PORT F GPIOs are handled here, CDE are exclusively
* managed by GPIOLIB
*/
if (gpio < MAX_BLACKFIN_GPIOS || gpio >= MAX_RESOURCES)
return;
/* rebase into the CDE port_fer[] table (inverted polarity: set = GPIO) */
gpio -= MAX_BLACKFIN_GPIOS;
if (usage == GPIO_USAGE)
*port_fer[gpio_bank(gpio)] |= gpio_bit(gpio);
else
*port_fer[gpio_bank(gpio)] &= ~gpio_bit(gpio);
SSYNC();
return;
#endif
if (check_gpio(gpio))
return;
#if defined(CONFIG_BF52x) || defined(BF537_FAMILY) || defined(CONFIG_BF51x)
/* on these families a set FER bit selects the peripheral function */
if (usage == GPIO_USAGE)
*port_fer[gpio_bank(gpio)] &= ~gpio_bit(gpio);
else
*port_fer[gpio_bank(gpio)] |= gpio_bit(gpio);
SSYNC();
#elif defined(CONFIG_BF54x)
if (usage == GPIO_USAGE)
gpio_array[gpio_bank(gpio)]->port_fer &= ~gpio_bit(gpio);
else
gpio_array[gpio_bank(gpio)]->port_fer |= gpio_bit(gpio);
SSYNC();
#endif
}
#ifdef BF537_FAMILY
static const s8 port_mux[] = {
[GPIO_PF0] = 3,
[GPIO_PF1] = 3,
[GPIO_PF2] = 4,
[GPIO_PF3] = 4,
[GPIO_PF4] = 5,
[GPIO_PF5] = 6,
[GPIO_PF6] = 7,
[GPIO_PF7] = 8,
[GPIO_PF8 ... GPIO_PF15] = -1,
[GPIO_PG0 ... GPIO_PG7] = -1,
[GPIO_PG8] = 9,
[GPIO_PG9] = 9,
[GPIO_PG10] = 10,
[GPIO_PG11] = 10,
[GPIO_PG12] = 10,
[GPIO_PG13] = 11,
[GPIO_PG14] = 11,
[GPIO_PG15] = 11,
[GPIO_PH0 ... GPIO_PH15] = -1,
[PORT_PJ0 ... PORT_PJ3] = -1,
[PORT_PJ4] = 1,
[PORT_PJ5] = 1,
[PORT_PJ6 ... PORT_PJ9] = -1,
[PORT_PJ10] = 0,
[PORT_PJ11] = 0,
};
/*
 * portmux_group_check - verify that requesting peripheral @per does not
 * conflict with an already-reserved pin sharing the same PORT_MUX field
 * (on BF537, several pins are controlled by one mux field; see the
 * port_mux[] offset table above).
 *
 * Returns 0 if the request is compatible, -EINVAL on a group conflict.
 */
static int portmux_group_check(unsigned short per)
{
u16 ident = P_IDENT(per);
u16 function = P_FUNCT2MUX(per);
s8 offset = port_mux[ident];
u16 m, pmux, pfunc;
/* a negative offset means this pin has no shared mux field */
if (offset < 0)
return 0;
pmux = bfin_read_PORT_MUX();
for (m = 0; m < ARRAY_SIZE(port_mux); ++m) {
if (m == ident)
continue;
if (port_mux[m] != offset)
continue;
if (!is_reserved(peri, m, 1))
continue;
/* the field at offset 1 is two bits wide; all others are one bit */
if (offset == 1)
pfunc = (pmux >> offset) & 3;
else
pfunc = (pmux >> offset) & 1;
if (pfunc != function) {
pr_err("pin group conflict! request pin %d func %d conflict with pin %d func %d\n",
ident, function, m, pfunc);
return -EINVAL;
}
}
return 0;
}
/*
 * portmux_setup - program the BF537 PORT_MUX field for peripheral @per.
 *
 * Pins with offset -1 have no mux field and need no programming.  The
 * field at offset 1 is two bits wide, all others one bit, so the mask
 * cleared before OR-ing in the new function differs accordingly.
 */
static void portmux_setup(unsigned short per)
{
u16 ident = P_IDENT(per);
u16 function = P_FUNCT2MUX(per);
s8 offset = port_mux[ident];
u16 pmux;
if (offset == -1)
return;
pmux = bfin_read_PORT_MUX();
if (offset != 1)
pmux &= ~(1 << offset);
else
pmux &= ~(3 << 1);
pmux |= (function << offset);
bfin_write_PORT_MUX(pmux);
}
#elif defined(CONFIG_BF54x)
inline void portmux_setup(unsigned short per)
{
u16 ident = P_IDENT(per);
u16 function = P_FUNCT2MUX(per);
u32 pmux;
pmux = gpio_array[gpio_bank(ident)]->port_mux;
pmux &= ~(0x3 << (2 * gpio_sub_n(ident)));
pmux |= (function & 0x3) << (2 * gpio_sub_n(ident));
gpio_array[gpio_bank(ident)]->port_mux = pmux;
}
inline u16 get_portmux(unsigned short per)
{
u16 ident = P_IDENT(per);
u32 pmux = gpio_array[gpio_bank(ident)]->port_mux;
return (pmux >> (2 * gpio_sub_n(ident)) & 0x3);
}
static int portmux_group_check(unsigned short per)
{
return 0;
}
#elif defined(CONFIG_BF52x) || defined(CONFIG_BF51x)
static int portmux_group_check(unsigned short per)
{
u16 ident = P_IDENT(per);
u16 function = P_FUNCT2MUX(per);
u8 offset = pmux_offset[gpio_bank(ident)][gpio_sub_n(ident)];
u16 pin, gpiopin, pfunc;
for (pin = 0; pin < GPIO_BANKSIZE; ++pin) {
if (offset != pmux_offset[gpio_bank(ident)][pin])
continue;
gpiopin = gpio_bank(ident) * GPIO_BANKSIZE + pin;
if (gpiopin == ident)
continue;
if (!is_reserved(peri, gpiopin, 1))
continue;
pfunc = *port_mux[gpio_bank(ident)];
pfunc = (pfunc >> offset) & 3;
if (pfunc != function) {
pr_err("pin group conflict! request pin %d func %d conflict with pin %d func %d\n",
ident, function, gpiopin, pfunc);
return -EINVAL;
}
}
return 0;
}
inline void portmux_setup(unsigned short per)
{
u16 ident = P_IDENT(per);
u16 function = P_FUNCT2MUX(per);
u8 offset = pmux_offset[gpio_bank(ident)][gpio_sub_n(ident)];
u16 pmux;
pmux = *port_mux[gpio_bank(ident)];
if (((pmux >> offset) & 3) == function)
return;
pmux &= ~(3 << offset);
pmux |= (function & 3) << offset;
*port_mux[gpio_bank(ident)] = pmux;
SSYNC();
}
#else
# define portmux_setup(...) do { } while (0)
static int portmux_group_check(unsigned short per)
{
return 0;
}
#endif
#ifndef CONFIG_BF54x
/***********************************************************
*
* FUNCTIONS: Blackfin General Purpose Ports Access Functions
*
* INPUTS/OUTPUTS:
* gpio - GPIO Number between 0 and MAX_BLACKFIN_GPIOS
*
*
* DESCRIPTION: These functions abstract direct register access
* to Blackfin processor General Purpose
* Ports Regsiters
*
* CAUTION: These functions do not belong to the GPIO Driver API
*************************************************************
* MODIFICATION HISTORY :
**************************************************************/
/* Set a specific bit */
#define SET_GPIO(name) \
void set_gpio_ ## name(unsigned gpio, unsigned short arg) \
{ \
unsigned long flags; \
flags = hard_local_irq_save(); \
if (arg) \
gpio_array[gpio_bank(gpio)]->name |= gpio_bit(gpio); \
else \
gpio_array[gpio_bank(gpio)]->name &= ~gpio_bit(gpio); \
AWA_DUMMY_READ(name); \
hard_local_irq_restore(flags); \
} \
EXPORT_SYMBOL(set_gpio_ ## name);
SET_GPIO(dir) /* set_gpio_dir() */
SET_GPIO(inen) /* set_gpio_inen() */
SET_GPIO(polar) /* set_gpio_polar() */
SET_GPIO(edge) /* set_gpio_edge() */
SET_GPIO(both) /* set_gpio_both() */
#define SET_GPIO_SC(name) \
void set_gpio_ ## name(unsigned gpio, unsigned short arg) \
{ \
unsigned long flags; \
if (ANOMALY_05000311 || ANOMALY_05000323) \
flags = hard_local_irq_save(); \
if (arg) \
gpio_array[gpio_bank(gpio)]->name ## _set = gpio_bit(gpio); \
else \
gpio_array[gpio_bank(gpio)]->name ## _clear = gpio_bit(gpio); \
if (ANOMALY_05000311 || ANOMALY_05000323) { \
AWA_DUMMY_READ(name); \
hard_local_irq_restore(flags); \
} \
} \
EXPORT_SYMBOL(set_gpio_ ## name);
SET_GPIO_SC(maska)
SET_GPIO_SC(maskb)
SET_GPIO_SC(data)
void set_gpio_toggle(unsigned gpio)
{
unsigned long flags;
if (ANOMALY_05000311 || ANOMALY_05000323)
flags = hard_local_irq_save();
gpio_array[gpio_bank(gpio)]->toggle = gpio_bit(gpio);
if (ANOMALY_05000311 || ANOMALY_05000323) {
AWA_DUMMY_READ(toggle);
hard_local_irq_restore(flags);
}
}
EXPORT_SYMBOL(set_gpio_toggle);
/*Set current PORT date (16-bit word)*/
#define SET_GPIO_P(name) \
void set_gpiop_ ## name(unsigned gpio, unsigned short arg) \
{ \
unsigned long flags; \
if (ANOMALY_05000311 || ANOMALY_05000323) \
flags = hard_local_irq_save(); \
gpio_array[gpio_bank(gpio)]->name = arg; \
if (ANOMALY_05000311 || ANOMALY_05000323) { \
AWA_DUMMY_READ(name); \
hard_local_irq_restore(flags); \
} \
} \
EXPORT_SYMBOL(set_gpiop_ ## name);
SET_GPIO_P(data)
SET_GPIO_P(dir)
SET_GPIO_P(inen)
SET_GPIO_P(polar)
SET_GPIO_P(edge)
SET_GPIO_P(both)
SET_GPIO_P(maska)
SET_GPIO_P(maskb)
/* Get a specific bit */
#define GET_GPIO(name) \
unsigned short get_gpio_ ## name(unsigned gpio) \
{ \
unsigned long flags; \
unsigned short ret; \
if (ANOMALY_05000311 || ANOMALY_05000323) \
flags = hard_local_irq_save(); \
ret = 0x01 & (gpio_array[gpio_bank(gpio)]->name >> gpio_sub_n(gpio)); \
if (ANOMALY_05000311 || ANOMALY_05000323) { \
AWA_DUMMY_READ(name); \
hard_local_irq_restore(flags); \
} \
return ret; \
} \
EXPORT_SYMBOL(get_gpio_ ## name);
GET_GPIO(data)
GET_GPIO(dir)
GET_GPIO(inen)
GET_GPIO(polar)
GET_GPIO(edge)
GET_GPIO(both)
GET_GPIO(maska)
GET_GPIO(maskb)
/*Get current PORT date (16-bit word)*/
#define GET_GPIO_P(name) \
unsigned short get_gpiop_ ## name(unsigned gpio) \
{ \
unsigned long flags; \
unsigned short ret; \
if (ANOMALY_05000311 || ANOMALY_05000323) \
flags = hard_local_irq_save(); \
ret = (gpio_array[gpio_bank(gpio)]->name); \
if (ANOMALY_05000311 || ANOMALY_05000323) { \
AWA_DUMMY_READ(name); \
hard_local_irq_restore(flags); \
} \
return ret; \
} \
EXPORT_SYMBOL(get_gpiop_ ## name);
GET_GPIO_P(data)
GET_GPIO_P(dir)
GET_GPIO_P(inen)
GET_GPIO_P(polar)
GET_GPIO_P(edge)
GET_GPIO_P(both)
GET_GPIO_P(maska)
GET_GPIO_P(maskb)
#ifdef CONFIG_PM
DECLARE_RESERVED_MAP(wakeup, GPIO_BANK_NUM);
static const unsigned int sic_iwr_irqs[] = {
#if defined(BF533_FAMILY)
IRQ_PROG_INTB
#elif defined(BF537_FAMILY)
IRQ_PF_INTB_WATCH, IRQ_PORTG_INTB, IRQ_PH_INTB_MAC_TX
#elif defined(BF538_FAMILY)
IRQ_PORTF_INTB
#elif defined(CONFIG_BF52x) || defined(CONFIG_BF51x)
IRQ_PORTF_INTB, IRQ_PORTG_INTB, IRQ_PORTH_INTB
#elif defined(BF561_FAMILY)
IRQ_PROG0_INTB, IRQ_PROG1_INTB, IRQ_PROG2_INTB
#else
# error no SIC_IWR defined
#endif
};
/***********************************************************
*
* FUNCTIONS: Blackfin PM Setup API
*
* INPUTS/OUTPUTS:
* gpio - GPIO Number between 0 and MAX_BLACKFIN_GPIOS
* type -
* PM_WAKE_RISING
* PM_WAKE_FALLING
* PM_WAKE_HIGH
* PM_WAKE_LOW
* PM_WAKE_BOTH_EDGES
*
* DESCRIPTION: Blackfin PM Driver API
*
* CAUTION:
*************************************************************
* MODIFICATION HISTORY :
**************************************************************/
/*
 * gpio_pm_wakeup_ctrl - enable or disable @gpio as a PM wakeup source.
 * @gpio: GPIO number
 * @ctrl: non-zero to enable wakeup, zero to disable
 *
 * Tracks the request in the wakeup reservation map and mirrors it into
 * the bank's MASKB interrupt mask.  Returns 0, or -EINVAL for a bad
 * GPIO number.  Runs with hard IRQs disabled to keep map and hardware
 * in sync.
 */
int gpio_pm_wakeup_ctrl(unsigned gpio, unsigned ctrl)
{
unsigned long flags;
if (check_gpio(gpio) < 0)
return -EINVAL;
flags = hard_local_irq_save();
if (ctrl)
reserve(wakeup, gpio);
else
unreserve(wakeup, gpio);
set_gpio_maskb(gpio, ctrl);
hard_local_irq_restore(flags);
return 0;
}
/*
 * bfin_pm_standby_ctrl - (un)arm the system wakeup IRQ for every GPIO
 * bank that has at least one pin reserved in the wakeup map.
 * @ctrl: passed through to bfin_internal_set_wake() (enable/disable)
 *
 * Always returns 0.
 */
int bfin_pm_standby_ctrl(unsigned ctrl)
{
u16 bank, mask, i;
for (i = 0; i < MAX_BLACKFIN_GPIOS; i += GPIO_BANKSIZE) {
mask = map_entry(wakeup, i);
bank = gpio_bank(i);
if (mask)
bfin_internal_set_wake(sic_iwr_irqs[bank], ctrl);
}
return 0;
}
void bfin_gpio_pm_hibernate_suspend(void)
{
int i, bank;
for (i = 0; i < MAX_BLACKFIN_GPIOS; i += GPIO_BANKSIZE) {
bank = gpio_bank(i);
#if defined(CONFIG_BF52x) || defined(BF537_FAMILY) || defined(CONFIG_BF51x)
gpio_bank_saved[bank].fer = *port_fer[bank];
#if defined(CONFIG_BF52x) || defined(CONFIG_BF51x)
gpio_bank_saved[bank].mux = *port_mux[bank];
#else
if (bank == 0)
gpio_bank_saved[bank].mux = bfin_read_PORT_MUX();
#endif
#endif
gpio_bank_saved[bank].data = gpio_array[bank]->data;
gpio_bank_saved[bank].inen = gpio_array[bank]->inen;
gpio_bank_saved[bank].polar = gpio_array[bank]->polar;
gpio_bank_saved[bank].dir = gpio_array[bank]->dir;
gpio_bank_saved[bank].edge = gpio_array[bank]->edge;
gpio_bank_saved[bank].both = gpio_array[bank]->both;
gpio_bank_saved[bank].maska = gpio_array[bank]->maska;
}
AWA_DUMMY_READ(maska);
}
void bfin_gpio_pm_hibernate_restore(void)
{
int i, bank;
for (i = 0; i < MAX_BLACKFIN_GPIOS; i += GPIO_BANKSIZE) {
bank = gpio_bank(i);
#if defined(CONFIG_BF52x) || defined(BF537_FAMILY) || defined(CONFIG_BF51x)
#if defined(CONFIG_BF52x) || defined(CONFIG_BF51x)
*port_mux[bank] = gpio_bank_saved[bank].mux;
#else
if (bank == 0)
bfin_write_PORT_MUX(gpio_bank_saved[bank].mux);
#endif
*port_fer[bank] = gpio_bank_saved[bank].fer;
#endif
gpio_array[bank]->inen = gpio_bank_saved[bank].inen;
gpio_array[bank]->data_set = gpio_bank_saved[bank].data
& gpio_bank_saved[bank].dir;
gpio_array[bank]->dir = gpio_bank_saved[bank].dir;
gpio_array[bank]->polar = gpio_bank_saved[bank].polar;
gpio_array[bank]->edge = gpio_bank_saved[bank].edge;
gpio_array[bank]->both = gpio_bank_saved[bank].both;
gpio_array[bank]->maska = gpio_bank_saved[bank].maska;
}
AWA_DUMMY_READ(maska);
}
#endif
#else /* CONFIG_BF54x */
#ifdef CONFIG_PM
int bfin_pm_standby_ctrl(unsigned ctrl)
{
return 0;
}
void bfin_gpio_pm_hibernate_suspend(void)
{
int i, bank;
for (i = 0; i < MAX_BLACKFIN_GPIOS; i += GPIO_BANKSIZE) {
bank = gpio_bank(i);
gpio_bank_saved[bank].fer = gpio_array[bank]->port_fer;
gpio_bank_saved[bank].mux = gpio_array[bank]->port_mux;
gpio_bank_saved[bank].data = gpio_array[bank]->data;
gpio_bank_saved[bank].inen = gpio_array[bank]->inen;
gpio_bank_saved[bank].dir = gpio_array[bank]->dir_set;
}
}
void bfin_gpio_pm_hibernate_restore(void)
{
int i, bank;
for (i = 0; i < MAX_BLACKFIN_GPIOS; i += GPIO_BANKSIZE) {
bank = gpio_bank(i);
gpio_array[bank]->port_mux = gpio_bank_saved[bank].mux;
gpio_array[bank]->port_fer = gpio_bank_saved[bank].fer;
gpio_array[bank]->inen = gpio_bank_saved[bank].inen;
gpio_array[bank]->dir_set = gpio_bank_saved[bank].dir;
gpio_array[bank]->data_set = gpio_bank_saved[bank].data
| gpio_bank_saved[bank].dir;
}
}
#endif
unsigned short get_gpio_dir(unsigned gpio)
{
return (0x01 & (gpio_array[gpio_bank(gpio)]->dir_clear >> gpio_sub_n(gpio)));
}
EXPORT_SYMBOL(get_gpio_dir);
#endif /* CONFIG_BF54x */
/***********************************************************
*
* FUNCTIONS: Blackfin Peripheral Resource Allocation
* and PortMux Setup
*
* INPUTS/OUTPUTS:
* per Peripheral Identifier
* label String
*
* DESCRIPTION: Blackfin Peripheral Resource Allocation and Setup API
*
* CAUTION:
*************************************************************
* MODIFICATION HISTORY :
**************************************************************/
/*
 * peripheral_request - reserve a pin for a peripheral function and
 * program its mux/FER registers.
 * @per: encoded peripheral descriptor (P_IDENT / P_FUNCT2MUX / flags)
 * @label: owner name recorded for diagnostics
 *
 * Returns 0 on success, -ENODEV for an undefined descriptor, -EBUSY if
 * the pin is already taken as GPIO or as a conflicting peripheral.
 * P_MAYSHARE pins may be reserved by several drivers; re-requests with
 * the same label are allowed (jumps to "anyway").
 */
int peripheral_request(unsigned short per, const char *label)
{
unsigned long flags;
unsigned short ident = P_IDENT(per);
/*
* Don't cares are pins with only one dedicated function
*/
if (per & P_DONTCARE)
return 0;
if (!(per & P_DEFINED))
return -ENODEV;
BUG_ON(ident >= MAX_RESOURCES);
flags = hard_local_irq_save();
/* If a pin can be muxed as either GPIO or peripheral, make
* sure it is not already a GPIO pin when we request it.
*/
if (unlikely(!check_gpio(ident) && is_reserved(gpio, ident, 1))) {
if (system_state == SYSTEM_BOOTING)
dump_stack();
printk(KERN_ERR
"%s: Peripheral %d is already reserved as GPIO by %s !\n",
__func__, ident, get_label(ident));
hard_local_irq_restore(flags);
return -EBUSY;
}
if (unlikely(is_reserved(peri, ident, 1))) {
/*
* Pin functions like AMC address strobes may
* be requested and used by several drivers
*/
#ifdef CONFIG_BF54x
if (!((per & P_MAYSHARE) && get_portmux(per) == P_FUNCT2MUX(per))) {
#else
if (!(per & P_MAYSHARE)) {
#endif
/*
* Allow that the identical pin function can
* be requested from the same driver twice
*/
if (cmp_label(ident, label) == 0)
goto anyway;
if (system_state == SYSTEM_BOOTING)
dump_stack();
printk(KERN_ERR
"%s: Peripheral %d function %d is already reserved by %s !\n",
__func__, ident, P_FUNCT2MUX(per), get_label(ident));
hard_local_irq_restore(flags);
return -EBUSY;
}
}
/* reject requests that would clash within a shared mux group */
if (unlikely(portmux_group_check(per))) {
hard_local_irq_restore(flags);
return -EBUSY;
}
anyway:
reserve(peri, ident);
portmux_setup(per);
port_setup(ident, PERIPHERAL_USAGE);
hard_local_irq_restore(flags);
set_label(ident, label);
return 0;
}
EXPORT_SYMBOL(peripheral_request);
/*
 * Reserve every entry of the zero-terminated @per list for @label.
 * On the first failure, all previously granted entries are released
 * and the error from peripheral_request() is returned; 0 on success.
 */
int peripheral_request_list(const unsigned short per[], const char *label)
{
	int err;
	u16 i = 0;

	while (per[i] != 0) {
		err = peripheral_request(per[i], label);
		if (err < 0)
			goto rollback;
		i++;
	}

	return 0;

rollback:
	while (i-- > 0)
		peripheral_free(per[i]);
	return err;
}
EXPORT_SYMBOL(peripheral_request_list);
/*
 * peripheral_free - release a pin previously taken by peripheral_request().
 * @per: the same encoded descriptor that was requested
 *
 * Don't-care and undefined descriptors are ignored, as are pins not
 * actually reserved.  Non-shareable pins are handed back to GPIO mode
 * via port_setup(); shareable ones keep their mux setting since other
 * users may still depend on it.
 */
void peripheral_free(unsigned short per)
{
unsigned long flags;
unsigned short ident = P_IDENT(per);
if (per & P_DONTCARE)
return;
if (!(per & P_DEFINED))
return;
flags = hard_local_irq_save();
if (unlikely(!is_reserved(peri, ident, 0))) {
hard_local_irq_restore(flags);
return;
}
if (!(per & P_MAYSHARE))
port_setup(ident, GPIO_USAGE);
unreserve(peri, ident);
set_label(ident, "free");
hard_local_irq_restore(flags);
}
EXPORT_SYMBOL(peripheral_free);
/* Release every entry of the zero-terminated @per descriptor list. */
void peripheral_free_list(const unsigned short per[])
{
	const unsigned short *p;

	for (p = per; *p != 0; p++)
		peripheral_free(*p);
}
EXPORT_SYMBOL(peripheral_free_list);
/***********************************************************
*
* FUNCTIONS: Blackfin GPIO Driver
*
* INPUTS/OUTPUTS:
* gpio PIO Number between 0 and MAX_BLACKFIN_GPIOS
* label String
*
* DESCRIPTION: Blackfin GPIO Driver API
*
* CAUTION:
*************************************************************
* MODIFICATION HISTORY :
**************************************************************/
/*
 * bfin_gpio_request - reserve @gpio for GPIO use under owner @label.
 *
 * Returns 0 on success (including a repeat request with the identical
 * label), -EINVAL for a bad GPIO number, -EBUSY if the pin is already
 * reserved as GPIO or peripheral.  A pin already in use as a gpio-irq
 * is allowed but noted; otherwise the POLAR setting is reset on first
 * acquisition (non-BF54x only).
 */
int bfin_gpio_request(unsigned gpio, const char *label)
{
unsigned long flags;
if (check_gpio(gpio) < 0)
return -EINVAL;
flags = hard_local_irq_save();
/*
* Allow that the identical GPIO can
* be requested from the same driver twice
* Do nothing and return -
*/
if (cmp_label(gpio, label) == 0) {
hard_local_irq_restore(flags);
return 0;
}
if (unlikely(is_reserved(gpio, gpio, 1))) {
if (system_state == SYSTEM_BOOTING)
dump_stack();
printk(KERN_ERR "bfin-gpio: GPIO %d is already reserved by %s !\n",
gpio, get_label(gpio));
hard_local_irq_restore(flags);
return -EBUSY;
}
if (unlikely(is_reserved(peri, gpio, 1))) {
if (system_state == SYSTEM_BOOTING)
dump_stack();
printk(KERN_ERR
"bfin-gpio: GPIO %d is already reserved as Peripheral by %s !\n",
gpio, get_label(gpio));
hard_local_irq_restore(flags);
return -EBUSY;
}
if (unlikely(is_reserved(gpio_irq, gpio, 1))) {
printk(KERN_NOTICE "bfin-gpio: GPIO %d is already reserved as gpio-irq!"
" (Documentation/blackfin/bfin-gpio-notes.txt)\n", gpio);
}
#ifndef CONFIG_BF54x
else { /* Reset POLAR setting when acquiring a gpio for the first time */
set_gpio_polar(gpio, 0);
}
#endif
reserve(gpio, gpio);
set_label(gpio, label);
hard_local_irq_restore(flags);
/* switch the pin's FER setting to GPIO mode */
port_setup(gpio, GPIO_USAGE);
return 0;
}
EXPORT_SYMBOL(bfin_gpio_request);
/*
 * bfin_gpio_free - release a GPIO reserved by bfin_gpio_request().
 *
 * Invalid GPIO numbers are ignored; freeing a pin that was never
 * requested logs an error (plus a stack dump during boot) and does
 * nothing else.
 */
void bfin_gpio_free(unsigned gpio)
{
unsigned long flags;
if (check_gpio(gpio) < 0)
return;
might_sleep();
flags = hard_local_irq_save();
if (unlikely(!is_reserved(gpio, gpio, 0))) {
if (system_state == SYSTEM_BOOTING)
dump_stack();
gpio_error(gpio);
hard_local_irq_restore(flags);
return;
}
unreserve(gpio, gpio);
set_label(gpio, "free");
hard_local_irq_restore(flags);
}
EXPORT_SYMBOL(bfin_gpio_free);
#ifdef BFIN_SPECIAL_GPIO_BANKS
DECLARE_RESERVED_MAP(special_gpio, gpio_bank(MAX_RESOURCES));
int bfin_special_gpio_request(unsigned gpio, const char *label)
{
unsigned long flags;
flags = hard_local_irq_save();
/*
* Allow that the identical GPIO can
* be requested from the same driver twice
* Do nothing and return -
*/
if (cmp_label(gpio, label) == 0) {
hard_local_irq_restore(flags);
return 0;
}
if (unlikely(is_reserved(special_gpio, gpio, 1))) {
hard_local_irq_restore(flags);
printk(KERN_ERR "bfin-gpio: GPIO %d is already reserved by %s !\n",
gpio, get_label(gpio));
return -EBUSY;
}
if (unlikely(is_reserved(peri, gpio, 1))) {
hard_local_irq_restore(flags);
printk(KERN_ERR
"bfin-gpio: GPIO %d is already reserved as Peripheral by %s !\n",
gpio, get_label(gpio));
return -EBUSY;
}
reserve(special_gpio, gpio);
reserve(peri, gpio);
set_label(gpio, label);
hard_local_irq_restore(flags);
port_setup(gpio, GPIO_USAGE);
return 0;
}
EXPORT_SYMBOL(bfin_special_gpio_request);
void bfin_special_gpio_free(unsigned gpio)
{
unsigned long flags;
might_sleep();
flags = hard_local_irq_save();
if (unlikely(!is_reserved(special_gpio, gpio, 0))) {
gpio_error(gpio);
hard_local_irq_restore(flags);
return;
}
unreserve(special_gpio, gpio);
unreserve(peri, gpio);
set_label(gpio, "free");
hard_local_irq_restore(flags);
}
EXPORT_SYMBOL(bfin_special_gpio_free);
#endif
int bfin_gpio_irq_request(unsigned gpio, const char *label)
{
unsigned long flags;
if (check_gpio(gpio) < 0)
return -EINVAL;
flags = hard_local_irq_save();
if (unlikely(is_reserved(peri, gpio, 1))) {
if (system_state == SYSTEM_BOOTING)
dump_stack();
printk(KERN_ERR
"bfin-gpio: GPIO %d is already reserved as Peripheral by %s !\n",
gpio, get_label(gpio));
hard_local_irq_restore(flags);
return -EBUSY;
}
if (unlikely(is_reserved(gpio, gpio, 1)))
printk(KERN_NOTICE "bfin-gpio: GPIO %d is already reserved by %s! "
"(Documentation/blackfin/bfin-gpio-notes.txt)\n",
gpio, get_label(gpio));
reserve(gpio_irq, gpio);
set_label(gpio, label);
hard_local_irq_restore(flags);
port_setup(gpio, GPIO_USAGE);
return 0;
}
void bfin_gpio_irq_free(unsigned gpio)
{
unsigned long flags;
if (check_gpio(gpio) < 0)
return;
flags = hard_local_irq_save();
if (unlikely(!is_reserved(gpio_irq, gpio, 0))) {
if (system_state == SYSTEM_BOOTING)
dump_stack();
gpio_error(gpio);
hard_local_irq_restore(flags);
return;
}
unreserve(gpio_irq, gpio);
set_label(gpio, "free");
hard_local_irq_restore(flags);
}
static inline void __bfin_gpio_direction_input(unsigned gpio)
{
#ifdef CONFIG_BF54x
gpio_array[gpio_bank(gpio)]->dir_clear = gpio_bit(gpio);
#else
gpio_array[gpio_bank(gpio)]->dir &= ~gpio_bit(gpio);
#endif
gpio_array[gpio_bank(gpio)]->inen |= gpio_bit(gpio);
}
int bfin_gpio_direction_input(unsigned gpio)
{
unsigned long flags;
if (unlikely(!is_reserved(gpio, gpio, 0))) {
gpio_error(gpio);
return -EINVAL;
}
flags = hard_local_irq_save();
__bfin_gpio_direction_input(gpio);
AWA_DUMMY_READ(inen);
hard_local_irq_restore(flags);
return 0;
}
EXPORT_SYMBOL(bfin_gpio_direction_input);
void bfin_gpio_irq_prepare(unsigned gpio)
{
#ifdef CONFIG_BF54x
unsigned long flags;
#endif
port_setup(gpio, GPIO_USAGE);
#ifdef CONFIG_BF54x
flags = hard_local_irq_save();
__bfin_gpio_direction_input(gpio);
hard_local_irq_restore(flags);
#endif
}
void bfin_gpio_set_value(unsigned gpio, int arg)
{
if (arg)
gpio_array[gpio_bank(gpio)]->data_set = gpio_bit(gpio);
else
gpio_array[gpio_bank(gpio)]->data_clear = gpio_bit(gpio);
}
EXPORT_SYMBOL(bfin_gpio_set_value);
int bfin_gpio_direction_output(unsigned gpio, int value)
{
unsigned long flags;
if (unlikely(!is_reserved(gpio, gpio, 0))) {
gpio_error(gpio);
return -EINVAL;
}
flags = hard_local_irq_save();
gpio_array[gpio_bank(gpio)]->inen &= ~gpio_bit(gpio);
gpio_set_value(gpio, value);
#ifdef CONFIG_BF54x
gpio_array[gpio_bank(gpio)]->dir_set = gpio_bit(gpio);
#else
gpio_array[gpio_bank(gpio)]->dir |= gpio_bit(gpio);
#endif
AWA_DUMMY_READ(dir);
hard_local_irq_restore(flags);
return 0;
}
EXPORT_SYMBOL(bfin_gpio_direction_output);
/*
 * bfin_gpio_get_value - read the current level of @gpio.
 *
 * On non-BF54x parts, a pin configured for edge sensitivity is
 * temporarily switched to level mode so the data register reflects the
 * live pin state, then edge mode is restored; done with IRQs off so
 * the mode flip is not observable.
 */
int bfin_gpio_get_value(unsigned gpio)
{
#ifdef CONFIG_BF54x
return (1 & (gpio_array[gpio_bank(gpio)]->data >> gpio_sub_n(gpio)));
#else
unsigned long flags;
if (unlikely(get_gpio_edge(gpio))) {
int ret;
flags = hard_local_irq_save();
set_gpio_edge(gpio, 0);
ret = get_gpio_data(gpio);
set_gpio_edge(gpio, 1);
hard_local_irq_restore(flags);
return ret;
} else
return get_gpio_data(gpio);
#endif
}
EXPORT_SYMBOL(bfin_gpio_get_value);
/* If we are booting from SPI and our board lacks a strong enough pull up,
* the core can reset and execute the bootrom faster than the resistor can
* pull the signal logically high. To work around this (common) error in
* board design, we explicitly set the pin back to GPIO mode, force /CS
* high, and wait for the electrons to do their thing.
*
* This function only makes sense to be called from reset code, but it
* lives here as we need to force all the GPIO states w/out going through
* BUG() checks and such.
*/
void bfin_reset_boot_spi_cs(unsigned short pin)
{
unsigned short gpio = P_IDENT(pin);
port_setup(gpio, GPIO_USAGE);
gpio_array[gpio_bank(gpio)]->data_set = gpio_bit(gpio);
AWA_DUMMY_READ(data_set);
udelay(1);
}
#if defined(CONFIG_PROC_FS)
/*
 * gpio_proc_show - /proc/gpio seq_file handler: one line per reserved
 * resource, showing owner label, GPIO direction, or peripheral use.
 * Pins reserved both as GPIO and gpio-irq are marked with " *".
 */
static int gpio_proc_show(struct seq_file *m, void *v)
{
int c, irq, gpio;
for (c = 0; c < MAX_RESOURCES; c++) {
irq = is_reserved(gpio_irq, c, 1);
gpio = is_reserved(gpio, c, 1);
if (!check_gpio(c) && (gpio || irq))
seq_printf(m, "GPIO_%d: \t%s%s \t\tGPIO %s\n", c,
get_label(c), (gpio && irq) ? " *" : "",
get_gpio_dir(c) ? "OUTPUT" : "INPUT");
else if (is_reserved(peri, c, 1))
seq_printf(m, "GPIO_%d: \t%s \t\tPeripheral\n", c, get_label(c));
else
continue;
}
return 0;
}
static int gpio_proc_open(struct inode *inode, struct file *file)
{
return single_open(file, gpio_proc_show, NULL);
}
static const struct file_operations gpio_proc_ops = {
.open = gpio_proc_open,
.read = seq_read,
.llseek = seq_lseek,
.release = single_release,
};
static __init int gpio_register_proc(void)
{
struct proc_dir_entry *proc_gpio;
proc_gpio = proc_create("gpio", S_IRUGO, NULL, &gpio_proc_ops);
return proc_gpio != NULL;
}
__initcall(gpio_register_proc);
#endif
#ifdef CONFIG_GPIOLIB
static int bfin_gpiolib_direction_input(struct gpio_chip *chip, unsigned gpio)
{
return bfin_gpio_direction_input(gpio);
}
static int bfin_gpiolib_direction_output(struct gpio_chip *chip, unsigned gpio, int level)
{
return bfin_gpio_direction_output(gpio, level);
}
static int bfin_gpiolib_get_value(struct gpio_chip *chip, unsigned gpio)
{
return bfin_gpio_get_value(gpio);
}
static void bfin_gpiolib_set_value(struct gpio_chip *chip, unsigned gpio, int value)
{
return bfin_gpio_set_value(gpio, value);
}
static int bfin_gpiolib_gpio_request(struct gpio_chip *chip, unsigned gpio)
{
return bfin_gpio_request(gpio, chip->label);
}
static void bfin_gpiolib_gpio_free(struct gpio_chip *chip, unsigned gpio)
{
return bfin_gpio_free(gpio);
}
static int bfin_gpiolib_gpio_to_irq(struct gpio_chip *chip, unsigned gpio)
{
return gpio + GPIO_IRQ_BASE;
}
static struct gpio_chip bfin_chip = {
.label = "BFIN-GPIO",
.direction_input = bfin_gpiolib_direction_input,
.get = bfin_gpiolib_get_value,
.direction_output = bfin_gpiolib_direction_output,
.set = bfin_gpiolib_set_value,
.request = bfin_gpiolib_gpio_request,
.free = bfin_gpiolib_gpio_free,
.to_irq = bfin_gpiolib_gpio_to_irq,
.base = 0,
.ngpio = MAX_BLACKFIN_GPIOS,
};
static int __init bfin_gpiolib_setup(void)
{
return gpiochip_add(&bfin_chip);
}
arch_initcall(bfin_gpiolib_setup);
#endif
| gpl-2.0 |
christiantroy/linux-allwinner | arch/arm/mach-s3c64xx/cpufreq.c | 2444 | 6481 | /* linux/arch/arm/plat-s3c64xx/cpufreq.c
*
* Copyright 2009 Wolfson Microelectronics plc
*
* S3C64xx CPUfreq Support
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/cpufreq.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/regulator/consumer.h>
static struct clk *armclk;
static struct regulator *vddarm;
static unsigned long regulator_latency;
#ifdef CONFIG_CPU_S3C6410
struct s3c64xx_dvfs {
unsigned int vddarm_min;
unsigned int vddarm_max;
};
static struct s3c64xx_dvfs s3c64xx_dvfs_table[] = {
[0] = { 1000000, 1150000 },
[1] = { 1050000, 1150000 },
[2] = { 1100000, 1150000 },
[3] = { 1200000, 1350000 },
};
static struct cpufreq_frequency_table s3c64xx_freq_table[] = {
{ 0, 66000 },
{ 0, 133000 },
{ 1, 222000 },
{ 1, 266000 },
{ 2, 333000 },
{ 2, 400000 },
{ 2, 532000 },
{ 2, 533000 },
{ 3, 667000 },
{ 0, CPUFREQ_TABLE_END },
};
#endif
/* Validate a cpufreq policy against the S3C64xx frequency table (CPU0 only). */
static int s3c64xx_cpufreq_verify_speed(struct cpufreq_policy *policy)
{
	return policy->cpu == 0 ?
		cpufreq_frequency_table_verify(policy, s3c64xx_freq_table) :
		-EINVAL;
}
/* Report the current ARM clock rate in kHz; only CPU0 exists, others read 0. */
static unsigned int s3c64xx_cpufreq_get_speed(unsigned int cpu)
{
	return cpu == 0 ? clk_get_rate(armclk) / 1000 : 0;
}
/*
 * s3c64xx_cpufreq_set_target - transition CPU0 to the table entry best
 * matching @target_freq per @relation.
 *
 * Ordering matters: when raising the frequency, VDDARM is raised first
 * and the clock second; when lowering, the clock drops first and the
 * voltage second, so the core is never clocked above what the current
 * voltage supports.  On a voltage failure after the clock change, the
 * original rate is restored before returning the error.
 */
static int s3c64xx_cpufreq_set_target(struct cpufreq_policy *policy,
unsigned int target_freq,
unsigned int relation)
{
int ret;
unsigned int i;
struct cpufreq_freqs freqs;
struct s3c64xx_dvfs *dvfs;
ret = cpufreq_frequency_table_target(policy, s3c64xx_freq_table,
target_freq, relation, &i);
if (ret != 0)
return ret;
freqs.cpu = 0;
freqs.old = clk_get_rate(armclk) / 1000;
freqs.new = s3c64xx_freq_table[i].frequency;
freqs.flags = 0;
dvfs = &s3c64xx_dvfs_table[s3c64xx_freq_table[i].index];
if (freqs.old == freqs.new)
return 0;
pr_debug("cpufreq: Transition %d-%dkHz\n", freqs.old, freqs.new);
cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
#ifdef CONFIG_REGULATOR
/* raise voltage before increasing the clock */
if (vddarm && freqs.new > freqs.old) {
ret = regulator_set_voltage(vddarm,
dvfs->vddarm_min,
dvfs->vddarm_max);
if (ret != 0) {
pr_err("cpufreq: Failed to set VDDARM for %dkHz: %d\n",
freqs.new, ret);
goto err;
}
}
#endif
ret = clk_set_rate(armclk, freqs.new * 1000);
if (ret < 0) {
pr_err("cpufreq: Failed to set rate %dkHz: %d\n",
freqs.new, ret);
goto err;
}
#ifdef CONFIG_REGULATOR
/* lower voltage only after the clock has been reduced */
if (vddarm && freqs.new < freqs.old) {
ret = regulator_set_voltage(vddarm,
dvfs->vddarm_min,
dvfs->vddarm_max);
if (ret != 0) {
pr_err("cpufreq: Failed to set VDDARM for %dkHz: %d\n",
freqs.new, ret);
goto err_clk;
}
}
#endif
cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
pr_debug("cpufreq: Set actual frequency %lukHz\n",
clk_get_rate(armclk) / 1000);
return 0;
err_clk:
/* undo the clock change so voltage and rate stay consistent */
if (clk_set_rate(armclk, freqs.old * 1000) < 0)
pr_err("Failed to restore original clock rate\n");
err:
cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
return ret;
}
#ifdef CONFIG_REGULATOR
/*
 * Prune s3c64xx_freq_table of operating points whose VDDARM band the
 * regulator cannot deliver, and account for regulator programming time
 * in the transition latency.  Called once from driver init, after
 * vddarm has been obtained successfully.
 */
static void __init s3c64xx_cpufreq_config_regulator(void)
{
int count, v, i, found;
struct cpufreq_frequency_table *freq;
struct s3c64xx_dvfs *dvfs;
count = regulator_count_voltages(vddarm);
if (count < 0) {
pr_err("cpufreq: Unable to check supported voltages\n");
}
freq = s3c64xx_freq_table;
while (count > 0 && freq->frequency != CPUFREQ_TABLE_END) {
if (freq->frequency == CPUFREQ_ENTRY_INVALID) {
/* Bug fix: advance past invalid entries; a bare
 * "continue" here never incremented freq and so
 * looped forever on the same entry. */
freq++;
continue;
}
dvfs = &s3c64xx_dvfs_table[freq->index];
/* Keep the entry if any selectable voltage lies in its band */
found = 0;
for (i = 0; i < count; i++) {
v = regulator_list_voltage(vddarm, i);
if (v >= dvfs->vddarm_min && v <= dvfs->vddarm_max)
found = 1;
}
if (!found) {
pr_debug("cpufreq: %dkHz unsupported by regulator\n",
freq->frequency);
freq->frequency = CPUFREQ_ENTRY_INVALID;
}
freq++;
}
/* Guess based on having to do an I2C/SPI write; in future we
 * will be able to query the regulator performance here. */
regulator_latency = 1 * 1000 * 1000;
}
#endif
/*
 * cpufreq .init operation for CPU 0: obtain ARMCLK and (optionally) the
 * VDDARM regulator, invalidate table entries the clock or regulator
 * cannot provide, then populate policy limits and transition latency.
 * Returns 0 on success or a negative errno.
 */
static int s3c64xx_cpufreq_driver_init(struct cpufreq_policy *policy)
{
int ret;
struct cpufreq_frequency_table *freq;
if (policy->cpu != 0)
return -EINVAL;
/* NOTE(review): s3c64xx_freq_table is an array in this file, so this
 * NULL check can never fire; it only matters if the table becomes a
 * per-SoC pointer — confirm before relying on it. */
if (s3c64xx_freq_table == NULL) {
pr_err("cpufreq: No frequency information for this CPU\n");
return -ENODEV;
}
armclk = clk_get(NULL, "armclk");
if (IS_ERR(armclk)) {
pr_err("cpufreq: Unable to obtain ARMCLK: %ld\n",
PTR_ERR(armclk));
return PTR_ERR(armclk);
}
#ifdef CONFIG_REGULATOR
/* Regulator is optional: without it we only scale frequency */
vddarm = regulator_get(NULL, "vddarm");
if (IS_ERR(vddarm)) {
ret = PTR_ERR(vddarm);
pr_err("cpufreq: Failed to obtain VDDARM: %d\n", ret);
pr_err("cpufreq: Only frequency scaling available\n");
vddarm = NULL;
} else {
s3c64xx_cpufreq_config_regulator();
}
#endif
freq = s3c64xx_freq_table;
while (freq->frequency != CPUFREQ_TABLE_END) {
unsigned long r;
/* Check for frequencies we can generate */
r = clk_round_rate(armclk, freq->frequency * 1000);
r /= 1000;
if (r != freq->frequency) {
pr_debug("cpufreq: %dkHz unsupported by clock\n",
freq->frequency);
freq->frequency = CPUFREQ_ENTRY_INVALID;
}
/* If we have no regulator then assume startup
 * frequency is the maximum we can support. */
if (!vddarm && freq->frequency > s3c64xx_cpufreq_get_speed(0))
freq->frequency = CPUFREQ_ENTRY_INVALID;
freq++;
}
policy->cur = clk_get_rate(armclk) / 1000;
/* Datasheet says PLL stabalisation time (if we were to use
 * the PLLs, which we don't currently) is ~300us worst case,
 * but add some fudge.
 */
policy->cpuinfo.transition_latency = (500 * 1000) + regulator_latency;
ret = cpufreq_frequency_table_cpuinfo(policy, s3c64xx_freq_table);
if (ret != 0) {
pr_err("cpufreq: Failed to configure frequency table: %d\n",
ret);
/* regulator_put(NULL) is a safe no-op if vddarm was not found */
regulator_put(vddarm);
clk_put(armclk);
}
return ret;
}
/* cpufreq core callbacks for the S3C64xx DVFS driver. */
static struct cpufreq_driver s3c64xx_cpufreq_driver = {
.owner = THIS_MODULE,
.flags = 0,
.verify = s3c64xx_cpufreq_verify_speed,
.target = s3c64xx_cpufreq_set_target,
.get = s3c64xx_cpufreq_get_speed,
.init = s3c64xx_cpufreq_driver_init,
.name = "s3c",
};
/* Module entry point: register the driver with the cpufreq core. */
static int __init s3c64xx_cpufreq_init(void)
{
	int ret;

	ret = cpufreq_register_driver(&s3c64xx_cpufreq_driver);
	return ret;
}
module_init(s3c64xx_cpufreq_init);
| gpl-2.0 |
nicholaschw/jared-rA | fs/affs/namei.c | 2956 | 11072 | /*
* linux/fs/affs/namei.c
*
* (c) 1996 Hans-Joachim Widmaier - Rewritten
*
* (C) 1993 Ray Burr - Modified for Amiga FFS filesystem.
*
* (C) 1991 Linus Torvalds - minix filesystem
*/
#include "affs.h"
typedef int (*toupper_t)(int);
static int affs_toupper(int ch);
static int affs_hash_dentry(const struct dentry *,
const struct inode *, struct qstr *);
static int affs_compare_dentry(const struct dentry *parent,
const struct inode *pinode,
const struct dentry *dentry, const struct inode *inode,
unsigned int len, const char *str, const struct qstr *name);
static int affs_intl_toupper(int ch);
static int affs_intl_hash_dentry(const struct dentry *,
const struct inode *, struct qstr *);
static int affs_intl_compare_dentry(const struct dentry *parent,
const struct inode *pinode,
const struct dentry *dentry, const struct inode *inode,
unsigned int len, const char *str, const struct qstr *name);
/* dcache callbacks for DOS\1 volumes: plain ASCII case folding. */
const struct dentry_operations affs_dentry_operations = {
.d_hash = affs_hash_dentry,
.d_compare = affs_compare_dentry,
};
/* dcache callbacks for DOS\3 ("international") volumes: Latin-1 folding. */
const struct dentry_operations affs_intl_dentry_operations = {
.d_hash = affs_intl_hash_dentry,
.d_compare = affs_intl_compare_dentry,
};
/* Simple toupper() for DOS\1: fold ASCII 'a'-'z' only. */
static int
affs_toupper(int ch)
{
	if (ch >= 'a' && ch <= 'z')
		return ch - ('a' - 'A');
	return ch;
}
/* International toupper() for DOS\3 ("international") */
static int
affs_intl_toupper(int ch)
{
	if (ch >= 'a' && ch <= 'z')
		return ch - ('a' - 'A');
	/* Latin-1 lowercase block, excluding the division sign 0xF7 */
	if (ch >= 0xE0 && ch <= 0xFE && ch != 0xF7)
		return ch - ('a' - 'A');
	return ch;
}
/* Select the volume's case-folding routine from the INTL mount flag. */
static inline toupper_t
affs_get_toupper(struct super_block *sb)
{
	if (AFFS_SB(sb)->s_flags & SF_INTL)
		return affs_intl_toupper;
	return affs_toupper;
}
/*
 * Note: the dentry argument is the parent dentry.
 *
 * Compute qstr->hash with the given case-folding function; only the
 * first 30 characters are significant on AFFS.  Returns 0, or the
 * error from affs_check_name() if the name is invalid.
 */
static inline int
__affs_hash_dentry(struct qstr *qstr, toupper_t toupper)
{
	const u8 *p = qstr->name;
	unsigned long h;
	int err, remaining;

	err = affs_check_name(qstr->name, qstr->len);
	if (err)
		return err;

	h = init_name_hash();
	for (remaining = min(qstr->len, 30u); remaining > 0; remaining--)
		h = partial_name_hash(toupper(*p++), h);
	qstr->hash = end_name_hash(h);
	return 0;
}
/* d_hash for DOS\1 volumes: hash with plain ASCII case folding. */
static int
affs_hash_dentry(const struct dentry *dentry, const struct inode *inode,
struct qstr *qstr)
{
return __affs_hash_dentry(qstr, affs_toupper);
}
/* d_hash for DOS\3 volumes: hash with international case folding. */
static int
affs_intl_hash_dentry(const struct dentry *dentry, const struct inode *inode,
struct qstr *qstr)
{
return __affs_hash_dentry(qstr, affs_intl_toupper);
}
/*
 * Case-insensitive name comparison honouring the AFFS 30-character
 * name limit.  Returns 0 on match, 1 on mismatch (d_compare convention).
 */
static inline int __affs_compare_dentry(unsigned int len,
	const char *str, const struct qstr *name, toupper_t toupper)
{
	const u8 *a = str;
	const u8 *b = name->name;
	unsigned int n;

	/*
	 * 'str' is the name of an already existing dentry and therefore
	 * known to be valid; the incoming 'name' must be checked first.
	 */
	if (affs_check_name(name->name, name->len))
		return 1;

	/*
	 * Names longer than the allowed 30 chars are silently truncated
	 * by the filesystem, so their lengths may legitimately differ.
	 */
	if (len >= 30) {
		if (name->len < 30)
			return 1;
		n = 30;
	} else {
		if (len != name->len)
			return 1;
		n = len;
	}

	while (n--) {
		if (toupper(*a++) != toupper(*b++))
			return 1;
	}
	return 0;
}
/* d_compare for DOS\1 volumes: compare with plain ASCII case folding. */
static int
affs_compare_dentry(const struct dentry *parent, const struct inode *pinode,
const struct dentry *dentry, const struct inode *inode,
unsigned int len, const char *str, const struct qstr *name)
{
return __affs_compare_dentry(len, str, name, affs_toupper);
}
/* d_compare for DOS\3 volumes: compare with international case folding. */
static int
affs_intl_compare_dentry(const struct dentry *parent,const struct inode *pinode,
const struct dentry *dentry, const struct inode *inode,
unsigned int len, const char *str, const struct qstr *name)
{
return __affs_compare_dentry(len, str, name, affs_intl_toupper);
}
/*
 * NOTE! unlike strncmp, affs_match returns 1 for success, 0 for failure.
 *
 * @name2 is an on-disk BCPL string: name2[0] holds the length and the
 * characters follow.  Comparison is case-folded via @toupper and capped
 * at the AFFS 30-character maximum.
 */
static inline int
affs_match(struct dentry *dentry, const u8 *name2, toupper_t toupper)
{
const u8 *name = dentry->d_name.name;
int len = dentry->d_name.len;
if (len >= 30) {
/* a truncated name can only match another truncated name */
if (*name2 < 30)
return 0;
len = 30;
} else if (len != *name2)
return 0;
/* skip the length byte, then compare case-folded characters */
for (name2++; len > 0; len--)
if (toupper(*name++) != toupper(*name2++))
return 0;
return 1;
}
/*
 * Hash a name into a directory hash-table slot, case-folded according
 * to the superblock's DOS type.  Only the first 30 characters count.
 */
int
affs_hash_name(struct super_block *sb, const u8 *name, unsigned int len)
{
	toupper_t toupper = affs_get_toupper(sb);
	unsigned int i;
	int hash;

	len = min(len, 30u);
	hash = len;
	for (i = 0; i < len; i++)
		hash = (hash * 13 + toupper(*name++)) & 0x7ff;
	return hash % AFFS_SB(sb)->s_hashsize;
}
/*
 * Search @dir's on-disk hash chain for @dentry's name.
 * Returns the matching entry's buffer_head (caller must release it),
 * NULL if the name is absent, or ERR_PTR(-EIO) on a read failure.
 * Caller is expected to hold the directory lock.
 */
static struct buffer_head *
affs_find_entry(struct inode *dir, struct dentry *dentry)
{
struct super_block *sb = dir->i_sb;
struct buffer_head *bh;
toupper_t toupper = affs_get_toupper(sb);
u32 key;
pr_debug("AFFS: find_entry(\"%.*s\")\n", (int)dentry->d_name.len, dentry->d_name.name);
bh = affs_bread(sb, dir->i_ino);
if (!bh)
return ERR_PTR(-EIO);
/* head of the hash chain for this name's bucket */
key = be32_to_cpu(AFFS_HEAD(bh)->table[affs_hash_name(sb, dentry->d_name.name, dentry->d_name.len)]);
for (;;) {
/* release the previous block before following the chain */
affs_brelse(bh);
if (key == 0)
return NULL;
bh = affs_bread(sb, key);
if (!bh)
return ERR_PTR(-EIO);
if (affs_match(dentry, AFFS_TAIL(sb, bh)->name, toupper))
return bh;
key = be32_to_cpu(AFFS_TAIL(sb, bh)->hash_chain);
}
}
/*
 * VFS lookup: find @dentry in @dir, resolving hard links to the
 * original file's header, and instantiate the dentry (with a negative
 * entry if the name does not exist).  Returns NULL on success or an
 * ERR_PTR on I/O/iget failure.
 */
struct dentry *
affs_lookup(struct inode *dir, struct dentry *dentry, struct nameidata *nd)
{
struct super_block *sb = dir->i_sb;
struct buffer_head *bh;
struct inode *inode = NULL;
pr_debug("AFFS: lookup(\"%.*s\")\n",(int)dentry->d_name.len,dentry->d_name.name);
affs_lock_dir(dir);
bh = affs_find_entry(dir, dentry);
affs_unlock_dir(dir);
if (IS_ERR(bh))
return ERR_CAST(bh);
if (bh) {
u32 ino = bh->b_blocknr;
/* store the real header ino in d_fsdata for faster lookups */
dentry->d_fsdata = (void *)(long)ino;
switch (be32_to_cpu(AFFS_TAIL(sb, bh)->stype)) {
//link to dirs disabled
//case ST_LINKDIR:
case ST_LINKFILE:
/* follow the link entry to the original file's header */
ino = be32_to_cpu(AFFS_TAIL(sb, bh)->original);
}
affs_brelse(bh);
inode = affs_iget(sb, ino);
if (IS_ERR(inode))
return ERR_CAST(inode);
}
/* inode == NULL instantiates a negative dentry */
d_add(dentry, inode);
return NULL;
}
/* VFS unlink: removing the on-disk header drops the directory entry. */
int
affs_unlink(struct inode *dir, struct dentry *dentry)
{
pr_debug("AFFS: unlink(dir=%d, %lu \"%.*s\")\n", (u32)dir->i_ino,
dentry->d_inode->i_ino,
(int)dentry->d_name.len, dentry->d_name.name);
return affs_remove_header(dentry);
}
/*
 * VFS create: allocate a new regular-file inode, wire up its operations
 * (OFS or FFS address-space ops per the mount flags) and link it into
 * @dir.  Returns 0 or a negative errno; on failure the inode is freed.
 */
int
affs_create(struct inode *dir, struct dentry *dentry, int mode, struct nameidata *nd)
{
struct super_block *sb = dir->i_sb;
struct inode *inode;
int error;
pr_debug("AFFS: create(%lu,\"%.*s\",0%o)\n",dir->i_ino,(int)dentry->d_name.len,
dentry->d_name.name,mode);
inode = affs_new_inode(dir);
if (!inode)
return -ENOSPC;
inode->i_mode = mode;
mode_to_prot(inode);
mark_inode_dirty(inode);
inode->i_op = &affs_file_inode_operations;
inode->i_fop = &affs_file_operations;
inode->i_mapping->a_ops = (AFFS_SB(sb)->s_flags & SF_OFS) ? &affs_aops_ofs : &affs_aops;
error = affs_add_entry(dir, inode, dentry, ST_FILE);
if (error) {
/* zero the link count so iput() releases the inode */
inode->i_nlink = 0;
iput(inode);
return error;
}
return 0;
}
/*
 * VFS mkdir: allocate a new directory inode and link it into @dir as a
 * user directory entry.  Returns 0 or a negative errno; on failure the
 * inode is freed.
 */
int
affs_mkdir(struct inode *dir, struct dentry *dentry, int mode)
{
struct inode *inode;
int error;
pr_debug("AFFS: mkdir(%lu,\"%.*s\",0%o)\n",dir->i_ino,
(int)dentry->d_name.len,dentry->d_name.name,mode);
inode = affs_new_inode(dir);
if (!inode)
return -ENOSPC;
inode->i_mode = S_IFDIR | mode;
mode_to_prot(inode);
inode->i_op = &affs_dir_inode_operations;
inode->i_fop = &affs_dir_operations;
error = affs_add_entry(dir, inode, dentry, ST_USERDIR);
if (error) {
/* zero the link count so iput() releases the inode */
inode->i_nlink = 0;
mark_inode_dirty(inode);
iput(inode);
return error;
}
return 0;
}
/* VFS rmdir: removing the on-disk header drops the directory entry. */
int
affs_rmdir(struct inode *dir, struct dentry *dentry)
{
pr_debug("AFFS: rmdir(dir=%u, %lu \"%.*s\")\n", (u32)dir->i_ino,
dentry->d_inode->i_ino,
(int)dentry->d_name.len, dentry->d_name.name);
return affs_remove_header(dentry);
}
/*
 * VFS symlink: allocate an inode and store @symname in the header
 * block's hash-table area, rewritten into Amiga path syntax:
 * a leading '/' becomes "<volume>:", "../" becomes a bare '/', and
 * "./" components and duplicate slashes are dropped.
 */
int
affs_symlink(struct inode *dir, struct dentry *dentry, const char *symname)
{
struct super_block *sb = dir->i_sb;
struct buffer_head *bh;
struct inode *inode;
char *p;
int i, maxlen, error;
char c, lc;
pr_debug("AFFS: symlink(%lu,\"%.*s\" -> \"%s\")\n",dir->i_ino,
(int)dentry->d_name.len,dentry->d_name.name,symname);
/* target text lives in the header's table area; reserve a NUL byte */
maxlen = AFFS_SB(sb)->s_hashsize * sizeof(u32) - 1;
inode = affs_new_inode(dir);
if (!inode)
return -ENOSPC;
inode->i_op = &affs_symlink_inode_operations;
inode->i_data.a_ops = &affs_symlink_aops;
inode->i_mode = S_IFLNK | 0777;
mode_to_prot(inode);
error = -EIO;
bh = affs_bread(sb, inode->i_ino);
if (!bh)
goto err;
i = 0;
p = (char *)AFFS_HEAD(bh)->table;
/* lc tracks the last emitted char; '/' enables "./" and "../" folding */
lc = '/';
if (*symname == '/') {
/* absolute path: translate the leading '/' to the volume name */
struct affs_sb_info *sbi = AFFS_SB(sb);
while (*symname == '/')
symname++;
spin_lock(&sbi->symlink_lock);
while (sbi->s_volume[i]) /* Cannot overflow */
*p++ = sbi->s_volume[i++];
spin_unlock(&sbi->symlink_lock);
}
while (i < maxlen && (c = *symname++)) {
if (c == '.' && lc == '/' && *symname == '.' && symname[1] == '/') {
/* "../" -> '/' (Amiga parent-directory syntax) */
*p++ = '/';
i++;
symname += 2;
lc = '/';
} else if (c == '.' && lc == '/' && *symname == '/') {
/* "./" is a no-op component; skip it */
symname++;
lc = '/';
} else {
*p++ = c;
lc = c;
i++;
}
/* collapse runs of slashes */
if (lc == '/')
while (*symname == '/')
symname++;
}
*p = 0;
mark_buffer_dirty_inode(bh, inode);
affs_brelse(bh);
mark_inode_dirty(inode);
error = affs_add_entry(dir, inode, dentry, ST_SOFTLINK);
if (error)
goto err;
return 0;
err:
/* zero the link count so iput() releases the half-built inode */
inode->i_nlink = 0;
mark_inode_dirty(inode);
iput(inode);
return error;
}
/* VFS link: add a hard-link entry in @dir pointing at the old inode. */
int
affs_link(struct dentry *old_dentry, struct inode *dir, struct dentry *dentry)
{
struct inode *inode = old_dentry->d_inode;
pr_debug("AFFS: link(%u, %u, \"%.*s\")\n", (u32)inode->i_ino, (u32)dir->i_ino,
(int)dentry->d_name.len,dentry->d_name.name);
return affs_add_entry(dir, inode, dentry, ST_LINKFILE);
}
/*
 * VFS rename: unlink any existing destination, then move the source's
 * header block from the old directory's hash table into the new one
 * under the new name.  Returns 0 or a negative errno.
 */
int
affs_rename(struct inode *old_dir, struct dentry *old_dentry,
struct inode *new_dir, struct dentry *new_dentry)
{
struct super_block *sb = old_dir->i_sb;
struct buffer_head *bh = NULL;
int retval;
pr_debug("AFFS: rename(old=%u,\"%*s\" to new=%u,\"%*s\")\n",
(u32)old_dir->i_ino, (int)old_dentry->d_name.len, old_dentry->d_name.name,
(u32)new_dir->i_ino, (int)new_dentry->d_name.len, new_dentry->d_name.name);
retval = affs_check_name(new_dentry->d_name.name,new_dentry->d_name.len);
if (retval)
return retval;
/* Unlink destination if it already exists */
if (new_dentry->d_inode) {
retval = affs_remove_header(new_dentry);
if (retval)
return retval;
}
bh = affs_bread(sb, old_dentry->d_inode->i_ino);
if (!bh)
return -EIO;
/* Remove header from its parent directory. */
affs_lock_dir(old_dir);
retval = affs_remove_hash(old_dir, bh);
affs_unlock_dir(old_dir);
if (retval)
goto done;
/* And insert it into the new directory with the new name. */
affs_copy_name(AFFS_TAIL(sb, bh)->name, new_dentry);
affs_fix_checksum(sb, bh);
affs_lock_dir(new_dir);
retval = affs_insert_hash(new_dir, bh);
affs_unlock_dir(new_dir);
/* TODO: move it back to old_dir, if error? */
done:
/* dirty against whichever directory the header now belongs to */
mark_buffer_dirty_inode(bh, retval ? old_dir : new_dir);
affs_brelse(bh);
return retval;
}
| gpl-2.0 |
eoghan2t9/primou-kernel-IRONMAN | drivers/isdn/hardware/avm/b1pci.c | 4236 | 10897 | /* $Id: b1pci.c,v 1.1.2.2 2004/01/16 21:09:27 keil Exp $
*
* Module for AVM B1 PCI-card.
*
* Copyright 1999 by Carsten Paeth <calle@calle.de>
*
* This software may be used and distributed according to the terms
* of the GNU General Public License, incorporated herein by reference.
*
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/delay.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/capi.h>
#include <asm/io.h>
#include <linux/init.h>
#include <linux/isdn/capicmd.h>
#include <linux/isdn/capiutil.h>
#include <linux/isdn/capilli.h>
#include "avmcard.h"
/* ------------------------------------------------------------- */
static char *revision = "$Revision: 1.1.2.2 $";
/* ------------------------------------------------------------- */
static struct pci_device_id b1pci_pci_tbl[] = {
{ PCI_VENDOR_ID_AVM, PCI_DEVICE_ID_AVM_B1, PCI_ANY_ID, PCI_ANY_ID },
{ } /* Terminating entry */
};
MODULE_DEVICE_TABLE(pci, b1pci_pci_tbl);
MODULE_DESCRIPTION("CAPI4Linux: Driver for AVM B1 PCI card");
MODULE_AUTHOR("Carsten Paeth");
MODULE_LICENSE("GPL");
/* ------------------------------------------------------------- */
/*
 * CAPI .procinfo callback: format a one-line summary of the controller
 * (card name, firmware version, I/O port, IRQ, hardware revision) into
 * the controller's infobuf and return it.
 */
static char *b1pci_procinfo(struct capi_ctr *ctrl)
{
avmctrl_info *cinfo = (avmctrl_info *)(ctrl->driverdata);
if (!cinfo)
return "";
sprintf(cinfo->infobuf, "%s %s 0x%x %d r%d",
cinfo->cardname[0] ? cinfo->cardname : "-",
cinfo->version[VER_DRIVER] ? cinfo->version[VER_DRIVER] : "-",
cinfo->card ? cinfo->card->port : 0x0,
cinfo->card ? cinfo->card->irq : 0,
cinfo->card ? cinfo->card->revision : 0
);
return cinfo->infobuf;
}
/* ------------------------------------------------------------- */
/*
 * Probe and register a non-DMA B1 PCI card: claim the I/O region,
 * detect and reset the hardware, hook the interrupt and attach a CAPI
 * controller.  Resources are unwound via the goto chain on failure.
 * Returns 0 or a negative errno.
 */
static int b1pci_probe(struct capicardparams *p, struct pci_dev *pdev)
{
avmcard *card;
avmctrl_info *cinfo;
int retval;
card = b1_alloc_card(1);
if (!card) {
printk(KERN_WARNING "b1pci: no memory.\n");
retval = -ENOMEM;
goto err;
}
cinfo = card->ctrlinfo;
sprintf(card->name, "b1pci-%x", p->port);
card->port = p->port;
card->irq = p->irq;
card->cardtype = avm_b1pci;
if (!request_region(card->port, AVMB1_PORTLEN, card->name)) {
printk(KERN_WARNING "b1pci: ports 0x%03x-0x%03x in use.\n",
card->port, card->port + AVMB1_PORTLEN);
retval = -EBUSY;
goto err_free;
}
b1_reset(card->port);
retval = b1_detect(card->port, card->cardtype);
if (retval) {
printk(KERN_NOTICE "b1pci: NO card at 0x%x (%d)\n",
card->port, retval);
retval = -ENODEV;
goto err_release_region;
}
/* second reset puts the detected card back into a clean state */
b1_reset(card->port);
b1_getrevision(card);
retval = request_irq(card->irq, b1_interrupt, IRQF_SHARED, card->name, card);
if (retval) {
printk(KERN_ERR "b1pci: unable to get IRQ %d.\n", card->irq);
retval = -EBUSY;
goto err_release_region;
}
/* fill in the CAPI controller callbacks (PIO variants) */
cinfo->capi_ctrl.driver_name = "b1pci";
cinfo->capi_ctrl.driverdata = cinfo;
cinfo->capi_ctrl.register_appl = b1_register_appl;
cinfo->capi_ctrl.release_appl = b1_release_appl;
cinfo->capi_ctrl.send_message = b1_send_message;
cinfo->capi_ctrl.load_firmware = b1_load_firmware;
cinfo->capi_ctrl.reset_ctr = b1_reset_ctr;
cinfo->capi_ctrl.procinfo = b1pci_procinfo;
cinfo->capi_ctrl.proc_fops = &b1ctl_proc_fops;
strcpy(cinfo->capi_ctrl.name, card->name);
cinfo->capi_ctrl.owner = THIS_MODULE;
retval = attach_capi_ctr(&cinfo->capi_ctrl);
if (retval) {
printk(KERN_ERR "b1pci: attach controller failed.\n");
goto err_free_irq;
}
if (card->revision >= 4) {
printk(KERN_INFO "b1pci: AVM B1 PCI V4 at i/o %#x, irq %d, revision %d (no dma)\n",
card->port, card->irq, card->revision);
} else {
printk(KERN_INFO "b1pci: AVM B1 PCI at i/o %#x, irq %d, revision %d\n",
card->port, card->irq, card->revision);
}
pci_set_drvdata(pdev, card);
return 0;
err_free_irq:
free_irq(card->irq, card);
err_release_region:
release_region(card->port, AVMB1_PORTLEN);
err_free:
b1_free_card(card);
err:
return retval;
}
/*
 * Tear down a non-DMA B1 PCI card: quiesce the hardware, detach the
 * CAPI controller, then release IRQ, I/O region and card memory.
 */
static void b1pci_remove(struct pci_dev *pdev)
{
avmcard *card = pci_get_drvdata(pdev);
avmctrl_info *cinfo = card->ctrlinfo;
unsigned int port = card->port;
/* double reset quiesces the card before detaching */
b1_reset(port);
b1_reset(port);
detach_capi_ctr(&cinfo->capi_ctrl);
free_irq(card->irq, card);
release_region(card->port, AVMB1_PORTLEN);
b1_free_card(card);
}
#ifdef CONFIG_ISDN_DRV_AVMB1_B1PCIV4
/* ------------------------------------------------------------- */
/*
 * CAPI .procinfo callback for the V4 (DMA) variant: like
 * b1pci_procinfo() but also reports the memory base address.
 */
static char *b1pciv4_procinfo(struct capi_ctr *ctrl)
{
avmctrl_info *cinfo = (avmctrl_info *)(ctrl->driverdata);
if (!cinfo)
return "";
sprintf(cinfo->infobuf, "%s %s 0x%x %d 0x%lx r%d",
cinfo->cardname[0] ? cinfo->cardname : "-",
cinfo->version[VER_DRIVER] ? cinfo->version[VER_DRIVER] : "-",
cinfo->card ? cinfo->card->port : 0x0,
cinfo->card ? cinfo->card->irq : 0,
cinfo->card ? cinfo->card->membase : 0,
cinfo->card ? cinfo->card->revision : 0
);
return cinfo->infobuf;
}
/* ------------------------------------------------------------- */
/*
 * Probe and register a B1 PCI V4 card in DMA mode: allocate DMA
 * buffers, claim I/O and memory resources, detect the hardware, hook
 * the DMA interrupt and attach a CAPI controller using the b1dma
 * callbacks.  Resources are unwound via the goto chain on failure.
 * Returns 0 or a negative errno.
 */
static int b1pciv4_probe(struct capicardparams *p, struct pci_dev *pdev)
{
avmcard *card;
avmctrl_info *cinfo;
int retval;
card = b1_alloc_card(1);
if (!card) {
printk(KERN_WARNING "b1pci: no memory.\n");
retval = -ENOMEM;
goto err;
}
/* rx/tx DMA buffers: 2048 payload + 128 header bytes each */
card->dma = avmcard_dma_alloc("b1pci", pdev, 2048+128, 2048+128);
if (!card->dma) {
printk(KERN_WARNING "b1pci: dma alloc.\n");
retval = -ENOMEM;
goto err_free;
}
cinfo = card->ctrlinfo;
sprintf(card->name, "b1pciv4-%x", p->port);
card->port = p->port;
card->irq = p->irq;
card->membase = p->membase;
card->cardtype = avm_b1pci;
if (!request_region(card->port, AVMB1_PORTLEN, card->name)) {
printk(KERN_WARNING "b1pci: ports 0x%03x-0x%03x in use.\n",
card->port, card->port + AVMB1_PORTLEN);
retval = -EBUSY;
goto err_free_dma;
}
card->mbase = ioremap(card->membase, 64);
if (!card->mbase) {
printk(KERN_NOTICE "b1pci: can't remap memory at 0x%lx\n",
card->membase);
retval = -ENOMEM;
goto err_release_region;
}
b1dma_reset(card);
retval = b1pciv4_detect(card);
if (retval) {
printk(KERN_NOTICE "b1pci: NO card at 0x%x (%d)\n",
card->port, retval);
retval = -ENODEV;
goto err_unmap;
}
/* second reset puts the detected card back into a clean state */
b1dma_reset(card);
b1_getrevision(card);
retval = request_irq(card->irq, b1dma_interrupt, IRQF_SHARED, card->name, card);
if (retval) {
printk(KERN_ERR "b1pci: unable to get IRQ %d.\n",
card->irq);
retval = -EBUSY;
goto err_unmap;
}
/* fill in the CAPI controller callbacks (DMA variants) */
cinfo->capi_ctrl.owner = THIS_MODULE;
cinfo->capi_ctrl.driver_name = "b1pciv4";
cinfo->capi_ctrl.driverdata = cinfo;
cinfo->capi_ctrl.register_appl = b1dma_register_appl;
cinfo->capi_ctrl.release_appl = b1dma_release_appl;
cinfo->capi_ctrl.send_message = b1dma_send_message;
cinfo->capi_ctrl.load_firmware = b1dma_load_firmware;
cinfo->capi_ctrl.reset_ctr = b1dma_reset_ctr;
cinfo->capi_ctrl.procinfo = b1pciv4_procinfo;
cinfo->capi_ctrl.proc_fops = &b1dmactl_proc_fops;
strcpy(cinfo->capi_ctrl.name, card->name);
retval = attach_capi_ctr(&cinfo->capi_ctrl);
if (retval) {
printk(KERN_ERR "b1pci: attach controller failed.\n");
goto err_free_irq;
}
card->cardnr = cinfo->capi_ctrl.cnr;
printk(KERN_INFO "b1pci: AVM B1 PCI V4 at i/o %#x, irq %d, mem %#lx, revision %d (dma)\n",
card->port, card->irq, card->membase, card->revision);
pci_set_drvdata(pdev, card);
return 0;
err_free_irq:
free_irq(card->irq, card);
err_unmap:
iounmap(card->mbase);
err_release_region:
release_region(card->port, AVMB1_PORTLEN);
err_free_dma:
avmcard_dma_free(card->dma);
err_free:
b1_free_card(card);
err:
return retval;
}
/*
 * Tear down a B1 PCI V4 (DMA) card: quiesce the hardware, detach the
 * CAPI controller, then release IRQ, mapping, I/O region, DMA buffers
 * and the card structure — the reverse of b1pciv4_probe().
 */
static void b1pciv4_remove(struct pci_dev *pdev)
{
avmcard *card = pci_get_drvdata(pdev);
avmctrl_info *cinfo = card->ctrlinfo;
b1dma_reset(card);
detach_capi_ctr(&cinfo->capi_ctrl);
free_irq(card->irq, card);
iounmap(card->mbase);
release_region(card->port, AVMB1_PORTLEN);
avmcard_dma_free(card->dma);
b1_free_card(card);
}
#endif /* CONFIG_ISDN_DRV_AVMB1_B1PCIV4 */
/*
 * PCI probe entry point: enable the device, then dispatch to the V4
 * (DMA) or classic probe path.  A populated BAR 2 identifies a B1 PCI
 * V4 card; otherwise BAR 1 carries the I/O port of a classic B1.
 */
static int __devinit b1pci_pci_probe(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
struct capicardparams param;
int retval;
if (pci_enable_device(pdev) < 0) {
printk(KERN_ERR "b1pci: failed to enable AVM-B1\n");
return -ENODEV;
}
param.irq = pdev->irq;
if (pci_resource_start(pdev, 2)) { /* B1 PCI V4 */
#ifdef CONFIG_ISDN_DRV_AVMB1_B1PCIV4
/* DMA operation needs bus mastering */
pci_set_master(pdev);
#endif
param.membase = pci_resource_start(pdev, 0);
param.port = pci_resource_start(pdev, 2);
printk(KERN_INFO "b1pci: PCI BIOS reports AVM-B1 V4 at i/o %#x, irq %d, mem %#x\n",
param.port, param.irq, param.membase);
#ifdef CONFIG_ISDN_DRV_AVMB1_B1PCIV4
retval = b1pciv4_probe(&param, pdev);
#else
/* V4 support compiled out: drive the card in non-DMA mode */
retval = b1pci_probe(&param, pdev);
#endif
if (retval != 0) {
printk(KERN_ERR "b1pci: no AVM-B1 V4 at i/o %#x, irq %d, mem %#x detected\n",
param.port, param.irq, param.membase);
}
} else {
param.membase = 0;
param.port = pci_resource_start(pdev, 1);
printk(KERN_INFO "b1pci: PCI BIOS reports AVM-B1 at i/o %#x, irq %d\n",
param.port, param.irq);
retval = b1pci_probe(&param, pdev);
if (retval != 0) {
printk(KERN_ERR "b1pci: no AVM-B1 at i/o %#x, irq %d detected\n",
param.port, param.irq);
}
}
return retval;
}
/*
 * PCI remove entry point: a non-NULL card->dma marks a card that was
 * probed via the V4/DMA path and must be torn down accordingly.
 */
static void __devexit b1pci_pci_remove(struct pci_dev *pdev)
{
#ifdef CONFIG_ISDN_DRV_AVMB1_B1PCIV4
avmcard *card = pci_get_drvdata(pdev);
if (card->dma)
b1pciv4_remove(pdev);
else
b1pci_remove(pdev);
#else
b1pci_remove(pdev);
#endif
}
/* PCI binding for the AVM B1 card. */
static struct pci_driver b1pci_pci_driver = {
.name = "b1pci",
.id_table = b1pci_pci_tbl,
.probe = b1pci_pci_probe,
.remove = __devexit_p(b1pci_pci_remove),
};
/* CAPI driver registration; .revision is overwritten at init time. */
static struct capi_driver capi_driver_b1pci = {
.name = "b1pci",
.revision = "1.0",
};
#ifdef CONFIG_ISDN_DRV_AVMB1_B1PCIV4
static struct capi_driver capi_driver_b1pciv4 = {
.name = "b1pciv4",
.revision = "1.0",
};
#endif
/*
 * Module init: extract the revision number from the CVS "$Revision$"
 * keyword string (falling back to "1.0"), register the PCI driver and,
 * on success, announce the CAPI driver(s) with that revision.
 */
static int __init b1pci_init(void)
{
char *p;
char rev[32];
int err;
/* parse "$Revision: x.y $" -> "x.y" */
if ((p = strchr(revision, ':')) != NULL && p[1]) {
strlcpy(rev, p + 2, 32);
if ((p = strchr(rev, '$')) != NULL && p > rev)
*(p-1) = 0;
} else
strcpy(rev, "1.0");
err = pci_register_driver(&b1pci_pci_driver);
if (!err) {
strlcpy(capi_driver_b1pci.revision, rev, 32);
register_capi_driver(&capi_driver_b1pci);
#ifdef CONFIG_ISDN_DRV_AVMB1_B1PCIV4
strlcpy(capi_driver_b1pciv4.revision, rev, 32);
register_capi_driver(&capi_driver_b1pciv4);
#endif
printk(KERN_INFO "b1pci: revision %s\n", rev);
}
return err;
}
/* Module exit: unregister the CAPI driver(s), then the PCI driver. */
static void __exit b1pci_exit(void)
{
unregister_capi_driver(&capi_driver_b1pci);
#ifdef CONFIG_ISDN_DRV_AVMB1_B1PCIV4
unregister_capi_driver(&capi_driver_b1pciv4);
#endif
pci_unregister_driver(&b1pci_pci_driver);
}
module_init(b1pci_init);
module_exit(b1pci_exit);
| gpl-2.0 |
Freack-v/android_kernel_eagle | net/mac80211/rate.c | 5004 | 13317 | /*
* Copyright 2002-2005, Instant802 Networks, Inc.
* Copyright 2005-2006, Devicescape Software, Inc.
* Copyright (c) 2006 Jiri Benc <jbenc@suse.cz>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/kernel.h>
#include <linux/rtnetlink.h>
#include <linux/slab.h>
#include <linux/module.h>
#include "rate.h"
#include "ieee80211_i.h"
#include "debugfs.h"
/* Registry entry linking a rate-control algorithm into rate_ctrl_algs. */
struct rate_control_alg {
struct list_head list;
struct rate_control_ops *ops;
};
/* List of registered algorithms, guarded by rate_ctrl_mutex. */
static LIST_HEAD(rate_ctrl_algs);
static DEFINE_MUTEX(rate_ctrl_mutex);
/* Writable module parameter selecting the default algorithm by name. */
static char *ieee80211_default_rc_algo = CONFIG_MAC80211_RC_DEFAULT;
module_param(ieee80211_default_rc_algo, charp, 0644);
MODULE_PARM_DESC(ieee80211_default_rc_algo,
"Default rate control algorithm for mac80211 to use");
/*
 * Register a rate-control algorithm under its unique name.
 * Returns 0, -EINVAL for a nameless ops, -EALREADY for a duplicate
 * name, or -ENOMEM.
 */
int ieee80211_rate_control_register(struct rate_control_ops *ops)
{
struct rate_control_alg *alg;
if (!ops->name)
return -EINVAL;
mutex_lock(&rate_ctrl_mutex);
list_for_each_entry(alg, &rate_ctrl_algs, list) {
if (!strcmp(alg->ops->name, ops->name)) {
/* don't register an algorithm twice */
WARN_ON(1);
mutex_unlock(&rate_ctrl_mutex);
return -EALREADY;
}
}
alg = kzalloc(sizeof(*alg), GFP_KERNEL);
if (alg == NULL) {
mutex_unlock(&rate_ctrl_mutex);
return -ENOMEM;
}
alg->ops = ops;
list_add_tail(&alg->list, &rate_ctrl_algs);
mutex_unlock(&rate_ctrl_mutex);
return 0;
}
EXPORT_SYMBOL(ieee80211_rate_control_register);
/* Remove a previously registered algorithm; silently ignores unknown ops. */
void ieee80211_rate_control_unregister(struct rate_control_ops *ops)
{
struct rate_control_alg *alg;
mutex_lock(&rate_ctrl_mutex);
list_for_each_entry(alg, &rate_ctrl_algs, list) {
if (alg->ops == ops) {
list_del(&alg->list);
kfree(alg);
break;
}
}
mutex_unlock(&rate_ctrl_mutex);
}
EXPORT_SYMBOL(ieee80211_rate_control_unregister);
/*
 * Look up a registered algorithm by name and take a reference on its
 * owning module.  Returns the ops, or NULL if the name is unknown or
 * the module reference could not be taken.
 */
static struct rate_control_ops *
ieee80211_try_rate_control_ops_get(const char *name)
{
struct rate_control_alg *alg;
struct rate_control_ops *ops = NULL;
if (!name)
return NULL;
mutex_lock(&rate_ctrl_mutex);
list_for_each_entry(alg, &rate_ctrl_algs, list) {
if (!strcmp(alg->ops->name, name))
if (try_module_get(alg->ops->module)) {
ops = alg->ops;
break;
}
}
mutex_unlock(&rate_ctrl_mutex);
return ops;
}
/* Get the rate control algorithm. */
/*
 * Resolve @name (or the module-parameter default when @name is NULL)
 * to a referenced rate_control_ops, loading the "rc80211_<name>"
 * module on demand and falling back to the default and then the
 * built-in algorithm.  Returns NULL if nothing can be resolved.
 */
static struct rate_control_ops *
ieee80211_rate_control_ops_get(const char *name)
{
struct rate_control_ops *ops;
const char *alg_name;
/* keep the default-algo parameter stable while we dereference it */
kparam_block_sysfs_write(ieee80211_default_rc_algo);
if (!name)
alg_name = ieee80211_default_rc_algo;
else
alg_name = name;
ops = ieee80211_try_rate_control_ops_get(alg_name);
if (!ops) {
/* not registered yet: try to load it as a module and retry */
request_module("rc80211_%s", alg_name);
ops = ieee80211_try_rate_control_ops_get(alg_name);
}
if (!ops && name)
/* try default if specific alg requested but not found */
ops = ieee80211_try_rate_control_ops_get(ieee80211_default_rc_algo);
/* try built-in one if specific alg requested but not found */
if (!ops && strlen(CONFIG_MAC80211_RC_DEFAULT))
ops = ieee80211_try_rate_control_ops_get(CONFIG_MAC80211_RC_DEFAULT);
kparam_unblock_sysfs_write(ieee80211_default_rc_algo);
return ops;
}
/* Drop the module reference taken by ieee80211_try_rate_control_ops_get(). */
static void ieee80211_rate_control_ops_put(struct rate_control_ops *ops)
{
module_put(ops->module);
}
#ifdef CONFIG_MAC80211_DEBUGFS
/* debugfs "name" file: reads back the active algorithm's name. */
static ssize_t rcname_read(struct file *file, char __user *userbuf,
size_t count, loff_t *ppos)
{
struct rate_control_ref *ref = file->private_data;
int len = strlen(ref->ops->name);
return simple_read_from_buffer(userbuf, count, ppos,
ref->ops->name, len);
}
static const struct file_operations rcname_ops = {
.read = rcname_read,
.open = simple_open,
.llseek = default_llseek,
};
/*
 * Allocate a rate-control reference for @local: resolve the algorithm
 * by @name, create its debugfs directory and call its alloc() hook.
 * Returns NULL on any failure (goto chain unwinds partial state).
 */
static struct rate_control_ref *rate_control_alloc(const char *name,
struct ieee80211_local *local)
{
struct dentry *debugfsdir = NULL;
struct rate_control_ref *ref;
ref = kmalloc(sizeof(struct rate_control_ref), GFP_KERNEL);
if (!ref)
goto fail_ref;
ref->local = local;
ref->ops = ieee80211_rate_control_ops_get(name);
if (!ref->ops)
goto fail_ops;
#ifdef CONFIG_MAC80211_DEBUGFS
debugfsdir = debugfs_create_dir("rc", local->hw.wiphy->debugfsdir);
local->debugfs.rcdir = debugfsdir;
debugfs_create_file("name", 0400, debugfsdir, ref, &rcname_ops);
#endif
ref->priv = ref->ops->alloc(&local->hw, debugfsdir);
if (!ref->priv)
goto fail_priv;
return ref;
fail_priv:
ieee80211_rate_control_ops_put(ref->ops);
fail_ops:
kfree(ref);
fail_ref:
return NULL;
}
/* Undo rate_control_alloc(): free algorithm state, debugfs and the ref. */
static void rate_control_free(struct rate_control_ref *ctrl_ref)
{
ctrl_ref->ops->free(ctrl_ref->priv);
#ifdef CONFIG_MAC80211_DEBUGFS
debugfs_remove_recursive(ctrl_ref->local->debugfs.rcdir);
ctrl_ref->local->debugfs.rcdir = NULL;
#endif
ieee80211_rate_control_ops_put(ctrl_ref->ops);
kfree(ctrl_ref);
}
static bool rc_no_data_or_no_ack_use_min(struct ieee80211_tx_rate_control *txrc)
{
struct sk_buff *skb = txrc->skb;
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
__le16 fc;
fc = hdr->frame_control;
return (info->flags & (IEEE80211_TX_CTL_NO_ACK |
IEEE80211_TX_CTL_USE_MINRATE)) ||
!ieee80211_is_data(fc);
}
/*
 * Constrain a broadcast low-rate selection to a basic rate: if *idx is
 * not in @basic_rates, bump it up to the next higher basic rate; keep
 * the original selection if none exists.
 */
static void rc_send_low_broadcast(s8 *idx, u32 basic_rates,
struct ieee80211_supported_band *sband)
{
u8 i;
if (basic_rates == 0)
return; /* assume basic rates unknown and accept rate */
if (*idx < 0)
return;
if (basic_rates & (1 << *idx))
return; /* selected rate is a basic rate */
/* NOTE(review): the "<=" bound lets i reach n_bitrates, one past the
 * last real rate; this is harmless only as long as basic_rates never
 * sets that bit — confirm, or tighten to "<". */
for (i = *idx + 1; i <= sband->n_bitrates; i++) {
if (basic_rates & (1 << i)) {
*idx = i;
return;
}
}
/* could not find a basic rate; use original selection */
}
/*
 * Return the lowest rate index supported by @sta that is not a CCK
 * (802.11b) rate, or 0 when no such rate exists.
 */
static inline s8
rate_lowest_non_cck_index(struct ieee80211_supported_band *sband,
			  struct ieee80211_sta *sta)
{
	int i;

	for (i = 0; i < sband->n_bitrates; i++) {
		int kbps100 = sband->bitrates[i].bitrate;

		/* bitrate is in 100 kbps units: 10/20/55/110 are the
		 * CCK rates 1, 2, 5.5 and 11 Mbps — skip them */
		if (kbps100 == 10 || kbps100 == 20 ||
		    kbps100 == 55 || kbps100 == 110)
			continue;

		if (rate_supported(sta, sband->band, i))
			return i;
	}

	/* No matching rate found */
	return 0;
}
/*
 * Handle the "use a low rate" cases before the rate-control algorithm
 * runs: unknown station, missing per-station state, or frames flagged
 * by rc_no_data_or_no_ack_use_min().  Fills info->control.rates[0] and
 * returns true when the rate has been decided here; false hands the
 * decision to the algorithm.
 */
bool rate_control_send_low(struct ieee80211_sta *sta,
void *priv_sta,
struct ieee80211_tx_rate_control *txrc)
{
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(txrc->skb);
struct ieee80211_supported_band *sband = txrc->sband;
int mcast_rate;
if (!sta || !priv_sta || rc_no_data_or_no_ack_use_min(txrc)) {
/* avoid CCK rates on 2.4 GHz when the frame forbids them */
if ((sband->band != IEEE80211_BAND_2GHZ) ||
!(info->flags & IEEE80211_TX_CTL_NO_CCK_RATE))
info->control.rates[0].idx =
rate_lowest_index(txrc->sband, sta);
else
info->control.rates[0].idx =
rate_lowest_non_cck_index(txrc->sband, sta);
/* no retries for no-ACK frames */
info->control.rates[0].count =
(info->flags & IEEE80211_TX_CTL_NO_ACK) ?
1 : txrc->hw->max_rate_tries;
if (!sta && txrc->bss) {
/* configured multicast rate wins; else force a basic rate */
mcast_rate = txrc->bss_conf->mcast_rate[sband->band];
if (mcast_rate > 0) {
info->control.rates[0].idx = mcast_rate - 1;
return true;
}
rc_send_low_broadcast(&info->control.rates[0].idx,
txrc->bss_conf->basic_rates,
sband);
}
return true;
}
return false;
}
EXPORT_SYMBOL(rate_control_send_low);
/*
 * Constrain @rate to the legacy-rate bitmask @mask: prefer the highest
 * allowed rate at or below the current selection, otherwise the lowest
 * allowed rate above it.  Returns false if the mask permits nothing.
 */
static bool rate_idx_match_legacy_mask(struct ieee80211_tx_rate *rate,
				       int n_bitrates, u32 mask)
{
	int i;

	/* Scan downwards from the selected rate first. */
	for (i = rate->idx; i >= 0; i--) {
		if (mask & (1 << i)) {
			rate->idx = i;
			return true;
		}
	}

	/* Nothing allowed at or below; scan upwards instead. */
	for (i = rate->idx + 1; i < n_bitrates; i++) {
		if (mask & (1 << i)) {
			rate->idx = i;
			return true;
		}
	}

	return false;
}
/*
 * Constrain an HT rate to the per-byte MCS bitmask: prefer the highest
 * allowed MCS at or below rate->idx, otherwise the lowest allowed MCS
 * above it.  Returns false if the mask permits no MCS at all.
 */
static bool rate_idx_match_mcs_mask(struct ieee80211_tx_rate *rate,
u8 mcs_mask[IEEE80211_HT_MCS_MASK_LEN])
{
int i, j;
int ridx, rbit;
/* byte index / bit position of the current MCS in the mask */
ridx = rate->idx / 8;
rbit = rate->idx % 8;
/* sanity check */
if (ridx < 0 || ridx >= IEEE80211_HT_MCS_MASK_LEN)
return false;
/* See whether the selected rate or anything below it is allowed. */
for (i = ridx; i >= 0; i--) {
for (j = rbit; j >= 0; j--)
if (mcs_mask[i] & BIT(j)) {
rate->idx = i * 8 + j;
return true;
}
/* lower bytes are scanned from their top bit */
rbit = 7;
}
/* Try to find a higher rate that would be allowed */
ridx = (rate->idx + 1) / 8;
rbit = (rate->idx + 1) % 8;
for (i = ridx; i < IEEE80211_HT_MCS_MASK_LEN; i++) {
for (j = rbit; j < 8; j++)
if (mcs_mask[i] & BIT(j)) {
rate->idx = i * 8 + j;
return true;
}
/* higher bytes are scanned from bit 0 */
rbit = 0;
}
return false;
}
/*
 * Enforce the user-configured TX rate masks on a single candidate rate.
 *
 * @rate:     rate entry, fixed up in place
 * @txrc:     TX rate control context (supplies sband and bss_conf)
 * @mask:     bitmap of allowed legacy bitrate indexes
 * @mcs_mask: per-byte bitmap of allowed HT MCS indexes
 *
 * If the selected rate is excluded by the relevant mask, first try an
 * allowed rate of the same family, then fall back to the other family
 * (HT <-> legacy) where that is valid.  Protection-related flags are
 * carried over to the replacement rate.  If nothing matches at all the
 * rate is deliberately left untouched — see the comment at the end.
 */
static void rate_idx_match_mask(struct ieee80211_tx_rate *rate,
struct ieee80211_tx_rate_control *txrc,
u32 mask,
u8 mcs_mask[IEEE80211_HT_MCS_MASK_LEN])
{
struct ieee80211_tx_rate alt_rate;
/* handle HT rates */
if (rate->flags & IEEE80211_TX_RC_MCS) {
if (rate_idx_match_mcs_mask(rate, mcs_mask))
return;
/* also try the legacy rates. */
alt_rate.idx = 0;
/* keep protection flags (but drop MCS/40 MHz: this is legacy now) */
alt_rate.flags = rate->flags &
(IEEE80211_TX_RC_USE_RTS_CTS |
IEEE80211_TX_RC_USE_CTS_PROTECT |
IEEE80211_TX_RC_USE_SHORT_PREAMBLE);
alt_rate.count = rate->count;
if (rate_idx_match_legacy_mask(&alt_rate,
txrc->sband->n_bitrates,
mask)) {
/* a legacy rate satisfied the mask: install it */
*rate = alt_rate;
return;
}
} else {
struct sk_buff *skb = txrc->skb;
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
__le16 fc;
/* handle legacy rates */
if (rate_idx_match_legacy_mask(rate, txrc->sband->n_bitrates,
mask))
return;
/* if HT BSS, and we handle a data frame, also try HT rates */
if (txrc->bss_conf->channel_type == NL80211_CHAN_NO_HT)
return;
fc = hdr->frame_control;
/* only data frames may be sent at MCS rates */
if (!ieee80211_is_data(fc))
return;
alt_rate.idx = 0;
/* keep protection flags */
alt_rate.flags = rate->flags &
(IEEE80211_TX_RC_USE_RTS_CTS |
IEEE80211_TX_RC_USE_CTS_PROTECT |
IEEE80211_TX_RC_USE_SHORT_PREAMBLE);
alt_rate.count = rate->count;
alt_rate.flags |= IEEE80211_TX_RC_MCS;
/* on an HT40 channel the replacement may use the full width */
if ((txrc->bss_conf->channel_type == NL80211_CHAN_HT40MINUS) ||
(txrc->bss_conf->channel_type == NL80211_CHAN_HT40PLUS))
alt_rate.flags |= IEEE80211_TX_RC_40_MHZ_WIDTH;
if (rate_idx_match_mcs_mask(&alt_rate, mcs_mask)) {
/* an MCS rate satisfied the mask: install it */
*rate = alt_rate;
return;
}
}
/*
* Uh.. No suitable rate exists. This should not really happen with
* sane TX rate mask configurations. However, should someone manage to
* configure supported rates and TX rate mask in incompatible way,
* allow the frame to be transmitted with whatever the rate control
* selected.
*/
}
/*
 * Fill in the TX rate table for one frame.
 *
 * @sdata: transmitting interface
 * @sta:   destination station, or NULL (e.g. multicast); only used for
 *         rate control when WLAN_STA_RATE_CONTROL is set on it
 * @txrc:  TX rate control context (skb, sband, hw, bss_conf)
 *
 * Resets all rate slots, lets the rate-control algorithm pick rates
 * (unless the hardware has its own rate control), then enforces the
 * user-configured legacy/MCS rate masks on each selected slot.
 */
void rate_control_get_rate(struct ieee80211_sub_if_data *sdata,
struct sta_info *sta,
struct ieee80211_tx_rate_control *txrc)
{
struct rate_control_ref *ref = sdata->local->rate_ctrl;
void *priv_sta = NULL;
struct ieee80211_sta *ista = NULL;
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(txrc->skb);
int i;
u32 mask;
u8 mcs_mask[IEEE80211_HT_MCS_MASK_LEN];
/* only hand the station to the algorithm once it is under RC */
if (sta && test_sta_flag(sta, WLAN_STA_RATE_CONTROL)) {
ista = &sta->sta;
priv_sta = sta->rate_ctrl_priv;
}
/* start from a clean slate: idx == -1 marks an unused slot */
for (i = 0; i < IEEE80211_TX_MAX_RATES; i++) {
info->control.rates[i].idx = -1;
info->control.rates[i].flags = 0;
info->control.rates[i].count = 0;
}
/* hardware rate control: the driver fills the rates itself */
if (sdata->local->hw.flags & IEEE80211_HW_HAS_RATE_CONTROL)
return;
ref->ops->get_rate(ref->priv, ista, priv_sta, txrc);
/*
* Try to enforce the rateidx mask the user wanted. skip this if the
* default mask (allow all rates) is used to save some processing for
* the common case.
*/
mask = sdata->rc_rateidx_mask[info->band];
memcpy(mcs_mask, sdata->rc_rateidx_mcs_mask[info->band],
sizeof(mcs_mask));
if (mask != (1 << txrc->sband->n_bitrates) - 1) {
if (sta) {
/* Filter out rates that the STA does not support */
mask &= sta->sta.supp_rates[info->band];
for (i = 0; i < sizeof(mcs_mask); i++)
mcs_mask[i] &= sta->sta.ht_cap.mcs.rx_mask[i];
}
/*
* Make sure the rate index selected for each TX rate is
* included in the configured mask and change the rate indexes
* if needed.
*/
for (i = 0; i < IEEE80211_TX_MAX_RATES; i++) {
/* Skip invalid rates */
if (info->control.rates[i].idx < 0)
break;
rate_idx_match_mask(&info->control.rates[i], txrc,
mask, mcs_mask);
}
}
/* the algorithm must always leave a valid first rate */
BUG_ON(info->control.rates[0].idx < 0);
}
int ieee80211_init_rate_ctrl_alg(struct ieee80211_local *local,
const char *name)
{
struct rate_control_ref *ref;
ASSERT_RTNL();
if (local->open_count)
return -EBUSY;
if (local->hw.flags & IEEE80211_HW_HAS_RATE_CONTROL) {
if (WARN_ON(!local->ops->set_rts_threshold))
return -EINVAL;
return 0;
}
ref = rate_control_alloc(name, local);
if (!ref) {
wiphy_warn(local->hw.wiphy,
"Failed to select rate control algorithm\n");
return -ENOENT;
}
WARN_ON(local->rate_ctrl);
local->rate_ctrl = ref;
wiphy_debug(local->hw.wiphy, "Selected rate control algorithm '%s'\n",
ref->ops->name);
return 0;
}
/*
 * Tear down the rate control algorithm attached to @local, if any.
 * local->rate_ctrl is cleared before the reference is released so
 * that no further lookups can observe the dying algorithm.
 */
void rate_control_deinitialize(struct ieee80211_local *local)
{
	struct rate_control_ref *ref = local->rate_ctrl;

	if (ref == NULL)
		return;

	local->rate_ctrl = NULL;
	rate_control_free(ref);
}
| gpl-2.0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.