repo_name string | path string | copies string | size string | content string | license string |
|---|---|---|---|---|---|
pronobis/linux_kernel_arm_N8000 | sound/drivers/mpu401/mpu401_uart.c | 3748 | 16837 | /*
* Copyright (c) by Jaroslav Kysela <perex@perex.cz>
* Routines for control of MPU-401 in UART mode
*
* MPU-401 supports a UART mode which is not capable of generating
* transmit interrupts, so output is done via polling. Also, if irq < 0,
* input is done via polling as well. Do not expect good performance.
*
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
* 13-03-2003:
* Added support for different kinds of hardware I/O. Built-in choices
* are port and mmio. For other kinds of I/O, set mpu->read and
* mpu->write to your own I/O functions.
*
*/
#include <asm/io.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/ioport.h>
#include <linux/interrupt.h>
#include <linux/errno.h>
#include <sound/core.h>
#include <sound/mpu401.h>
MODULE_AUTHOR("Jaroslav Kysela <perex@perex.cz>");
MODULE_DESCRIPTION("Routines for control of MPU-401 in UART mode");
MODULE_LICENSE("GPL");
static void snd_mpu401_uart_input_read(struct snd_mpu401 * mpu);
static void snd_mpu401_uart_output_write(struct snd_mpu401 * mpu);
/*
* status register polling helpers
*/
#define snd_mpu401_input_avail(mpu) \
(!(mpu->read(mpu, MPU401C(mpu)) & MPU401_RX_EMPTY))
#define snd_mpu401_output_ready(mpu) \
(!(mpu->read(mpu, MPU401C(mpu)) & MPU401_TX_FULL))
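/*
* Illustrative note (not part of the driver): these helpers poll the
* UART status port; a typical bounded busy-wait looks like
*
*	int timeout = 1000;
*	while (!snd_mpu401_output_ready(mpu) && --timeout > 0)
*		udelay(10);
*
* MPU401C(mpu) addresses the command/status port and MPU401D(mpu) the
* data port of the UART.
*/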
/* Built-in low-level I/O */
static void mpu401_write_port(struct snd_mpu401 *mpu, unsigned char data,
unsigned long addr)
{
outb(data, addr);
}
static unsigned char mpu401_read_port(struct snd_mpu401 *mpu,
unsigned long addr)
{
return inb(addr);
}
static void mpu401_write_mmio(struct snd_mpu401 *mpu, unsigned char data,
unsigned long addr)
{
writeb(data, (void __iomem *)addr);
}
static unsigned char mpu401_read_mmio(struct snd_mpu401 *mpu,
unsigned long addr)
{
return readb((void __iomem *)addr);
}
/* drain any pending bytes from the Rx FIFO */
static void snd_mpu401_uart_clear_rx(struct snd_mpu401 *mpu)
{
int timeout = 100000;
for (; timeout > 0 && snd_mpu401_input_avail(mpu); timeout--)
mpu->read(mpu, MPU401D(mpu));
#ifdef CONFIG_SND_DEBUG
if (timeout <= 0)
snd_printk(KERN_ERR "cmd: clear rx timeout (status = 0x%x)\n",
mpu->read(mpu, MPU401C(mpu)));
#endif
}
static void uart_interrupt_tx(struct snd_mpu401 *mpu)
{
unsigned long flags;
if (test_bit(MPU401_MODE_BIT_OUTPUT, &mpu->mode) &&
test_bit(MPU401_MODE_BIT_OUTPUT_TRIGGER, &mpu->mode)) {
spin_lock_irqsave(&mpu->output_lock, flags);
snd_mpu401_uart_output_write(mpu);
spin_unlock_irqrestore(&mpu->output_lock, flags);
}
}
static void _snd_mpu401_uart_interrupt(struct snd_mpu401 *mpu)
{
unsigned long flags;
if (mpu->info_flags & MPU401_INFO_INPUT) {
spin_lock_irqsave(&mpu->input_lock, flags);
if (test_bit(MPU401_MODE_BIT_INPUT, &mpu->mode))
snd_mpu401_uart_input_read(mpu);
else
snd_mpu401_uart_clear_rx(mpu);
spin_unlock_irqrestore(&mpu->input_lock, flags);
}
if (! (mpu->info_flags & MPU401_INFO_TX_IRQ))
/* OK, for better Tx performance, try to do some output
when input is done */
uart_interrupt_tx(mpu);
}
/**
* snd_mpu401_uart_interrupt - generic MPU401-UART interrupt handler
* @irq: the irq number
* @dev_id: mpu401 instance
*
* Processes the interrupt for MPU401-UART i/o.
*/
irqreturn_t snd_mpu401_uart_interrupt(int irq, void *dev_id)
{
struct snd_mpu401 *mpu = dev_id;
if (mpu == NULL)
return IRQ_NONE;
_snd_mpu401_uart_interrupt(mpu);
return IRQ_HANDLED;
}
EXPORT_SYMBOL(snd_mpu401_uart_interrupt);
/**
* snd_mpu401_uart_interrupt_tx - generic MPU401-UART transmit irq handler
* @irq: the irq number
* @dev_id: mpu401 instance
*
* Processes the interrupt for MPU401-UART output.
*/
irqreturn_t snd_mpu401_uart_interrupt_tx(int irq, void *dev_id)
{
struct snd_mpu401 *mpu = dev_id;
if (mpu == NULL)
return IRQ_NONE;
uart_interrupt_tx(mpu);
return IRQ_HANDLED;
}
EXPORT_SYMBOL(snd_mpu401_uart_interrupt_tx);
/*
* timer callback
* reprogram the timer and call the interrupt job
*/
static void snd_mpu401_uart_timer(unsigned long data)
{
struct snd_mpu401 *mpu = (struct snd_mpu401 *)data;
unsigned long flags;
spin_lock_irqsave(&mpu->timer_lock, flags);
/*mpu->mode |= MPU401_MODE_TIMER;*/
mpu->timer.expires = 1 + jiffies;
add_timer(&mpu->timer);
spin_unlock_irqrestore(&mpu->timer_lock, flags);
if (mpu->rmidi)
_snd_mpu401_uart_interrupt(mpu);
}
/*
* initialize the timer callback if not programmed yet
*/
static void snd_mpu401_uart_add_timer (struct snd_mpu401 *mpu, int input)
{
unsigned long flags;
spin_lock_irqsave (&mpu->timer_lock, flags);
if (mpu->timer_invoked == 0) {
init_timer(&mpu->timer);
mpu->timer.data = (unsigned long)mpu;
mpu->timer.function = snd_mpu401_uart_timer;
mpu->timer.expires = 1 + jiffies;
add_timer(&mpu->timer);
}
mpu->timer_invoked |= input ? MPU401_MODE_INPUT_TIMER :
MPU401_MODE_OUTPUT_TIMER;
spin_unlock_irqrestore (&mpu->timer_lock, flags);
}
/*
* remove the timer callback if still active
*/
static void snd_mpu401_uart_remove_timer (struct snd_mpu401 *mpu, int input)
{
unsigned long flags;
spin_lock_irqsave (&mpu->timer_lock, flags);
if (mpu->timer_invoked) {
mpu->timer_invoked &= input ? ~MPU401_MODE_INPUT_TIMER :
~MPU401_MODE_OUTPUT_TIMER;
if (! mpu->timer_invoked)
del_timer(&mpu->timer);
}
spin_unlock_irqrestore (&mpu->timer_lock, flags);
}
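/*
* Illustrative note: MPU401_MODE_INPUT_TIMER and MPU401_MODE_OUTPUT_TIMER
* act as a two-bit reference count in mpu->timer_invoked, so the polling
* timer is shared between both directions:
*
*	snd_mpu401_uart_add_timer(mpu, 1);	- input starts polling
*	snd_mpu401_uart_add_timer(mpu, 0);	- output joins, timer reused
*	snd_mpu401_uart_remove_timer(mpu, 1);	- timer kept for output
*	snd_mpu401_uart_remove_timer(mpu, 0);	- last user, del_timer()
*/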
/*
* send a UART command
* return zero if successful, non-zero for some errors
*/
static int snd_mpu401_uart_cmd(struct snd_mpu401 * mpu, unsigned char cmd,
int ack)
{
unsigned long flags;
int timeout, ok;
spin_lock_irqsave(&mpu->input_lock, flags);
if (mpu->hardware != MPU401_HW_TRID4DWAVE) {
mpu->write(mpu, 0x00, MPU401D(mpu));
/*snd_mpu401_uart_clear_rx(mpu);*/
}
/* ok. standard MPU-401 initialization */
if (mpu->hardware != MPU401_HW_SB) {
for (timeout = 1000; timeout > 0 &&
!snd_mpu401_output_ready(mpu); timeout--)
udelay(10);
#ifdef CONFIG_SND_DEBUG
if (!timeout)
snd_printk(KERN_ERR "cmd: tx timeout (status = 0x%x)\n",
mpu->read(mpu, MPU401C(mpu)));
#endif
}
mpu->write(mpu, cmd, MPU401C(mpu));
if (ack && !(mpu->info_flags & MPU401_INFO_NO_ACK)) {
ok = 0;
timeout = 10000;
while (!ok && timeout-- > 0) {
if (snd_mpu401_input_avail(mpu)) {
if (mpu->read(mpu, MPU401D(mpu)) == MPU401_ACK)
ok = 1;
}
}
if (!ok && mpu->read(mpu, MPU401D(mpu)) == MPU401_ACK)
ok = 1;
} else
ok = 1;
spin_unlock_irqrestore(&mpu->input_lock, flags);
if (!ok) {
snd_printk(KERN_ERR "cmd: 0x%x failed at 0x%lx "
"(status = 0x%x, data = 0x%x)\n", cmd, mpu->port,
mpu->read(mpu, MPU401C(mpu)),
mpu->read(mpu, MPU401D(mpu)));
return 1;
}
return 0;
}
static int snd_mpu401_do_reset(struct snd_mpu401 *mpu)
{
if (snd_mpu401_uart_cmd(mpu, MPU401_RESET, 1))
return -EIO;
if (snd_mpu401_uart_cmd(mpu, MPU401_ENTER_UART, 0))
return -EIO;
return 0;
}
/*
* input/output open/close - protected by open_mutex in rawmidi.c
*/
static int snd_mpu401_uart_input_open(struct snd_rawmidi_substream *substream)
{
struct snd_mpu401 *mpu;
int err;
mpu = substream->rmidi->private_data;
if (mpu->open_input && (err = mpu->open_input(mpu)) < 0)
return err;
if (! test_bit(MPU401_MODE_BIT_OUTPUT, &mpu->mode)) {
if (snd_mpu401_do_reset(mpu) < 0)
goto error_out;
}
mpu->substream_input = substream;
set_bit(MPU401_MODE_BIT_INPUT, &mpu->mode);
return 0;
error_out:
if (mpu->open_input && mpu->close_input)
mpu->close_input(mpu);
return -EIO;
}
static int snd_mpu401_uart_output_open(struct snd_rawmidi_substream *substream)
{
struct snd_mpu401 *mpu;
int err;
mpu = substream->rmidi->private_data;
if (mpu->open_output && (err = mpu->open_output(mpu)) < 0)
return err;
if (! test_bit(MPU401_MODE_BIT_INPUT, &mpu->mode)) {
if (snd_mpu401_do_reset(mpu) < 0)
goto error_out;
}
mpu->substream_output = substream;
set_bit(MPU401_MODE_BIT_OUTPUT, &mpu->mode);
return 0;
error_out:
if (mpu->open_output && mpu->close_output)
mpu->close_output(mpu);
return -EIO;
}
static int snd_mpu401_uart_input_close(struct snd_rawmidi_substream *substream)
{
struct snd_mpu401 *mpu;
int err = 0;
mpu = substream->rmidi->private_data;
clear_bit(MPU401_MODE_BIT_INPUT, &mpu->mode);
mpu->substream_input = NULL;
if (! test_bit(MPU401_MODE_BIT_OUTPUT, &mpu->mode))
err = snd_mpu401_uart_cmd(mpu, MPU401_RESET, 0);
if (mpu->close_input)
mpu->close_input(mpu);
if (err)
return -EIO;
return 0;
}
static int snd_mpu401_uart_output_close(struct snd_rawmidi_substream *substream)
{
struct snd_mpu401 *mpu;
int err = 0;
mpu = substream->rmidi->private_data;
clear_bit(MPU401_MODE_BIT_OUTPUT, &mpu->mode);
mpu->substream_output = NULL;
if (! test_bit(MPU401_MODE_BIT_INPUT, &mpu->mode))
err = snd_mpu401_uart_cmd(mpu, MPU401_RESET, 0);
if (mpu->close_output)
mpu->close_output(mpu);
if (err)
return -EIO;
return 0;
}
/*
* trigger input callback
*/
static void
snd_mpu401_uart_input_trigger(struct snd_rawmidi_substream *substream, int up)
{
unsigned long flags;
struct snd_mpu401 *mpu;
int max = 64;
mpu = substream->rmidi->private_data;
if (up) {
if (! test_and_set_bit(MPU401_MODE_BIT_INPUT_TRIGGER,
&mpu->mode)) {
/* first time - flush FIFO */
while (max-- > 0)
mpu->read(mpu, MPU401D(mpu));
if (mpu->irq < 0)
snd_mpu401_uart_add_timer(mpu, 1);
}
/* read data in advance */
spin_lock_irqsave(&mpu->input_lock, flags);
snd_mpu401_uart_input_read(mpu);
spin_unlock_irqrestore(&mpu->input_lock, flags);
} else {
if (mpu->irq < 0)
snd_mpu401_uart_remove_timer(mpu, 1);
clear_bit(MPU401_MODE_BIT_INPUT_TRIGGER, &mpu->mode);
}
}
/*
* transfer input pending data
* call with input_lock spinlock held
*/
static void snd_mpu401_uart_input_read(struct snd_mpu401 * mpu)
{
int max = 128;
unsigned char byte;
while (max-- > 0) {
if (! snd_mpu401_input_avail(mpu))
break; /* input not available */
byte = mpu->read(mpu, MPU401D(mpu));
if (test_bit(MPU401_MODE_BIT_INPUT_TRIGGER, &mpu->mode))
snd_rawmidi_receive(mpu->substream_input, &byte, 1);
}
}
/*
* Tx FIFO sizes:
* CS4237B - 16 bytes
* AudioDrive ES1688 - 12 bytes
* S3 SonicVibes - 8 bytes
* SoundBlaster AWE 64 - 2 bytes (ugly hardware)
*/
/*
* write output pending bytes
* call with output_lock spinlock held
*/
static void snd_mpu401_uart_output_write(struct snd_mpu401 * mpu)
{
unsigned char byte;
int max = 256;
do {
if (snd_rawmidi_transmit_peek(mpu->substream_output,
&byte, 1) == 1) {
/*
* Try twice because there is hardware that insists on
* setting the output busy bit after each write.
*/
if (!snd_mpu401_output_ready(mpu) &&
!snd_mpu401_output_ready(mpu))
break; /* Tx FIFO full - try again later */
mpu->write(mpu, byte, MPU401D(mpu));
snd_rawmidi_transmit_ack(mpu->substream_output, 1);
} else {
snd_mpu401_uart_remove_timer (mpu, 0);
break; /* no other data - leave the tx loop */
}
} while (--max > 0);
}
/*
* output trigger callback
*/
static void
snd_mpu401_uart_output_trigger(struct snd_rawmidi_substream *substream, int up)
{
unsigned long flags;
struct snd_mpu401 *mpu;
mpu = substream->rmidi->private_data;
if (up) {
set_bit(MPU401_MODE_BIT_OUTPUT_TRIGGER, &mpu->mode);
/* try to add the timer at each output trigger,
* since the output timer might have been removed in
* snd_mpu401_uart_output_write().
*/
if (! (mpu->info_flags & MPU401_INFO_TX_IRQ))
snd_mpu401_uart_add_timer(mpu, 0);
/* output pending data */
spin_lock_irqsave(&mpu->output_lock, flags);
snd_mpu401_uart_output_write(mpu);
spin_unlock_irqrestore(&mpu->output_lock, flags);
} else {
if (! (mpu->info_flags & MPU401_INFO_TX_IRQ))
snd_mpu401_uart_remove_timer(mpu, 0);
clear_bit(MPU401_MODE_BIT_OUTPUT_TRIGGER, &mpu->mode);
}
}
/*
* rawmidi callbacks
*/
static struct snd_rawmidi_ops snd_mpu401_uart_output =
{
.open = snd_mpu401_uart_output_open,
.close = snd_mpu401_uart_output_close,
.trigger = snd_mpu401_uart_output_trigger,
};
static struct snd_rawmidi_ops snd_mpu401_uart_input =
{
.open = snd_mpu401_uart_input_open,
.close = snd_mpu401_uart_input_close,
.trigger = snd_mpu401_uart_input_trigger,
};
static void snd_mpu401_uart_free(struct snd_rawmidi *rmidi)
{
struct snd_mpu401 *mpu = rmidi->private_data;
if (mpu->irq_flags && mpu->irq >= 0)
free_irq(mpu->irq, (void *) mpu);
release_and_free_resource(mpu->res);
kfree(mpu);
}
/**
* snd_mpu401_uart_new - create an MPU401-UART instance
* @card: the card instance
* @device: the device index, zero-based
* @hardware: the hardware type, MPU401_HW_XXXX
* @port: the base address of MPU401 port
* @info_flags: bitflags MPU401_INFO_XXX
* @irq: the irq number, -1 if no interrupt for mpu
* @irq_flags: the irq request flags (IRQF_XXX), 0 if irq was already reserved.
* @rrawmidi: the pointer to store the new rawmidi instance
*
* Creates a new MPU-401 instance.
*
* Note that the rawmidi instance is returned via the rrawmidi argument,
* not the mpu401 instance itself. To access the mpu401 instance,
* cast rawmidi->private_data (to struct snd_mpu401 *).
*
* Returns zero if successful, or a negative error code.
*/
int snd_mpu401_uart_new(struct snd_card *card, int device,
unsigned short hardware,
unsigned long port,
unsigned int info_flags,
int irq, int irq_flags,
struct snd_rawmidi ** rrawmidi)
{
struct snd_mpu401 *mpu;
struct snd_rawmidi *rmidi;
int in_enable, out_enable;
int err;
if (rrawmidi)
*rrawmidi = NULL;
if (! (info_flags & (MPU401_INFO_INPUT | MPU401_INFO_OUTPUT)))
info_flags |= MPU401_INFO_INPUT | MPU401_INFO_OUTPUT;
in_enable = (info_flags & MPU401_INFO_INPUT) ? 1 : 0;
out_enable = (info_flags & MPU401_INFO_OUTPUT) ? 1 : 0;
if ((err = snd_rawmidi_new(card, "MPU-401U", device,
out_enable, in_enable, &rmidi)) < 0)
return err;
mpu = kzalloc(sizeof(*mpu), GFP_KERNEL);
if (mpu == NULL) {
snd_printk(KERN_ERR "mpu401_uart: cannot allocate\n");
snd_device_free(card, rmidi);
return -ENOMEM;
}
rmidi->private_data = mpu;
rmidi->private_free = snd_mpu401_uart_free;
spin_lock_init(&mpu->input_lock);
spin_lock_init(&mpu->output_lock);
spin_lock_init(&mpu->timer_lock);
mpu->hardware = hardware;
if (! (info_flags & MPU401_INFO_INTEGRATED)) {
int res_size = hardware == MPU401_HW_PC98II ? 4 : 2;
mpu->res = request_region(port, res_size, "MPU401 UART");
if (mpu->res == NULL) {
snd_printk(KERN_ERR "mpu401_uart: "
"unable to grab port 0x%lx size %d\n",
port, res_size);
snd_device_free(card, rmidi);
return -EBUSY;
}
}
if (info_flags & MPU401_INFO_MMIO) {
mpu->write = mpu401_write_mmio;
mpu->read = mpu401_read_mmio;
} else {
mpu->write = mpu401_write_port;
mpu->read = mpu401_read_port;
}
mpu->port = port;
if (hardware == MPU401_HW_PC98II)
mpu->cport = port + 2;
else
mpu->cport = port + 1;
if (irq >= 0 && irq_flags) {
if (request_irq(irq, snd_mpu401_uart_interrupt, irq_flags,
"MPU401 UART", (void *) mpu)) {
snd_printk(KERN_ERR "mpu401_uart: "
"unable to grab IRQ %d\n", irq);
snd_device_free(card, rmidi);
return -EBUSY;
}
}
mpu->info_flags = info_flags;
mpu->irq = irq;
mpu->irq_flags = irq_flags;
if (card->shortname[0])
snprintf(rmidi->name, sizeof(rmidi->name), "%s MIDI",
card->shortname);
else
sprintf(rmidi->name, "MPU-401 MIDI %d-%d",card->number, device);
if (out_enable) {
snd_rawmidi_set_ops(rmidi, SNDRV_RAWMIDI_STREAM_OUTPUT,
&snd_mpu401_uart_output);
rmidi->info_flags |= SNDRV_RAWMIDI_INFO_OUTPUT;
}
if (in_enable) {
snd_rawmidi_set_ops(rmidi, SNDRV_RAWMIDI_STREAM_INPUT,
&snd_mpu401_uart_input);
rmidi->info_flags |= SNDRV_RAWMIDI_INFO_INPUT;
if (out_enable)
rmidi->info_flags |= SNDRV_RAWMIDI_INFO_DUPLEX;
}
mpu->rmidi = rmidi;
if (rrawmidi)
*rrawmidi = rmidi;
return 0;
}
EXPORT_SYMBOL(snd_mpu401_uart_new);
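/*
* Illustrative usage sketch (hypothetical card driver, not part of this
* file):
*
*	struct snd_rawmidi *rmidi;
*	int err = snd_mpu401_uart_new(card, 0, MPU401_HW_MPU401, port,
*				      MPU401_INFO_INPUT | MPU401_INFO_OUTPUT,
*				      irq, IRQF_SHARED, &rmidi);
*	if (err < 0)
*		return err;
*
* The mpu401 instance is then reachable as rmidi->private_data.
*/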
/*
* INIT part
*/
static int __init alsa_mpu401_uart_init(void)
{
return 0;
}
static void __exit alsa_mpu401_uart_exit(void)
{
}
module_init(alsa_mpu401_uart_init)
module_exit(alsa_mpu401_uart_exit)
| gpl-2.0 |
daliguro/tf101-kernel-test | drivers/input/touchscreen/w90p910_ts.c | 4004 | 8781 | /*
* Copyright (c) 2008 Nuvoton technology corporation.
*
* Wan ZongShun <mcuos.com@gmail.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 of the License.
*
*/
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/io.h>
#include <linux/clk.h>
#include <linux/input.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
/* ADC controller bit defines */
#define ADC_DELAY 0xf00
#define ADC_DOWN 0x01
#define ADC_TSC_Y (0x01 << 8)
#define ADC_TSC_X (0x00 << 8)
#define TSC_FOURWIRE (~(0x03 << 1))
#define ADC_CLK_EN (0x01 << 28) /* ADC clock enable */
#define ADC_READ_CON (0x01 << 12)
#define ADC_CONV (0x01 << 13)
#define ADC_SEMIAUTO (0x01 << 14)
#define ADC_WAITTRIG (0x03 << 14)
#define ADC_RST1 (0x01 << 16)
#define ADC_RST0 (0x00 << 16)
#define ADC_EN (0x01 << 17)
#define ADC_INT (0x01 << 18)
#define WT_INT (0x01 << 20)
#define ADC_INT_EN (0x01 << 21)
#define LVD_INT_EN (0x01 << 22)
#define WT_INT_EN (0x01 << 23)
#define ADC_DIV (0x04 << 1) /* div = 6 */
enum ts_state {
TS_WAIT_NEW_PACKET, /* We are waiting next touch report */
TS_WAIT_X_COORD, /* We are waiting for ADC to report X coord */
TS_WAIT_Y_COORD, /* We are waiting for ADC to report Y coord */
TS_IDLE, /* Input device is closed, don't do anything */
};
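/*
* Illustrative summary of the acquisition cycle driven by this state
* machine: a wait-trigger (pen down) interrupt starts an X conversion,
* X completion starts the Y conversion, and Y completion reports the
* sample and re-arms the wait trigger:
*
*	TS_WAIT_NEW_PACKET -> TS_WAIT_X_COORD -> TS_WAIT_Y_COORD
*	         ^                                     |
*	         +----------- report event ------------+
*/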
struct w90p910_ts {
struct input_dev *input;
struct timer_list timer;
struct clk *clk;
int irq_num;
void __iomem *ts_reg;
spinlock_t lock;
enum ts_state state;
};
static void w90p910_report_event(struct w90p910_ts *w90p910_ts, bool down)
{
struct input_dev *dev = w90p910_ts->input;
if (down) {
input_report_abs(dev, ABS_X,
__raw_readl(w90p910_ts->ts_reg + 0x0c));
input_report_abs(dev, ABS_Y,
__raw_readl(w90p910_ts->ts_reg + 0x10));
}
input_report_key(dev, BTN_TOUCH, down);
input_sync(dev);
}
static void w90p910_prepare_x_reading(struct w90p910_ts *w90p910_ts)
{
unsigned long ctlreg;
__raw_writel(ADC_TSC_X, w90p910_ts->ts_reg + 0x04);
ctlreg = __raw_readl(w90p910_ts->ts_reg);
ctlreg &= ~(ADC_WAITTRIG | WT_INT | WT_INT_EN);
ctlreg |= ADC_SEMIAUTO | ADC_INT_EN | ADC_CONV;
__raw_writel(ctlreg, w90p910_ts->ts_reg);
w90p910_ts->state = TS_WAIT_X_COORD;
}
static void w90p910_prepare_y_reading(struct w90p910_ts *w90p910_ts)
{
unsigned long ctlreg;
__raw_writel(ADC_TSC_Y, w90p910_ts->ts_reg + 0x04);
ctlreg = __raw_readl(w90p910_ts->ts_reg);
ctlreg &= ~(ADC_WAITTRIG | ADC_INT | WT_INT_EN);
ctlreg |= ADC_SEMIAUTO | ADC_INT_EN | ADC_CONV;
__raw_writel(ctlreg, w90p910_ts->ts_reg);
w90p910_ts->state = TS_WAIT_Y_COORD;
}
static void w90p910_prepare_next_packet(struct w90p910_ts *w90p910_ts)
{
unsigned long ctlreg;
ctlreg = __raw_readl(w90p910_ts->ts_reg);
ctlreg &= ~(ADC_INT | ADC_INT_EN | ADC_SEMIAUTO | ADC_CONV);
ctlreg |= ADC_WAITTRIG | WT_INT_EN;
__raw_writel(ctlreg, w90p910_ts->ts_reg);
w90p910_ts->state = TS_WAIT_NEW_PACKET;
}
static irqreturn_t w90p910_ts_interrupt(int irq, void *dev_id)
{
struct w90p910_ts *w90p910_ts = dev_id;
unsigned long flags;
spin_lock_irqsave(&w90p910_ts->lock, flags);
switch (w90p910_ts->state) {
case TS_WAIT_NEW_PACKET:
/*
* The controller only generates interrupts when pen
* is down.
*/
del_timer(&w90p910_ts->timer);
w90p910_prepare_x_reading(w90p910_ts);
break;
case TS_WAIT_X_COORD:
w90p910_prepare_y_reading(w90p910_ts);
break;
case TS_WAIT_Y_COORD:
w90p910_report_event(w90p910_ts, true);
w90p910_prepare_next_packet(w90p910_ts);
mod_timer(&w90p910_ts->timer, jiffies + msecs_to_jiffies(100));
break;
case TS_IDLE:
break;
}
spin_unlock_irqrestore(&w90p910_ts->lock, flags);
return IRQ_HANDLED;
}
static void w90p910_check_pen_up(unsigned long data)
{
struct w90p910_ts *w90p910_ts = (struct w90p910_ts *) data;
unsigned long flags;
spin_lock_irqsave(&w90p910_ts->lock, flags);
if (w90p910_ts->state == TS_WAIT_NEW_PACKET &&
!(__raw_readl(w90p910_ts->ts_reg + 0x04) & ADC_DOWN)) {
w90p910_report_event(w90p910_ts, false);
}
spin_unlock_irqrestore(&w90p910_ts->lock, flags);
}
static int w90p910_open(struct input_dev *dev)
{
struct w90p910_ts *w90p910_ts = input_get_drvdata(dev);
unsigned long val;
/* enable the ADC clock */
clk_enable(w90p910_ts->clk);
__raw_writel(ADC_RST1, w90p910_ts->ts_reg);
msleep(1);
__raw_writel(ADC_RST0, w90p910_ts->ts_reg);
msleep(1);
/* set delay and screen type */
val = __raw_readl(w90p910_ts->ts_reg + 0x04);
__raw_writel(val & TSC_FOURWIRE, w90p910_ts->ts_reg + 0x04);
__raw_writel(ADC_DELAY, w90p910_ts->ts_reg + 0x08);
w90p910_ts->state = TS_WAIT_NEW_PACKET;
wmb();
/* set trigger mode */
val = __raw_readl(w90p910_ts->ts_reg);
val |= ADC_WAITTRIG | ADC_DIV | ADC_EN | WT_INT_EN;
__raw_writel(val, w90p910_ts->ts_reg);
return 0;
}
static void w90p910_close(struct input_dev *dev)
{
struct w90p910_ts *w90p910_ts = input_get_drvdata(dev);
unsigned long val;
/* disable trigger mode */
spin_lock_irq(&w90p910_ts->lock);
w90p910_ts->state = TS_IDLE;
val = __raw_readl(w90p910_ts->ts_reg);
val &= ~(ADC_WAITTRIG | ADC_DIV | ADC_EN | WT_INT_EN | ADC_INT_EN);
__raw_writel(val, w90p910_ts->ts_reg);
spin_unlock_irq(&w90p910_ts->lock);
/* Now that interrupts are shut off we can safely delete timer */
del_timer_sync(&w90p910_ts->timer);
/* stop the ADC clock */
clk_disable(w90p910_ts->clk);
}
static int __devinit w90x900ts_probe(struct platform_device *pdev)
{
struct w90p910_ts *w90p910_ts;
struct input_dev *input_dev;
struct resource *res;
int err;
w90p910_ts = kzalloc(sizeof(struct w90p910_ts), GFP_KERNEL);
input_dev = input_allocate_device();
if (!w90p910_ts || !input_dev) {
err = -ENOMEM;
goto fail1;
}
w90p910_ts->input = input_dev;
w90p910_ts->state = TS_IDLE;
spin_lock_init(&w90p910_ts->lock);
setup_timer(&w90p910_ts->timer, w90p910_check_pen_up,
(unsigned long)w90p910_ts);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res) {
err = -ENXIO;
goto fail1;
}
if (!request_mem_region(res->start, resource_size(res),
pdev->name)) {
err = -EBUSY;
goto fail1;
}
w90p910_ts->ts_reg = ioremap(res->start, resource_size(res));
if (!w90p910_ts->ts_reg) {
err = -ENOMEM;
goto fail2;
}
w90p910_ts->clk = clk_get(&pdev->dev, NULL);
if (IS_ERR(w90p910_ts->clk)) {
err = PTR_ERR(w90p910_ts->clk);
goto fail3;
}
input_dev->name = "W90P910 TouchScreen";
input_dev->phys = "w90p910ts/event0";
input_dev->id.bustype = BUS_HOST;
input_dev->id.vendor = 0x0005;
input_dev->id.product = 0x0001;
input_dev->id.version = 0x0100;
input_dev->dev.parent = &pdev->dev;
input_dev->open = w90p910_open;
input_dev->close = w90p910_close;
input_dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS);
input_dev->keybit[BIT_WORD(BTN_TOUCH)] = BIT_MASK(BTN_TOUCH);
input_set_abs_params(input_dev, ABS_X, 0, 0x400, 0, 0);
input_set_abs_params(input_dev, ABS_Y, 0, 0x400, 0, 0);
input_set_drvdata(input_dev, w90p910_ts);
w90p910_ts->irq_num = platform_get_irq(pdev, 0);
if (request_irq(w90p910_ts->irq_num, w90p910_ts_interrupt,
IRQF_DISABLED, "w90p910ts", w90p910_ts)) {
err = -EBUSY;
goto fail4;
}
err = input_register_device(w90p910_ts->input);
if (err)
goto fail5;
platform_set_drvdata(pdev, w90p910_ts);
return 0;
fail5: free_irq(w90p910_ts->irq_num, w90p910_ts);
fail4: clk_put(w90p910_ts->clk);
fail3: iounmap(w90p910_ts->ts_reg);
fail2: release_mem_region(res->start, resource_size(res));
fail1: input_free_device(input_dev);
kfree(w90p910_ts);
return err;
}
static int __devexit w90x900ts_remove(struct platform_device *pdev)
{
struct w90p910_ts *w90p910_ts = platform_get_drvdata(pdev);
struct resource *res;
free_irq(w90p910_ts->irq_num, w90p910_ts);
del_timer_sync(&w90p910_ts->timer);
iounmap(w90p910_ts->ts_reg);
clk_put(w90p910_ts->clk);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
release_mem_region(res->start, resource_size(res));
input_unregister_device(w90p910_ts->input);
kfree(w90p910_ts);
platform_set_drvdata(pdev, NULL);
return 0;
}
static struct platform_driver w90x900ts_driver = {
.probe = w90x900ts_probe,
.remove = __devexit_p(w90x900ts_remove),
.driver = {
.name = "nuc900-ts",
.owner = THIS_MODULE,
},
};
static int __init w90x900ts_init(void)
{
return platform_driver_register(&w90x900ts_driver);
}
static void __exit w90x900ts_exit(void)
{
platform_driver_unregister(&w90x900ts_driver);
}
module_init(w90x900ts_init);
module_exit(w90x900ts_exit);
MODULE_AUTHOR("Wan ZongShun <mcuos.com@gmail.com>");
MODULE_DESCRIPTION("w90p910 touch screen driver!");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:nuc900-ts");
| gpl-2.0 |
PRJosh/kernel_samsung_mondrianwifi | arch/mips/netlogic/xlp/setup.c | 4516 | 3124 | /*
* Copyright 2003-2011 NetLogic Microsystems, Inc. (NetLogic). All rights
* reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the NetLogic
* license below:
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* THIS SOFTWARE IS PROVIDED BY NETLOGIC ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL NETLOGIC OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
* OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
* IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <linux/kernel.h>
#include <linux/serial_8250.h>
#include <linux/pm.h>
#include <asm/reboot.h>
#include <asm/time.h>
#include <asm/bootinfo.h>
#include <linux/of_fdt.h>
#include <asm/netlogic/haldefs.h>
#include <asm/netlogic/common.h>
#include <asm/netlogic/xlp-hal/iomap.h>
#include <asm/netlogic/xlp-hal/xlp.h>
#include <asm/netlogic/xlp-hal/sys.h>
unsigned long nlm_common_ebase = 0x0;
/* default to uniprocessor */
uint32_t nlm_coremask = 1, nlm_cpumask = 1;
int nlm_threads_per_core = 1;
static void nlm_linux_exit(void)
{
nlm_write_sys_reg(nlm_sys_base, SYS_CHIP_RESET, 1);
for ( ; ; )
cpu_wait();
}
void __init plat_mem_setup(void)
{
panic_timeout = 5;
_machine_restart = (void (*)(char *))nlm_linux_exit;
_machine_halt = nlm_linux_exit;
pm_power_off = nlm_linux_exit;
}
const char *get_system_type(void)
{
return "Netlogic XLP Series";
}
void __init prom_free_prom_memory(void)
{
/* Nothing yet */
}
void xlp_mmu_init(void)
{
/* enable extended TLB and large fixed TLB */
write_c0_config6(read_c0_config6() | 0x24);
current_cpu_data.tlbsize = ((read_c0_config6() >> 16) & 0xffff) + 1;
/* set page mask of fixed TLB in config7 */
write_c0_config7(PM_DEFAULT_MASK >>
(13 + (ffz(PM_DEFAULT_MASK >> 13) / 2)));
}
void __init prom_init(void)
{
void *fdtp;
fdtp = (void *)(long)fw_arg0;
xlp_mmu_init();
nlm_hal_init();
early_init_devtree(fdtp);
nlm_common_ebase = read_c0_ebase() & (~((1 << 12) - 1));
#ifdef CONFIG_SMP
nlm_wakeup_secondary_cpus(0xffffffff);
register_smp_ops(&nlm_smp_ops);
#endif
}
| gpl-2.0 |
kyak/qi-kernel | arch/mips/lib/uncached.c | 9380 | 2132 | /*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2005 Thiemo Seufer
* Copyright (C) 2005 MIPS Technologies, Inc. All rights reserved.
* Author: Maciej W. Rozycki <macro@mips.com>
*/
#include <linux/init.h>
#include <asm/addrspace.h>
#include <asm/bug.h>
#include <asm/cacheflush.h>
#ifndef CKSEG2
#define CKSEG2 CKSSEG
#endif
#ifndef TO_PHYS_MASK
#define TO_PHYS_MASK -1
#endif
/*
* FUNC is executed in one of the uncached segments, depending on its
* original address as follows:
*
* 1. If the original address is in CKSEG0 or CKSEG1, then the uncached
* segment used is CKSEG1.
* 2. If the original address is in XKPHYS, then the uncached segment
* used is XKPHYS(2).
* 3. Otherwise it's a bug.
*
* The same remapping is done with the stack pointer. Stack handling
* works because we don't handle stack arguments or more complex return
* values, so we can avoid sharing the same stack area between a cached
* and the uncached mode.
*/
unsigned long __cpuinit run_uncached(void *func)
{
register long sp __asm__("$sp");
register long ret __asm__("$2");
long lfunc = (long)func, ufunc;
long usp;
if (sp >= (long)CKSEG0 && sp < (long)CKSEG2)
usp = CKSEG1ADDR(sp);
#ifdef CONFIG_64BIT
else if ((long long)sp >= (long long)PHYS_TO_XKPHYS(0, 0) &&
(long long)sp < (long long)PHYS_TO_XKPHYS(8, 0))
usp = PHYS_TO_XKPHYS(K_CALG_UNCACHED,
XKPHYS_TO_PHYS((long long)sp));
#endif
else {
BUG();
usp = sp;
}
if (lfunc >= (long)CKSEG0 && lfunc < (long)CKSEG2)
ufunc = CKSEG1ADDR(lfunc);
#ifdef CONFIG_64BIT
else if ((long long)lfunc >= (long long)PHYS_TO_XKPHYS(0, 0) &&
(long long)lfunc < (long long)PHYS_TO_XKPHYS(8, 0))
ufunc = PHYS_TO_XKPHYS(K_CALG_UNCACHED,
XKPHYS_TO_PHYS((long long)lfunc));
#endif
else {
BUG();
ufunc = lfunc;
}
__asm__ __volatile__ (
" move $16, $sp\n"
" move $sp, %1\n"
" jalr %2\n"
" move $sp, $16"
: "=r" (ret)
: "r" (usp), "r" (ufunc)
: "$16", "$31");
return ret;
}
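/*
* Illustrative usage sketch (hypothetical caller): cache probing code
* that must not itself run cached can be wrapped as
*
*	static int probe_scache(void) { ... }
*	...
*	ret = run_uncached(probe_scache);
*
* run_uncached() jumps to the CKSEG1/XKPHYS(2) alias of the function
* and temporarily moves the stack pointer to the matching uncached
* alias, as described above.
*/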
| gpl-2.0 |
shaqfu786/android_kernel_lge_omap4-common | drivers/input/touchscreen/touchit213.c | 9892 | 6060 | /*
* Sahara TouchIT-213 serial touchscreen driver
*
* Copyright (c) 2007-2008 Claudio Nieder <private@claudio.ch>
*
* Based on Touchright driver (drivers/input/touchscreen/touchright.c)
* Copyright (c) 2006 Rick Koch <n1gp@hotmail.com>
* Copyright (c) 2004 Vojtech Pavlik
* and Dan Streetman <ddstreet@ieee.org>
*/
/*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
* by the Free Software Foundation.
*/
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/input.h>
#include <linux/serio.h>
#include <linux/init.h>
#define DRIVER_DESC "Sahara TouchIT-213 serial touchscreen driver"
MODULE_AUTHOR("Claudio Nieder <private@claudio.ch>");
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");
/*
* Definitions & global arrays.
*/
/*
* Data is received through COM1 at 9600 bit/s, 8 bit, no parity, in
* packets of 5 bytes each.
*
* +--------+ +--------+ +--------+ +--------+ +--------+
* |1000000p| |0xxxxxxx| |0xxxxxxx| |0yyyyyyy| |0yyyyyyy|
* +--------+ +--------+ +--------+ +--------+ +--------+
* MSB LSB MSB LSB
*
* The value of p is 1 as long as the screen is touched and 0 when
* reporting the location where touching stopped, e.g. where the pen was
* lifted from the screen.
*
* When holding the screen in landscape mode as the BIOS text output is
* presented, x is the horizontal axis with values growing from left to
* right and y is the vertical axis with values growing from top to
* bottom.
*
* When holding the screen in portrait mode with the Sahara logo in its
* correct position, x is the vertical axis with values growing from
* top to bottom and y is the horizontal axis with values growing from
* right to left.
*/
#define T213_FORMAT_TOUCH_BIT 0x01
#define T213_FORMAT_STATUS_BYTE 0x80
#define T213_FORMAT_STATUS_MASK ~T213_FORMAT_TOUCH_BIT
/*
* On my Sahara Touch-IT 213 I have observed x values from 0 to 0x7f0
* and y values from 0x1d to 0x7e9, so the actual measurement is
* probably done with an 11 bit precision.
*/
#define T213_MIN_XC 0
#define T213_MAX_XC 0x07ff
#define T213_MIN_YC 0
#define T213_MAX_YC 0x07ff
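/*
* Illustrative decode of one complete 5-byte packet (sketch only; the
* driver below decodes it incrementally in touchit213_interrupt()):
*
*	int down = data[0] & T213_FORMAT_TOUCH_BIT;
*	int x = (data[1] << 7) | data[2];
*	int y = (data[3] << 7) | data[4];
*
* Each payload byte carries 7 bits, MSB first, so x and y are 14-bit
* values of which about 11 bits are significant.
*/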
/*
* Per-touchscreen data.
*/
struct touchit213 {
struct input_dev *dev;
struct serio *serio;
int idx;
unsigned char csum;
unsigned char data[5];
char phys[32];
};
static irqreturn_t touchit213_interrupt(struct serio *serio,
unsigned char data, unsigned int flags)
{
struct touchit213 *touchit213 = serio_get_drvdata(serio);
struct input_dev *dev = touchit213->dev;
touchit213->data[touchit213->idx] = data;
switch (touchit213->idx++) {
case 0:
if ((touchit213->data[0] & T213_FORMAT_STATUS_MASK) !=
T213_FORMAT_STATUS_BYTE) {
pr_debug("unsynchronized data: 0x%02x\n", data);
touchit213->idx = 0;
}
break;
case 4:
touchit213->idx = 0;
input_report_abs(dev, ABS_X,
(touchit213->data[1] << 7) | touchit213->data[2]);
input_report_abs(dev, ABS_Y,
(touchit213->data[3] << 7) | touchit213->data[4]);
input_report_key(dev, BTN_TOUCH,
touchit213->data[0] & T213_FORMAT_TOUCH_BIT);
input_sync(dev);
break;
}
return IRQ_HANDLED;
}
/*
* touchit213_disconnect() is the opposite of touchit213_connect()
*/
static void touchit213_disconnect(struct serio *serio)
{
struct touchit213 *touchit213 = serio_get_drvdata(serio);
input_get_device(touchit213->dev);
input_unregister_device(touchit213->dev);
serio_close(serio);
serio_set_drvdata(serio, NULL);
input_put_device(touchit213->dev);
kfree(touchit213);
}
/*
* touchit213_connect() is the routine that is called when someone adds a
* new serio device that supports the Touch-IT 213 protocol and registers
* it as an input device.
*/
static int touchit213_connect(struct serio *serio, struct serio_driver *drv)
{
struct touchit213 *touchit213;
struct input_dev *input_dev;
int err;
touchit213 = kzalloc(sizeof(struct touchit213), GFP_KERNEL);
input_dev = input_allocate_device();
if (!touchit213 || !input_dev) {
err = -ENOMEM;
goto fail1;
}
touchit213->serio = serio;
touchit213->dev = input_dev;
snprintf(touchit213->phys, sizeof(touchit213->phys),
"%s/input0", serio->phys);
input_dev->name = "Sahara Touch-iT213 Serial TouchScreen";
input_dev->phys = touchit213->phys;
input_dev->id.bustype = BUS_RS232;
input_dev->id.vendor = SERIO_TOUCHIT213;
input_dev->id.product = 0;
input_dev->id.version = 0x0100;
input_dev->dev.parent = &serio->dev;
input_dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS);
input_dev->keybit[BIT_WORD(BTN_TOUCH)] = BIT_MASK(BTN_TOUCH);
input_set_abs_params(touchit213->dev, ABS_X,
T213_MIN_XC, T213_MAX_XC, 0, 0);
input_set_abs_params(touchit213->dev, ABS_Y,
T213_MIN_YC, T213_MAX_YC, 0, 0);
serio_set_drvdata(serio, touchit213);
err = serio_open(serio, drv);
if (err)
goto fail2;
err = input_register_device(touchit213->dev);
if (err)
goto fail3;
return 0;
fail3: serio_close(serio);
fail2: serio_set_drvdata(serio, NULL);
fail1: input_free_device(input_dev);
kfree(touchit213);
return err;
}
/*
* The serio driver structure.
*/
static struct serio_device_id touchit213_serio_ids[] = {
{
.type = SERIO_RS232,
.proto = SERIO_TOUCHIT213,
.id = SERIO_ANY,
.extra = SERIO_ANY,
},
{ 0 }
};
MODULE_DEVICE_TABLE(serio, touchit213_serio_ids);
static struct serio_driver touchit213_drv = {
.driver = {
.name = "touchit213",
},
.description = DRIVER_DESC,
.id_table = touchit213_serio_ids,
.interrupt = touchit213_interrupt,
.connect = touchit213_connect,
.disconnect = touchit213_disconnect,
};
/*
* The functions for inserting/removing us as a module.
*/
static int __init touchit213_init(void)
{
return serio_register_driver(&touchit213_drv);
}
static void __exit touchit213_exit(void)
{
serio_unregister_driver(&touchit213_drv);
}
module_init(touchit213_init);
module_exit(touchit213_exit);
| gpl-2.0 |
AzraelsKiss/android_kernel_samsung_smdk4412 | arch/alpha/lib/fls.c | 11940 | 1117 | /*
* arch/alpha/lib/fls.c
*/
#include <linux/module.h>
#include <linux/bitops.h>
/* This is fls(x)-1, except zero is held to zero. This allows most
efficient input into extbl, plus it allows easy handling of fls(0)=0. */
const unsigned char __flsm1_tab[256] =
{
0,
0,
1, 1,
2, 2, 2, 2,
3, 3, 3, 3, 3, 3, 3, 3,
4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
};
EXPORT_SYMBOL(__flsm1_tab);
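/*
* Illustrative sketch of a byte-at-a-time fls() built on this table
* (the real fls() lives in asm/bitops.h and may use cmpbge/extbl
* instead):
*
*	static inline int fls_sketch(unsigned int x)
*	{
*		int r = 0;
*
*		if (x & 0xffff0000) { x >>= 16; r += 16; }
*		if (x & 0xff00)     { x >>= 8;  r += 8;  }
*		return x ? r + __flsm1_tab[x] + 1 : r;
*	}
*
* e.g. fls_sketch(0) == 0, fls_sketch(1) == 1 and
* fls_sketch(0x80000000) == 32.
*/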
| gpl-2.0 |
daeiron/LGD855_kernel | sound/pci/ctxfi/ctamixer.c | 12708 | 9927 | /**
* Copyright (C) 2008, Creative Technology Ltd. All Rights Reserved.
*
* This source file is released under GPL v2 license (no other versions).
* See the COPYING file included in the main directory of this source
* distribution for the license terms and conditions.
*
* @File ctamixer.c
*
* @Brief
* This file contains the implementation of the Audio Mixer
* resource management object.
*
* @Author Liu Chun
* @Date May 21 2008
*
*/
#include "ctamixer.h"
#include "cthardware.h"
#include <linux/slab.h>
#define AMIXER_RESOURCE_NUM 256
#define SUM_RESOURCE_NUM 256
#define AMIXER_Y_IMMEDIATE 1
#define BLANK_SLOT 4094
static int amixer_master(struct rsc *rsc)
{
rsc->conj = 0;
return rsc->idx = container_of(rsc, struct amixer, rsc)->idx[0];
}
static int amixer_next_conj(struct rsc *rsc)
{
rsc->conj++;
return container_of(rsc, struct amixer, rsc)->idx[rsc->conj];
}
static int amixer_index(const struct rsc *rsc)
{
return container_of(rsc, struct amixer, rsc)->idx[rsc->conj];
}
static int amixer_output_slot(const struct rsc *rsc)
{
return (amixer_index(rsc) << 4) + 0x4;
}
static struct rsc_ops amixer_basic_rsc_ops = {
.master = amixer_master,
.next_conj = amixer_next_conj,
.index = amixer_index,
.output_slot = amixer_output_slot,
};
static int amixer_set_input(struct amixer *amixer, struct rsc *rsc)
{
struct hw *hw;
hw = amixer->rsc.hw;
hw->amixer_set_mode(amixer->rsc.ctrl_blk, AMIXER_Y_IMMEDIATE);
amixer->input = rsc;
if (!rsc)
hw->amixer_set_x(amixer->rsc.ctrl_blk, BLANK_SLOT);
else
hw->amixer_set_x(amixer->rsc.ctrl_blk,
rsc->ops->output_slot(rsc));
return 0;
}
/* y is a 14-bit immediate constant */
static int amixer_set_y(struct amixer *amixer, unsigned int y)
{
struct hw *hw;
hw = amixer->rsc.hw;
hw->amixer_set_y(amixer->rsc.ctrl_blk, y);
return 0;
}
static int amixer_set_invalid_squash(struct amixer *amixer, unsigned int iv)
{
struct hw *hw;
hw = amixer->rsc.hw;
hw->amixer_set_iv(amixer->rsc.ctrl_blk, iv);
return 0;
}
static int amixer_set_sum(struct amixer *amixer, struct sum *sum)
{
struct hw *hw;
hw = amixer->rsc.hw;
amixer->sum = sum;
if (!sum) {
hw->amixer_set_se(amixer->rsc.ctrl_blk, 0);
} else {
hw->amixer_set_se(amixer->rsc.ctrl_blk, 1);
hw->amixer_set_sadr(amixer->rsc.ctrl_blk,
sum->rsc.ops->index(&sum->rsc));
}
return 0;
}
static int amixer_commit_write(struct amixer *amixer)
{
struct hw *hw;
unsigned int index;
int i;
struct rsc *input;
struct sum *sum;
hw = amixer->rsc.hw;
input = amixer->input;
sum = amixer->sum;
/* Program master and conjugate resources */
amixer->rsc.ops->master(&amixer->rsc);
if (input)
input->ops->master(input);
if (sum)
sum->rsc.ops->master(&sum->rsc);
for (i = 0; i < amixer->rsc.msr; i++) {
hw->amixer_set_dirty_all(amixer->rsc.ctrl_blk);
if (input) {
hw->amixer_set_x(amixer->rsc.ctrl_blk,
input->ops->output_slot(input));
input->ops->next_conj(input);
}
if (sum) {
hw->amixer_set_sadr(amixer->rsc.ctrl_blk,
sum->rsc.ops->index(&sum->rsc));
sum->rsc.ops->next_conj(&sum->rsc);
}
index = amixer->rsc.ops->output_slot(&amixer->rsc);
hw->amixer_commit_write(hw, index, amixer->rsc.ctrl_blk);
amixer->rsc.ops->next_conj(&amixer->rsc);
}
amixer->rsc.ops->master(&amixer->rsc);
if (input)
input->ops->master(input);
if (sum)
sum->rsc.ops->master(&sum->rsc);
return 0;
}
static int amixer_commit_raw_write(struct amixer *amixer)
{
struct hw *hw;
unsigned int index;
hw = amixer->rsc.hw;
index = amixer->rsc.ops->output_slot(&amixer->rsc);
hw->amixer_commit_write(hw, index, amixer->rsc.ctrl_blk);
return 0;
}
static int amixer_get_y(struct amixer *amixer)
{
struct hw *hw;
hw = amixer->rsc.hw;
return hw->amixer_get_y(amixer->rsc.ctrl_blk);
}
static int amixer_setup(struct amixer *amixer, struct rsc *input,
unsigned int scale, struct sum *sum)
{
amixer_set_input(amixer, input);
amixer_set_y(amixer, scale);
amixer_set_sum(amixer, sum);
amixer_commit_write(amixer);
return 0;
}
static struct amixer_rsc_ops amixer_ops = {
.set_input = amixer_set_input,
.set_invalid_squash = amixer_set_invalid_squash,
.set_scale = amixer_set_y,
.set_sum = amixer_set_sum,
.commit_write = amixer_commit_write,
.commit_raw_write = amixer_commit_raw_write,
.setup = amixer_setup,
.get_scale = amixer_get_y,
};
static int amixer_rsc_init(struct amixer *amixer,
const struct amixer_desc *desc,
struct amixer_mgr *mgr)
{
int err;
err = rsc_init(&amixer->rsc, amixer->idx[0],
AMIXER, desc->msr, mgr->mgr.hw);
if (err)
return err;
/* Set amixer specific operations */
amixer->rsc.ops = &amixer_basic_rsc_ops;
amixer->ops = &amixer_ops;
amixer->input = NULL;
amixer->sum = NULL;
amixer_setup(amixer, NULL, 0, NULL);
return 0;
}
static int amixer_rsc_uninit(struct amixer *amixer)
{
amixer_setup(amixer, NULL, 0, NULL);
rsc_uninit(&amixer->rsc);
amixer->ops = NULL;
amixer->input = NULL;
amixer->sum = NULL;
return 0;
}
static int get_amixer_rsc(struct amixer_mgr *mgr,
const struct amixer_desc *desc,
struct amixer **ramixer)
{
int err, i;
unsigned int idx;
struct amixer *amixer;
unsigned long flags;
*ramixer = NULL;
/* Allocate mem for amixer resource */
amixer = kzalloc(sizeof(*amixer), GFP_KERNEL);
if (!amixer)
return -ENOMEM;
/* Check whether there are sufficient
* amixer resources to meet request. */
err = 0;
spin_lock_irqsave(&mgr->mgr_lock, flags);
for (i = 0; i < desc->msr; i++) {
err = mgr_get_resource(&mgr->mgr, 1, &idx);
if (err)
break;
amixer->idx[i] = idx;
}
spin_unlock_irqrestore(&mgr->mgr_lock, flags);
if (err) {
printk(KERN_ERR "ctxfi: Can't meet AMIXER resource request!\n");
goto error;
}
err = amixer_rsc_init(amixer, desc, mgr);
if (err)
goto error;
*ramixer = amixer;
return 0;
error:
spin_lock_irqsave(&mgr->mgr_lock, flags);
for (i--; i >= 0; i--)
mgr_put_resource(&mgr->mgr, 1, amixer->idx[i]);
spin_unlock_irqrestore(&mgr->mgr_lock, flags);
kfree(amixer);
return err;
}
static int put_amixer_rsc(struct amixer_mgr *mgr, struct amixer *amixer)
{
unsigned long flags;
int i;
spin_lock_irqsave(&mgr->mgr_lock, flags);
for (i = 0; i < amixer->rsc.msr; i++)
mgr_put_resource(&mgr->mgr, 1, amixer->idx[i]);
spin_unlock_irqrestore(&mgr->mgr_lock, flags);
amixer_rsc_uninit(amixer);
kfree(amixer);
return 0;
}
int amixer_mgr_create(void *hw, struct amixer_mgr **ramixer_mgr)
{
int err;
struct amixer_mgr *amixer_mgr;
*ramixer_mgr = NULL;
amixer_mgr = kzalloc(sizeof(*amixer_mgr), GFP_KERNEL);
if (!amixer_mgr)
return -ENOMEM;
err = rsc_mgr_init(&amixer_mgr->mgr, AMIXER, AMIXER_RESOURCE_NUM, hw);
if (err)
goto error;
spin_lock_init(&amixer_mgr->mgr_lock);
amixer_mgr->get_amixer = get_amixer_rsc;
amixer_mgr->put_amixer = put_amixer_rsc;
*ramixer_mgr = amixer_mgr;
return 0;
error:
kfree(amixer_mgr);
return err;
}
int amixer_mgr_destroy(struct amixer_mgr *amixer_mgr)
{
rsc_mgr_uninit(&amixer_mgr->mgr);
kfree(amixer_mgr);
return 0;
}
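/*
* Illustrative usage sketch (hypothetical caller): mixer code obtains
* and releases AMIXER resources through the manager, e.g.
*
*	struct amixer_desc desc = { .msr = 1 };
*	struct amixer *amixer;
*	int err = amixer_mgr->get_amixer(amixer_mgr, &desc, &amixer);
*
*	if (!err) {
*		amixer->ops->setup(amixer, input_rsc, scale, sum);
*		...
*		amixer_mgr->put_amixer(amixer_mgr, amixer);
*	}
*/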
/* SUM resource management */
static int sum_master(struct rsc *rsc)
{
rsc->conj = 0;
return rsc->idx = container_of(rsc, struct sum, rsc)->idx[0];
}
static int sum_next_conj(struct rsc *rsc)
{
rsc->conj++;
return container_of(rsc, struct sum, rsc)->idx[rsc->conj];
}
static int sum_index(const struct rsc *rsc)
{
return container_of(rsc, struct sum, rsc)->idx[rsc->conj];
}
static int sum_output_slot(const struct rsc *rsc)
{
return (sum_index(rsc) << 4) + 0xc;
}
static struct rsc_ops sum_basic_rsc_ops = {
.master = sum_master,
.next_conj = sum_next_conj,
.index = sum_index,
.output_slot = sum_output_slot,
};
static int sum_rsc_init(struct sum *sum,
const struct sum_desc *desc,
struct sum_mgr *mgr)
{
int err;
err = rsc_init(&sum->rsc, sum->idx[0], SUM, desc->msr, mgr->mgr.hw);
if (err)
return err;
sum->rsc.ops = &sum_basic_rsc_ops;
return 0;
}
static int sum_rsc_uninit(struct sum *sum)
{
rsc_uninit(&sum->rsc);
return 0;
}
static int get_sum_rsc(struct sum_mgr *mgr,
const struct sum_desc *desc,
struct sum **rsum)
{
int err, i;
unsigned int idx;
struct sum *sum;
unsigned long flags;
*rsum = NULL;
/* Allocate mem for sum resource */
sum = kzalloc(sizeof(*sum), GFP_KERNEL);
if (!sum)
return -ENOMEM;
/* Check whether there are sufficient sum resources to meet request. */
err = 0;
spin_lock_irqsave(&mgr->mgr_lock, flags);
for (i = 0; i < desc->msr; i++) {
err = mgr_get_resource(&mgr->mgr, 1, &idx);
if (err)
break;
sum->idx[i] = idx;
}
spin_unlock_irqrestore(&mgr->mgr_lock, flags);
if (err) {
printk(KERN_ERR "ctxfi: Can't meet SUM resource request!\n");
goto error;
}
err = sum_rsc_init(sum, desc, mgr);
if (err)
goto error;
*rsum = sum;
return 0;
error:
spin_lock_irqsave(&mgr->mgr_lock, flags);
for (i--; i >= 0; i--)
mgr_put_resource(&mgr->mgr, 1, sum->idx[i]);
spin_unlock_irqrestore(&mgr->mgr_lock, flags);
kfree(sum);
return err;
}
static int put_sum_rsc(struct sum_mgr *mgr, struct sum *sum)
{
unsigned long flags;
int i;
spin_lock_irqsave(&mgr->mgr_lock, flags);
for (i = 0; i < sum->rsc.msr; i++)
mgr_put_resource(&mgr->mgr, 1, sum->idx[i]);
spin_unlock_irqrestore(&mgr->mgr_lock, flags);
sum_rsc_uninit(sum);
kfree(sum);
return 0;
}
int sum_mgr_create(void *hw, struct sum_mgr **rsum_mgr)
{
int err;
struct sum_mgr *sum_mgr;
*rsum_mgr = NULL;
sum_mgr = kzalloc(sizeof(*sum_mgr), GFP_KERNEL);
if (!sum_mgr)
return -ENOMEM;
err = rsc_mgr_init(&sum_mgr->mgr, SUM, SUM_RESOURCE_NUM, hw);
if (err)
goto error;
spin_lock_init(&sum_mgr->mgr_lock);
sum_mgr->get_sum = get_sum_rsc;
sum_mgr->put_sum = put_sum_rsc;
*rsum_mgr = sum_mgr;
return 0;
error:
kfree(sum_mgr);
return err;
}
int sum_mgr_destroy(struct sum_mgr *sum_mgr)
{
rsc_mgr_uninit(&sum_mgr->mgr);
kfree(sum_mgr);
return 0;
}
| gpl-2.0 |
firstred/surfacepro3-kernel | drivers/i2c/busses/i2c-puv3.c | 165 | 6194 | /*
* I2C driver for PKUnity-v3 SoC
* Code specific to PKUnity SoC and UniCore ISA
*
* Maintained by GUAN Xue-tao <gxt@mprc.pku.edu.cn>
* Copyright (C) 2001-2010 Guan Xuetao
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/delay.h>
#include <linux/i2c.h>
#include <linux/init.h>
#include <linux/clk.h>
#include <linux/platform_device.h>
#include <linux/io.h>
#include <mach/hardware.h>
/*
* Poll the i2c status register until the specified bit is set.
* Returns 0 if timed out.
*/
static short poll_status(unsigned long bit)
{
int loop_cntr = 1000;
if (bit & I2C_STATUS_TFNF) {
do {
udelay(10);
} while (!(readl(I2C_STATUS) & bit) && (--loop_cntr > 0));
} else {
/* RXRDY handler */
do {
if (readl(I2C_TAR) == I2C_TAR_EEPROM)
msleep(20);
else
udelay(10);
} while (!(readl(I2C_RXFLR) & 0xf) && (--loop_cntr > 0));
}
return (loop_cntr > 0);
}
static int xfer_read(struct i2c_adapter *adap, unsigned char *buf, int length)
{
int i2c_reg = *buf;
/* Read data */
while (length--) {
if (!poll_status(I2C_STATUS_TFNF)) {
dev_dbg(&adap->dev, "Tx FIFO Not Full timeout\n");
return -ETIMEDOUT;
}
/* send addr */
writel(i2c_reg | I2C_DATACMD_WRITE, I2C_DATACMD);
/* get ready to next write */
i2c_reg++;
/* send read CMD */
writel(I2C_DATACMD_READ, I2C_DATACMD);
/* wait until the Rx FIFO have available */
if (!poll_status(I2C_STATUS_RFNE)) {
dev_dbg(&adap->dev, "RXRDY timeout\n");
return -ETIMEDOUT;
}
/* read the data to buf */
*buf = (readl(I2C_DATACMD) & I2C_DATACMD_DAT_MASK);
buf++;
}
return 0;
}
static int xfer_write(struct i2c_adapter *adap, unsigned char *buf, int length)
{
int i2c_reg = *buf;
/* The first byte of the buffer holds the starting register number */
if (i2c_reg == -1) {
printk(KERN_WARNING "Error i2c reg\n");
return -ETIMEDOUT;
}
if (length == 1)
return 0;
buf++;
length--;
while (length--) {
/* send addr */
writel(i2c_reg | I2C_DATACMD_WRITE, I2C_DATACMD);
/* send write CMD */
writel(*buf | I2C_DATACMD_WRITE, I2C_DATACMD);
/* wait until the Rx FIFO have available */
msleep(20);
/* read the data to buf */
i2c_reg++;
buf++;
}
return 0;
}
/*
* Generic i2c master transfer entrypoint.
*
*/
static int puv3_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg *pmsg,
int num)
{
int i, ret;
unsigned char swap;
/* Disable i2c */
writel(I2C_ENABLE_DISABLE, I2C_ENABLE);
/* Set the work mode and speed*/
writel(I2C_CON_MASTER | I2C_CON_SPEED_STD | I2C_CON_SLAVEDISABLE, I2C_CON);
writel(pmsg->addr, I2C_TAR);
/* Enable i2c */
writel(I2C_ENABLE_ENABLE, I2C_ENABLE);
dev_dbg(&adap->dev, "puv3_i2c_xfer: processing %d messages:\n", num);
for (i = 0; i < num; i++) {
dev_dbg(&adap->dev, " #%d: %sing %d byte%s %s 0x%02x\n", i,
pmsg->flags & I2C_M_RD ? "read" : "writ",
pmsg->len, pmsg->len > 1 ? "s" : "",
pmsg->flags & I2C_M_RD ? "from" : "to", pmsg->addr);
if (pmsg->len && pmsg->buf) { /* sanity check */
if (pmsg->flags & I2C_M_RD)
ret = xfer_read(adap, pmsg->buf, pmsg->len);
else
ret = xfer_write(adap, pmsg->buf, pmsg->len);
if (ret)
return ret;
}
dev_dbg(&adap->dev, "transfer complete\n");
pmsg++; /* next message */
}
/* XXX: fixup be16_to_cpu in bq27x00_battery.c */
pmsg--; /* step back to the last processed message */
if (pmsg->addr == I2C_TAR_PWIC) {
swap = pmsg->buf[0];
pmsg->buf[0] = pmsg->buf[1];
pmsg->buf[1] = swap;
}
return i;
}
/*
* Return list of supported functionality.
*/
static u32 puv3_i2c_func(struct i2c_adapter *adapter)
{
return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
}
static struct i2c_algorithm puv3_i2c_algorithm = {
.master_xfer = puv3_i2c_xfer,
.functionality = puv3_i2c_func,
};
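/*
* Illustrative note (generic i2c client view, not part of this file):
* with this adapter a read message carries the starting register number
* in buf[0] and the data is read back into the same buffer, e.g.
*
*	u8 buf[2] = { 0x10 };	- start at (assumed) register 0x10
*	struct i2c_msg msg = {
*		.addr = client->addr, .flags = I2C_M_RD,
*		.len = 2, .buf = buf,
*	};
*	i2c_transfer(adapter, &msg, 1);
*
* which ends up in xfer_read() above via puv3_i2c_xfer().
*/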
/*
* Main initialization routine.
*/
static int puv3_i2c_probe(struct platform_device *pdev)
{
struct i2c_adapter *adapter;
struct resource *mem;
int rc;
mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!mem)
return -ENODEV;
if (!request_mem_region(mem->start, resource_size(mem), "puv3_i2c"))
return -EBUSY;
adapter = kzalloc(sizeof(struct i2c_adapter), GFP_KERNEL);
if (adapter == NULL) {
dev_err(&pdev->dev, "can't allocate interface!\n");
rc = -ENOMEM;
goto fail_nomem;
}
snprintf(adapter->name, sizeof(adapter->name), "PUV3-I2C at 0x%08x",
mem->start);
adapter->algo = &puv3_i2c_algorithm;
adapter->class = I2C_CLASS_HWMON;
adapter->dev.parent = &pdev->dev;
platform_set_drvdata(pdev, adapter);
adapter->nr = pdev->id;
rc = i2c_add_numbered_adapter(adapter);
if (rc) {
dev_err(&pdev->dev, "Adapter '%s' registration failed\n",
adapter->name);
goto fail_add_adapter;
}
dev_info(&pdev->dev, "PKUnity v3 i2c bus adapter.\n");
return 0;
fail_add_adapter:
kfree(adapter);
fail_nomem:
release_mem_region(mem->start, resource_size(mem));
return rc;
}
static int puv3_i2c_remove(struct platform_device *pdev)
{
struct i2c_adapter *adapter = platform_get_drvdata(pdev);
struct resource *mem;
i2c_del_adapter(adapter);
put_device(&pdev->dev);
mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
release_mem_region(mem->start, resource_size(mem));
return 0;
}
#ifdef CONFIG_PM_SLEEP
static int puv3_i2c_suspend(struct device *dev)
{
int poll_count;
/* Disable the IIC */
writel(I2C_ENABLE_DISABLE, I2C_ENABLE);
for (poll_count = 0; poll_count < 50; poll_count++) {
if (readl(I2C_ENSTATUS) & I2C_ENSTATUS_ENABLE)
udelay(25);
}
return 0;
}
static SIMPLE_DEV_PM_OPS(puv3_i2c_pm, puv3_i2c_suspend, NULL);
#define PUV3_I2C_PM (&puv3_i2c_pm)
#else
#define PUV3_I2C_PM NULL
#endif
static struct platform_driver puv3_i2c_driver = {
.probe = puv3_i2c_probe,
.remove = puv3_i2c_remove,
.driver = {
.name = "PKUnity-v3-I2C",
.owner = THIS_MODULE,
.pm = PUV3_I2C_PM,
}
};
module_platform_driver(puv3_i2c_driver);
MODULE_DESCRIPTION("PKUnity v3 I2C driver");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:puv3_i2c");
| gpl-2.0 |
gabrielleLQX/qemu-1.6.0 | roms/ipxe/src/hci/mucurses/windows.c | 165 | 3653 | #include <curses.h>
#include <stddef.h>
#include <stdlib.h>
#include "mucurses.h"
/** @file
*
* MuCurses windows instance functions
*
*/
/**
* Delete a window
*
* @v *win pointer to window being deleted
* @ret rc return status code
*/
int delwin ( WINDOW *win ) {
if ( win == NULL )
return ERR;
/* I think we should blank the region covered by the window -
ncurses doesn't do this, but they have a buffer, so they
may just be deleting from an offscreen context whereas we
are guaranteed to be deleting something onscreen */
wmove( win, 0, 0 );
chtype killch = (chtype)' ';
do {
_wputch( win, killch, WRAP );
} while ( win->curs_x + win->curs_y );
free( win );
wmove ( stdscr, 0, 0 );
return OK;
}
/**
* Create a new derived window
*
* @v parent parent window
* @v nlines window height
* @v ncols window width
* @v begin_y window y origin (relative to parent)
* @v begin_x window x origin (relative to parent)
* @ret ptr return pointer to child window
*/
WINDOW *derwin ( WINDOW *parent, int nlines, int ncols,
int begin_y, int begin_x ) {
WINDOW *child;
if ( parent == NULL )
return NULL;
if ( ( (unsigned)ncols > parent->width ) ||
( (unsigned)nlines > parent->height ) )
return NULL;
if ( ( child = malloc( sizeof( WINDOW ) ) ) == NULL )
return NULL;
child->ori_y = parent->ori_y + begin_y;
child->ori_x = parent->ori_x + begin_x;
child->height = nlines;
child->width = ncols;
child->parent = parent;
child->scr = parent->scr;
return child;
}
/**
* Create a duplicate of the specified window
*
* @v orig original window
* @ret ptr pointer to duplicate window
*/
WINDOW *dupwin ( WINDOW *orig ) {
WINDOW *copy;
if ( orig == NULL )
return NULL;
if ( ( copy = malloc( sizeof( WINDOW ) ) ) == NULL )
return NULL;
copy->scr = orig->scr;
copy->parent = orig->parent;
copy->attrs = orig->attrs;
copy->ori_y = orig->ori_y;
copy->ori_x = orig->ori_x;
copy->curs_y = orig->curs_y;
copy->curs_x = orig->curs_x;
copy->height = orig->height;
copy->width = orig->width;
return copy;
}
/**
* Move window origin to specified coordinates
*
* @v *win window to move
* @v y Y position
* @v x X position
* @ret rc return status code
*/
int mvwin ( WINDOW *win, int y, int x ) {
if ( win == NULL )
return ERR;
if ( ( ( (unsigned)y + win->height ) > LINES ) ||
( ( (unsigned)x + win->width ) > COLS ) )
return ERR;
win->ori_y = y;
win->ori_x = x;
return OK;
}
/**
* Create new WINDOW
*
* @v nlines number of lines
* @v ncols number of columns
* @v begin_y line origin
* @v begin_x column origin
* @ret *win return pointer to new window
*/
WINDOW *newwin ( int nlines, int ncols, int begin_y, int begin_x ) {
WINDOW *win;
if ( ( (unsigned)( begin_y + nlines ) > stdscr->height ) ||
( (unsigned)( begin_x + ncols ) > stdscr->width ) )
return NULL;
if ( ( win = malloc( sizeof(WINDOW) ) ) == NULL )
return NULL;
win->ori_y = begin_y;
win->ori_x = begin_x;
win->height = nlines;
win->width = ncols;
win->scr = stdscr->scr;
win->parent = stdscr;
return win;
}
/**
* Create a new sub-window
*
* @v parent parent window
* @v nlines window height
* @v ncols window width
* @v begin_y window y origin (absolute)
* @v begin_x window x origin (absolute)
* @ret ptr return pointer to child window
*/
WINDOW *subwin ( WINDOW *parent, int nlines, int ncols,
int begin_y, int begin_x ) {
WINDOW *child;
if ( parent == NULL )
return NULL;
child = newwin( nlines, ncols, begin_y, begin_x );
if ( child == NULL )
return NULL;
child->parent = parent;
child->scr = parent->scr;
return child;
}
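/*
* Illustrative usage sketch (hypothetical caller): a dialog built from
* these primitives might look like
*
*	WINDOW *dialog = newwin ( 10, 40, 5, 20 );
*	WINDOW *body = derwin ( dialog, 8, 38, 1, 1 );
*	...
*	delwin ( body );
*	delwin ( dialog );
*/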
| gpl-2.0 |
georgecherian/linux | drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c | 165 | 50776 | /**************************************************************************
*
* Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
#include "vmwgfx_drv.h"
#include "vmwgfx_reg.h"
#include <drm/ttm/ttm_bo_api.h>
#include <drm/ttm/ttm_placement.h>
#define VMW_RES_HT_ORDER 12
/**
* struct vmw_resource_relocation - Relocation info for resources
*
* @head: List head for the software context's relocation list.
* @res: Non-ref-counted pointer to the resource.
* @offset: Offset of 4 byte entries into the command buffer where the
* id that needs fixup is located.
*/
struct vmw_resource_relocation {
struct list_head head;
const struct vmw_resource *res;
unsigned long offset;
};
/**
* struct vmw_resource_val_node - Validation info for resources
*
* @head: List head for the software context's resource list.
* @hash: Hash entry for quick resource to val_node lookup.
* @res: Ref-counted pointer to the resource.
* @new_backup: Refcounted pointer to the new backup buffer.
* @new_backup_offset: New backup buffer offset if @new_backup is non-NULL.
* @first_usage: Set to true the first time the resource is referenced in
* the command stream.
* @no_buffer_needed: Resources do not need to allocate buffer backup on
* reservation. The command stream will provide one.
*/
struct vmw_resource_val_node {
struct list_head head;
struct drm_hash_item hash;
struct vmw_resource *res;
struct vmw_dma_buffer *new_backup;
unsigned long new_backup_offset;
bool first_usage;
bool no_buffer_needed;
};
/**
* vmw_resource_list_unreserve - unreserve resources previously reserved for
* command submission.
*
* @list: List of resources to unreserve.
* @backoff: Whether command submission failed.
*/
static void vmw_resource_list_unreserve(struct list_head *list,
bool backoff)
{
struct vmw_resource_val_node *val;
list_for_each_entry(val, list, head) {
struct vmw_resource *res = val->res;
struct vmw_dma_buffer *new_backup =
backoff ? NULL : val->new_backup;
vmw_resource_unreserve(res, new_backup,
val->new_backup_offset);
vmw_dmabuf_unreference(&val->new_backup);
}
}
/**
* vmw_resource_val_add - Add a resource to the software context's
* resource list if it's not already on it.
*
* @sw_context: Pointer to the software context.
* @res: Pointer to the resource.
* @p_node: On successful return points to a valid pointer to a
* struct vmw_resource_val_node, if non-NULL on entry.
*/
static int vmw_resource_val_add(struct vmw_sw_context *sw_context,
struct vmw_resource *res,
struct vmw_resource_val_node **p_node)
{
struct vmw_resource_val_node *node;
struct drm_hash_item *hash;
int ret;
if (likely(drm_ht_find_item(&sw_context->res_ht, (unsigned long) res,
&hash) == 0)) {
node = container_of(hash, struct vmw_resource_val_node, hash);
node->first_usage = false;
if (unlikely(p_node != NULL))
*p_node = node;
return 0;
}
node = kzalloc(sizeof(*node), GFP_KERNEL);
if (unlikely(node == NULL)) {
DRM_ERROR("Failed to allocate a resource validation "
"entry.\n");
return -ENOMEM;
}
node->hash.key = (unsigned long) res;
ret = drm_ht_insert_item(&sw_context->res_ht, &node->hash);
if (unlikely(ret != 0)) {
DRM_ERROR("Failed to initialize a resource validation "
"entry.\n");
kfree(node);
return ret;
}
list_add_tail(&node->head, &sw_context->resource_list);
node->res = vmw_resource_reference(res);
node->first_usage = true;
if (unlikely(p_node != NULL))
*p_node = node;
return 0;
}
/**
* vmw_resource_relocation_add - Add a relocation to the relocation list
*
* @list: Pointer to head of relocation list.
* @res: The resource.
* @offset: Offset into the command buffer currently being parsed where the
* id that needs fixup is located. Granularity is 4 bytes.
*/
static int vmw_resource_relocation_add(struct list_head *list,
const struct vmw_resource *res,
unsigned long offset)
{
struct vmw_resource_relocation *rel;
rel = kmalloc(sizeof(*rel), GFP_KERNEL);
if (unlikely(rel == NULL)) {
DRM_ERROR("Failed to allocate a resource relocation.\n");
return -ENOMEM;
}
rel->res = res;
rel->offset = offset;
list_add_tail(&rel->head, list);
return 0;
}
/**
* vmw_resource_relocations_free - Free all relocations on a list
*
* @list: Pointer to the head of the relocation list.
*/
static void vmw_resource_relocations_free(struct list_head *list)
{
struct vmw_resource_relocation *rel, *n;
list_for_each_entry_safe(rel, n, list, head) {
list_del(&rel->head);
kfree(rel);
}
}
/**
* vmw_resource_relocations_apply - Apply all relocations on a list
*
* @cb: Pointer to the start of the command buffer being patched. This need
* not be the same buffer as the one being parsed when the relocation
* list was built, but the contents must be the same modulo the
* resource ids.
* @list: Pointer to the head of the relocation list.
*/
static void vmw_resource_relocations_apply(uint32_t *cb,
struct list_head *list)
{
struct vmw_resource_relocation *rel;
list_for_each_entry(rel, list, head)
cb[rel->offset] = rel->res->id;
}
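/*
 * Hypothetical, self-contained sketch of how the relocation machinery
 * above is meant to work: a relocation records a 4-byte-granular offset
 * into the command buffer, and applying it writes the resource's final
 * device id at that offset. Not driver code; the on-stack vmw_resource
 * with only .id set is purely illustrative.
 */
static void vmw_resource_relocation_example(void)
{
uint32_t cmd_buf[8] = { 0 };
struct vmw_resource res = { .id = 42 };
struct vmw_resource_relocation rel = {
.res = &res,
.offset = 3, /* patch the fourth 32-bit word */
};
LIST_HEAD(relocs);
list_add_tail(&rel.head, &relocs);
vmw_resource_relocations_apply(cmd_buf, &relocs);
/* cmd_buf[3] now holds res.id (42) */
}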
static int vmw_cmd_invalid(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
SVGA3dCmdHeader *header)
{
return capable(CAP_SYS_ADMIN) ? : -EINVAL;
}
static int vmw_cmd_ok(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
SVGA3dCmdHeader *header)
{
return 0;
}
/**
* vmw_bo_to_validate_list - add a bo to a validate list
*
* @sw_context: The software context used for this command submission batch.
* @bo: The buffer object to add.
* @p_val_node: If non-NULL, will be updated with the validate node number
* on return.
*
* Returns -EINVAL if the limit of number of buffer objects per command
* submission is reached.
*/
static int vmw_bo_to_validate_list(struct vmw_sw_context *sw_context,
struct ttm_buffer_object *bo,
uint32_t *p_val_node)
{
uint32_t val_node;
struct vmw_validate_buffer *vval_buf;
struct ttm_validate_buffer *val_buf;
struct drm_hash_item *hash;
int ret;
if (likely(drm_ht_find_item(&sw_context->res_ht, (unsigned long) bo,
&hash) == 0)) {
vval_buf = container_of(hash, struct vmw_validate_buffer,
hash);
val_buf = &vval_buf->base;
val_node = vval_buf - sw_context->val_bufs;
} else {
val_node = sw_context->cur_val_buf;
if (unlikely(val_node >= VMWGFX_MAX_VALIDATIONS)) {
DRM_ERROR("Max number of DMA buffers per submission "
"exceeded.\n");
return -EINVAL;
}
vval_buf = &sw_context->val_bufs[val_node];
vval_buf->hash.key = (unsigned long) bo;
ret = drm_ht_insert_item(&sw_context->res_ht, &vval_buf->hash);
if (unlikely(ret != 0)) {
DRM_ERROR("Failed to initialize a buffer validation "
"entry.\n");
return ret;
}
++sw_context->cur_val_buf;
val_buf = &vval_buf->base;
val_buf->bo = ttm_bo_reference(bo);
val_buf->reserved = false;
list_add_tail(&val_buf->head, &sw_context->validate_nodes);
}
sw_context->fence_flags |= DRM_VMW_FENCE_FLAG_EXEC;
if (p_val_node)
*p_val_node = val_node;
return 0;
}
/**
* vmw_resources_reserve - Reserve all resources on the sw_context's
* resource list.
*
* @sw_context: Pointer to the software context.
*
* Note that since vmware's command submission currently is protected by
* the cmdbuf mutex, no fancy deadlock avoidance is required for resources,
* since only a single thread at once will attempt this.
*/
static int vmw_resources_reserve(struct vmw_sw_context *sw_context)
{
struct vmw_resource_val_node *val;
int ret;
list_for_each_entry(val, &sw_context->resource_list, head) {
struct vmw_resource *res = val->res;
ret = vmw_resource_reserve(res, val->no_buffer_needed);
if (unlikely(ret != 0))
return ret;
if (res->backup) {
struct ttm_buffer_object *bo = &res->backup->base;
ret = vmw_bo_to_validate_list
(sw_context, bo, NULL);
if (unlikely(ret != 0))
return ret;
}
}
return 0;
}
/**
* vmw_resources_validate - Validate all resources on the sw_context's
* resource list.
*
* @sw_context: Pointer to the software context.
*
* Before this function is called, all resource backup buffers must have
* been validated.
*/
static int vmw_resources_validate(struct vmw_sw_context *sw_context)
{
struct vmw_resource_val_node *val;
int ret;
list_for_each_entry(val, &sw_context->resource_list, head) {
struct vmw_resource *res = val->res;
ret = vmw_resource_validate(res);
if (unlikely(ret != 0)) {
if (ret != -ERESTARTSYS)
DRM_ERROR("Failed to validate resource.\n");
return ret;
}
}
return 0;
}
/**
* vmw_cmd_res_check - Check that a resource is present and if so, put it
* on the resource validate list unless it's already there.
*
* @dev_priv: Pointer to a device private structure.
* @sw_context: Pointer to the software context.
* @res_type: Resource type.
* @converter: User-space visible type specific information.
* @id: Pointer to the location in the command buffer currently being
* parsed from where the user-space resource id handle is located.
*/
static int vmw_cmd_res_check(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
enum vmw_res_type res_type,
const struct vmw_user_resource_conv *converter,
uint32_t *id,
struct vmw_resource_val_node **p_val)
{
struct vmw_res_cache_entry *rcache =
&sw_context->res_cache[res_type];
struct vmw_resource *res;
struct vmw_resource_val_node *node;
int ret;
if (*id == SVGA3D_INVALID_ID)
return 0;
/*
* Fastpath in case of repeated commands referencing the same
* resource
*/
if (likely(rcache->valid && *id == rcache->handle)) {
const struct vmw_resource *res = rcache->res;
rcache->node->first_usage = false;
if (p_val)
*p_val = rcache->node;
return vmw_resource_relocation_add
(&sw_context->res_relocations, res,
id - sw_context->buf_start);
}
ret = vmw_user_resource_lookup_handle(dev_priv,
sw_context->tfile,
*id,
converter,
&res);
if (unlikely(ret != 0)) {
DRM_ERROR("Could not find or use resource 0x%08x.\n",
(unsigned) *id);
dump_stack();
return ret;
}
rcache->valid = true;
rcache->res = res;
rcache->handle = *id;
ret = vmw_resource_relocation_add(&sw_context->res_relocations,
res,
id - sw_context->buf_start);
if (unlikely(ret != 0))
goto out_no_reloc;
ret = vmw_resource_val_add(sw_context, res, &node);
if (unlikely(ret != 0))
goto out_no_reloc;
rcache->node = node;
if (p_val)
*p_val = node;
vmw_resource_unreference(&res);
return 0;
out_no_reloc:
BUG_ON(sw_context->error_resource != NULL);
sw_context->error_resource = res;
return ret;
}
/**
* vmw_cmd_cid_check - Check a command header for valid context information.
*
* @dev_priv: Pointer to a device private structure.
* @sw_context: Pointer to the software context.
* @header: A command header with an embedded user-space context handle.
*
* Convenience function: Call vmw_cmd_res_check with the user-space context
* handle embedded in @header.
*/
static int vmw_cmd_cid_check(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
SVGA3dCmdHeader *header)
{
struct vmw_cid_cmd {
SVGA3dCmdHeader header;
__le32 cid;
} *cmd;
cmd = container_of(header, struct vmw_cid_cmd, header);
return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
user_context_converter, &cmd->cid, NULL);
}
static int vmw_cmd_set_render_target_check(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
SVGA3dCmdHeader *header)
{
struct vmw_sid_cmd {
SVGA3dCmdHeader header;
SVGA3dCmdSetRenderTarget body;
} *cmd;
int ret;
ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
if (unlikely(ret != 0))
return ret;
cmd = container_of(header, struct vmw_sid_cmd, header);
ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
user_surface_converter,
&cmd->body.target.sid, NULL);
return ret;
}
static int vmw_cmd_surface_copy_check(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
SVGA3dCmdHeader *header)
{
struct vmw_sid_cmd {
SVGA3dCmdHeader header;
SVGA3dCmdSurfaceCopy body;
} *cmd;
int ret;
cmd = container_of(header, struct vmw_sid_cmd, header);
ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
user_surface_converter,
&cmd->body.src.sid, NULL);
if (unlikely(ret != 0))
return ret;
return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
user_surface_converter,
&cmd->body.dest.sid, NULL);
}
static int vmw_cmd_stretch_blt_check(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
SVGA3dCmdHeader *header)
{
struct vmw_sid_cmd {
SVGA3dCmdHeader header;
SVGA3dCmdSurfaceStretchBlt body;
} *cmd;
int ret;
cmd = container_of(header, struct vmw_sid_cmd, header);
ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
user_surface_converter,
&cmd->body.src.sid, NULL);
if (unlikely(ret != 0))
return ret;
return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
user_surface_converter,
&cmd->body.dest.sid, NULL);
}
static int vmw_cmd_blt_surf_screen_check(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
SVGA3dCmdHeader *header)
{
struct vmw_sid_cmd {
SVGA3dCmdHeader header;
SVGA3dCmdBlitSurfaceToScreen body;
} *cmd;
cmd = container_of(header, struct vmw_sid_cmd, header);
if (unlikely(!sw_context->kernel)) {
DRM_ERROR("Kernel only SVGA3d command: %u.\n", cmd->header.id);
return -EPERM;
}
return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
user_surface_converter,
&cmd->body.srcImage.sid, NULL);
}
static int vmw_cmd_present_check(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
SVGA3dCmdHeader *header)
{
struct vmw_sid_cmd {
SVGA3dCmdHeader header;
SVGA3dCmdPresent body;
} *cmd;
cmd = container_of(header, struct vmw_sid_cmd, header);
if (unlikely(!sw_context->kernel)) {
DRM_ERROR("Kernel only SVGA3d command: %u.\n", cmd->header.id);
return -EPERM;
}
return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
user_surface_converter, &cmd->body.sid,
NULL);
}
/**
* vmw_query_bo_switch_prepare - Prepare to switch pinned buffer for queries.
*
* @dev_priv: The device private structure.
* @new_query_bo: The new buffer holding query results.
* @sw_context: The software context used for this command submission.
*
* This function checks whether @new_query_bo is suitable for holding
* query results, and if another buffer currently is pinned for query
* results. If so, the function prepares the state of @sw_context for
* switching pinned buffers after successful submission of the current
* command batch.
*/
static int vmw_query_bo_switch_prepare(struct vmw_private *dev_priv,
struct ttm_buffer_object *new_query_bo,
struct vmw_sw_context *sw_context)
{
struct vmw_res_cache_entry *ctx_entry =
&sw_context->res_cache[vmw_res_context];
int ret;
BUG_ON(!ctx_entry->valid);
sw_context->last_query_ctx = ctx_entry->res;
if (unlikely(new_query_bo != sw_context->cur_query_bo)) {
if (unlikely(new_query_bo->num_pages > 4)) {
DRM_ERROR("Query buffer too large.\n");
return -EINVAL;
}
if (unlikely(sw_context->cur_query_bo != NULL)) {
sw_context->needs_post_query_barrier = true;
ret = vmw_bo_to_validate_list(sw_context,
sw_context->cur_query_bo,
NULL);
if (unlikely(ret != 0))
return ret;
}
sw_context->cur_query_bo = new_query_bo;
ret = vmw_bo_to_validate_list(sw_context,
dev_priv->dummy_query_bo,
NULL);
if (unlikely(ret != 0))
return ret;
}
return 0;
}
/**
* vmw_query_bo_switch_commit - Finalize switching pinned query buffer
*
* @dev_priv: The device private structure.
* @sw_context: The software context used for this command submission batch.
*
* This function will check if we're switching query buffers, and will then,
* issue a dummy occlusion query wait used as a query barrier. When the fence
* object following that query wait has signaled, we are sure that all
* preceding queries have finished, and the old query buffer can be unpinned.
* However, since both the new query buffer and the old one are fenced with
* that fence, we can do an asynchronous unpin now, and be sure that the
* old query buffer won't be moved until the fence has signaled.
*
* As mentioned above, both the new and old query buffers need to be fenced
* using a sequence emitted *after* calling this function.
*/
static void vmw_query_bo_switch_commit(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context)
{
/*
* The validate list should still hold references to all
* contexts here.
*/
if (sw_context->needs_post_query_barrier) {
struct vmw_res_cache_entry *ctx_entry =
&sw_context->res_cache[vmw_res_context];
struct vmw_resource *ctx;
int ret;
BUG_ON(!ctx_entry->valid);
ctx = ctx_entry->res;
ret = vmw_fifo_emit_dummy_query(dev_priv, ctx->id);
if (unlikely(ret != 0))
DRM_ERROR("Out of fifo space for dummy query.\n");
}
if (dev_priv->pinned_bo != sw_context->cur_query_bo) {
if (dev_priv->pinned_bo) {
vmw_bo_pin(dev_priv->pinned_bo, false);
ttm_bo_unref(&dev_priv->pinned_bo);
}
if (!sw_context->needs_post_query_barrier) {
vmw_bo_pin(sw_context->cur_query_bo, true);
/*
* We pin also the dummy_query_bo buffer so that we
* don't need to validate it when emitting
* dummy queries in context destroy paths.
*/
vmw_bo_pin(dev_priv->dummy_query_bo, true);
dev_priv->dummy_query_bo_pinned = true;
BUG_ON(sw_context->last_query_ctx == NULL);
dev_priv->query_cid = sw_context->last_query_ctx->id;
dev_priv->query_cid_valid = true;
dev_priv->pinned_bo =
ttm_bo_reference(sw_context->cur_query_bo);
}
}
}
/**
* vmw_translate_guest_pointer - Prepare to translate a user-space buffer
* handle to a valid SVGAGuestPtr
*
* @dev_priv: Pointer to a device private structure.
* @sw_context: The software context used for this command batch validation.
* @ptr: Pointer to the user-space handle to be translated.
* @vmw_bo_p: Points to a location that, on successful return will carry
* a reference-counted pointer to the DMA buffer identified by the
* user-space handle in @ptr.
*
* This function saves information needed to translate a user-space buffer
* handle to a valid SVGAGuestPtr. The translation does not take place
* immediately, but during a call to vmw_apply_relocations().
* This function builds a relocation list and a list of buffers to validate.
* The former needs to be freed using either vmw_apply_relocations() or
* vmw_free_relocations(). The latter needs to be freed using
* vmw_clear_validations().
*/
static int vmw_translate_guest_ptr(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
SVGAGuestPtr *ptr,
struct vmw_dma_buffer **vmw_bo_p)
{
struct vmw_dma_buffer *vmw_bo = NULL;
struct ttm_buffer_object *bo;
uint32_t handle = ptr->gmrId;
struct vmw_relocation *reloc;
int ret;
ret = vmw_user_dmabuf_lookup(sw_context->tfile, handle, &vmw_bo);
if (unlikely(ret != 0)) {
DRM_ERROR("Could not find or use GMR region.\n");
return -EINVAL;
}
bo = &vmw_bo->base;
if (unlikely(sw_context->cur_reloc >= VMWGFX_MAX_RELOCATIONS)) {
DRM_ERROR("Max number relocations per submission"
" exceeded\n");
ret = -EINVAL;
goto out_no_reloc;
}
reloc = &sw_context->relocs[sw_context->cur_reloc++];
reloc->location = ptr;
ret = vmw_bo_to_validate_list(sw_context, bo, &reloc->index);
if (unlikely(ret != 0))
goto out_no_reloc;
*vmw_bo_p = vmw_bo;
return 0;
out_no_reloc:
vmw_dmabuf_unreference(&vmw_bo);
*vmw_bo_p = NULL;
return ret;
}
/**
* vmw_cmd_begin_query - validate a SVGA_3D_CMD_BEGIN_QUERY command.
*
* @dev_priv: Pointer to a device private struct.
* @sw_context: The software context used for this command submission.
* @header: Pointer to the command header in the command stream.
*/
static int vmw_cmd_begin_query(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
SVGA3dCmdHeader *header)
{
struct vmw_begin_query_cmd {
SVGA3dCmdHeader header;
SVGA3dCmdBeginQuery q;
} *cmd;
cmd = container_of(header, struct vmw_begin_query_cmd,
header);
return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
user_context_converter, &cmd->q.cid,
NULL);
}
/**
* vmw_cmd_end_query - validate a SVGA_3D_CMD_END_QUERY command.
*
* @dev_priv: Pointer to a device private struct.
* @sw_context: The software context used for this command submission.
* @header: Pointer to the command header in the command stream.
*/
static int vmw_cmd_end_query(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
SVGA3dCmdHeader *header)
{
struct vmw_dma_buffer *vmw_bo;
struct vmw_query_cmd {
SVGA3dCmdHeader header;
SVGA3dCmdEndQuery q;
} *cmd;
int ret;
cmd = container_of(header, struct vmw_query_cmd, header);
ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
if (unlikely(ret != 0))
return ret;
ret = vmw_translate_guest_ptr(dev_priv, sw_context,
&cmd->q.guestResult,
&vmw_bo);
if (unlikely(ret != 0))
return ret;
ret = vmw_query_bo_switch_prepare(dev_priv, &vmw_bo->base, sw_context);
vmw_dmabuf_unreference(&vmw_bo);
return ret;
}
/*
* vmw_cmd_wait_query - validate a SVGA_3D_CMD_WAIT_QUERY command.
*
* @dev_priv: Pointer to a device private struct.
* @sw_context: The software context used for this command submission.
* @header: Pointer to the command header in the command stream.
*/
static int vmw_cmd_wait_query(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
SVGA3dCmdHeader *header)
{
struct vmw_dma_buffer *vmw_bo;
struct vmw_query_cmd {
SVGA3dCmdHeader header;
SVGA3dCmdWaitForQuery q;
} *cmd;
int ret;
cmd = container_of(header, struct vmw_query_cmd, header);
ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
if (unlikely(ret != 0))
return ret;
ret = vmw_translate_guest_ptr(dev_priv, sw_context,
&cmd->q.guestResult,
&vmw_bo);
if (unlikely(ret != 0))
return ret;
vmw_dmabuf_unreference(&vmw_bo);
return 0;
}
static int vmw_cmd_dma(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
SVGA3dCmdHeader *header)
{
struct vmw_dma_buffer *vmw_bo = NULL;
struct vmw_surface *srf = NULL;
struct vmw_dma_cmd {
SVGA3dCmdHeader header;
SVGA3dCmdSurfaceDMA dma;
} *cmd;
int ret;
cmd = container_of(header, struct vmw_dma_cmd, header);
ret = vmw_translate_guest_ptr(dev_priv, sw_context,
&cmd->dma.guest.ptr,
&vmw_bo);
if (unlikely(ret != 0))
return ret;
ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
user_surface_converter, &cmd->dma.host.sid,
NULL);
if (unlikely(ret != 0)) {
if (unlikely(ret != -ERESTARTSYS))
DRM_ERROR("could not find surface for DMA.\n");
goto out_no_surface;
}
srf = vmw_res_to_srf(sw_context->res_cache[vmw_res_surface].res);
vmw_kms_cursor_snoop(srf, sw_context->tfile, &vmw_bo->base, header);
out_no_surface:
vmw_dmabuf_unreference(&vmw_bo);
return ret;
}
static int vmw_cmd_draw(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
SVGA3dCmdHeader *header)
{
struct vmw_draw_cmd {
SVGA3dCmdHeader header;
SVGA3dCmdDrawPrimitives body;
} *cmd;
SVGA3dVertexDecl *decl = (SVGA3dVertexDecl *)(
(unsigned long)header + sizeof(*cmd));
SVGA3dPrimitiveRange *range;
uint32_t i;
uint32_t maxnum;
int ret;
ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
if (unlikely(ret != 0))
return ret;
cmd = container_of(header, struct vmw_draw_cmd, header);
maxnum = (header->size - sizeof(cmd->body)) / sizeof(*decl);
if (unlikely(cmd->body.numVertexDecls > maxnum)) {
DRM_ERROR("Illegal number of vertex declarations.\n");
return -EINVAL;
}
for (i = 0; i < cmd->body.numVertexDecls; ++i, ++decl) {
ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
user_surface_converter,
&decl->array.surfaceId, NULL);
if (unlikely(ret != 0))
return ret;
}
maxnum = (header->size - sizeof(cmd->body) -
cmd->body.numVertexDecls * sizeof(*decl)) / sizeof(*range);
if (unlikely(cmd->body.numRanges > maxnum)) {
DRM_ERROR("Illegal number of index ranges.\n");
return -EINVAL;
}
range = (SVGA3dPrimitiveRange *) decl;
for (i = 0; i < cmd->body.numRanges; ++i, ++range) {
ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
user_surface_converter,
&range->indexArray.surfaceId, NULL);
if (unlikely(ret != 0))
return ret;
}
return 0;
}
static int vmw_cmd_tex_state(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
SVGA3dCmdHeader *header)
{
struct vmw_tex_state_cmd {
SVGA3dCmdHeader header;
SVGA3dCmdSetTextureState state;
};
SVGA3dTextureState *last_state = (SVGA3dTextureState *)
((unsigned long) header + header->size + sizeof(header));
SVGA3dTextureState *cur_state = (SVGA3dTextureState *)
((unsigned long) header + sizeof(struct vmw_tex_state_cmd));
int ret;
ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
if (unlikely(ret != 0))
return ret;
for (; cur_state < last_state; ++cur_state) {
if (likely(cur_state->name != SVGA3D_TS_BIND_TEXTURE))
continue;
ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
user_surface_converter,
&cur_state->value, NULL);
if (unlikely(ret != 0))
return ret;
}
return 0;
}
static int vmw_cmd_check_define_gmrfb(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
void *buf)
{
struct vmw_dma_buffer *vmw_bo;
int ret;
struct {
uint32_t header;
SVGAFifoCmdDefineGMRFB body;
} *cmd = buf;
ret = vmw_translate_guest_ptr(dev_priv, sw_context,
&cmd->body.ptr,
&vmw_bo);
if (unlikely(ret != 0))
return ret;
vmw_dmabuf_unreference(&vmw_bo);
return ret;
}
/**
* vmw_cmd_set_shader - Validate an SVGA_3D_CMD_SET_SHADER
* command
*
* @dev_priv: Pointer to a device private struct.
* @sw_context: The software context being used for this batch.
* @header: Pointer to the command header in the command stream.
*/
static int vmw_cmd_set_shader(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
SVGA3dCmdHeader *header)
{
struct vmw_set_shader_cmd {
SVGA3dCmdHeader header;
SVGA3dCmdSetShader body;
} *cmd;
int ret;
cmd = container_of(header, struct vmw_set_shader_cmd,
header);
ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
if (unlikely(ret != 0))
return ret;
return 0;
}
static int vmw_cmd_check_not_3d(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
void *buf, uint32_t *size)
{
uint32_t size_remaining = *size;
uint32_t cmd_id;
cmd_id = le32_to_cpu(((uint32_t *)buf)[0]);
switch (cmd_id) {
case SVGA_CMD_UPDATE:
*size = sizeof(uint32_t) + sizeof(SVGAFifoCmdUpdate);
break;
case SVGA_CMD_DEFINE_GMRFB:
*size = sizeof(uint32_t) + sizeof(SVGAFifoCmdDefineGMRFB);
break;
case SVGA_CMD_BLIT_GMRFB_TO_SCREEN:
*size = sizeof(uint32_t) + sizeof(SVGAFifoCmdBlitGMRFBToScreen);
break;
case SVGA_CMD_BLIT_SCREEN_TO_GMRFB:
*size = sizeof(uint32_t) + sizeof(SVGAFifoCmdBlitScreenToGMRFB);
break;
default:
DRM_ERROR("Unsupported SVGA command: %u.\n", cmd_id);
return -EINVAL;
}
if (*size > size_remaining) {
DRM_ERROR("Invalid SVGA command (size mismatch):"
" %u.\n", cmd_id);
return -EINVAL;
}
if (unlikely(!sw_context->kernel)) {
DRM_ERROR("Kernel only SVGA command: %u.\n", cmd_id);
return -EPERM;
}
if (cmd_id == SVGA_CMD_DEFINE_GMRFB)
return vmw_cmd_check_define_gmrfb(dev_priv, sw_context, buf);
return 0;
}
typedef int (*vmw_cmd_func) (struct vmw_private *,
struct vmw_sw_context *,
SVGA3dCmdHeader *);
#define VMW_CMD_DEF(cmd, func) \
[cmd - SVGA_3D_CMD_BASE] = func
static vmw_cmd_func vmw_cmd_funcs[SVGA_3D_CMD_MAX] = {
VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DEFINE, &vmw_cmd_invalid),
VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DESTROY, &vmw_cmd_invalid),
VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_COPY, &vmw_cmd_surface_copy_check),
VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_STRETCHBLT, &vmw_cmd_stretch_blt_check),
VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DMA, &vmw_cmd_dma),
VMW_CMD_DEF(SVGA_3D_CMD_CONTEXT_DEFINE, &vmw_cmd_invalid),
VMW_CMD_DEF(SVGA_3D_CMD_CONTEXT_DESTROY, &vmw_cmd_invalid),
VMW_CMD_DEF(SVGA_3D_CMD_SETTRANSFORM, &vmw_cmd_cid_check),
VMW_CMD_DEF(SVGA_3D_CMD_SETZRANGE, &vmw_cmd_cid_check),
VMW_CMD_DEF(SVGA_3D_CMD_SETRENDERSTATE, &vmw_cmd_cid_check),
VMW_CMD_DEF(SVGA_3D_CMD_SETRENDERTARGET,
&vmw_cmd_set_render_target_check),
VMW_CMD_DEF(SVGA_3D_CMD_SETTEXTURESTATE, &vmw_cmd_tex_state),
VMW_CMD_DEF(SVGA_3D_CMD_SETMATERIAL, &vmw_cmd_cid_check),
VMW_CMD_DEF(SVGA_3D_CMD_SETLIGHTDATA, &vmw_cmd_cid_check),
VMW_CMD_DEF(SVGA_3D_CMD_SETLIGHTENABLED, &vmw_cmd_cid_check),
VMW_CMD_DEF(SVGA_3D_CMD_SETVIEWPORT, &vmw_cmd_cid_check),
VMW_CMD_DEF(SVGA_3D_CMD_SETCLIPPLANE, &vmw_cmd_cid_check),
VMW_CMD_DEF(SVGA_3D_CMD_CLEAR, &vmw_cmd_cid_check),
VMW_CMD_DEF(SVGA_3D_CMD_PRESENT, &vmw_cmd_present_check),
VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DEFINE, &vmw_cmd_cid_check),
VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DESTROY, &vmw_cmd_cid_check),
VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER, &vmw_cmd_set_shader),
VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER_CONST, &vmw_cmd_cid_check),
VMW_CMD_DEF(SVGA_3D_CMD_DRAW_PRIMITIVES, &vmw_cmd_draw),
VMW_CMD_DEF(SVGA_3D_CMD_SETSCISSORRECT, &vmw_cmd_cid_check),
VMW_CMD_DEF(SVGA_3D_CMD_BEGIN_QUERY, &vmw_cmd_begin_query),
VMW_CMD_DEF(SVGA_3D_CMD_END_QUERY, &vmw_cmd_end_query),
VMW_CMD_DEF(SVGA_3D_CMD_WAIT_FOR_QUERY, &vmw_cmd_wait_query),
VMW_CMD_DEF(SVGA_3D_CMD_PRESENT_READBACK, &vmw_cmd_ok),
VMW_CMD_DEF(SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN,
&vmw_cmd_blt_surf_screen_check),
VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DEFINE_V2, &vmw_cmd_invalid),
VMW_CMD_DEF(SVGA_3D_CMD_GENERATE_MIPMAPS, &vmw_cmd_invalid),
VMW_CMD_DEF(SVGA_3D_CMD_ACTIVATE_SURFACE, &vmw_cmd_invalid),
VMW_CMD_DEF(SVGA_3D_CMD_DEACTIVATE_SURFACE, &vmw_cmd_invalid),
};
static int vmw_cmd_check(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
void *buf, uint32_t *size)
{
uint32_t cmd_id;
uint32_t size_remaining = *size;
SVGA3dCmdHeader *header = (SVGA3dCmdHeader *) buf;
int ret;
cmd_id = le32_to_cpu(((uint32_t *)buf)[0]);
/* Handle any non-3D commands */
if (unlikely(cmd_id < SVGA_CMD_MAX))
return vmw_cmd_check_not_3d(dev_priv, sw_context, buf, size);
cmd_id = le32_to_cpu(header->id);
*size = le32_to_cpu(header->size) + sizeof(SVGA3dCmdHeader);
cmd_id -= SVGA_3D_CMD_BASE;
if (unlikely(*size > size_remaining))
goto out_err;
if (unlikely(cmd_id >= SVGA_3D_CMD_MAX - SVGA_3D_CMD_BASE))
goto out_err;
ret = vmw_cmd_funcs[cmd_id](dev_priv, sw_context, header);
if (unlikely(ret != 0))
goto out_err;
return 0;
out_err:
DRM_ERROR("Illegal / Invalid SVGA3D command: %d\n",
cmd_id + SVGA_3D_CMD_BASE);
return -EINVAL;
}
static int vmw_cmd_check_all(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
void *buf,
uint32_t size)
{
int32_t cur_size = size;
int ret;
sw_context->buf_start = buf;
while (cur_size > 0) {
size = cur_size;
ret = vmw_cmd_check(dev_priv, sw_context, buf, &size);
if (unlikely(ret != 0))
return ret;
buf = (void *)((unsigned long) buf + size);
cur_size -= size;
}
if (unlikely(cur_size != 0)) {
DRM_ERROR("Command verifier out of sync.\n");
return -EINVAL;
}
return 0;
}
static void vmw_free_relocations(struct vmw_sw_context *sw_context)
{
sw_context->cur_reloc = 0;
}
static void vmw_apply_relocations(struct vmw_sw_context *sw_context)
{
uint32_t i;
struct vmw_relocation *reloc;
struct ttm_validate_buffer *validate;
struct ttm_buffer_object *bo;
for (i = 0; i < sw_context->cur_reloc; ++i) {
reloc = &sw_context->relocs[i];
validate = &sw_context->val_bufs[reloc->index].base;
bo = validate->bo;
switch (bo->mem.mem_type) {
case TTM_PL_VRAM:
reloc->location->offset += bo->offset;
reloc->location->gmrId = SVGA_GMR_FRAMEBUFFER;
break;
case VMW_PL_GMR:
reloc->location->gmrId = bo->mem.start;
break;
default:
BUG();
}
}
vmw_free_relocations(sw_context);
}
/**
* vmw_resource_list_unreference - Free up a resource list and unreference
* all resources referenced by it.
*
* @list: The resource list.
*/
static void vmw_resource_list_unreference(struct list_head *list)
{
struct vmw_resource_val_node *val, *val_next;
/*
* Drop references to resources held during command submission.
*/
list_for_each_entry_safe(val, val_next, list, head) {
list_del_init(&val->head);
vmw_resource_unreference(&val->res);
kfree(val);
}
}
static void vmw_clear_validations(struct vmw_sw_context *sw_context)
{
struct vmw_validate_buffer *entry, *next;
struct vmw_resource_val_node *val;
/*
* Drop references to DMA buffers held during command submission.
*/
list_for_each_entry_safe(entry, next, &sw_context->validate_nodes,
base.head) {
list_del(&entry->base.head);
ttm_bo_unref(&entry->base.bo);
(void) drm_ht_remove_item(&sw_context->res_ht, &entry->hash);
sw_context->cur_val_buf--;
}
BUG_ON(sw_context->cur_val_buf != 0);
list_for_each_entry(val, &sw_context->resource_list, head)
(void) drm_ht_remove_item(&sw_context->res_ht, &val->hash);
}
static int vmw_validate_single_buffer(struct vmw_private *dev_priv,
struct ttm_buffer_object *bo)
{
int ret;
/*
* Don't validate pinned buffers.
*/
if (bo == dev_priv->pinned_bo ||
(bo == dev_priv->dummy_query_bo &&
dev_priv->dummy_query_bo_pinned))
return 0;
/**
* Put BO in VRAM if there is space, otherwise as a GMR.
* If there is no space in VRAM and GMR ids are all used up,
* start evicting GMRs to make room. If the DMA buffer can't be
* used as a GMR, this will return -ENOMEM.
*/
ret = ttm_bo_validate(bo, &vmw_vram_gmr_placement, true, false);
if (likely(ret == 0 || ret == -ERESTARTSYS))
return ret;
/**
* If that failed, try VRAM again, this time evicting
* previous contents.
*/
DRM_INFO("Falling through to VRAM.\n");
ret = ttm_bo_validate(bo, &vmw_vram_placement, true, false);
return ret;
}
static int vmw_validate_buffers(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context)
{
struct vmw_validate_buffer *entry;
int ret;
list_for_each_entry(entry, &sw_context->validate_nodes, base.head) {
ret = vmw_validate_single_buffer(dev_priv, entry->base.bo);
if (unlikely(ret != 0))
return ret;
}
return 0;
}
static int vmw_resize_cmd_bounce(struct vmw_sw_context *sw_context,
uint32_t size)
{
if (likely(sw_context->cmd_bounce_size >= size))
return 0;
if (sw_context->cmd_bounce_size == 0)
sw_context->cmd_bounce_size = VMWGFX_CMD_BOUNCE_INIT_SIZE;
while (sw_context->cmd_bounce_size < size) {
sw_context->cmd_bounce_size =
PAGE_ALIGN(sw_context->cmd_bounce_size +
(sw_context->cmd_bounce_size >> 1));
}
if (sw_context->cmd_bounce != NULL)
vfree(sw_context->cmd_bounce);
sw_context->cmd_bounce = vmalloc(sw_context->cmd_bounce_size);
if (sw_context->cmd_bounce == NULL) {
DRM_ERROR("Failed to allocate command bounce buffer.\n");
sw_context->cmd_bounce_size = 0;
return -ENOMEM;
}
return 0;
}
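/*
 * A sketch of the growth policy above, under the assumption of 4 KiB
 * pages and a hypothetical 32K initial size: the bounce buffer grows by
 * roughly 1.5x and is page-aligned at each step, so a stream of slowly
 * growing command buffers triggers only O(log(size)) reallocations.
 * Illustrative helper, not driver code.
 */
static uint32_t vmw_cmd_bounce_size_for(uint32_t cur_size, uint32_t size)
{
if (cur_size == 0)
cur_size = VMWGFX_CMD_BOUNCE_INIT_SIZE;
while (cur_size < size)
cur_size = PAGE_ALIGN(cur_size + (cur_size >> 1));
return cur_size; /* e.g. 32K -> 48K -> 72K -> 108K ... */
}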
/**
* vmw_execbuf_fence_commands - create and submit a command stream fence
*
* Creates a fence object and submits a command stream marker.
* If this fails for some reason, we sync the fifo and return NULL.
* It is then safe to fence buffers with a NULL pointer.
*
* If @p_handle is not NULL @file_priv must also not be NULL. Creates
* a userspace handle if @p_handle is not NULL, otherwise not.
*/
int vmw_execbuf_fence_commands(struct drm_file *file_priv,
struct vmw_private *dev_priv,
struct vmw_fence_obj **p_fence,
uint32_t *p_handle)
{
uint32_t sequence;
int ret;
bool synced = false;
/* p_handle implies file_priv. */
BUG_ON(p_handle != NULL && file_priv == NULL);
ret = vmw_fifo_send_fence(dev_priv, &sequence);
if (unlikely(ret != 0)) {
DRM_ERROR("Fence submission error. Syncing.\n");
synced = true;
}
if (p_handle != NULL)
ret = vmw_user_fence_create(file_priv, dev_priv->fman,
sequence,
DRM_VMW_FENCE_FLAG_EXEC,
p_fence, p_handle);
else
ret = vmw_fence_create(dev_priv->fman, sequence,
DRM_VMW_FENCE_FLAG_EXEC,
p_fence);
if (unlikely(ret != 0 && !synced)) {
(void) vmw_fallback_wait(dev_priv, false, false,
sequence, false,
VMW_FENCE_WAIT_TIMEOUT);
*p_fence = NULL;
}
return 0;
}
/**
* vmw_execbuf_copy_fence_user - copy fence object information to
* user-space.
*
* @dev_priv: Pointer to a vmw_private struct.
* @vmw_fp: Pointer to the struct vmw_fpriv representing the calling file.
* @ret: Return value from fence object creation.
* @user_fence_rep: User space address of a struct drm_vmw_fence_rep to
* which the information should be copied.
* @fence: Pointer to the fence object.
* @fence_handle: User-space fence handle.
*
* This function copies fence information to user-space. If copying fails,
* The user-space struct drm_vmw_fence_rep::error member is hopefully
* left untouched, and if it's preloaded with an -EFAULT by user-space,
* the error will hopefully be detected.
* Also if copying fails, user-space will be unable to signal the fence
* object so we wait for it immediately, and then unreference the
* user-space reference.
*/
void
vmw_execbuf_copy_fence_user(struct vmw_private *dev_priv,
struct vmw_fpriv *vmw_fp,
int ret,
struct drm_vmw_fence_rep __user *user_fence_rep,
struct vmw_fence_obj *fence,
uint32_t fence_handle)
{
struct drm_vmw_fence_rep fence_rep;
if (user_fence_rep == NULL)
return;
memset(&fence_rep, 0, sizeof(fence_rep));
fence_rep.error = ret;
if (ret == 0) {
BUG_ON(fence == NULL);
fence_rep.handle = fence_handle;
fence_rep.seqno = fence->seqno;
vmw_update_seqno(dev_priv, &dev_priv->fifo);
fence_rep.passed_seqno = dev_priv->last_read_seqno;
}
/*
* copy_to_user errors will be detected by user space not
* seeing fence_rep::error filled in. Typically
* user-space would have pre-set that member to -EFAULT.
*/
ret = copy_to_user(user_fence_rep, &fence_rep,
sizeof(fence_rep));
/*
* User-space lost the fence object. We need to sync
* and unreference the handle.
*/
if (unlikely(ret != 0) && (fence_rep.error == 0)) {
ttm_ref_object_base_unref(vmw_fp->tfile,
fence_handle, TTM_REF_USAGE);
DRM_ERROR("Fence copy error. Syncing.\n");
(void) vmw_fence_obj_wait(fence, fence->signal_mask,
false, false,
VMW_FENCE_WAIT_TIMEOUT);
}
}
int vmw_execbuf_process(struct drm_file *file_priv,
struct vmw_private *dev_priv,
void __user *user_commands,
void *kernel_commands,
uint32_t command_size,
uint64_t throttle_us,
struct drm_vmw_fence_rep __user *user_fence_rep,
struct vmw_fence_obj **out_fence)
{
struct vmw_sw_context *sw_context = &dev_priv->ctx;
struct vmw_fence_obj *fence = NULL;
struct vmw_resource *error_resource;
struct list_head resource_list;
struct ww_acquire_ctx ticket;
uint32_t handle;
void *cmd;
int ret;
ret = mutex_lock_interruptible(&dev_priv->cmdbuf_mutex);
if (unlikely(ret != 0))
return -ERESTARTSYS;
if (kernel_commands == NULL) {
sw_context->kernel = false;
ret = vmw_resize_cmd_bounce(sw_context, command_size);
if (unlikely(ret != 0))
goto out_unlock;
ret = copy_from_user(sw_context->cmd_bounce,
user_commands, command_size);
if (unlikely(ret != 0)) {
ret = -EFAULT;
DRM_ERROR("Failed copying commands.\n");
goto out_unlock;
}
kernel_commands = sw_context->cmd_bounce;
} else
sw_context->kernel = true;
sw_context->tfile = vmw_fpriv(file_priv)->tfile;
sw_context->cur_reloc = 0;
sw_context->cur_val_buf = 0;
sw_context->fence_flags = 0;
INIT_LIST_HEAD(&sw_context->resource_list);
sw_context->cur_query_bo = dev_priv->pinned_bo;
sw_context->last_query_ctx = NULL;
sw_context->needs_post_query_barrier = false;
memset(sw_context->res_cache, 0, sizeof(sw_context->res_cache));
INIT_LIST_HEAD(&sw_context->validate_nodes);
INIT_LIST_HEAD(&sw_context->res_relocations);
if (!sw_context->res_ht_initialized) {
ret = drm_ht_create(&sw_context->res_ht, VMW_RES_HT_ORDER);
if (unlikely(ret != 0))
goto out_unlock;
sw_context->res_ht_initialized = true;
}
INIT_LIST_HEAD(&resource_list);
ret = vmw_cmd_check_all(dev_priv, sw_context, kernel_commands,
command_size);
if (unlikely(ret != 0))
goto out_err;
ret = vmw_resources_reserve(sw_context);
if (unlikely(ret != 0))
goto out_err;
ret = ttm_eu_reserve_buffers(&ticket, &sw_context->validate_nodes);
if (unlikely(ret != 0))
goto out_err;
ret = vmw_validate_buffers(dev_priv, sw_context);
if (unlikely(ret != 0))
goto out_err;
ret = vmw_resources_validate(sw_context);
if (unlikely(ret != 0))
goto out_err;
if (throttle_us) {
ret = vmw_wait_lag(dev_priv, &dev_priv->fifo.marker_queue,
throttle_us);
if (unlikely(ret != 0))
goto out_err;
}
cmd = vmw_fifo_reserve(dev_priv, command_size);
if (unlikely(cmd == NULL)) {
DRM_ERROR("Failed reserving fifo space for commands.\n");
ret = -ENOMEM;
goto out_err;
}
vmw_apply_relocations(sw_context);
memcpy(cmd, kernel_commands, command_size);
vmw_resource_relocations_apply(cmd, &sw_context->res_relocations);
vmw_resource_relocations_free(&sw_context->res_relocations);
vmw_fifo_commit(dev_priv, command_size);
vmw_query_bo_switch_commit(dev_priv, sw_context);
ret = vmw_execbuf_fence_commands(file_priv, dev_priv,
&fence,
(user_fence_rep) ? &handle : NULL);
/*
* This error is harmless, because if fence submission fails,
* vmw_fifo_send_fence will sync. The error will be propagated to
* user-space in @fence_rep
*/
if (ret != 0)
DRM_ERROR("Fence submission error. Syncing.\n");
vmw_resource_list_unreserve(&sw_context->resource_list, false);
ttm_eu_fence_buffer_objects(&ticket, &sw_context->validate_nodes,
(void *) fence);
if (unlikely(dev_priv->pinned_bo != NULL &&
!dev_priv->query_cid_valid))
__vmw_execbuf_release_pinned_bo(dev_priv, fence);
vmw_clear_validations(sw_context);
vmw_execbuf_copy_fence_user(dev_priv, vmw_fpriv(file_priv), ret,
user_fence_rep, fence, handle);
/* Don't unreference when handing fence out */
if (unlikely(out_fence != NULL)) {
*out_fence = fence;
fence = NULL;
} else if (likely(fence != NULL)) {
vmw_fence_obj_unreference(&fence);
}
list_splice_init(&sw_context->resource_list, &resource_list);
mutex_unlock(&dev_priv->cmdbuf_mutex);
/*
* Unreference resources outside of the cmdbuf_mutex to
* avoid deadlocks in resource destruction paths.
*/
vmw_resource_list_unreference(&resource_list);
return 0;
out_err:
vmw_resource_relocations_free(&sw_context->res_relocations);
vmw_free_relocations(sw_context);
ttm_eu_backoff_reservation(&ticket, &sw_context->validate_nodes);
vmw_resource_list_unreserve(&sw_context->resource_list, true);
vmw_clear_validations(sw_context);
if (unlikely(dev_priv->pinned_bo != NULL &&
!dev_priv->query_cid_valid))
__vmw_execbuf_release_pinned_bo(dev_priv, NULL);
out_unlock:
list_splice_init(&sw_context->resource_list, &resource_list);
error_resource = sw_context->error_resource;
sw_context->error_resource = NULL;
mutex_unlock(&dev_priv->cmdbuf_mutex);
/*
* Unreference resources outside of the cmdbuf_mutex to
* avoid deadlocks in resource destruction paths.
*/
vmw_resource_list_unreference(&resource_list);
if (unlikely(error_resource != NULL))
vmw_resource_unreference(&error_resource);
return ret;
}
/**
* vmw_execbuf_unpin_panic - Idle the fifo and unpin the query buffer.
*
* @dev_priv: The device private structure.
*
* This function is called to idle the fifo and unpin the query buffer
* if the normal way to do this hits an error, which should typically be
* extremely rare.
*/
static void vmw_execbuf_unpin_panic(struct vmw_private *dev_priv)
{
DRM_ERROR("Can't unpin query buffer. Trying to recover.\n");
(void) vmw_fallback_wait(dev_priv, false, true, 0, false, 10*HZ);
vmw_bo_pin(dev_priv->pinned_bo, false);
vmw_bo_pin(dev_priv->dummy_query_bo, false);
dev_priv->dummy_query_bo_pinned = false;
}
/**
* __vmw_execbuf_release_pinned_bo - Flush queries and unpin the pinned
* query bo.
*
* @dev_priv: The device private structure.
* @fence: If non-NULL should point to a struct vmw_fence_obj issued
* _after_ a query barrier that flushes all queries touching the current
* buffer pointed to by @dev_priv->pinned_bo
*
* This function should be used to unpin the pinned query bo, or
* as a query barrier when we need to make sure that all queries have
* finished before the next fifo command. (For example on hardware
* context destructions where the hardware may otherwise leak unfinished
* queries).
*
* This function does not return any failure codes, but makes attempts
* to do safe unpinning in case of errors.
*
* The function will synchronize on the previous query barrier, and will
* thus not finish until that barrier has executed.
*
* The @dev_priv->cmdbuf_mutex needs to be held by the current thread
* before calling this function.
*/
void __vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv,
struct vmw_fence_obj *fence)
{
int ret = 0;
struct list_head validate_list;
struct ttm_validate_buffer pinned_val, query_val;
struct vmw_fence_obj *lfence = NULL;
struct ww_acquire_ctx ticket;
if (dev_priv->pinned_bo == NULL)
goto out_unlock;
INIT_LIST_HEAD(&validate_list);
pinned_val.bo = ttm_bo_reference(dev_priv->pinned_bo);
list_add_tail(&pinned_val.head, &validate_list);
query_val.bo = ttm_bo_reference(dev_priv->dummy_query_bo);
list_add_tail(&query_val.head, &validate_list);
do {
ret = ttm_eu_reserve_buffers(&ticket, &validate_list);
} while (ret == -ERESTARTSYS);
if (unlikely(ret != 0)) {
vmw_execbuf_unpin_panic(dev_priv);
goto out_no_reserve;
}
if (dev_priv->query_cid_valid) {
BUG_ON(fence != NULL);
ret = vmw_fifo_emit_dummy_query(dev_priv, dev_priv->query_cid);
if (unlikely(ret != 0)) {
vmw_execbuf_unpin_panic(dev_priv);
goto out_no_emit;
}
dev_priv->query_cid_valid = false;
}
vmw_bo_pin(dev_priv->pinned_bo, false);
vmw_bo_pin(dev_priv->dummy_query_bo, false);
dev_priv->dummy_query_bo_pinned = false;
if (fence == NULL) {
(void) vmw_execbuf_fence_commands(NULL, dev_priv, &lfence,
NULL);
fence = lfence;
}
ttm_eu_fence_buffer_objects(&ticket, &validate_list, (void *) fence);
if (lfence != NULL)
vmw_fence_obj_unreference(&lfence);
ttm_bo_unref(&query_val.bo);
ttm_bo_unref(&pinned_val.bo);
ttm_bo_unref(&dev_priv->pinned_bo);
out_unlock:
return;
out_no_emit:
ttm_eu_backoff_reservation(&ticket, &validate_list);
out_no_reserve:
ttm_bo_unref(&query_val.bo);
ttm_bo_unref(&pinned_val.bo);
ttm_bo_unref(&dev_priv->pinned_bo);
}
/**
* vmw_execbuf_release_pinned_bo - Flush queries and unpin the pinned
* query bo.
*
* @dev_priv: The device private structure.
*
* This function should be used to unpin the pinned query bo, or
* as a query barrier when we need to make sure that all queries have
* finished before the next fifo command. (For example on hardware
* context destructions where the hardware may otherwise leak unfinished
* queries).
*
* This function does not return any failure codes, but makes attempts
* to do safe unpinning in case of errors.
*
* The function will synchronize on the previous query barrier, and will
* thus not finish until that barrier has executed.
*/
void vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv)
{
mutex_lock(&dev_priv->cmdbuf_mutex);
if (dev_priv->query_cid_valid)
__vmw_execbuf_release_pinned_bo(dev_priv, NULL);
mutex_unlock(&dev_priv->cmdbuf_mutex);
}
int vmw_execbuf_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct vmw_private *dev_priv = vmw_priv(dev);
struct drm_vmw_execbuf_arg *arg = (struct drm_vmw_execbuf_arg *)data;
struct vmw_master *vmaster = vmw_master(file_priv->master);
int ret;
/*
* This will allow us to extend the ioctl argument while
* maintaining backwards compatibility:
* We take different code paths depending on the value of
* arg->version.
*/
if (unlikely(arg->version != DRM_VMW_EXECBUF_VERSION)) {
DRM_ERROR("Incorrect execbuf version.\n");
DRM_ERROR("You're running outdated experimental "
"vmwgfx user-space drivers.");
return -EINVAL;
}
ret = ttm_read_lock(&vmaster->lock, true);
if (unlikely(ret != 0))
return ret;
ret = vmw_execbuf_process(file_priv, dev_priv,
(void __user *)(unsigned long)arg->commands,
NULL, arg->command_size, arg->throttle_us,
(void __user *)(unsigned long)arg->fence_rep,
NULL);
if (unlikely(ret != 0))
goto out_unlock;
vmw_kms_cursor_post_execbuf(dev_priv);
out_unlock:
ttm_read_unlock(&vmaster->lock);
return ret;
}
| gpl-2.0 |
thornbirdblue/8064_kernel | drivers/virtio/virtio_pci.c | 165 | 12573 | /*
* Virtio PCI driver
*
* This module allows virtio devices to be used over a virtual PCI device.
* This can be used with QEMU based VMMs like KVM or Xen.
*
* Copyright IBM Corp. 2007
*
* Authors:
* Anthony Liguori <aliguori@us.ibm.com>
*
* This work is licensed under the terms of the GNU GPL, version 2 or later.
* See the COPYING file in the top-level directory.
*
*/
#include <linux/module.h>
#include <linux/list.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/virtio.h>
#include <linux/virtio_config.h>
#include <linux/virtio_ring.h>
#include <linux/virtio_pci.h>
#include <linux/highmem.h>
#include <linux/spinlock.h>
MODULE_AUTHOR("Anthony Liguori <aliguori@us.ibm.com>");
MODULE_DESCRIPTION("virtio-pci");
MODULE_LICENSE("GPL");
MODULE_VERSION("1");
/* Our device structure */
struct virtio_pci_device
{
struct virtio_device vdev;
struct pci_dev *pci_dev;
/* the IO mapping for the PCI config space */
void __iomem *ioaddr;
/* a list of queues so we can dispatch IRQs */
spinlock_t lock;
struct list_head virtqueues;
};
struct virtio_pci_vq_info
{
/* the actual virtqueue */
struct virtqueue *vq;
/* the number of entries in the queue */
int num;
/* the index of the queue */
int queue_index;
/* the virtual address of the ring queue */
void *queue;
/* the list node for the virtqueues list */
struct list_head node;
};
/* Qumranet donated their vendor ID for devices 0x1000 thru 0x10FF. */
static struct pci_device_id virtio_pci_id_table[] = {
{ 0x1af4, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
{ 0 },
};
MODULE_DEVICE_TABLE(pci, virtio_pci_id_table);
/* A PCI device has its own struct device and so does a virtio device so
* we create a place for the virtio devices to show up in sysfs. I think it
* would make more sense for virtio to not insist on having its own device. */
static struct device *virtio_pci_root;
/* Convert a generic virtio device to our structure */
static struct virtio_pci_device *to_vp_device(struct virtio_device *vdev)
{
return container_of(vdev, struct virtio_pci_device, vdev);
}
/* virtio config->get_features() implementation */
static u32 vp_get_features(struct virtio_device *vdev)
{
struct virtio_pci_device *vp_dev = to_vp_device(vdev);
/* When someone needs more than 32 feature bits, we'll need to
* steal a bit to indicate that the rest are somewhere else. */
return ioread32(vp_dev->ioaddr + VIRTIO_PCI_HOST_FEATURES);
}
/* virtio config->finalize_features() implementation */
static void vp_finalize_features(struct virtio_device *vdev)
{
struct virtio_pci_device *vp_dev = to_vp_device(vdev);
/* Give virtio_ring a chance to accept features. */
vring_transport_features(vdev);
/* We only support 32 feature bits. */
BUILD_BUG_ON(ARRAY_SIZE(vdev->features) != 1);
iowrite32(vdev->features[0], vp_dev->ioaddr+VIRTIO_PCI_GUEST_FEATURES);
}
/* virtio config->get() implementation */
static void vp_get(struct virtio_device *vdev, unsigned offset,
void *buf, unsigned len)
{
struct virtio_pci_device *vp_dev = to_vp_device(vdev);
void __iomem *ioaddr = vp_dev->ioaddr + VIRTIO_PCI_CONFIG + offset;
u8 *ptr = buf;
int i;
for (i = 0; i < len; i++)
ptr[i] = ioread8(ioaddr + i);
}
/* the config->set() implementation. it's symmetric to the config->get()
* implementation */
static void vp_set(struct virtio_device *vdev, unsigned offset,
const void *buf, unsigned len)
{
struct virtio_pci_device *vp_dev = to_vp_device(vdev);
void __iomem *ioaddr = vp_dev->ioaddr + VIRTIO_PCI_CONFIG + offset;
const u8 *ptr = buf;
int i;
for (i = 0; i < len; i++)
iowrite8(ptr[i], ioaddr + i);
}
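/*
 * Sketch of the driver-side view of the two accessors above: a virtio
 * driver reads a device-specific config field through vdev->config->get().
 * The 32-bit "capacity" field at offset 0 is a made-up example layout,
 * and the helper name is illustrative.
 */
static u32 vp_example_read_capacity(struct virtio_device *vdev)
{
u32 capacity;
vdev->config->get(vdev, 0 /* offset */, &capacity, sizeof(capacity));
return capacity;
}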
/* config->{get,set}_status() implementations */
static u8 vp_get_status(struct virtio_device *vdev)
{
struct virtio_pci_device *vp_dev = to_vp_device(vdev);
return ioread8(vp_dev->ioaddr + VIRTIO_PCI_STATUS);
}
static void vp_set_status(struct virtio_device *vdev, u8 status)
{
struct virtio_pci_device *vp_dev = to_vp_device(vdev);
/* We should never be setting status to 0. */
BUG_ON(status == 0);
iowrite8(status, vp_dev->ioaddr + VIRTIO_PCI_STATUS);
}
static void vp_reset(struct virtio_device *vdev)
{
struct virtio_pci_device *vp_dev = to_vp_device(vdev);
/* 0 status means a reset. */
iowrite8(0, vp_dev->ioaddr + VIRTIO_PCI_STATUS);
}
/* the notify function used when creating a virt queue */
static void vp_notify(struct virtqueue *vq)
{
struct virtio_pci_device *vp_dev = to_vp_device(vq->vdev);
struct virtio_pci_vq_info *info = vq->priv;
/* we write the queue's selector into the notification register to
* signal the other end */
iowrite16(info->queue_index, vp_dev->ioaddr + VIRTIO_PCI_QUEUE_NOTIFY);
}
/* A small wrapper to also acknowledge the interrupt when it's handled.
* I really need an EIO hook for the vring so I can ack the interrupt once we
* know that we'll be handling the IRQ but before we invoke the callback since
* the callback may notify the host which results in the host attempting to
* raise an interrupt that we would then mask once we acknowledged the
* interrupt. */
static irqreturn_t vp_interrupt(int irq, void *opaque)
{
struct virtio_pci_device *vp_dev = opaque;
struct virtio_pci_vq_info *info;
irqreturn_t ret = IRQ_NONE;
unsigned long flags;
u8 isr;
/* reading the ISR has the effect of also clearing it so it's very
* important to save off the value. */
isr = ioread8(vp_dev->ioaddr + VIRTIO_PCI_ISR);
/* It's definitely not us if the ISR was not high */
if (!isr)
return IRQ_NONE;
/* Configuration change? Tell driver if it wants to know. */
if (isr & VIRTIO_PCI_ISR_CONFIG) {
struct virtio_driver *drv;
drv = container_of(vp_dev->vdev.dev.driver,
struct virtio_driver, driver);
if (drv && drv->config_changed)
drv->config_changed(&vp_dev->vdev);
}
spin_lock_irqsave(&vp_dev->lock, flags);
list_for_each_entry(info, &vp_dev->virtqueues, node) {
if (vring_interrupt(irq, info->vq) == IRQ_HANDLED)
ret = IRQ_HANDLED;
}
spin_unlock_irqrestore(&vp_dev->lock, flags);
return ret;
}
/* the config->find_vq() implementation */
static struct virtqueue *vp_find_vq(struct virtio_device *vdev, unsigned index,
void (*callback)(struct virtqueue *vq))
{
struct virtio_pci_device *vp_dev = to_vp_device(vdev);
struct virtio_pci_vq_info *info;
struct virtqueue *vq;
unsigned long flags, size;
u16 num;
int err;
/* Select the queue we're interested in */
iowrite16(index, vp_dev->ioaddr + VIRTIO_PCI_QUEUE_SEL);
/* Check if queue is either not available or already active. */
num = ioread16(vp_dev->ioaddr + VIRTIO_PCI_QUEUE_NUM);
if (!num || ioread32(vp_dev->ioaddr + VIRTIO_PCI_QUEUE_PFN))
return ERR_PTR(-ENOENT);
/* allocate and fill out our structure that represents an active
* queue */
info = kmalloc(sizeof(struct virtio_pci_vq_info), GFP_KERNEL);
if (!info)
return ERR_PTR(-ENOMEM);
info->queue_index = index;
info->num = num;
size = PAGE_ALIGN(vring_size(num, VIRTIO_PCI_VRING_ALIGN));
info->queue = alloc_pages_exact(size, GFP_KERNEL|__GFP_ZERO);
if (info->queue == NULL) {
err = -ENOMEM;
goto out_info;
}
/* activate the queue */
iowrite32(virt_to_phys(info->queue) >> VIRTIO_PCI_QUEUE_ADDR_SHIFT,
vp_dev->ioaddr + VIRTIO_PCI_QUEUE_PFN);
/* create the vring */
vq = vring_new_virtqueue(info->num, VIRTIO_PCI_VRING_ALIGN,
vdev, info->queue, vp_notify, callback);
if (!vq) {
err = -ENOMEM;
goto out_activate_queue;
}
vq->priv = info;
info->vq = vq;
spin_lock_irqsave(&vp_dev->lock, flags);
list_add(&info->node, &vp_dev->virtqueues);
spin_unlock_irqrestore(&vp_dev->lock, flags);
return vq;
out_activate_queue:
iowrite32(0, vp_dev->ioaddr + VIRTIO_PCI_QUEUE_PFN);
free_pages_exact(info->queue, size);
out_info:
kfree(info);
return ERR_PTR(err);
}
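/*
 * Sketch of how a driver of this era consumed vp_find_vq() through the
 * config ops: request queue index 0 with a completion callback, and
 * treat an ERR_PTR return (-ENOENT, -ENOMEM from vp_find_vq) as probe
 * failure. The function and callback names are illustrative.
 */
static struct virtqueue *vp_example_setup_vq(struct virtio_device *vdev,
void (*callback)(struct virtqueue *vq))
{
struct virtqueue *vq;
vq = vdev->config->find_vq(vdev, 0, callback);
if (IS_ERR(vq))
return NULL;
return vq;
}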
/* the config->del_vq() implementation */
static void vp_del_vq(struct virtqueue *vq)
{
struct virtio_pci_device *vp_dev = to_vp_device(vq->vdev);
struct virtio_pci_vq_info *info = vq->priv;
unsigned long flags, size;
spin_lock_irqsave(&vp_dev->lock, flags);
list_del(&info->node);
spin_unlock_irqrestore(&vp_dev->lock, flags);
vring_del_virtqueue(vq);
/* Select and deactivate the queue */
iowrite16(info->queue_index, vp_dev->ioaddr + VIRTIO_PCI_QUEUE_SEL);
iowrite32(0, vp_dev->ioaddr + VIRTIO_PCI_QUEUE_PFN);
size = PAGE_ALIGN(vring_size(info->num, VIRTIO_PCI_VRING_ALIGN));
free_pages_exact(info->queue, size);
kfree(info);
}
static struct virtio_config_ops virtio_pci_config_ops = {
.get = vp_get,
.set = vp_set,
.get_status = vp_get_status,
.set_status = vp_set_status,
.reset = vp_reset,
.find_vq = vp_find_vq,
.del_vq = vp_del_vq,
.get_features = vp_get_features,
.finalize_features = vp_finalize_features,
};
static void virtio_pci_release_dev(struct device *_d)
{
struct virtio_device *dev = container_of(_d, struct virtio_device, dev);
struct virtio_pci_device *vp_dev = to_vp_device(dev);
struct pci_dev *pci_dev = vp_dev->pci_dev;
free_irq(pci_dev->irq, vp_dev);
pci_set_drvdata(pci_dev, NULL);
pci_iounmap(pci_dev, vp_dev->ioaddr);
pci_release_regions(pci_dev);
pci_disable_device(pci_dev);
kfree(vp_dev);
}
/* the PCI probing function */
static int __devinit virtio_pci_probe(struct pci_dev *pci_dev,
const struct pci_device_id *id)
{
struct virtio_pci_device *vp_dev;
int err;
/* We only own devices >= 0x1000 and <= 0x103f: leave the rest. */
if (pci_dev->device < 0x1000 || pci_dev->device > 0x103f)
return -ENODEV;
if (pci_dev->revision != VIRTIO_PCI_ABI_VERSION) {
printk(KERN_ERR "virtio_pci: expected ABI version %d, got %d\n",
VIRTIO_PCI_ABI_VERSION, pci_dev->revision);
return -ENODEV;
}
/* allocate our structure and fill it out */
vp_dev = kzalloc(sizeof(struct virtio_pci_device), GFP_KERNEL);
if (vp_dev == NULL)
return -ENOMEM;
vp_dev->vdev.dev.parent = virtio_pci_root;
vp_dev->vdev.dev.release = virtio_pci_release_dev;
vp_dev->vdev.config = &virtio_pci_config_ops;
vp_dev->pci_dev = pci_dev;
INIT_LIST_HEAD(&vp_dev->virtqueues);
spin_lock_init(&vp_dev->lock);
/* enable the device */
err = pci_enable_device(pci_dev);
if (err)
goto out;
err = pci_request_regions(pci_dev, "virtio-pci");
if (err)
goto out_enable_device;
vp_dev->ioaddr = pci_iomap(pci_dev, 0, 0);
if (vp_dev->ioaddr == NULL)
goto out_req_regions;
pci_set_drvdata(pci_dev, vp_dev);
/* we use the subsystem vendor/device id as the virtio vendor/device
* id. this allows us to use the same PCI vendor/device id for all
* virtio devices and to identify the particular virtio driver by
* the subsystem ids */
vp_dev->vdev.id.vendor = pci_dev->subsystem_vendor;
vp_dev->vdev.id.device = pci_dev->subsystem_device;
/* register a handler for the queue with the PCI device's interrupt */
err = request_irq(vp_dev->pci_dev->irq, vp_interrupt, IRQF_SHARED,
dev_name(&vp_dev->vdev.dev), vp_dev);
if (err)
goto out_set_drvdata;
/* finally register the virtio device */
err = register_virtio_device(&vp_dev->vdev);
if (err)
goto out_req_irq;
return 0;
out_req_irq:
free_irq(pci_dev->irq, vp_dev);
out_set_drvdata:
pci_set_drvdata(pci_dev, NULL);
pci_iounmap(pci_dev, vp_dev->ioaddr);
out_req_regions:
pci_release_regions(pci_dev);
out_enable_device:
pci_disable_device(pci_dev);
out:
kfree(vp_dev);
return err;
}
static void __devexit virtio_pci_remove(struct pci_dev *pci_dev)
{
struct virtio_pci_device *vp_dev = pci_get_drvdata(pci_dev);
unregister_virtio_device(&vp_dev->vdev);
}
#ifdef CONFIG_PM
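/* Legacy PCI power management: save state and drop to D3hot on
* suspend, restore state and return to D0 on resume. */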
static int virtio_pci_suspend(struct pci_dev *pci_dev, pm_message_t state)
{
pci_save_state(pci_dev);
pci_set_power_state(pci_dev, PCI_D3hot);
return 0;
}
static int virtio_pci_resume(struct pci_dev *pci_dev)
{
pci_restore_state(pci_dev);
pci_set_power_state(pci_dev, PCI_D0);
return 0;
}
#endif
static struct pci_driver virtio_pci_driver = {
.name = "virtio-pci",
.id_table = virtio_pci_id_table,
.probe = virtio_pci_probe,
.remove = virtio_pci_remove,
#ifdef CONFIG_PM
.suspend = virtio_pci_suspend,
.resume = virtio_pci_resume,
#endif
};
static int __init virtio_pci_init(void)
{
int err;
virtio_pci_root = root_device_register("virtio-pci");
if (IS_ERR(virtio_pci_root))
return PTR_ERR(virtio_pci_root);
err = pci_register_driver(&virtio_pci_driver);
if (err)
device_unregister(virtio_pci_root);
return err;
}
module_init(virtio_pci_init);
static void __exit virtio_pci_exit(void)
{
pci_unregister_driver(&virtio_pci_driver);
root_device_unregister(virtio_pci_root);
}
module_exit(virtio_pci_exit);
| gpl-2.0 |
stari4ek/hero_kernel | kernel/latencytop.c | 165 | 5333 | /*
* latencytop.c: Latency display infrastructure
*
* (C) Copyright 2008 Intel Corporation
* Author: Arjan van de Ven <arjan@linux.intel.com>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; version 2
* of the License.
*/
#include <linux/latencytop.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/notifier.h>
#include <linux/spinlock.h>
#include <linux/proc_fs.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/stacktrace.h>
static DEFINE_SPINLOCK(latency_lock);
#define MAXLR 128
static struct latency_record latency_record[MAXLR];
int latencytop_enabled;
void clear_all_latency_tracing(struct task_struct *p)
{
unsigned long flags;
if (!latencytop_enabled)
return;
spin_lock_irqsave(&latency_lock, flags);
memset(&p->latency_record, 0, sizeof(p->latency_record));
p->latency_record_count = 0;
spin_unlock_irqrestore(&latency_lock, flags);
}
static void clear_global_latency_tracing(void)
{
unsigned long flags;
spin_lock_irqsave(&latency_lock, flags);
memset(&latency_record, 0, sizeof(latency_record));
spin_unlock_irqrestore(&latency_lock, flags);
}
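/*
* Fold one latency record into the global table: if an entry with the
* same backtrace already exists, bump its counters; otherwise store the
* record in the first free slot. Called with latency_lock held.
*/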
static void __sched
account_global_scheduler_latency(struct task_struct *tsk, struct latency_record *lat)
{
int firstnonnull = MAXLR + 1;
int i;
if (!latencytop_enabled)
return;
/* skip kernel threads for now */
if (!tsk->mm)
return;
for (i = 0; i < MAXLR; i++) {
int q, same = 1;
/* Nothing stored: */
if (!latency_record[i].backtrace[0]) {
if (firstnonnull > i)
firstnonnull = i;
continue;
}
for (q = 0 ; q < LT_BACKTRACEDEPTH ; q++) {
unsigned long record = lat->backtrace[q];
if (latency_record[i].backtrace[q] != record) {
same = 0;
break;
}
/* 0 and ULONG_MAX entries mean end of backtrace: */
if (record == 0 || record == ULONG_MAX)
break;
}
if (same) {
latency_record[i].count++;
latency_record[i].time += lat->time;
if (lat->time > latency_record[i].max)
latency_record[i].max = lat->time;
return;
}
}
i = firstnonnull;
if (i >= MAXLR - 1)
return;
/* Allocated a new one: */
memcpy(&latency_record[i], lat, sizeof(struct latency_record));
}
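/*
* Capture tsk's kernel stack into lat->backtrace, up to
* LT_BACKTRACEDEPTH entries.
*/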
static inline void store_stacktrace(struct task_struct *tsk, struct latency_record *lat)
{
struct stack_trace trace;
memset(&trace, 0, sizeof(trace));
trace.max_entries = LT_BACKTRACEDEPTH;
trace.entries = &lat->backtrace[0];
trace.skip = 0;
save_stack_trace_tsk(tsk, &trace);
}
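/*
* Record that @tsk just waited @usecs microseconds; @inter is non-zero
* for interruptible sleeps, which are skipped above 5 ms since long
* interruptible waits are usually user requested.
*/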
void __sched
account_scheduler_latency(struct task_struct *tsk, int usecs, int inter)
{
unsigned long flags;
int i, q;
struct latency_record lat;
if (!latencytop_enabled)
return;
/* Long interruptible waits are generally user requested... */
if (inter && usecs > 5000)
return;
memset(&lat, 0, sizeof(lat));
lat.count = 1;
lat.time = usecs;
lat.max = usecs;
store_stacktrace(tsk, &lat);
spin_lock_irqsave(&latency_lock, flags);
account_global_scheduler_latency(tsk, &lat);
/*
* short term hack: if we have more than LT_SAVECOUNT (32) records
* we stop; in the future we should recycle old entries:
*/
tsk->latency_record_count++;
if (tsk->latency_record_count >= LT_SAVECOUNT)
goto out_unlock;
for (i = 0; i < LT_SAVECOUNT ; i++) {
struct latency_record *mylat;
int same = 1;
mylat = &tsk->latency_record[i];
for (q = 0 ; q < LT_BACKTRACEDEPTH ; q++) {
unsigned long record = lat.backtrace[q];
if (mylat->backtrace[q] != record) {
same = 0;
break;
}
/* 0 and ULONG_MAX entries mean end of backtrace: */
if (record == 0 || record == ULONG_MAX)
break;
}
if (same) {
mylat->count++;
mylat->time += lat.time;
if (lat.time > mylat->max)
mylat->max = lat.time;
goto out_unlock;
}
}
/* Allocated a new one: */
i = tsk->latency_record_count;
memcpy(&tsk->latency_record[i], &lat, sizeof(struct latency_record));
out_unlock:
spin_unlock_irqrestore(&latency_lock, flags);
}
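/*
* seq_file show handler for /proc/latency_stats: one line per global
* record - count, total time, maximum, then the symbolized backtrace.
*/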
static int lstats_show(struct seq_file *m, void *v)
{
int i;
seq_puts(m, "Latency Top version : v0.1\n");
for (i = 0; i < MAXLR; i++) {
if (latency_record[i].backtrace[0]) {
int q;
seq_printf(m, "%i %li %li ",
latency_record[i].count,
latency_record[i].time,
latency_record[i].max);
for (q = 0; q < LT_BACKTRACEDEPTH; q++) {
char sym[KSYM_SYMBOL_LEN];
char *c;
if (!latency_record[i].backtrace[q])
break;
if (latency_record[i].backtrace[q] == ULONG_MAX)
break;
sprint_symbol(sym, latency_record[i].backtrace[q]);
c = strchr(sym, '+');
if (c)
*c = 0;
seq_printf(m, "%s ", sym);
}
seq_printf(m, "\n");
}
}
return 0;
}
static ssize_t
lstats_write(struct file *file, const char __user *buf, size_t count,
loff_t *offs)
{
clear_global_latency_tracing();
return count;
}
static int lstats_open(struct inode *inode, struct file *filp)
{
return single_open(filp, lstats_show, NULL);
}
static struct file_operations lstats_fops = {
.open = lstats_open,
.read = seq_read,
.write = lstats_write,
.llseek = seq_lseek,
.release = single_release,
};
static int __init init_lstats_procfs(void)
{
proc_create("latency_stats", 0644, NULL, &lstats_fops);
return 0;
}
__initcall(init_lstats_procfs);
| gpl-2.0 |
temasek/Kernel-Nexus7 | drivers/usb/gadget/cdc2.c | 421 | 6918 | /*
* cdc2.c -- CDC Composite driver, with ECM and ACM support
*
* Copyright (C) 2008 David Brownell
* Copyright (C) 2008 Nokia Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/kernel.h>
#include <linux/utsname.h>
#include "u_ether.h"
#include "u_serial.h"
#define DRIVER_DESC "CDC Composite Gadget"
#define DRIVER_VERSION "King Kamehameha Day 2008"
/*-------------------------------------------------------------------------*/
/* DO NOT REUSE THESE IDs with a protocol-incompatible driver!! Ever!!
* Instead: allocate your own, using normal USB-IF procedures.
*/
/* Thanks to NetChip Technologies for donating this product ID.
* It's for devices with only this composite CDC configuration.
*/
#define CDC_VENDOR_NUM 0x0525 /* NetChip */
#define CDC_PRODUCT_NUM 0xa4aa /* CDC Composite: ECM + ACM */
/*-------------------------------------------------------------------------*/
/*
* Kbuild is not very cooperative with respect to linking separately
* compiled library objects into one module. So for now we won't use
* separate compilation ... ensuring init/exit sections work to shrink
* the runtime footprint, and giving us at least some parts of what
* a "gcc --combine ... part1.c part2.c part3.c ... " build would.
*/
#include "composite.c"
#include "usbstring.c"
#include "config.c"
#include "epautoconf.c"
#include "u_serial.c"
#include "f_acm.c"
#include "f_ecm.c"
#include "u_ether.c"
/*-------------------------------------------------------------------------*/
static struct usb_device_descriptor device_desc = {
.bLength = sizeof device_desc,
.bDescriptorType = USB_DT_DEVICE,
.bcdUSB = cpu_to_le16(0x0200),
.bDeviceClass = USB_CLASS_COMM,
.bDeviceSubClass = 0,
.bDeviceProtocol = 0,
/* .bMaxPacketSize0 = f(hardware) */
/* Vendor and product id can be overridden by module parameters. */
.idVendor = cpu_to_le16(CDC_VENDOR_NUM),
.idProduct = cpu_to_le16(CDC_PRODUCT_NUM),
/* .bcdDevice = f(hardware) */
/* .iManufacturer = DYNAMIC */
/* .iProduct = DYNAMIC */
/* NO SERIAL NUMBER */
.bNumConfigurations = 1,
};
static struct usb_otg_descriptor otg_descriptor = {
.bLength = sizeof otg_descriptor,
.bDescriptorType = USB_DT_OTG,
/* REVISIT SRP-only hardware is possible, although
* it would not be called "OTG" ...
*/
.bmAttributes = USB_OTG_SRP | USB_OTG_HNP,
};
static const struct usb_descriptor_header *otg_desc[] = {
(struct usb_descriptor_header *) &otg_descriptor,
NULL,
};
/* string IDs are assigned dynamically */
#define STRING_MANUFACTURER_IDX 0
#define STRING_PRODUCT_IDX 1
static char manufacturer[50];
static struct usb_string strings_dev[] = {
[STRING_MANUFACTURER_IDX].s = manufacturer,
[STRING_PRODUCT_IDX].s = DRIVER_DESC,
{ } /* end of list */
};
static struct usb_gadget_strings stringtab_dev = {
.language = 0x0409, /* en-us */
.strings = strings_dev,
};
static struct usb_gadget_strings *dev_strings[] = {
&stringtab_dev,
NULL,
};
static u8 hostaddr[ETH_ALEN];
/*-------------------------------------------------------------------------*/
/*
* We _always_ have both CDC ECM and CDC ACM functions.
*/
static int __init cdc_do_config(struct usb_configuration *c)
{
int status;
if (gadget_is_otg(c->cdev->gadget)) {
c->descriptors = otg_desc;
c->bmAttributes |= USB_CONFIG_ATT_WAKEUP;
}
status = ecm_bind_config(c, hostaddr);
if (status < 0)
return status;
status = acm_bind_config(c, 0);
if (status < 0)
return status;
return 0;
}
static struct usb_configuration cdc_config_driver = {
.label = "CDC Composite (ECM + ACM)",
.bConfigurationValue = 1,
/* .iConfiguration = DYNAMIC */
.bmAttributes = USB_CONFIG_ATT_SELFPOWER,
};
/*-------------------------------------------------------------------------*/
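/*
* Gadget bind: bring up the network and serial link layers, then
* register the single ECM + ACM configuration.
*/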
static int __init cdc_bind(struct usb_composite_dev *cdev)
{
int gcnum;
struct usb_gadget *gadget = cdev->gadget;
int status;
if (!can_support_ecm(cdev->gadget)) {
dev_err(&gadget->dev, "controller '%s' not usable\n",
gadget->name);
return -EINVAL;
}
/* set up network link layer */
status = gether_setup(cdev->gadget, hostaddr);
if (status < 0)
return status;
/* set up serial link layer */
status = gserial_setup(cdev->gadget, 1);
if (status < 0)
goto fail0;
gcnum = usb_gadget_controller_number(gadget);
if (gcnum >= 0)
device_desc.bcdDevice = cpu_to_le16(0x0300 | gcnum);
else {
/* We assume that can_support_ecm() tells the truth;
* but if the controller isn't recognized at all then
* that assumption is a bit more likely to be wrong.
*/
WARNING(cdev, "controller '%s' not recognized; trying %s\n",
gadget->name,
cdc_config_driver.label);
device_desc.bcdDevice =
cpu_to_le16(0x0300 | 0x0099);
}
/* Allocate string descriptor numbers ... note that string
* contents can be overridden by the composite_dev glue.
*/
/* device descriptor strings: manufacturer, product */
snprintf(manufacturer, sizeof manufacturer, "%s %s with %s",
init_utsname()->sysname, init_utsname()->release,
gadget->name);
status = usb_string_id(cdev);
if (status < 0)
goto fail1;
strings_dev[STRING_MANUFACTURER_IDX].id = status;
device_desc.iManufacturer = status;
status = usb_string_id(cdev);
if (status < 0)
goto fail1;
strings_dev[STRING_PRODUCT_IDX].id = status;
device_desc.iProduct = status;
/* register our configuration */
status = usb_add_config(cdev, &cdc_config_driver, cdc_do_config);
if (status < 0)
goto fail1;
dev_info(&gadget->dev, "%s, version: " DRIVER_VERSION "\n",
DRIVER_DESC);
return 0;
fail1:
gserial_cleanup();
fail0:
gether_cleanup();
return status;
}
static int __exit cdc_unbind(struct usb_composite_dev *cdev)
{
gserial_cleanup();
gether_cleanup();
return 0;
}
static struct usb_composite_driver cdc_driver = {
.name = "g_cdc",
.dev = &device_desc,
.strings = dev_strings,
.max_speed = USB_SPEED_HIGH,
.unbind = __exit_p(cdc_unbind),
};
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_AUTHOR("David Brownell");
MODULE_LICENSE("GPL");
static int __init init(void)
{
return usb_composite_probe(&cdc_driver, cdc_bind);
}
module_init(init);
static void __exit cleanup(void)
{
usb_composite_unregister(&cdc_driver);
}
module_exit(cleanup);
| gpl-2.0 |
EPDCenter/android_kernel_odys_genio | drivers/mtd/nand/nandsim.c | 933 | 69293 | /*
* NAND flash simulator.
*
* Author: Artem B. Bityuckiy <dedekind@oktetlabs.ru>, <dedekind@infradead.org>
*
* Copyright (C) 2004 Nokia Corporation
*
* Note: NS means "NAND Simulator".
* Note: Input means input TO flash chip, output means output FROM chip.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2, or (at your option) any later
* version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
* Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA
*/
#include <linux/init.h>
#include <linux/types.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/vmalloc.h>
#include <linux/math64.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/nand.h>
#include <linux/mtd/nand_bch.h>
#include <linux/mtd/partitions.h>
#include <linux/delay.h>
#include <linux/list.h>
#include <linux/random.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
/* Default simulator parameters values */
#if !defined(CONFIG_NANDSIM_FIRST_ID_BYTE) || \
!defined(CONFIG_NANDSIM_SECOND_ID_BYTE) || \
!defined(CONFIG_NANDSIM_THIRD_ID_BYTE) || \
!defined(CONFIG_NANDSIM_FOURTH_ID_BYTE)
#define CONFIG_NANDSIM_FIRST_ID_BYTE 0x98
#define CONFIG_NANDSIM_SECOND_ID_BYTE 0x39
#define CONFIG_NANDSIM_THIRD_ID_BYTE 0xFF /* No byte */
#define CONFIG_NANDSIM_FOURTH_ID_BYTE 0xFF /* No byte */
#endif
#ifndef CONFIG_NANDSIM_ACCESS_DELAY
#define CONFIG_NANDSIM_ACCESS_DELAY 25
#endif
#ifndef CONFIG_NANDSIM_PROGRAMM_DELAY
#define CONFIG_NANDSIM_PROGRAMM_DELAY 200
#endif
#ifndef CONFIG_NANDSIM_ERASE_DELAY
#define CONFIG_NANDSIM_ERASE_DELAY 2
#endif
#ifndef CONFIG_NANDSIM_OUTPUT_CYCLE
#define CONFIG_NANDSIM_OUTPUT_CYCLE 40
#endif
#ifndef CONFIG_NANDSIM_INPUT_CYCLE
#define CONFIG_NANDSIM_INPUT_CYCLE 50
#endif
#ifndef CONFIG_NANDSIM_BUS_WIDTH
#define CONFIG_NANDSIM_BUS_WIDTH 8
#endif
#ifndef CONFIG_NANDSIM_DO_DELAYS
#define CONFIG_NANDSIM_DO_DELAYS 0
#endif
#ifndef CONFIG_NANDSIM_LOG
#define CONFIG_NANDSIM_LOG 0
#endif
#ifndef CONFIG_NANDSIM_DBG
#define CONFIG_NANDSIM_DBG 0
#endif
#ifndef CONFIG_NANDSIM_MAX_PARTS
#define CONFIG_NANDSIM_MAX_PARTS 32
#endif
static uint first_id_byte = CONFIG_NANDSIM_FIRST_ID_BYTE;
static uint second_id_byte = CONFIG_NANDSIM_SECOND_ID_BYTE;
static uint third_id_byte = CONFIG_NANDSIM_THIRD_ID_BYTE;
static uint fourth_id_byte = CONFIG_NANDSIM_FOURTH_ID_BYTE;
static uint access_delay = CONFIG_NANDSIM_ACCESS_DELAY;
static uint programm_delay = CONFIG_NANDSIM_PROGRAMM_DELAY;
static uint erase_delay = CONFIG_NANDSIM_ERASE_DELAY;
static uint output_cycle = CONFIG_NANDSIM_OUTPUT_CYCLE;
static uint input_cycle = CONFIG_NANDSIM_INPUT_CYCLE;
static uint bus_width = CONFIG_NANDSIM_BUS_WIDTH;
static uint do_delays = CONFIG_NANDSIM_DO_DELAYS;
static uint log = CONFIG_NANDSIM_LOG;
static uint dbg = CONFIG_NANDSIM_DBG;
static unsigned long parts[CONFIG_NANDSIM_MAX_PARTS];
static unsigned int parts_num;
static char *badblocks = NULL;
static char *weakblocks = NULL;
static char *weakpages = NULL;
static unsigned int bitflips = 0;
static char *gravepages = NULL;
static unsigned int rptwear = 0;
static unsigned int overridesize = 0;
static char *cache_file = NULL;
static unsigned int bbt;
static unsigned int bch;
module_param(first_id_byte, uint, 0400);
module_param(second_id_byte, uint, 0400);
module_param(third_id_byte, uint, 0400);
module_param(fourth_id_byte, uint, 0400);
module_param(access_delay, uint, 0400);
module_param(programm_delay, uint, 0400);
module_param(erase_delay, uint, 0400);
module_param(output_cycle, uint, 0400);
module_param(input_cycle, uint, 0400);
module_param(bus_width, uint, 0400);
module_param(do_delays, uint, 0400);
module_param(log, uint, 0400);
module_param(dbg, uint, 0400);
module_param_array(parts, ulong, &parts_num, 0400);
module_param(badblocks, charp, 0400);
module_param(weakblocks, charp, 0400);
module_param(weakpages, charp, 0400);
module_param(bitflips, uint, 0400);
module_param(gravepages, charp, 0400);
module_param(rptwear, uint, 0400);
module_param(overridesize, uint, 0400);
module_param(cache_file, charp, 0400);
module_param(bbt, uint, 0400);
module_param(bch, uint, 0400);
MODULE_PARM_DESC(first_id_byte, "The first byte returned by NAND Flash 'read ID' command (manufacturer ID)");
MODULE_PARM_DESC(second_id_byte, "The second byte returned by NAND Flash 'read ID' command (chip ID)");
MODULE_PARM_DESC(third_id_byte, "The third byte returned by NAND Flash 'read ID' command");
MODULE_PARM_DESC(fourth_id_byte, "The fourth byte returned by NAND Flash 'read ID' command");
MODULE_PARM_DESC(access_delay, "Initial page access delay (microseconds)");
MODULE_PARM_DESC(programm_delay, "Page program delay (microseconds)");
MODULE_PARM_DESC(erase_delay, "Sector erase delay (milliseconds)");
MODULE_PARM_DESC(output_cycle, "Word output (from flash) time (nanoseconds)");
MODULE_PARM_DESC(input_cycle, "Word input (to flash) time (nanoseconds)");
MODULE_PARM_DESC(bus_width, "Chip's bus width (8- or 16-bit)");
MODULE_PARM_DESC(do_delays, "Simulate NAND delays using busy-waits if not zero");
MODULE_PARM_DESC(log, "Perform logging if not zero");
MODULE_PARM_DESC(dbg, "Output debug information if not zero");
MODULE_PARM_DESC(parts, "Partition sizes (in erase blocks) separated by commas");
/* Page and erase block positions for the following parameters are independent of any partitions */
MODULE_PARM_DESC(badblocks, "Erase blocks that are initially marked bad, separated by commas");
MODULE_PARM_DESC(weakblocks, "Weak erase blocks [: remaining erase cycles (defaults to 3)]"
" separated by commas e.g. 113:2 means eb 113"
" can be erased only twice before failing");
MODULE_PARM_DESC(weakpages, "Weak pages [: maximum writes (defaults to 3)]"
" separated by commas e.g. 1401:2 means page 1401"
" can be written only twice before failing");
MODULE_PARM_DESC(bitflips, "Maximum number of random bit flips per page (zero by default)");
MODULE_PARM_DESC(gravepages, "Pages that lose data [: maximum reads (defaults to 3)]"
" separated by commas e.g. 1401:2 means page 1401"
" can be read only twice before failing");
MODULE_PARM_DESC(rptwear, "Number of erases between reporting wear, if not zero");
MODULE_PARM_DESC(overridesize, "Specifies the NAND Flash size overriding the ID bytes. "
"The size is specified in erase blocks and as the exponent of a power of two"
" e.g. 5 means a size of 32 erase blocks");
MODULE_PARM_DESC(cache_file, "File to use to cache nand pages instead of memory");
MODULE_PARM_DESC(bbt, "0 OOB, 1 BBT with marker in OOB, 2 BBT with marker in data area");
MODULE_PARM_DESC(bch, "Enable BCH ecc and set how many bits should "
"be correctable in 512-byte blocks");
/* The largest possible page size */
#define NS_LARGEST_PAGE_SIZE 4096
/* The prefix for simulator output */
#define NS_OUTPUT_PREFIX "[nandsim]"
/* Simulator's output macros (logging, debugging, warning, error) */
#define NS_LOG(args...) \
do { if (log) printk(KERN_DEBUG NS_OUTPUT_PREFIX " log: " args); } while(0)
#define NS_DBG(args...) \
do { if (dbg) printk(KERN_DEBUG NS_OUTPUT_PREFIX " debug: " args); } while(0)
#define NS_WARN(args...) \
do { printk(KERN_WARNING NS_OUTPUT_PREFIX " warning: " args); } while(0)
#define NS_ERR(args...) \
do { printk(KERN_ERR NS_OUTPUT_PREFIX " error: " args); } while(0)
#define NS_INFO(args...) \
do { printk(KERN_INFO NS_OUTPUT_PREFIX " " args); } while(0)
/* Busy-wait delay macros (microseconds, milliseconds) */
#define NS_UDELAY(us) \
do { if (do_delays) udelay(us); } while(0)
#define NS_MDELAY(us) \
do { if (do_delays) mdelay(us); } while(0)
/* Is the nandsim structure initialized ? */
#define NS_IS_INITIALIZED(ns) ((ns)->geom.totsz != 0)
/* Good operation completion status */
#define NS_STATUS_OK(ns) (NAND_STATUS_READY | (NAND_STATUS_WP * ((ns)->lines.wp == 0)))
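/* Note: the NAND status WP bit means "not write-protected", so it is
* ORed in only while the simulated wp line is deasserted. */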
/* Operation failed completion status */
#define NS_STATUS_FAILED(ns) (NAND_STATUS_FAIL | NS_STATUS_OK(ns))
/* Calculate the page offset in flash RAM image by (row, column) address */
#define NS_RAW_OFFSET(ns) \
(((ns)->regs.row << (ns)->geom.pgshift) + ((ns)->regs.row * (ns)->geom.oobsz) + (ns)->regs.column)
/* Calculate the OOB offset in flash RAM image by (row, column) address */
#define NS_RAW_OFFSET_OOB(ns) (NS_RAW_OFFSET(ns) + ns->geom.pgsz)
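/* Example: with 2048-byte pages and a 64-byte OOB area, page (row) 3 at
* column 5 sits at raw offset 3*2048 + 3*64 + 5 = 6341. */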
/* After a command is input, the simulator goes to one of the following states */
#define STATE_CMD_READ0 0x00000001 /* read data from the beginning of page */
#define STATE_CMD_READ1 0x00000002 /* read data from the second half of page */
#define STATE_CMD_READSTART 0x00000003 /* read data second command (large page devices) */
#define STATE_CMD_PAGEPROG 0x00000004 /* start page program */
#define STATE_CMD_READOOB 0x00000005 /* read OOB area */
#define STATE_CMD_ERASE1 0x00000006 /* sector erase first command */
#define STATE_CMD_STATUS 0x00000007 /* read status */
#define STATE_CMD_STATUS_M 0x00000008 /* read multi-plane status (isn't implemented) */
#define STATE_CMD_SEQIN 0x00000009 /* sequential data input */
#define STATE_CMD_READID 0x0000000A /* read ID */
#define STATE_CMD_ERASE2 0x0000000B /* sector erase second command */
#define STATE_CMD_RESET 0x0000000C /* reset */
#define STATE_CMD_RNDOUT 0x0000000D /* random output command */
#define STATE_CMD_RNDOUTSTART 0x0000000E /* random output start command */
#define STATE_CMD_MASK 0x0000000F /* command states mask */
/* After an address is input, the simulator goes to one of these states */
#define STATE_ADDR_PAGE 0x00000010 /* full (row, column) address is accepted */
#define STATE_ADDR_SEC 0x00000020 /* sector address was accepted */
#define STATE_ADDR_COLUMN 0x00000030 /* column address was accepted */
#define STATE_ADDR_ZERO 0x00000040 /* one byte zero address was accepted */
#define STATE_ADDR_MASK 0x00000070 /* address states mask */
/* During data input/output the simulator is in these states */
#define STATE_DATAIN 0x00000100 /* waiting for data input */
#define STATE_DATAIN_MASK 0x00000100 /* data input states mask */
#define STATE_DATAOUT 0x00001000 /* waiting for page data output */
#define STATE_DATAOUT_ID 0x00002000 /* waiting for ID bytes output */
#define STATE_DATAOUT_STATUS 0x00003000 /* waiting for status output */
#define STATE_DATAOUT_STATUS_M 0x00004000 /* waiting for multi-plane status output */
#define STATE_DATAOUT_MASK 0x00007000 /* data output states mask */
/* Previous operation is done, ready to accept new requests */
#define STATE_READY 0x00000000
/* This state is used to mark that the next state isn't known yet */
#define STATE_UNKNOWN 0x10000000
/* Simulator's actions bit masks */
#define ACTION_CPY 0x00100000 /* copy page/OOB to the internal buffer */
#define ACTION_PRGPAGE 0x00200000 /* program the internal buffer to flash */
#define ACTION_SECERASE 0x00300000 /* erase sector */
#define ACTION_ZEROOFF 0x00400000 /* don't add any offset to address */
#define ACTION_HALFOFF 0x00500000 /* add to address half of page */
#define ACTION_OOBOFF 0x00600000 /* add to address OOB offset */
#define ACTION_MASK 0x00700000 /* action mask */
#define NS_OPER_NUM 13 /* Number of operations supported by the simulator */
#define NS_OPER_STATES 6 /* Maximum number of states in operation */
#define OPT_ANY 0xFFFFFFFF /* any chip supports this operation */
#define OPT_PAGE256 0x00000001 /* 256-byte page chips */
#define OPT_PAGE512 0x00000002 /* 512-byte page chips */
#define OPT_PAGE2048 0x00000008 /* 2048-byte page chips */
#define OPT_SMARTMEDIA 0x00000010 /* SmartMedia technology chips */
#define OPT_AUTOINCR 0x00000020 /* page number auto incrementation is possible */
#define OPT_PAGE512_8BIT 0x00000040 /* 512-byte page chips with 8-bit bus width */
#define OPT_PAGE4096 0x00000080 /* 4096-byte page chips */
#define OPT_LARGEPAGE (OPT_PAGE2048 | OPT_PAGE4096) /* 2048 & 4096-byte page chips */
#define OPT_SMALLPAGE (OPT_PAGE256 | OPT_PAGE512) /* 256 and 512-byte page chips */
/* Remove action bits from state */
#define NS_STATE(x) ((x) & ~ACTION_MASK)
/*
* Maximum previous states which need to be saved. Currently saving is
* only needed for page program operation with preceded read command
* (which is only valid for 512-byte pages).
*/
#define NS_MAX_PREVSTATES 1
/* Maximum page cache pages needed to read or write a NAND page to the cache_file */
#define NS_MAX_HELD_PAGES 16
/*
* A union to represent flash memory contents and flash buffer.
*/
union ns_mem {
u_char *byte; /* for byte access */
uint16_t *word; /* for 16-bit word access */
};
/*
* The structure which describes all the internal simulator data.
*/
struct nandsim {
struct mtd_partition partitions[CONFIG_NANDSIM_MAX_PARTS];
unsigned int nbparts;
uint busw; /* flash chip bus width (8 or 16) */
u_char ids[4]; /* chip's ID bytes */
uint32_t options; /* chip's characteristic bits */
uint32_t state; /* current chip state */
uint32_t nxstate; /* next expected state */
uint32_t *op; /* current operation, NULL if the operation isn't known yet */
uint32_t pstates[NS_MAX_PREVSTATES]; /* previous states */
uint16_t npstates; /* number of previous states saved */
uint16_t stateidx; /* current state index */
/* The simulated NAND flash pages array */
union ns_mem *pages;
/* Slab allocator for nand pages */
struct kmem_cache *nand_pages_slab;
/* Internal buffer of page + OOB size bytes */
union ns_mem buf;
/* NAND flash "geometry" */
struct {
uint64_t totsz; /* total flash size, bytes */
uint32_t secsz; /* flash sector (erase block) size, bytes */
uint pgsz; /* NAND flash page size, bytes */
uint oobsz; /* page OOB area size, bytes */
uint64_t totszoob; /* total flash size including OOB, bytes */
uint pgszoob; /* page size including OOB, bytes */
uint secszoob; /* sector size including OOB, bytes */
uint pgnum; /* total number of pages */
uint pgsec; /* number of pages per sector */
uint secshift; /* bits number in sector size */
uint pgshift; /* bits number in page size */
uint oobshift; /* bits number in OOB size */
uint pgaddrbytes; /* bytes per page address */
uint secaddrbytes; /* bytes per sector address */
uint idbytes; /* the number of ID bytes that this chip outputs */
} geom;
/* NAND flash internal registers */
struct {
unsigned command; /* the command register */
u_char status; /* the status register */
uint row; /* the page number */
uint column; /* the offset within page */
uint count; /* internal counter */
uint num; /* number of bytes which must be processed */
uint off; /* fixed page offset */
} regs;
/* NAND flash lines state */
struct {
int ce; /* chip Enable */
int cle; /* command Latch Enable */
int ale; /* address Latch Enable */
int wp; /* write Protect */
} lines;
/* Fields needed when using a cache file */
struct file *cfile; /* Open file */
unsigned char *pages_written; /* Which pages have been written */
void *file_buf;
struct page *held_pages[NS_MAX_HELD_PAGES];
int held_cnt;
};
/*
* Operations array. To perform any operation the simulator must pass
* through the correspondent states chain.
*/
static struct nandsim_operations {
uint32_t reqopts; /* options which are required to perform the operation */
uint32_t states[NS_OPER_STATES]; /* operation's states */
} ops[NS_OPER_NUM] = {
/* Read page + OOB from the beginning */
{OPT_SMALLPAGE, {STATE_CMD_READ0 | ACTION_ZEROOFF, STATE_ADDR_PAGE | ACTION_CPY,
STATE_DATAOUT, STATE_READY}},
/* Read page + OOB from the second half */
{OPT_PAGE512_8BIT, {STATE_CMD_READ1 | ACTION_HALFOFF, STATE_ADDR_PAGE | ACTION_CPY,
STATE_DATAOUT, STATE_READY}},
/* Read OOB */
{OPT_SMALLPAGE, {STATE_CMD_READOOB | ACTION_OOBOFF, STATE_ADDR_PAGE | ACTION_CPY,
STATE_DATAOUT, STATE_READY}},
/* Program page starting from the beginning */
{OPT_ANY, {STATE_CMD_SEQIN, STATE_ADDR_PAGE, STATE_DATAIN,
STATE_CMD_PAGEPROG | ACTION_PRGPAGE, STATE_READY}},
/* Program page starting from the beginning */
{OPT_SMALLPAGE, {STATE_CMD_READ0, STATE_CMD_SEQIN | ACTION_ZEROOFF, STATE_ADDR_PAGE,
STATE_DATAIN, STATE_CMD_PAGEPROG | ACTION_PRGPAGE, STATE_READY}},
/* Program page starting from the second half */
{OPT_PAGE512, {STATE_CMD_READ1, STATE_CMD_SEQIN | ACTION_HALFOFF, STATE_ADDR_PAGE,
STATE_DATAIN, STATE_CMD_PAGEPROG | ACTION_PRGPAGE, STATE_READY}},
/* Program OOB */
{OPT_SMALLPAGE, {STATE_CMD_READOOB, STATE_CMD_SEQIN | ACTION_OOBOFF, STATE_ADDR_PAGE,
STATE_DATAIN, STATE_CMD_PAGEPROG | ACTION_PRGPAGE, STATE_READY}},
/* Erase sector */
{OPT_ANY, {STATE_CMD_ERASE1, STATE_ADDR_SEC, STATE_CMD_ERASE2 | ACTION_SECERASE, STATE_READY}},
/* Read status */
{OPT_ANY, {STATE_CMD_STATUS, STATE_DATAOUT_STATUS, STATE_READY}},
/* Read multi-plane status */
{OPT_SMARTMEDIA, {STATE_CMD_STATUS_M, STATE_DATAOUT_STATUS_M, STATE_READY}},
/* Read ID */
{OPT_ANY, {STATE_CMD_READID, STATE_ADDR_ZERO, STATE_DATAOUT_ID, STATE_READY}},
/* Large page devices read page */
{OPT_LARGEPAGE, {STATE_CMD_READ0, STATE_ADDR_PAGE, STATE_CMD_READSTART | ACTION_CPY,
STATE_DATAOUT, STATE_READY}},
/* Large page devices random page read */
{OPT_LARGEPAGE, {STATE_CMD_RNDOUT, STATE_ADDR_COLUMN, STATE_CMD_RNDOUTSTART | ACTION_CPY,
STATE_DATAOUT, STATE_READY}},
};
struct weak_block {
struct list_head list;
unsigned int erase_block_no;
unsigned int max_erases;
unsigned int erases_done;
};
static LIST_HEAD(weak_blocks);
struct weak_page {
struct list_head list;
unsigned int page_no;
unsigned int max_writes;
unsigned int writes_done;
};
static LIST_HEAD(weak_pages);
struct grave_page {
struct list_head list;
unsigned int page_no;
unsigned int max_reads;
unsigned int reads_done;
};
static LIST_HEAD(grave_pages);
static unsigned long *erase_block_wear = NULL;
static unsigned int wear_eb_count = 0;
static unsigned long total_wear = 0;
static unsigned int rptwear_cnt = 0;
/* MTD structure for NAND controller */
static struct mtd_info *nsmtd;
static u_char ns_verify_buf[NS_LARGEST_PAGE_SIZE];
/*
* Allocate array of page pointers, create slab allocation for an array
* and initialize the array by NULL pointers.
*
* RETURNS: 0 if success, -ENOMEM if memory alloc fails.
*/
static int alloc_device(struct nandsim *ns)
{
struct file *cfile;
int i, err;
if (cache_file) {
cfile = filp_open(cache_file, O_CREAT | O_RDWR | O_LARGEFILE, 0600);
if (IS_ERR(cfile))
return PTR_ERR(cfile);
if (!cfile->f_op || (!cfile->f_op->read && !cfile->f_op->aio_read)) {
NS_ERR("alloc_device: cache file not readable\n");
err = -EINVAL;
goto err_close;
}
if (!cfile->f_op->write && !cfile->f_op->aio_write) {
NS_ERR("alloc_device: cache file not writeable\n");
err = -EINVAL;
goto err_close;
}
ns->pages_written = vzalloc(ns->geom.pgnum);
if (!ns->pages_written) {
NS_ERR("alloc_device: unable to allocate pages written array\n");
err = -ENOMEM;
goto err_close;
}
ns->file_buf = kmalloc(ns->geom.pgszoob, GFP_KERNEL);
if (!ns->file_buf) {
NS_ERR("alloc_device: unable to allocate file buf\n");
err = -ENOMEM;
goto err_free;
}
ns->cfile = cfile;
return 0;
}
ns->pages = vmalloc(ns->geom.pgnum * sizeof(union ns_mem));
if (!ns->pages) {
NS_ERR("alloc_device: unable to allocate page array\n");
return -ENOMEM;
}
for (i = 0; i < ns->geom.pgnum; i++) {
ns->pages[i].byte = NULL;
}
ns->nand_pages_slab = kmem_cache_create("nandsim",
ns->geom.pgszoob, 0, 0, NULL);
if (!ns->nand_pages_slab) {
NS_ERR("cache_create: unable to create kmem_cache\n");
return -ENOMEM;
}
return 0;
err_free:
vfree(ns->pages_written);
err_close:
filp_close(cfile, NULL);
return err;
}
/*
* Free any allocated pages, and free the array of page pointers.
*/
static void free_device(struct nandsim *ns)
{
int i;
if (ns->cfile) {
kfree(ns->file_buf);
vfree(ns->pages_written);
filp_close(ns->cfile, NULL);
return;
}
if (ns->pages) {
for (i = 0; i < ns->geom.pgnum; i++) {
if (ns->pages[i].byte)
kmem_cache_free(ns->nand_pages_slab,
ns->pages[i].byte);
}
kmem_cache_destroy(ns->nand_pages_slab);
vfree(ns->pages);
}
}
static char *get_partition_name(int i)
{
char buf[64];
sprintf(buf, "NAND simulator partition %d", i);
return kstrdup(buf, GFP_KERNEL);
}
/*
* Initialize the nandsim structure.
*
* RETURNS: 0 if success, -ERRNO if failure.
*/
static int init_nandsim(struct mtd_info *mtd)
{
struct nand_chip *chip = mtd->priv;
struct nandsim *ns = chip->priv;
int i, ret = 0;
uint64_t remains;
uint64_t next_offset;
if (NS_IS_INITIALIZED(ns)) {
NS_ERR("init_nandsim: nandsim is already initialized\n");
return -EIO;
}
/* Force mtd to not do delays */
chip->chip_delay = 0;
/* Initialize the NAND flash parameters */
ns->busw = chip->options & NAND_BUSWIDTH_16 ? 16 : 8;
ns->geom.totsz = mtd->size;
ns->geom.pgsz = mtd->writesize;
ns->geom.oobsz = mtd->oobsize;
ns->geom.secsz = mtd->erasesize;
ns->geom.pgszoob = ns->geom.pgsz + ns->geom.oobsz;
ns->geom.pgnum = div_u64(ns->geom.totsz, ns->geom.pgsz);
ns->geom.totszoob = ns->geom.totsz + (uint64_t)ns->geom.pgnum * ns->geom.oobsz;
ns->geom.secshift = ffs(ns->geom.secsz) - 1;
ns->geom.pgshift = chip->page_shift;
ns->geom.oobshift = ffs(ns->geom.oobsz) - 1;
ns->geom.pgsec = ns->geom.secsz / ns->geom.pgsz;
ns->geom.secszoob = ns->geom.secsz + ns->geom.oobsz * ns->geom.pgsec;
ns->options = 0;
if (ns->geom.pgsz == 256) {
ns->options |= OPT_PAGE256;
}
else if (ns->geom.pgsz == 512) {
ns->options |= (OPT_PAGE512 | OPT_AUTOINCR);
if (ns->busw == 8)
ns->options |= OPT_PAGE512_8BIT;
} else if (ns->geom.pgsz == 2048) {
ns->options |= OPT_PAGE2048;
} else if (ns->geom.pgsz == 4096) {
ns->options |= OPT_PAGE4096;
} else {
NS_ERR("init_nandsim: unknown page size %u\n", ns->geom.pgsz);
return -EIO;
}
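/*
* Address cycles: pgaddrbytes - secaddrbytes column bytes (one for
* small-page chips, two for large-page ones), the rest address the row.
*/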
if (ns->options & OPT_SMALLPAGE) {
if (ns->geom.totsz <= (32 << 20)) {
ns->geom.pgaddrbytes = 3;
ns->geom.secaddrbytes = 2;
} else {
ns->geom.pgaddrbytes = 4;
ns->geom.secaddrbytes = 3;
}
} else {
if (ns->geom.totsz <= (128 << 20)) {
ns->geom.pgaddrbytes = 4;
ns->geom.secaddrbytes = 2;
} else {
ns->geom.pgaddrbytes = 5;
ns->geom.secaddrbytes = 3;
}
}
/* Fill the partition_info structure */
if (parts_num > ARRAY_SIZE(ns->partitions)) {
NS_ERR("too many partitions.\n");
ret = -EINVAL;
goto error;
}
remains = ns->geom.totsz;
next_offset = 0;
for (i = 0; i < parts_num; ++i) {
uint64_t part_sz = (uint64_t)parts[i] * ns->geom.secsz;
if (!part_sz || part_sz > remains) {
NS_ERR("bad partition size.\n");
ret = -EINVAL;
goto error;
}
ns->partitions[i].name = get_partition_name(i);
ns->partitions[i].offset = next_offset;
ns->partitions[i].size = part_sz;
next_offset += ns->partitions[i].size;
remains -= ns->partitions[i].size;
}
ns->nbparts = parts_num;
if (remains) {
if (parts_num + 1 > ARRAY_SIZE(ns->partitions)) {
NS_ERR("too many partitions.\n");
ret = -EINVAL;
goto error;
}
ns->partitions[i].name = get_partition_name(i);
ns->partitions[i].offset = next_offset;
ns->partitions[i].size = remains;
ns->nbparts += 1;
}
/* Detect how many ID bytes the NAND chip outputs */
for (i = 0; nand_flash_ids[i].name != NULL; i++) {
if (second_id_byte != nand_flash_ids[i].id)
continue;
if (!(nand_flash_ids[i].options & NAND_NO_AUTOINCR))
ns->options |= OPT_AUTOINCR;
}
if (ns->busw == 16)
NS_WARN("16-bit flashes support wasn't tested\n");
printk("flash size: %llu MiB\n",
(unsigned long long)ns->geom.totsz >> 20);
printk("page size: %u bytes\n", ns->geom.pgsz);
printk("OOB area size: %u bytes\n", ns->geom.oobsz);
printk("sector size: %u KiB\n", ns->geom.secsz >> 10);
printk("pages number: %u\n", ns->geom.pgnum);
printk("pages per sector: %u\n", ns->geom.pgsec);
printk("bus width: %u\n", ns->busw);
printk("bits in sector size: %u\n", ns->geom.secshift);
printk("bits in page size: %u\n", ns->geom.pgshift);
printk("bits in OOB size: %u\n", ns->geom.oobshift);
printk("flash size with OOB: %llu KiB\n",
(unsigned long long)ns->geom.totszoob >> 10);
printk("page address bytes: %u\n", ns->geom.pgaddrbytes);
printk("sector address bytes: %u\n", ns->geom.secaddrbytes);
printk("options: %#x\n", ns->options);
if ((ret = alloc_device(ns)) != 0)
goto error;
/* Allocate / initialize the internal buffer */
ns->buf.byte = kmalloc(ns->geom.pgszoob, GFP_KERNEL);
if (!ns->buf.byte) {
NS_ERR("init_nandsim: unable to allocate %u bytes for the internal buffer\n",
ns->geom.pgszoob);
ret = -ENOMEM;
goto error;
}
memset(ns->buf.byte, 0xFF, ns->geom.pgszoob);
return 0;
error:
free_device(ns);
return ret;
}
/*
* Free the nandsim structure.
*/
static void free_nandsim(struct nandsim *ns)
{
kfree(ns->buf.byte);
free_device(ns);
return;
}
static int parse_badblocks(struct nandsim *ns, struct mtd_info *mtd)
{
char *w;
int zero_ok;
unsigned int erase_block_no;
loff_t offset;
if (!badblocks)
return 0;
w = badblocks;
do {
zero_ok = (*w == '0' ? 1 : 0);
erase_block_no = simple_strtoul(w, &w, 0);
if (!zero_ok && !erase_block_no) {
NS_ERR("invalid badblocks.\n");
return -EINVAL;
}
offset = (loff_t)erase_block_no * ns->geom.secsz;
if (mtd->block_markbad(mtd, offset)) {
NS_ERR("invalid badblocks.\n");
return -EINVAL;
}
if (*w == ',')
w += 1;
} while (*w);
return 0;
}
static int parse_weakblocks(void)
{
char *w;
int zero_ok;
unsigned int erase_block_no;
unsigned int max_erases;
struct weak_block *wb;
if (!weakblocks)
return 0;
w = weakblocks;
do {
zero_ok = (*w == '0' ? 1 : 0);
erase_block_no = simple_strtoul(w, &w, 0);
if (!zero_ok && !erase_block_no) {
NS_ERR("invalid weakblocks.\n");
return -EINVAL;
}
max_erases = 3;
if (*w == ':') {
w += 1;
max_erases = simple_strtoul(w, &w, 0);
}
if (*w == ',')
w += 1;
wb = kzalloc(sizeof(*wb), GFP_KERNEL);
if (!wb) {
NS_ERR("unable to allocate memory.\n");
return -ENOMEM;
}
wb->erase_block_no = erase_block_no;
wb->max_erases = max_erases;
list_add(&wb->list, &weak_blocks);
} while (*w);
return 0;
}
static int erase_error(unsigned int erase_block_no)
{
struct weak_block *wb;
list_for_each_entry(wb, &weak_blocks, list)
if (wb->erase_block_no == erase_block_no) {
if (wb->erases_done >= wb->max_erases)
return 1;
wb->erases_done += 1;
return 0;
}
return 0;
}
static int parse_weakpages(void)
{
char *w;
int zero_ok;
unsigned int page_no;
unsigned int max_writes;
struct weak_page *wp;
if (!weakpages)
return 0;
w = weakpages;
do {
zero_ok = (*w == '0' ? 1 : 0);
page_no = simple_strtoul(w, &w, 0);
if (!zero_ok && !page_no) {
NS_ERR("invalid weakpagess.\n");
return -EINVAL;
}
max_writes = 3;
if (*w == ':') {
w += 1;
max_writes = simple_strtoul(w, &w, 0);
}
if (*w == ',')
w += 1;
wp = kzalloc(sizeof(*wp), GFP_KERNEL);
if (!wp) {
NS_ERR("unable to allocate memory.\n");
return -ENOMEM;
}
wp->page_no = page_no;
wp->max_writes = max_writes;
list_add(&wp->list, &weak_pages);
} while (*w);
return 0;
}
static int write_error(unsigned int page_no)
{
struct weak_page *wp;
list_for_each_entry(wp, &weak_pages, list)
if (wp->page_no == page_no) {
if (wp->writes_done >= wp->max_writes)
return 1;
wp->writes_done += 1;
return 0;
}
return 0;
}
static int parse_gravepages(void)
{
char *g;
int zero_ok;
unsigned int page_no;
unsigned int max_reads;
struct grave_page *gp;
if (!gravepages)
return 0;
g = gravepages;
do {
zero_ok = (*g == '0' ? 1 : 0);
page_no = simple_strtoul(g, &g, 0);
if (!zero_ok && !page_no) {
NS_ERR("invalid gravepagess.\n");
return -EINVAL;
}
max_reads = 3;
if (*g == ':') {
g += 1;
max_reads = simple_strtoul(g, &g, 0);
}
if (*g == ',')
g += 1;
gp = kzalloc(sizeof(*gp), GFP_KERNEL);
if (!gp) {
NS_ERR("unable to allocate memory.\n");
return -ENOMEM;
}
gp->page_no = page_no;
gp->max_reads = max_reads;
list_add(&gp->list, &grave_pages);
} while (*g);
return 0;
}
static int read_error(unsigned int page_no)
{
struct grave_page *gp;
list_for_each_entry(gp, &grave_pages, list)
if (gp->page_no == page_no) {
if (gp->reads_done >= gp->max_reads)
return 1;
gp->reads_done += 1;
return 0;
}
return 0;
}
static void free_lists(void)
{
struct list_head *pos, *n;
list_for_each_safe(pos, n, &weak_blocks) {
list_del(pos);
kfree(list_entry(pos, struct weak_block, list));
}
list_for_each_safe(pos, n, &weak_pages) {
list_del(pos);
kfree(list_entry(pos, struct weak_page, list));
}
list_for_each_safe(pos, n, &grave_pages) {
list_del(pos);
kfree(list_entry(pos, struct grave_page, list));
}
kfree(erase_block_wear);
}
static int setup_wear_reporting(struct mtd_info *mtd)
{
size_t mem;
if (!rptwear)
return 0;
wear_eb_count = div_u64(mtd->size, mtd->erasesize);
mem = wear_eb_count * sizeof(unsigned long);
if (mem / sizeof(unsigned long) != wear_eb_count) {
NS_ERR("Too many erase blocks for wear reporting\n");
return -ENOMEM;
}
erase_block_wear = kzalloc(mem, GFP_KERNEL);
if (!erase_block_wear) {
NS_ERR("Too many erase blocks for wear reporting\n");
return -ENOMEM;
}
return 0;
}
static void update_wear(unsigned int erase_block_no)
{
unsigned long wmin = -1, wmax = 0, avg;
unsigned long deciles[10], decile_max[10], tot = 0;
unsigned int i;
if (!erase_block_wear)
return;
total_wear += 1;
if (total_wear == 0)
NS_ERR("Erase counter total overflow\n");
erase_block_wear[erase_block_no] += 1;
if (erase_block_wear[erase_block_no] == 0)
NS_ERR("Erase counter overflow for erase block %u\n", erase_block_no);
rptwear_cnt += 1;
if (rptwear_cnt < rptwear)
return;
rptwear_cnt = 0;
/* Calc wear stats */
for (i = 0; i < wear_eb_count; ++i) {
unsigned long wear = erase_block_wear[i];
if (wear < wmin)
wmin = wear;
if (wear > wmax)
wmax = wear;
tot += wear;
}
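/*
* Bucket the erase counts into ten ranges; each limit is roughly
* (i + 1)/10 of the maximum, e.g. with wmax = 100 the limits are
* 10, 20, ..., 90, 100.
*/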
for (i = 0; i < 9; ++i) {
deciles[i] = 0;
decile_max[i] = (wmax * (i + 1) + 5) / 10;
}
deciles[9] = 0;
decile_max[9] = wmax;
for (i = 0; i < wear_eb_count; ++i) {
int d;
unsigned long wear = erase_block_wear[i];
for (d = 0; d < 10; ++d)
if (wear <= decile_max[d]) {
deciles[d] += 1;
break;
}
}
avg = tot / wear_eb_count;
/* Output wear report */
NS_INFO("*** Wear Report ***\n");
NS_INFO("Total numbers of erases: %lu\n", tot);
NS_INFO("Number of erase blocks: %u\n", wear_eb_count);
NS_INFO("Average number of erases: %lu\n", avg);
NS_INFO("Maximum number of erases: %lu\n", wmax);
NS_INFO("Minimum number of erases: %lu\n", wmin);
for (i = 0; i < 10; ++i) {
unsigned long from = (i ? decile_max[i - 1] + 1 : 0);
if (from > decile_max[i])
continue;
NS_INFO("Number of ebs with erase counts from %lu to %lu : %lu\n",
from,
decile_max[i],
deciles[i]);
}
NS_INFO("*** End of Wear Report ***\n");
}
/*
* Returns the string representation of the given state.
*/
static char *get_state_name(uint32_t state)
{
switch (NS_STATE(state)) {
case STATE_CMD_READ0:
return "STATE_CMD_READ0";
case STATE_CMD_READ1:
return "STATE_CMD_READ1";
case STATE_CMD_PAGEPROG:
return "STATE_CMD_PAGEPROG";
case STATE_CMD_READOOB:
return "STATE_CMD_READOOB";
case STATE_CMD_READSTART:
return "STATE_CMD_READSTART";
case STATE_CMD_ERASE1:
return "STATE_CMD_ERASE1";
case STATE_CMD_STATUS:
return "STATE_CMD_STATUS";
case STATE_CMD_STATUS_M:
return "STATE_CMD_STATUS_M";
case STATE_CMD_SEQIN:
return "STATE_CMD_SEQIN";
case STATE_CMD_READID:
return "STATE_CMD_READID";
case STATE_CMD_ERASE2:
return "STATE_CMD_ERASE2";
case STATE_CMD_RESET:
return "STATE_CMD_RESET";
case STATE_CMD_RNDOUT:
return "STATE_CMD_RNDOUT";
case STATE_CMD_RNDOUTSTART:
return "STATE_CMD_RNDOUTSTART";
case STATE_ADDR_PAGE:
return "STATE_ADDR_PAGE";
case STATE_ADDR_SEC:
return "STATE_ADDR_SEC";
case STATE_ADDR_ZERO:
return "STATE_ADDR_ZERO";
case STATE_ADDR_COLUMN:
return "STATE_ADDR_COLUMN";
case STATE_DATAIN:
return "STATE_DATAIN";
case STATE_DATAOUT:
return "STATE_DATAOUT";
case STATE_DATAOUT_ID:
return "STATE_DATAOUT_ID";
case STATE_DATAOUT_STATUS:
return "STATE_DATAOUT_STATUS";
case STATE_DATAOUT_STATUS_M:
return "STATE_DATAOUT_STATUS_M";
case STATE_READY:
return "STATE_READY";
case STATE_UNKNOWN:
return "STATE_UNKNOWN";
}
NS_ERR("get_state_name: unknown state, BUG\n");
return NULL;
}
/*
* Check if command is valid.
*
* RETURNS: 1 if wrong command, 0 if right.
*/
static int check_command(int cmd)
{
switch (cmd) {
case NAND_CMD_READ0:
case NAND_CMD_READ1:
case NAND_CMD_READSTART:
case NAND_CMD_PAGEPROG:
case NAND_CMD_READOOB:
case NAND_CMD_ERASE1:
case NAND_CMD_STATUS:
case NAND_CMD_SEQIN:
case NAND_CMD_READID:
case NAND_CMD_ERASE2:
case NAND_CMD_RESET:
case NAND_CMD_RNDOUT:
case NAND_CMD_RNDOUTSTART:
return 0;
case NAND_CMD_STATUS_MULTI:
default:
return 1;
}
}
/*
* Returns state after command is accepted by command number.
*/
static uint32_t get_state_by_command(unsigned command)
{
switch (command) {
case NAND_CMD_READ0:
return STATE_CMD_READ0;
case NAND_CMD_READ1:
return STATE_CMD_READ1;
case NAND_CMD_PAGEPROG:
return STATE_CMD_PAGEPROG;
case NAND_CMD_READSTART:
return STATE_CMD_READSTART;
case NAND_CMD_READOOB:
return STATE_CMD_READOOB;
case NAND_CMD_ERASE1:
return STATE_CMD_ERASE1;
case NAND_CMD_STATUS:
return STATE_CMD_STATUS;
case NAND_CMD_STATUS_MULTI:
return STATE_CMD_STATUS_M;
case NAND_CMD_SEQIN:
return STATE_CMD_SEQIN;
case NAND_CMD_READID:
return STATE_CMD_READID;
case NAND_CMD_ERASE2:
return STATE_CMD_ERASE2;
case NAND_CMD_RESET:
return STATE_CMD_RESET;
case NAND_CMD_RNDOUT:
return STATE_CMD_RNDOUT;
case NAND_CMD_RNDOUTSTART:
return STATE_CMD_RNDOUTSTART;
}
NS_ERR("get_state_by_command: unknown command, BUG\n");
return 0;
}
/*
* Move an address byte to the corresponding internal register.
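* E.g. with pgaddrbytes = 3 and secaddrbytes = 2, the first byte fills
* the column register and the next two fill the row register.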
*/
static inline void accept_addr_byte(struct nandsim *ns, u_char bt)
{
uint byte = (uint)bt;
if (ns->regs.count < (ns->geom.pgaddrbytes - ns->geom.secaddrbytes))
ns->regs.column |= (byte << 8 * ns->regs.count);
else {
ns->regs.row |= (byte << 8 * (ns->regs.count -
ns->geom.pgaddrbytes +
ns->geom.secaddrbytes));
}
return;
}
/*
* Switch to STATE_READY state.
*/
static inline void switch_to_ready_state(struct nandsim *ns, u_char status)
{
NS_DBG("switch_to_ready_state: switch to %s state\n", get_state_name(STATE_READY));
ns->state = STATE_READY;
ns->nxstate = STATE_UNKNOWN;
ns->op = NULL;
ns->npstates = 0;
ns->stateidx = 0;
ns->regs.num = 0;
ns->regs.count = 0;
ns->regs.off = 0;
ns->regs.row = 0;
ns->regs.column = 0;
ns->regs.status = status;
}
/*
* If the operation isn't known yet, try to find it in the global array
* of supported operations.
*
* Operation can be unknown because of the following.
* 1. New command was accepted and this is the first call to find the
*    corresponding states chain. In this case ns->npstates = 0;
* 2. There are several operations which begin with the same command(s)
* (for example program from the second half and read from the
* second half operations both begin with the READ1 command). In this
* case the ns->pstates[] array contains previous states.
*
* Thus, the function tries to find operation containing the following
* states (if the 'flag' parameter is 0):
* ns->pstates[0], ... ns->pstates[ns->npstates], ns->state
*
* If (one and only one) matching operation is found, it is accepted
* (ns->op, ns->state, ns->nxstate are initialized, ns->npstates is
* zeroed).
*
* If there are several matches, the current state is pushed to the
* ns->pstates.
*
* The operation can be unknown only while commands are input to the chip.
* As soon as an address is accepted, the operation must be known.
* In such situation the function is called with 'flag' != 0, and the
* operation is searched using the following pattern:
* ns->pstates[0], ... ns->pstates[ns->npstates], <address input>
*
* It is supposed that this pattern must either match one operation or
* none. There can't be ambiguity in that case.
*
* If no matches are found, the function does the following:
* 1. if there are saved states present, try to ignore them and search
* again only using the last command. If nothing was found, switch
* to the STATE_READY state.
* 2. if there are no saved states, switch to the STATE_READY state.
*
* RETURNS: -2 - no matched operations found.
* -1 - several matches.
* 0 - operation is found.
*/
static int find_operation(struct nandsim *ns, uint32_t flag)
{
int opsfound = 0;
int i, j, idx = 0;
for (i = 0; i < NS_OPER_NUM; i++) {
int found = 1;
if (!(ns->options & ops[i].reqopts))
/* Ignore operations we can't perform */
continue;
if (flag) {
if (!(ops[i].states[ns->npstates] & STATE_ADDR_MASK))
continue;
} else {
if (NS_STATE(ns->state) != NS_STATE(ops[i].states[ns->npstates]))
continue;
}
for (j = 0; j < ns->npstates; j++)
if (NS_STATE(ops[i].states[j]) != NS_STATE(ns->pstates[j])
&& (ns->options & ops[idx].reqopts)) {
found = 0;
break;
}
if (found) {
idx = i;
opsfound += 1;
}
}
if (opsfound == 1) {
/* Exact match */
ns->op = &ops[idx].states[0];
if (flag) {
/*
* In this case the find_operation function was
* called when the address input has just begun. The address
* isn't fully input yet, so the current state must
* not be one of STATE_ADDR_*, but the STATE_ADDR_*
* state must be the next state (ns->nxstate).
*/
ns->stateidx = ns->npstates - 1;
} else {
ns->stateidx = ns->npstates;
}
ns->npstates = 0;
ns->state = ns->op[ns->stateidx];
ns->nxstate = ns->op[ns->stateidx + 1];
NS_DBG("find_operation: operation found, index: %d, state: %s, nxstate %s\n",
idx, get_state_name(ns->state), get_state_name(ns->nxstate));
return 0;
}
if (opsfound == 0) {
/* Nothing was found. Try to ignore previous commands (if any) and search again */
if (ns->npstates != 0) {
NS_DBG("find_operation: no operation found, try again with state %s\n",
get_state_name(ns->state));
ns->npstates = 0;
return find_operation(ns, 0);
}
NS_DBG("find_operation: no operations found\n");
switch_to_ready_state(ns, NS_STATUS_FAILED(ns));
return -2;
}
if (flag) {
/* This shouldn't happen */
NS_DBG("find_operation: BUG, operation must be known if address is input\n");
return -2;
}
NS_DBG("find_operation: there is still ambiguity\n");
ns->pstates[ns->npstates++] = ns->state;
return -1;
}
static void put_pages(struct nandsim *ns)
{
int i;
for (i = 0; i < ns->held_cnt; i++)
page_cache_release(ns->held_pages[i]);
}
/* Get page cache pages in advance to provide NOFS memory allocation */
static int get_pages(struct nandsim *ns, struct file *file, size_t count, loff_t pos)
{
pgoff_t index, start_index, end_index;
struct page *page;
struct address_space *mapping = file->f_mapping;
start_index = pos >> PAGE_CACHE_SHIFT;
end_index = (pos + count - 1) >> PAGE_CACHE_SHIFT;
if (end_index - start_index + 1 > NS_MAX_HELD_PAGES)
return -EINVAL;
ns->held_cnt = 0;
for (index = start_index; index <= end_index; index++) {
page = find_get_page(mapping, index);
if (page == NULL) {
page = find_or_create_page(mapping, index, GFP_NOFS);
if (page == NULL) {
write_inode_now(mapping->host, 1);
page = find_or_create_page(mapping, index, GFP_NOFS);
}
if (page == NULL) {
put_pages(ns);
return -ENOMEM;
}
unlock_page(page);
}
ns->held_pages[ns->held_cnt++] = page;
}
return 0;
}
static int set_memalloc(void)
{
if (current->flags & PF_MEMALLOC)
return 0;
current->flags |= PF_MEMALLOC;
return 1;
}
static void clear_memalloc(int memalloc)
{
if (memalloc)
current->flags &= ~PF_MEMALLOC;
}
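/*
* Read from the cache file into a kernel buffer. The pages are pinned
* first so the VFS call cannot allocate under FS pressure, and
* set_fs(get_ds()) lets vfs_read() accept a kernel pointer.
*/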
static ssize_t read_file(struct nandsim *ns, struct file *file, void *buf, size_t count, loff_t *pos)
{
mm_segment_t old_fs;
ssize_t tx;
int err, memalloc;
err = get_pages(ns, file, count, *pos);
if (err)
return err;
old_fs = get_fs();
set_fs(get_ds());
memalloc = set_memalloc();
tx = vfs_read(file, (char __user *)buf, count, pos);
clear_memalloc(memalloc);
set_fs(old_fs);
put_pages(ns);
return tx;
}
static ssize_t write_file(struct nandsim *ns, struct file *file, void *buf, size_t count, loff_t *pos)
{
mm_segment_t old_fs;
ssize_t tx;
int err, memalloc;
err = get_pages(ns, file, count, *pos);
if (err)
return err;
old_fs = get_fs();
set_fs(get_ds());
memalloc = set_memalloc();
tx = vfs_write(file, (char __user *)buf, count, pos);
clear_memalloc(memalloc);
set_fs(old_fs);
put_pages(ns);
return tx;
}
/*
* Returns a pointer to the current page.
*/
static inline union ns_mem *NS_GET_PAGE(struct nandsim *ns)
{
return &(ns->pages[ns->regs.row]);
}
/*
* Returns a pointer to the current byte within the current page.
*/
static inline u_char *NS_PAGE_BYTE_OFF(struct nandsim *ns)
{
return NS_GET_PAGE(ns)->byte + ns->regs.column + ns->regs.off;
}
int do_read_error(struct nandsim *ns, int num)
{
unsigned int page_no = ns->regs.row;
if (read_error(page_no)) {
int i;
memset(ns->buf.byte, 0xFF, num);
for (i = 0; i < num; ++i)
ns->buf.byte[i] = random32();
NS_WARN("simulating read error in page %u\n", page_no);
return 1;
}
return 0;
}
void do_bit_flips(struct nandsim *ns, int num)
{
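/* random32() is uniform over 32 bits, so the (1 << 22) threshold
* fires with probability 2^22 / 2^32 = 1/1024 per read. */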
if (bitflips && random32() < (1 << 22)) {
int flips = 1;
if (bitflips > 1)
flips = (random32() % (int) bitflips) + 1;
while (flips--) {
int pos = random32() % (num * 8);
ns->buf.byte[pos / 8] ^= (1 << (pos % 8));
NS_WARN("read_page: flipping bit %d in page %d "
"reading from %d ecc: corrected=%u failed=%u\n",
pos, ns->regs.row, ns->regs.column + ns->regs.off,
nsmtd->ecc_stats.corrected, nsmtd->ecc_stats.failed);
}
}
}
/*
* Fill the NAND buffer with data read from the specified page.
*/
static void read_page(struct nandsim *ns, int num)
{
union ns_mem *mypage;
if (ns->cfile) {
if (!ns->pages_written[ns->regs.row]) {
NS_DBG("read_page: page %d not written\n", ns->regs.row);
memset(ns->buf.byte, 0xFF, num);
} else {
loff_t pos;
ssize_t tx;
NS_DBG("read_page: page %d written, reading from %d\n",
ns->regs.row, ns->regs.column + ns->regs.off);
if (do_read_error(ns, num))
return;
pos = (loff_t)ns->regs.row * ns->geom.pgszoob + ns->regs.column + ns->regs.off;
tx = read_file(ns, ns->cfile, ns->buf.byte, num, &pos);
if (tx != num) {
NS_ERR("read_page: read error for page %d ret %ld\n", ns->regs.row, (long)tx);
return;
}
do_bit_flips(ns, num);
}
return;
}
mypage = NS_GET_PAGE(ns);
if (mypage->byte == NULL) {
NS_DBG("read_page: page %d not allocated\n", ns->regs.row);
memset(ns->buf.byte, 0xFF, num);
} else {
NS_DBG("read_page: page %d allocated, reading from %d\n",
ns->regs.row, ns->regs.column + ns->regs.off);
if (do_read_error(ns, num))
return;
memcpy(ns->buf.byte, NS_PAGE_BYTE_OFF(ns), num);
do_bit_flips(ns, num);
}
}
/*
* Erase all pages in the specified sector.
*/
static void erase_sector(struct nandsim *ns)
{
union ns_mem *mypage;
int i;
if (ns->cfile) {
for (i = 0; i < ns->geom.pgsec; i++)
if (ns->pages_written[ns->regs.row + i]) {
NS_DBG("erase_sector: freeing page %d\n", ns->regs.row + i);
ns->pages_written[ns->regs.row + i] = 0;
}
return;
}
mypage = NS_GET_PAGE(ns);
for (i = 0; i < ns->geom.pgsec; i++) {
if (mypage->byte != NULL) {
NS_DBG("erase_sector: freeing page %d\n", ns->regs.row+i);
kmem_cache_free(ns->nand_pages_slab, mypage->byte);
mypage->byte = NULL;
}
mypage++;
}
}
/*
* Program the specified page with the contents from the NAND buffer.
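* NAND programming can only clear bits, which is why the buffer is
* ANDed into the existing page contents rather than copied.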
*/
static int prog_page(struct nandsim *ns, int num)
{
int i;
union ns_mem *mypage;
u_char *pg_off;
if (ns->cfile) {
loff_t off, pos;
ssize_t tx;
int all;
NS_DBG("prog_page: writing page %d\n", ns->regs.row);
pg_off = ns->file_buf + ns->regs.column + ns->regs.off;
off = (loff_t)ns->regs.row * ns->geom.pgszoob + ns->regs.column + ns->regs.off;
if (!ns->pages_written[ns->regs.row]) {
all = 1;
memset(ns->file_buf, 0xff, ns->geom.pgszoob);
} else {
all = 0;
pos = off;
tx = read_file(ns, ns->cfile, pg_off, num, &pos);
if (tx != num) {
NS_ERR("prog_page: read error for page %d ret %ld\n", ns->regs.row, (long)tx);
return -1;
}
}
for (i = 0; i < num; i++)
pg_off[i] &= ns->buf.byte[i];
if (all) {
pos = (loff_t)ns->regs.row * ns->geom.pgszoob;
tx = write_file(ns, ns->cfile, ns->file_buf, ns->geom.pgszoob, &pos);
if (tx != ns->geom.pgszoob) {
NS_ERR("prog_page: write error for page %d ret %ld\n", ns->regs.row, (long)tx);
return -1;
}
ns->pages_written[ns->regs.row] = 1;
} else {
pos = off;
tx = write_file(ns, ns->cfile, pg_off, num, &pos);
if (tx != num) {
NS_ERR("prog_page: write error for page %d ret %ld\n", ns->regs.row, (long)tx);
return -1;
}
}
return 0;
}
mypage = NS_GET_PAGE(ns);
if (mypage->byte == NULL) {
NS_DBG("prog_page: allocating page %d\n", ns->regs.row);
/*
* We allocate memory with GFP_NOFS because a flash FS may
* utilize this. If it is holding an FS lock, then gets here,
* then kernel memory alloc runs writeback which goes to the FS
* again and deadlocks. This was seen in practice.
*/
mypage->byte = kmem_cache_alloc(ns->nand_pages_slab, GFP_NOFS);
if (mypage->byte == NULL) {
NS_ERR("prog_page: error allocating memory for page %d\n", ns->regs.row);
return -1;
}
memset(mypage->byte, 0xFF, ns->geom.pgszoob);
}
pg_off = NS_PAGE_BYTE_OFF(ns);
for (i = 0; i < num; i++)
pg_off[i] &= ns->buf.byte[i];
return 0;
}
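/*
 * Why prog_page() uses "&=": NAND programming can only clear bits
 * (1 -> 0); only an erase returns a whole block to the all-ones state.
 * The simulator reproduces this by AND-ing new data into the stored
 * page. As a one-byte sketch (illustration only):
 */
static inline u_char __maybe_unused ns_program_byte(u_char stored, u_char data)
{
/* 0xFF &= 0xF0 -> 0xF0; 0xF0 &= 0x0F -> 0x00; only erase restores 1s */
return stored & data;
}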
/*
* If state has any action bit, perform this action.
*
* RETURNS: 0 if success, -1 if error.
*/
static int do_state_action(struct nandsim *ns, uint32_t action)
{
int num;
int busdiv = ns->busw == 8 ? 1 : 2;
unsigned int erase_block_no, page_no;
action &= ACTION_MASK;
/* Check that page address input is correct */
if (action != ACTION_SECERASE && ns->regs.row >= ns->geom.pgnum) {
NS_WARN("do_state_action: wrong page number (%#x)\n", ns->regs.row);
return -1;
}
switch (action) {
case ACTION_CPY:
/*
* Copy page data to the internal buffer.
*/
/* Column shouldn't be very large */
if (ns->regs.column >= (ns->geom.pgszoob - ns->regs.off)) {
NS_ERR("do_state_action: column number is too large\n");
break;
}
num = ns->geom.pgszoob - ns->regs.off - ns->regs.column;
read_page(ns, num);
NS_DBG("do_state_action: (ACTION_CPY:) copy %d bytes to int buf, raw offset %d\n",
num, NS_RAW_OFFSET(ns) + ns->regs.off);
if (ns->regs.off == 0)
NS_LOG("read page %d\n", ns->regs.row);
else if (ns->regs.off < ns->geom.pgsz)
NS_LOG("read page %d (second half)\n", ns->regs.row);
else
NS_LOG("read OOB of page %d\n", ns->regs.row);
NS_UDELAY(access_delay);
NS_UDELAY(input_cycle * ns->geom.pgsz / 1000 / busdiv);
break;
case ACTION_SECERASE:
/*
* Erase sector.
*/
if (ns->lines.wp) {
NS_ERR("do_state_action: device is write-protected, ignore sector erase\n");
return -1;
}
if (ns->regs.row >= ns->geom.pgnum - ns->geom.pgsec
|| (ns->regs.row & ~(ns->geom.secsz - 1))) {
NS_ERR("do_state_action: wrong sector address (%#x)\n", ns->regs.row);
return -1;
}
ns->regs.row = (ns->regs.row <<
8 * (ns->geom.pgaddrbytes - ns->geom.secaddrbytes)) | ns->regs.column;
ns->regs.column = 0;
erase_block_no = ns->regs.row >> (ns->geom.secshift - ns->geom.pgshift);
NS_DBG("do_state_action: erase sector at address %#x, off = %d\n",
ns->regs.row, NS_RAW_OFFSET(ns));
NS_LOG("erase sector %u\n", erase_block_no);
erase_sector(ns);
NS_MDELAY(erase_delay);
if (erase_block_wear)
update_wear(erase_block_no);
if (erase_error(erase_block_no)) {
NS_WARN("simulating erase failure in erase block %u\n", erase_block_no);
return -1;
}
break;
case ACTION_PRGPAGE:
/*
* Program page - move internal buffer data to the page.
*/
if (ns->lines.wp) {
NS_WARN("do_state_action: device is write-protected, programm\n");
return -1;
}
num = ns->geom.pgszoob - ns->regs.off - ns->regs.column;
if (num != ns->regs.count) {
NS_ERR("do_state_action: too few bytes were input (%d instead of %d)\n",
ns->regs.count, num);
return -1;
}
if (prog_page(ns, num) == -1)
return -1;
page_no = ns->regs.row;
NS_DBG("do_state_action: copy %d bytes from int buf to (%#x, %#x), raw off = %d\n",
num, ns->regs.row, ns->regs.column, NS_RAW_OFFSET(ns) + ns->regs.off);
NS_LOG("programm page %d\n", ns->regs.row);
NS_UDELAY(programm_delay);
NS_UDELAY(output_cycle * ns->geom.pgsz / 1000 / busdiv);
if (write_error(page_no)) {
NS_WARN("simulating write failure in page %u\n", page_no);
return -1;
}
break;
case ACTION_ZEROOFF:
NS_DBG("do_state_action: set internal offset to 0\n");
ns->regs.off = 0;
break;
case ACTION_HALFOFF:
if (!(ns->options & OPT_PAGE512_8BIT)) {
NS_ERR("do_state_action: BUG! can't skip half of page for non-512"
"byte page size 8x chips\n");
return -1;
}
NS_DBG("do_state_action: set internal offset to %d\n", ns->geom.pgsz/2);
ns->regs.off = ns->geom.pgsz/2;
break;
case ACTION_OOBOFF:
NS_DBG("do_state_action: set internal offset to %d\n", ns->geom.pgsz);
ns->regs.off = ns->geom.pgsz;
break;
default:
NS_DBG("do_state_action: BUG! unknown action\n");
}
return 0;
}
/*
* Switch simulator's state.
*/
static void switch_state(struct nandsim *ns)
{
if (ns->op) {
/*
* The current operation has already been identified.
* Just follow the states chain.
*/
ns->stateidx += 1;
ns->state = ns->nxstate;
ns->nxstate = ns->op[ns->stateidx + 1];
NS_DBG("switch_state: operation is known, switch to the next state, "
"state: %s, nxstate: %s\n",
get_state_name(ns->state), get_state_name(ns->nxstate));
/* See whether we need to do some action */
if ((ns->state & ACTION_MASK) && do_state_action(ns, ns->state) < 0) {
switch_to_ready_state(ns, NS_STATUS_FAILED(ns));
return;
}
} else {
/*
* We don't yet know which operation we perform.
* Try to identify it.
*/
/*
* The only event that causes switch_state() to be called
* with a yet unknown operation is a new command.
*/
ns->state = get_state_by_command(ns->regs.command);
NS_DBG("switch_state: operation is unknown, try to find it\n");
if (find_operation(ns, 0) != 0)
return;
if ((ns->state & ACTION_MASK) && do_state_action(ns, ns->state) < 0) {
switch_to_ready_state(ns, NS_STATUS_FAILED(ns));
return;
}
}
/* For 16-bit devices the column is the page offset in words */
if ((ns->nxstate & STATE_ADDR_MASK) && ns->busw == 16) {
NS_DBG("switch_state: double the column number for 16x device\n");
ns->regs.column <<= 1;
}
if (NS_STATE(ns->nxstate) == STATE_READY) {
/*
* The current state is the last. Return to STATE_READY
*/
u_char status = NS_STATUS_OK(ns);
/* In case of data states, see if all bytes were input/output */
if ((ns->state & (STATE_DATAIN_MASK | STATE_DATAOUT_MASK))
&& ns->regs.count != ns->regs.num) {
NS_WARN("switch_state: not all bytes were processed, %d left\n",
ns->regs.num - ns->regs.count);
status = NS_STATUS_FAILED(ns);
}
NS_DBG("switch_state: operation complete, switch to STATE_READY state\n");
switch_to_ready_state(ns, status);
return;
} else if (ns->nxstate & (STATE_DATAIN_MASK | STATE_DATAOUT_MASK)) {
/*
* If the next state is data input/output, switch to it now
*/
ns->state = ns->nxstate;
ns->nxstate = ns->op[++ns->stateidx + 1];
ns->regs.num = ns->regs.count = 0;
NS_DBG("switch_state: the next state is data I/O, switch, "
"state: %s, nxstate: %s\n",
get_state_name(ns->state), get_state_name(ns->nxstate));
/*
* Set the internal register to the count of bytes which
* are expected to be input or output
*/
switch (NS_STATE(ns->state)) {
case STATE_DATAIN:
case STATE_DATAOUT:
ns->regs.num = ns->geom.pgszoob - ns->regs.off - ns->regs.column;
break;
case STATE_DATAOUT_ID:
ns->regs.num = ns->geom.idbytes;
break;
case STATE_DATAOUT_STATUS:
case STATE_DATAOUT_STATUS_M:
ns->regs.count = ns->regs.num = 0;
break;
default:
NS_ERR("switch_state: BUG! unknown data state\n");
}
} else if (ns->nxstate & STATE_ADDR_MASK) {
/*
* If the next state is address input, set the internal
* register to the number of expected address bytes
*/
ns->regs.count = 0;
switch (NS_STATE(ns->nxstate)) {
case STATE_ADDR_PAGE:
ns->regs.num = ns->geom.pgaddrbytes;
break;
case STATE_ADDR_SEC:
ns->regs.num = ns->geom.secaddrbytes;
break;
case STATE_ADDR_ZERO:
ns->regs.num = 1;
break;
case STATE_ADDR_COLUMN:
/* Column address is always 2 bytes */
ns->regs.num = ns->geom.pgaddrbytes - ns->geom.secaddrbytes;
break;
default:
NS_ERR("switch_state: BUG! unknown address state\n");
}
} else {
/*
* Just reset internal counters.
*/
ns->regs.num = 0;
ns->regs.count = 0;
}
}
static u_char ns_nand_read_byte(struct mtd_info *mtd)
{
struct nandsim *ns = ((struct nand_chip *)mtd->priv)->priv;
u_char outb = 0x00;
/* Sanity and correctness checks */
if (!ns->lines.ce) {
NS_ERR("read_byte: chip is disabled, return %#x\n", (uint)outb);
return outb;
}
if (ns->lines.ale || ns->lines.cle) {
NS_ERR("read_byte: ALE or CLE pin is high, return %#x\n", (uint)outb);
return outb;
}
if (!(ns->state & STATE_DATAOUT_MASK)) {
NS_WARN("read_byte: unexpected data output cycle, state is %s "
"return %#x\n", get_state_name(ns->state), (uint)outb);
return outb;
}
/* The status register may be read as many times as desired */
if (NS_STATE(ns->state) == STATE_DATAOUT_STATUS) {
NS_DBG("read_byte: return %#x status\n", ns->regs.status);
return ns->regs.status;
}
/* Check if there is any data in the internal buffer which may be read */
if (ns->regs.count == ns->regs.num) {
NS_WARN("read_byte: no more data to output, return %#x\n", (uint)outb);
return outb;
}
switch (NS_STATE(ns->state)) {
case STATE_DATAOUT:
if (ns->busw == 8) {
outb = ns->buf.byte[ns->regs.count];
ns->regs.count += 1;
} else {
outb = (u_char)cpu_to_le16(ns->buf.word[ns->regs.count >> 1]);
ns->regs.count += 2;
}
break;
case STATE_DATAOUT_ID:
NS_DBG("read_byte: read ID byte %d, total = %d\n", ns->regs.count, ns->regs.num);
outb = ns->ids[ns->regs.count];
ns->regs.count += 1;
break;
default:
BUG();
}
if (ns->regs.count == ns->regs.num) {
NS_DBG("read_byte: all bytes were read\n");
/*
* OPT_AUTOINCR allows reading consecutive pages without
* issuing a new read operation cycle.
*/
if ((ns->options & OPT_AUTOINCR) && NS_STATE(ns->state) == STATE_DATAOUT) {
ns->regs.count = 0;
if (ns->regs.row + 1 < ns->geom.pgnum)
ns->regs.row += 1;
NS_DBG("read_byte: switch to the next page (%#x)\n", ns->regs.row);
do_state_action(ns, ACTION_CPY);
}
else if (NS_STATE(ns->nxstate) == STATE_READY)
switch_state(ns);
}
return outb;
}
static void ns_nand_write_byte(struct mtd_info *mtd, u_char byte)
{
struct nandsim *ns = ((struct nand_chip *)mtd->priv)->priv;
/* Sanity and correctness checks */
if (!ns->lines.ce) {
NS_ERR("write_byte: chip is disabled, ignore write\n");
return;
}
if (ns->lines.ale && ns->lines.cle) {
NS_ERR("write_byte: ALE and CLE pins are high simultaneously, ignore write\n");
return;
}
if (ns->lines.cle == 1) {
/*
* The byte written is a command.
*/
if (byte == NAND_CMD_RESET) {
NS_LOG("reset chip\n");
switch_to_ready_state(ns, NS_STATUS_OK(ns));
return;
}
/* Check that the command byte is correct */
if (check_command(byte)) {
NS_ERR("write_byte: unknown command %#x\n", (uint)byte);
return;
}
if (NS_STATE(ns->state) == STATE_DATAOUT_STATUS
|| NS_STATE(ns->state) == STATE_DATAOUT_STATUS_M
|| NS_STATE(ns->state) == STATE_DATAOUT) {
int row = ns->regs.row;
switch_state(ns);
if (byte == NAND_CMD_RNDOUT)
ns->regs.row = row;
}
/* Check if chip is expecting command */
if (NS_STATE(ns->nxstate) != STATE_UNKNOWN && !(ns->nxstate & STATE_CMD_MASK)) {
/* Do not warn if only 2 id bytes are read */
if (!(ns->regs.command == NAND_CMD_READID &&
NS_STATE(ns->state) == STATE_DATAOUT_ID && ns->regs.count == 2)) {
/*
* We are in a situation where something other than a
* command was expected but a command was input. In this
* case ignore the previous command(s)/state(s) and accept
* the last one.
*/
NS_WARN("write_byte: command (%#x) wasn't expected, expected state is %s, "
"ignore previous states\n", (uint)byte, get_state_name(ns->nxstate));
}
switch_to_ready_state(ns, NS_STATUS_FAILED(ns));
}
NS_DBG("command byte corresponding to %s state accepted\n",
get_state_name(get_state_by_command(byte)));
ns->regs.command = byte;
switch_state(ns);
} else if (ns->lines.ale == 1) {
/*
* The byte written is an address.
*/
if (NS_STATE(ns->nxstate) == STATE_UNKNOWN) {
NS_DBG("write_byte: operation isn't known yet, identify it\n");
if (find_operation(ns, 1) < 0)
return;
if ((ns->state & ACTION_MASK) && do_state_action(ns, ns->state) < 0) {
switch_to_ready_state(ns, NS_STATUS_FAILED(ns));
return;
}
ns->regs.count = 0;
switch (NS_STATE(ns->nxstate)) {
case STATE_ADDR_PAGE:
ns->regs.num = ns->geom.pgaddrbytes;
break;
case STATE_ADDR_SEC:
ns->regs.num = ns->geom.secaddrbytes;
break;
case STATE_ADDR_ZERO:
ns->regs.num = 1;
break;
default:
BUG();
}
}
/* Check that chip is expecting address */
if (!(ns->nxstate & STATE_ADDR_MASK)) {
NS_ERR("write_byte: address (%#x) isn't expected, expected state is %s, "
"switch to STATE_READY\n", (uint)byte, get_state_name(ns->nxstate));
switch_to_ready_state(ns, NS_STATUS_FAILED(ns));
return;
}
/* Check if this is expected byte */
if (ns->regs.count == ns->regs.num) {
NS_ERR("write_byte: no more address bytes expected\n");
switch_to_ready_state(ns, NS_STATUS_FAILED(ns));
return;
}
accept_addr_byte(ns, byte);
ns->regs.count += 1;
NS_DBG("write_byte: address byte %#x was accepted (%d bytes input, %d expected)\n",
(uint)byte, ns->regs.count, ns->regs.num);
if (ns->regs.count == ns->regs.num) {
NS_DBG("address (%#x, %#x) is accepted\n", ns->regs.row, ns->regs.column);
switch_state(ns);
}
} else {
/*
* The byte written is input data.
*/
/* Check that chip is expecting data input */
if (!(ns->state & STATE_DATAIN_MASK)) {
NS_ERR("write_byte: data input (%#x) isn't expected, state is %s, "
"switch to %s\n", (uint)byte,
get_state_name(ns->state), get_state_name(STATE_READY));
switch_to_ready_state(ns, NS_STATUS_FAILED(ns));
return;
}
/* Check if this is expected byte */
if (ns->regs.count == ns->regs.num) {
NS_WARN("write_byte: %u input bytes has already been accepted, ignore write\n",
ns->regs.num);
return;
}
if (ns->busw == 8) {
ns->buf.byte[ns->regs.count] = byte;
ns->regs.count += 1;
} else {
ns->buf.word[ns->regs.count >> 1] = cpu_to_le16((uint16_t)byte);
ns->regs.count += 2;
}
}
return;
}
static void ns_hwcontrol(struct mtd_info *mtd, int cmd, unsigned int bitmask)
{
struct nandsim *ns = ((struct nand_chip *)mtd->priv)->priv;
ns->lines.cle = bitmask & NAND_CLE ? 1 : 0;
ns->lines.ale = bitmask & NAND_ALE ? 1 : 0;
ns->lines.ce = bitmask & NAND_NCE ? 1 : 0;
if (cmd != NAND_CMD_NONE)
ns_nand_write_byte(mtd, cmd);
}
static int ns_device_ready(struct mtd_info *mtd)
{
NS_DBG("device_ready\n");
return 1;
}
static uint16_t ns_nand_read_word(struct mtd_info *mtd)
{
struct nand_chip *chip = (struct nand_chip *)mtd->priv;
uint16_t lo, hi;
NS_DBG("read_word\n");
/* Sequence the two reads: '|' does not order its operands in C. */
lo = chip->read_byte(mtd);
hi = chip->read_byte(mtd);
return lo | (hi << 8);
}
static void ns_nand_write_buf(struct mtd_info *mtd, const u_char *buf, int len)
{
struct nandsim *ns = ((struct nand_chip *)mtd->priv)->priv;
/* Check that chip is expecting data input */
if (!(ns->state & STATE_DATAIN_MASK)) {
NS_ERR("write_buf: data input isn't expected, state is %s, "
"switch to STATE_READY\n", get_state_name(ns->state));
switch_to_ready_state(ns, NS_STATUS_FAILED(ns));
return;
}
/* Check if these are expected bytes */
if (ns->regs.count + len > ns->regs.num) {
NS_ERR("write_buf: too many input bytes\n");
switch_to_ready_state(ns, NS_STATUS_FAILED(ns));
return;
}
memcpy(ns->buf.byte + ns->regs.count, buf, len);
ns->regs.count += len;
if (ns->regs.count == ns->regs.num) {
NS_DBG("write_buf: %d bytes were written\n", ns->regs.count);
}
}
static void ns_nand_read_buf(struct mtd_info *mtd, u_char *buf, int len)
{
struct nandsim *ns = ((struct nand_chip *)mtd->priv)->priv;
/* Sanity and correctness checks */
if (!ns->lines.ce) {
NS_ERR("read_buf: chip is disabled\n");
return;
}
if (ns->lines.ale || ns->lines.cle) {
NS_ERR("read_buf: ALE or CLE pin is high\n");
return;
}
if (!(ns->state & STATE_DATAOUT_MASK)) {
NS_WARN("read_buf: unexpected data output cycle, current state is %s\n",
get_state_name(ns->state));
return;
}
if (NS_STATE(ns->state) != STATE_DATAOUT) {
int i;
for (i = 0; i < len; i++)
buf[i] = ((struct nand_chip *)mtd->priv)->read_byte(mtd);
return;
}
/* Check if these are expected bytes */
if (ns->regs.count + len > ns->regs.num) {
NS_ERR("read_buf: too many bytes to read\n");
switch_to_ready_state(ns, NS_STATUS_FAILED(ns));
return;
}
memcpy(buf, ns->buf.byte + ns->regs.count, len);
ns->regs.count += len;
if (ns->regs.count == ns->regs.num) {
if ((ns->options & OPT_AUTOINCR) && NS_STATE(ns->state) == STATE_DATAOUT) {
ns->regs.count = 0;
if (ns->regs.row + 1 < ns->geom.pgnum)
ns->regs.row += 1;
NS_DBG("read_buf: switch to the next page (%#x)\n", ns->regs.row);
do_state_action(ns, ACTION_CPY);
}
else if (NS_STATE(ns->nxstate) == STATE_READY)
switch_state(ns);
}
return;
}
static int ns_nand_verify_buf(struct mtd_info *mtd, const u_char *buf, int len)
{
ns_nand_read_buf(mtd, (u_char *)&ns_verify_buf[0], len);
if (!memcmp(buf, &ns_verify_buf[0], len)) {
NS_DBG("verify_buf: the buffer is OK\n");
return 0;
} else {
NS_DBG("verify_buf: the buffer is wrong\n");
return -EFAULT;
}
}
/*
* Module initialization function
*/
static int __init ns_init_module(void)
{
struct nand_chip *chip;
struct nandsim *nand;
int retval = -ENOMEM, i;
if (bus_width != 8 && bus_width != 16) {
NS_ERR("wrong bus width (%d), use only 8 or 16\n", bus_width);
return -EINVAL;
}
/* Allocate and initialize mtd_info, nand_chip and nandsim structures */
nsmtd = kzalloc(sizeof(struct mtd_info) + sizeof(struct nand_chip)
+ sizeof(struct nandsim), GFP_KERNEL);
if (!nsmtd) {
NS_ERR("unable to allocate core structures.\n");
return -ENOMEM;
}
chip = (struct nand_chip *)(nsmtd + 1);
nsmtd->priv = (void *)chip;
nand = (struct nandsim *)(chip + 1);
chip->priv = (void *)nand;
/*
* Register simulator's callbacks.
*/
chip->cmd_ctrl = ns_hwcontrol;
chip->read_byte = ns_nand_read_byte;
chip->dev_ready = ns_device_ready;
chip->write_buf = ns_nand_write_buf;
chip->read_buf = ns_nand_read_buf;
chip->verify_buf = ns_nand_verify_buf;
chip->read_word = ns_nand_read_word;
chip->ecc.mode = NAND_ECC_SOFT;
/*
* The NAND_SKIP_BBTSCAN option is necessary for the 'overridesize'
* and 'badblocks' parameters to work.
*/
chip->options |= NAND_SKIP_BBTSCAN;
switch (bbt) {
case 2:
chip->options |= NAND_USE_FLASH_BBT_NO_OOB;
/* fall through */
case 1:
chip->options |= NAND_USE_FLASH_BBT;
/* fall through */
case 0:
break;
default:
NS_ERR("bbt has to be 0..2\n");
retval = -EINVAL;
goto error;
}
/*
* Perform minimum nandsim structure initialization to handle
* the initial ID read command correctly
*/
if (third_id_byte != 0xFF || fourth_id_byte != 0xFF)
nand->geom.idbytes = 4;
else
nand->geom.idbytes = 2;
nand->regs.status = NS_STATUS_OK(nand);
nand->nxstate = STATE_UNKNOWN;
nand->options |= OPT_PAGE256; /* temporary value */
nand->ids[0] = first_id_byte;
nand->ids[1] = second_id_byte;
nand->ids[2] = third_id_byte;
nand->ids[3] = fourth_id_byte;
if (bus_width == 16) {
nand->busw = 16;
chip->options |= NAND_BUSWIDTH_16;
}
nsmtd->owner = THIS_MODULE;
if ((retval = parse_weakblocks()) != 0)
goto error;
if ((retval = parse_weakpages()) != 0)
goto error;
if ((retval = parse_gravepages()) != 0)
goto error;
retval = nand_scan_ident(nsmtd, 1, NULL);
if (retval) {
NS_ERR("cannot scan NAND Simulator device\n");
if (retval > 0)
retval = -ENXIO;
goto error;
}
if (bch) {
unsigned int eccsteps, eccbytes;
if (!mtd_nand_has_bch()) {
NS_ERR("BCH ECC support is disabled\n");
retval = -EINVAL;
goto error;
}
/* use 512-byte ecc blocks */
eccsteps = nsmtd->writesize/512;
eccbytes = (bch*13+7)/8;
/* do not bother supporting small page devices */
if ((nsmtd->oobsize < 64) || !eccsteps) {
NS_ERR("bch not available on small page devices\n");
retval = -EINVAL;
goto error;
}
if ((eccbytes*eccsteps+2) > nsmtd->oobsize) {
NS_ERR("invalid bch value %u\n", bch);
retval = -EINVAL;
goto error;
}
chip->ecc.mode = NAND_ECC_SOFT_BCH;
chip->ecc.size = 512;
chip->ecc.bytes = eccbytes;
NS_INFO("using %u-bit/%u bytes BCH ECC\n", bch, chip->ecc.size);
}
retval = nand_scan_tail(nsmtd);
if (retval) {
NS_ERR("can't register NAND Simulator\n");
if (retval > 0)
retval = -ENXIO;
goto error;
}
if (overridesize) {
uint64_t new_size = (uint64_t)nsmtd->erasesize << overridesize;
if (new_size >> overridesize != nsmtd->erasesize) {
NS_ERR("overridesize is too big\n");
retval = -EINVAL;
goto err_exit;
}
/* N.B. This relies on nand_scan not doing anything with the size before we change it */
nsmtd->size = new_size;
chip->chipsize = new_size;
chip->chip_shift = ffs(nsmtd->erasesize) + overridesize - 1;
chip->pagemask = (chip->chipsize >> chip->page_shift) - 1;
}
if ((retval = setup_wear_reporting(nsmtd)) != 0)
goto err_exit;
if ((retval = init_nandsim(nsmtd)) != 0)
goto err_exit;
if ((retval = nand_default_bbt(nsmtd)) != 0)
goto err_exit;
if ((retval = parse_badblocks(nand, nsmtd)) != 0)
goto err_exit;
/* Register NAND partitions */
retval = mtd_device_register(nsmtd, &nand->partitions[0],
nand->nbparts);
if (retval != 0)
goto err_exit;
return 0;
err_exit:
free_nandsim(nand);
nand_release(nsmtd);
for (i = 0; i < ARRAY_SIZE(nand->partitions); ++i)
kfree(nand->partitions[i].name);
error:
kfree(nsmtd);
free_lists();
return retval;
}
module_init(ns_init_module);
/*
* Module clean-up function
*/
static void __exit ns_cleanup_module(void)
{
struct nandsim *ns = ((struct nand_chip *)nsmtd->priv)->priv;
int i;
free_nandsim(ns); /* Free nandsim private resources */
nand_release(nsmtd); /* Unregister driver */
for (i = 0; i < ARRAY_SIZE(ns->partitions); ++i)
kfree(ns->partitions[i].name);
kfree(nsmtd); /* Free other structures */
free_lists();
}
module_exit(ns_cleanup_module);
MODULE_LICENSE ("GPL");
MODULE_AUTHOR ("Artem B. Bityuckiy");
MODULE_DESCRIPTION ("The NAND flash simulator");
| gpl-2.0 |
droidroidz/USCC_R970_kernel_KK | kernel/power/snapshot.c | 1701 | 61949 | /*
* linux/kernel/power/snapshot.c
*
* This file provides system snapshot/restore functionality for swsusp.
*
* Copyright (C) 1998-2005 Pavel Machek <pavel@ucw.cz>
* Copyright (C) 2006 Rafael J. Wysocki <rjw@sisk.pl>
*
* This file is released under the GPLv2.
*
*/
#include <linux/version.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/suspend.h>
#include <linux/delay.h>
#include <linux/bitops.h>
#include <linux/spinlock.h>
#include <linux/kernel.h>
#include <linux/pm.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/syscalls.h>
#include <linux/console.h>
#include <linux/highmem.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <asm/uaccess.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/io.h>
#include "power.h"
static int swsusp_page_is_free(struct page *);
static void swsusp_set_page_forbidden(struct page *);
static void swsusp_unset_page_forbidden(struct page *);
/*
* Number of bytes to reserve for memory allocations made by device drivers
* from their ->freeze() and ->freeze_noirq() callbacks so that they don't
* cause image creation to fail (tunable via /sys/power/reserved_size).
*/
unsigned long reserved_size;
void __init hibernate_reserved_size_init(void)
{
reserved_size = SPARE_PAGES * PAGE_SIZE;
}
/*
* Preferred image size in bytes (tunable via /sys/power/image_size).
* When it is set to N, swsusp will do its best to ensure the image
* size will not exceed N bytes, but if that is impossible, it will
* try to create the smallest image possible.
*/
unsigned long image_size;
void __init hibernate_image_size_init(void)
{
image_size = ((totalram_pages * 2) / 5) * PAGE_SIZE;
}
/* List of PBEs needed for restoring the pages that were allocated before
* the suspend and included in the suspend image, but have also been
* allocated by the "resume" kernel, so their contents cannot be written
* directly to their "original" page frames.
*/
struct pbe *restore_pblist;
/* Pointer to an auxiliary buffer (1 page) */
static void *buffer;
/**
* @safe_needed - on resume, for storing the PBE list and the image,
* we can only use memory pages that do not conflict with the pages
* used before suspend. The unsafe pages have PageNosaveFree set
* and we count them in allocated_unsafe_pages.
*
* Each allocated image page is marked as PageNosave and PageNosaveFree
* so that swsusp_free() can release it.
*/
#define PG_ANY 0
#define PG_SAFE 1
#define PG_UNSAFE_CLEAR 1
#define PG_UNSAFE_KEEP 0
static unsigned int allocated_unsafe_pages;
static void *get_image_page(gfp_t gfp_mask, int safe_needed)
{
void *res;
res = (void *)get_zeroed_page(gfp_mask);
if (safe_needed)
while (res && swsusp_page_is_free(virt_to_page(res))) {
/* The page is unsafe, mark it for swsusp_free() */
swsusp_set_page_forbidden(virt_to_page(res));
allocated_unsafe_pages++;
res = (void *)get_zeroed_page(gfp_mask);
}
if (res) {
swsusp_set_page_forbidden(virt_to_page(res));
swsusp_set_page_free(virt_to_page(res));
}
return res;
}
unsigned long get_safe_page(gfp_t gfp_mask)
{
return (unsigned long)get_image_page(gfp_mask, PG_SAFE);
}
static struct page *alloc_image_page(gfp_t gfp_mask)
{
struct page *page;
page = alloc_page(gfp_mask);
if (page) {
swsusp_set_page_forbidden(page);
swsusp_set_page_free(page);
}
return page;
}
/**
* free_image_page - free page represented by @addr, allocated with
* get_image_page (page flags set by it must be cleared)
*/
static inline void free_image_page(void *addr, int clear_nosave_free)
{
struct page *page;
BUG_ON(!virt_addr_valid(addr));
page = virt_to_page(addr);
swsusp_unset_page_forbidden(page);
if (clear_nosave_free)
swsusp_unset_page_free(page);
__free_page(page);
}
/* struct linked_page is used to build chains of pages */
#define LINKED_PAGE_DATA_SIZE (PAGE_SIZE - sizeof(void *))
struct linked_page {
struct linked_page *next;
char data[LINKED_PAGE_DATA_SIZE];
} __attribute__((packed));
static inline void
free_list_of_pages(struct linked_page *list, int clear_page_nosave)
{
while (list) {
struct linked_page *lp = list->next;
free_image_page(list, clear_page_nosave);
list = lp;
}
}
/**
* struct chain_allocator is used for allocating small objects out of
* a linked list of pages called 'the chain'.
*
* The chain grows each time when there is no room for a new object in
* the current page. The allocated objects cannot be freed individually.
* It is only possible to free them all at once, by freeing the entire
* chain.
*
* NOTE: The chain allocator may be inefficient if the allocated objects
* are not much smaller than PAGE_SIZE.
*/
struct chain_allocator {
struct linked_page *chain; /* the chain */
unsigned int used_space; /* total size of objects allocated out
* of the current page
*/
gfp_t gfp_mask; /* mask for allocating pages */
int safe_needed; /* if set, only "safe" pages are allocated */
};
static void
chain_init(struct chain_allocator *ca, gfp_t gfp_mask, int safe_needed)
{
ca->chain = NULL;
ca->used_space = LINKED_PAGE_DATA_SIZE;
ca->gfp_mask = gfp_mask;
ca->safe_needed = safe_needed;
}
static void *chain_alloc(struct chain_allocator *ca, unsigned int size)
{
void *ret;
if (LINKED_PAGE_DATA_SIZE - ca->used_space < size) {
struct linked_page *lp;
lp = get_image_page(ca->gfp_mask, ca->safe_needed);
if (!lp)
return NULL;
lp->next = ca->chain;
ca->chain = lp;
ca->used_space = 0;
}
ret = ca->chain->data + ca->used_space;
ca->used_space += size;
return ret;
}
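/*
 * Typical use of the chain allocator, as a sketch (the real users are
 * create_bm_block_list() and memory_bm_create() below): allocate many
 * small objects, then release them all at once by freeing the chain.
 */
static void __maybe_unused chain_alloc_example(void)
{
struct chain_allocator ca;
int i;
chain_init(&ca, GFP_KERNEL, PG_ANY);
for (i = 0; i < 100; i++) {
unsigned long *obj = chain_alloc(&ca, sizeof(*obj));
if (!obj)
break; /* out of memory */
*obj = i;
}
/* Objects cannot be freed individually - drop the whole chain. */
free_list_of_pages(ca.chain, PG_UNSAFE_KEEP);
}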
/**
* Data types related to memory bitmaps.
*
* A memory bitmap is a structure consisting of many linked lists of
* objects. The main list's elements are of type struct zone_bitmap
* and each of them corresponds to one zone. For each zone bitmap
* object there is a list of objects of type struct bm_block that
* represent the blocks of the bitmap in which the information is
* stored.
*
* struct memory_bitmap contains a pointer to the main list of zone
* bitmap objects, a struct bm_position used for browsing the bitmap,
* and a pointer to the list of pages used for allocating all of the
* zone bitmap objects and bitmap block objects.
*
* NOTE: It has to be possible to lay out the bitmap in memory
* using only allocations of order 0. Additionally, the bitmap is
* designed to work with an arbitrary number of zones (this is over the
* top for now, but let's avoid making unnecessary assumptions ;-).
*
* struct zone_bitmap contains a pointer to a list of bitmap block
* objects and a pointer to the bitmap block object that has been
* most recently used for setting bits. Additionally, it contains the
* pfns that correspond to the start and end of the represented zone.
*
* struct bm_block contains a pointer to the memory page in which
* information is stored (in the form of a block of bitmap)
* It also contains the pfns that correspond to the start and end of
* the represented memory area.
*/
#define BM_END_OF_MAP (~0UL)
#define BM_BITS_PER_BLOCK (PAGE_SIZE * BITS_PER_BYTE)
struct bm_block {
struct list_head hook; /* hook into a list of bitmap blocks */
unsigned long start_pfn; /* pfn represented by the first bit */
unsigned long end_pfn; /* pfn represented by the last bit plus 1 */
unsigned long *data; /* bitmap representing pages */
};
static inline unsigned long bm_block_bits(struct bm_block *bb)
{
return bb->end_pfn - bb->start_pfn;
}
/* struct bm_position is used for browsing memory bitmaps */
struct bm_position {
struct bm_block *block;
int bit;
};
struct memory_bitmap {
struct list_head blocks; /* list of bitmap blocks */
struct linked_page *p_list; /* list of pages used to store zone
* bitmap objects and bitmap block
* objects
*/
struct bm_position cur; /* most recently used bit position */
};
/* Functions that operate on memory bitmaps */
static void memory_bm_position_reset(struct memory_bitmap *bm)
{
bm->cur.block = list_entry(bm->blocks.next, struct bm_block, hook);
bm->cur.bit = 0;
}
static void memory_bm_free(struct memory_bitmap *bm, int clear_nosave_free);
/**
* create_bm_block_list - create a list of block bitmap objects
* @pages - number of pages to track
* @list - list to put the allocated blocks into
* @ca - chain allocator to be used for allocating memory
*/
static int create_bm_block_list(unsigned long pages,
struct list_head *list,
struct chain_allocator *ca)
{
unsigned int nr_blocks = DIV_ROUND_UP(pages, BM_BITS_PER_BLOCK);
while (nr_blocks-- > 0) {
struct bm_block *bb;
bb = chain_alloc(ca, sizeof(struct bm_block));
if (!bb)
return -ENOMEM;
list_add(&bb->hook, list);
}
return 0;
}
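/*
 * For example, with 4 KiB pages BM_BITS_PER_BLOCK is 4096 * 8 = 32768,
 * so one bitmap block covers 32768 page frames (128 MiB of memory) and
 * a 1 GiB zone (262144 page frames) needs DIV_ROUND_UP(262144, 32768)
 * = 8 bitmap blocks.
 */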
struct mem_extent {
struct list_head hook;
unsigned long start;
unsigned long end;
};
/**
* free_mem_extents - free a list of memory extents
* @list - list of extents to empty
*/
static void free_mem_extents(struct list_head *list)
{
struct mem_extent *ext, *aux;
list_for_each_entry_safe(ext, aux, list, hook) {
list_del(&ext->hook);
kfree(ext);
}
}
/**
* create_mem_extents - create a list of memory extents representing
* contiguous ranges of PFNs
* @list - list to put the extents into
* @gfp_mask - mask to use for memory allocations
*/
static int create_mem_extents(struct list_head *list, gfp_t gfp_mask)
{
struct zone *zone;
INIT_LIST_HEAD(list);
for_each_populated_zone(zone) {
unsigned long zone_start, zone_end;
struct mem_extent *ext, *cur, *aux;
zone_start = zone->zone_start_pfn;
zone_end = zone->zone_start_pfn + zone->spanned_pages;
list_for_each_entry(ext, list, hook)
if (zone_start <= ext->end)
break;
if (&ext->hook == list || zone_end < ext->start) {
/* New extent is necessary */
struct mem_extent *new_ext;
new_ext = kzalloc(sizeof(struct mem_extent), gfp_mask);
if (!new_ext) {
free_mem_extents(list);
return -ENOMEM;
}
new_ext->start = zone_start;
new_ext->end = zone_end;
list_add_tail(&new_ext->hook, &ext->hook);
continue;
}
/* Merge this zone's range of PFNs with the existing one */
if (zone_start < ext->start)
ext->start = zone_start;
if (zone_end > ext->end)
ext->end = zone_end;
/* More merging may be possible */
cur = ext;
list_for_each_entry_safe_continue(cur, aux, list, hook) {
if (zone_end < cur->start)
break;
if (zone_end < cur->end)
ext->end = cur->end;
list_del(&cur->hook);
kfree(cur);
}
}
return 0;
}
/**
* memory_bm_create - allocate memory for a memory bitmap
*/
static int
memory_bm_create(struct memory_bitmap *bm, gfp_t gfp_mask, int safe_needed)
{
struct chain_allocator ca;
struct list_head mem_extents;
struct mem_extent *ext;
int error;
chain_init(&ca, gfp_mask, safe_needed);
INIT_LIST_HEAD(&bm->blocks);
error = create_mem_extents(&mem_extents, gfp_mask);
if (error)
return error;
list_for_each_entry(ext, &mem_extents, hook) {
struct bm_block *bb;
unsigned long pfn = ext->start;
unsigned long pages = ext->end - ext->start;
bb = list_entry(bm->blocks.prev, struct bm_block, hook);
error = create_bm_block_list(pages, bm->blocks.prev, &ca);
if (error)
goto Error;
list_for_each_entry_continue(bb, &bm->blocks, hook) {
bb->data = get_image_page(gfp_mask, safe_needed);
if (!bb->data) {
error = -ENOMEM;
goto Error;
}
bb->start_pfn = pfn;
if (pages >= BM_BITS_PER_BLOCK) {
pfn += BM_BITS_PER_BLOCK;
pages -= BM_BITS_PER_BLOCK;
} else {
/* This is executed only once in the loop */
pfn += pages;
}
bb->end_pfn = pfn;
}
}
bm->p_list = ca.chain;
memory_bm_position_reset(bm);
Exit:
free_mem_extents(&mem_extents);
return error;
Error:
bm->p_list = ca.chain;
memory_bm_free(bm, PG_UNSAFE_CLEAR);
goto Exit;
}
/**
* memory_bm_free - free memory occupied by the memory bitmap @bm
*/
static void memory_bm_free(struct memory_bitmap *bm, int clear_nosave_free)
{
struct bm_block *bb;
list_for_each_entry(bb, &bm->blocks, hook)
if (bb->data)
free_image_page(bb->data, clear_nosave_free);
free_list_of_pages(bm->p_list, clear_nosave_free);
INIT_LIST_HEAD(&bm->blocks);
}
/**
* memory_bm_find_bit - find the bit in the bitmap @bm that corresponds
* to the given pfn. The cur.block and cur.bit members of @bm are
* updated.
*/
static int memory_bm_find_bit(struct memory_bitmap *bm, unsigned long pfn,
void **addr, unsigned int *bit_nr)
{
struct bm_block *bb;
/*
* Check if the pfn corresponds to the current bitmap block and find
* the block where it fits if this is not the case.
*/
bb = bm->cur.block;
if (pfn < bb->start_pfn)
list_for_each_entry_continue_reverse(bb, &bm->blocks, hook)
if (pfn >= bb->start_pfn)
break;
if (pfn >= bb->end_pfn)
list_for_each_entry_continue(bb, &bm->blocks, hook)
if (pfn >= bb->start_pfn && pfn < bb->end_pfn)
break;
if (&bb->hook == &bm->blocks)
return -EFAULT;
/* The block has been found */
bm->cur.block = bb;
pfn -= bb->start_pfn;
bm->cur.bit = pfn + 1;
*bit_nr = pfn;
*addr = bb->data;
return 0;
}
static void memory_bm_set_bit(struct memory_bitmap *bm, unsigned long pfn)
{
void *addr;
unsigned int bit;
int error;
error = memory_bm_find_bit(bm, pfn, &addr, &bit);
BUG_ON(error);
set_bit(bit, addr);
}
static int mem_bm_set_bit_check(struct memory_bitmap *bm, unsigned long pfn)
{
void *addr;
unsigned int bit;
int error;
error = memory_bm_find_bit(bm, pfn, &addr, &bit);
if (!error)
set_bit(bit, addr);
return error;
}
static void memory_bm_clear_bit(struct memory_bitmap *bm, unsigned long pfn)
{
void *addr;
unsigned int bit;
int error;
error = memory_bm_find_bit(bm, pfn, &addr, &bit);
BUG_ON(error);
clear_bit(bit, addr);
}
static int memory_bm_test_bit(struct memory_bitmap *bm, unsigned long pfn)
{
void *addr;
unsigned int bit;
int error;
error = memory_bm_find_bit(bm, pfn, &addr, &bit);
BUG_ON(error);
return test_bit(bit, addr);
}
static bool memory_bm_pfn_present(struct memory_bitmap *bm, unsigned long pfn)
{
void *addr;
unsigned int bit;
return !memory_bm_find_bit(bm, pfn, &addr, &bit);
}
/**
* memory_bm_next_pfn - find the pfn that corresponds to the next set bit
* in the bitmap @bm. If the pfn cannot be found, BM_END_OF_MAP is
* returned.
*
* It is required to run memory_bm_position_reset() before the first call to
* this function.
*/
static unsigned long memory_bm_next_pfn(struct memory_bitmap *bm)
{
struct bm_block *bb;
int bit;
bb = bm->cur.block;
do {
bit = bm->cur.bit;
bit = find_next_bit(bb->data, bm_block_bits(bb), bit);
if (bit < bm_block_bits(bb))
goto Return_pfn;
bb = list_entry(bb->hook.next, struct bm_block, hook);
bm->cur.block = bb;
bm->cur.bit = 0;
} while (&bb->hook != &bm->blocks);
memory_bm_position_reset(bm);
return BM_END_OF_MAP;
Return_pfn:
bm->cur.bit = bit + 1;
return bb->start_pfn + bit;
}
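/*
 * The intended iteration pattern over a memory bitmap (a sketch; see
 * copy_data_pages() below for a real user):
 */
static void __maybe_unused memory_bm_for_each_example(struct memory_bitmap *bm)
{
unsigned long pfn;
memory_bm_position_reset(bm);
for (;;) {
pfn = memory_bm_next_pfn(bm);
if (pfn == BM_END_OF_MAP)
break;
/* ... use pfn here ... */
}
}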
/**
* This structure represents a range of page frames the contents of which
* should not be saved during the suspend.
*/
struct nosave_region {
struct list_head list;
unsigned long start_pfn;
unsigned long end_pfn;
};
static LIST_HEAD(nosave_regions);
/**
* register_nosave_region - register a range of page frames the contents
* of which should not be saved during the suspend (to be used in the early
* initialization code)
*/
void __init
__register_nosave_region(unsigned long start_pfn, unsigned long end_pfn,
int use_kmalloc)
{
struct nosave_region *region;
if (start_pfn >= end_pfn)
return;
if (!list_empty(&nosave_regions)) {
/* Try to extend the previous region (they should be sorted) */
region = list_entry(nosave_regions.prev,
struct nosave_region, list);
if (region->end_pfn == start_pfn) {
region->end_pfn = end_pfn;
goto Report;
}
}
if (use_kmalloc) {
/* during init, this shouldn't fail */
region = kmalloc(sizeof(struct nosave_region), GFP_KERNEL);
BUG_ON(!region);
} else
/* This allocation cannot fail */
region = alloc_bootmem(sizeof(struct nosave_region));
region->start_pfn = start_pfn;
region->end_pfn = end_pfn;
list_add_tail(®ion->list, &nosave_regions);
Report:
printk(KERN_INFO "PM: Registered nosave memory: %016lx - %016lx\n",
start_pfn << PAGE_SHIFT, end_pfn << PAGE_SHIFT);
}
/*
* Set bits in this map correspond to the page frames the contents of which
* should not be saved during the suspend.
*/
static struct memory_bitmap *forbidden_pages_map;
/* Set bits in this map correspond to free page frames. */
static struct memory_bitmap *free_pages_map;
/*
* Each page frame allocated for creating the image is marked by setting the
* corresponding bits in forbidden_pages_map and free_pages_map simultaneously
*/
void swsusp_set_page_free(struct page *page)
{
if (free_pages_map)
memory_bm_set_bit(free_pages_map, page_to_pfn(page));
}
static int swsusp_page_is_free(struct page *page)
{
return free_pages_map ?
memory_bm_test_bit(free_pages_map, page_to_pfn(page)) : 0;
}
void swsusp_unset_page_free(struct page *page)
{
if (free_pages_map)
memory_bm_clear_bit(free_pages_map, page_to_pfn(page));
}
static void swsusp_set_page_forbidden(struct page *page)
{
if (forbidden_pages_map)
memory_bm_set_bit(forbidden_pages_map, page_to_pfn(page));
}
int swsusp_page_is_forbidden(struct page *page)
{
return forbidden_pages_map ?
memory_bm_test_bit(forbidden_pages_map, page_to_pfn(page)) : 0;
}
static void swsusp_unset_page_forbidden(struct page *page)
{
if (forbidden_pages_map)
memory_bm_clear_bit(forbidden_pages_map, page_to_pfn(page));
}
/**
* mark_nosave_pages - set bits corresponding to the page frames the
* contents of which should not be saved in a given bitmap.
*/
static void mark_nosave_pages(struct memory_bitmap *bm)
{
struct nosave_region *region;
if (list_empty(&nosave_regions))
return;
list_for_each_entry(region, &nosave_regions, list) {
unsigned long pfn;
pr_debug("PM: Marking nosave pages: [mem %#010llx-%#010llx]\n",
(unsigned long long) region->start_pfn << PAGE_SHIFT,
((unsigned long long) region->end_pfn << PAGE_SHIFT)
- 1);
for (pfn = region->start_pfn; pfn < region->end_pfn; pfn++)
if (pfn_valid(pfn)) {
/*
* It is safe to ignore the result of
* mem_bm_set_bit_check() here, since we won't
* touch the PFNs for which the error is
* returned anyway.
*/
mem_bm_set_bit_check(bm, pfn);
}
}
}
/**
* create_basic_memory_bitmaps - create bitmaps needed for marking page
* frames that should not be saved and free page frames. The pointers
* forbidden_pages_map and free_pages_map are only modified if everything
* goes well, because we don't want the bits to be used before both bitmaps
* are set up.
*/
int create_basic_memory_bitmaps(void)
{
struct memory_bitmap *bm1, *bm2;
int error = 0;
BUG_ON(forbidden_pages_map || free_pages_map);
bm1 = kzalloc(sizeof(struct memory_bitmap), GFP_KERNEL);
if (!bm1)
return -ENOMEM;
error = memory_bm_create(bm1, GFP_KERNEL, PG_ANY);
if (error)
goto Free_first_object;
bm2 = kzalloc(sizeof(struct memory_bitmap), GFP_KERNEL);
if (!bm2)
goto Free_first_bitmap;
error = memory_bm_create(bm2, GFP_KERNEL, PG_ANY);
if (error)
goto Free_second_object;
forbidden_pages_map = bm1;
free_pages_map = bm2;
mark_nosave_pages(forbidden_pages_map);
pr_debug("PM: Basic memory bitmaps created\n");
return 0;
Free_second_object:
kfree(bm2);
Free_first_bitmap:
memory_bm_free(bm1, PG_UNSAFE_CLEAR);
Free_first_object:
kfree(bm1);
return -ENOMEM;
}
/**
* free_basic_memory_bitmaps - free memory bitmaps allocated by
* create_basic_memory_bitmaps(). The auxiliary pointers are necessary
* so that the bitmaps themselves are not referred to while they are being
* freed.
*/
void free_basic_memory_bitmaps(void)
{
struct memory_bitmap *bm1, *bm2;
BUG_ON(!(forbidden_pages_map && free_pages_map));
bm1 = forbidden_pages_map;
bm2 = free_pages_map;
forbidden_pages_map = NULL;
free_pages_map = NULL;
memory_bm_free(bm1, PG_UNSAFE_CLEAR);
kfree(bm1);
memory_bm_free(bm2, PG_UNSAFE_CLEAR);
kfree(bm2);
pr_debug("PM: Basic memory bitmaps freed\n");
}
/**
* snapshot_additional_pages - estimate the number of additional pages
* that will be needed for setting up the suspend image data structures
* for the given zone (usually the returned value is greater than the
* exact number)
*/
unsigned int snapshot_additional_pages(struct zone *zone)
{
unsigned int res;
res = DIV_ROUND_UP(zone->spanned_pages, BM_BITS_PER_BLOCK);
res += DIV_ROUND_UP(res * sizeof(struct bm_block),
LINKED_PAGE_DATA_SIZE);
return 2 * res;
}
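/*
 * Worked example, assuming 4 KiB pages: a 1 GiB zone spans 262144 page
 * frames, so it needs 8 bitmap blocks plus roughly one linked page to
 * hold the 8 struct bm_block objects, i.e. res = 8 + 1 = 9. Doubled
 * (the snapshot code keeps two such bitmaps), the estimate is 18 pages.
 */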
#ifdef CONFIG_HIGHMEM
/**
* count_free_highmem_pages - compute the total number of free highmem
* pages, system-wide.
*/
static unsigned int count_free_highmem_pages(void)
{
struct zone *zone;
unsigned int cnt = 0;
for_each_populated_zone(zone)
if (is_highmem(zone))
cnt += zone_page_state(zone, NR_FREE_PAGES);
return cnt;
}
/**
* saveable_highmem_page - Determine whether a highmem page should be
* included in the suspend image.
*
* We should save the page if it isn't Nosave, NosaveFree, or Reserved,
* and it isn't a part of a free chunk of pages.
*/
static struct page *saveable_highmem_page(struct zone *zone, unsigned long pfn)
{
struct page *page;
if (!pfn_valid(pfn))
return NULL;
page = pfn_to_page(pfn);
if (page_zone(page) != zone)
return NULL;
BUG_ON(!PageHighMem(page));
if (swsusp_page_is_forbidden(page) || swsusp_page_is_free(page) ||
PageReserved(page))
return NULL;
if (page_is_guard(page))
return NULL;
return page;
}
/**
* count_highmem_pages - compute the total number of saveable highmem
* pages.
*/
static unsigned int count_highmem_pages(void)
{
struct zone *zone;
unsigned int n = 0;
for_each_populated_zone(zone) {
unsigned long pfn, max_zone_pfn;
if (!is_highmem(zone))
continue;
mark_free_pages(zone);
max_zone_pfn = zone->zone_start_pfn + zone->spanned_pages;
for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
if (saveable_highmem_page(zone, pfn))
n++;
}
return n;
}
#else
static inline void *saveable_highmem_page(struct zone *z, unsigned long p)
{
return NULL;
}
#endif /* CONFIG_HIGHMEM */
/**
* saveable_page - Determine whether a non-highmem page should be included
* in the suspend image.
*
* We should save the page if it isn't Nosave, and is not in the range
* of pages statically defined as 'unsaveable', and it isn't a part of
* a free chunk of pages.
*/
static struct page *saveable_page(struct zone *zone, unsigned long pfn)
{
struct page *page;
if (!pfn_valid(pfn))
return NULL;
page = pfn_to_page(pfn);
if (page_zone(page) != zone)
return NULL;
BUG_ON(PageHighMem(page));
if (swsusp_page_is_forbidden(page) || swsusp_page_is_free(page))
return NULL;
if (PageReserved(page)
&& (!kernel_page_present(page) || pfn_is_nosave(pfn)))
return NULL;
if (page_is_guard(page))
return NULL;
return page;
}
/**
* count_data_pages - compute the total number of saveable non-highmem
* pages.
*/
static unsigned int count_data_pages(void)
{
struct zone *zone;
unsigned long pfn, max_zone_pfn;
unsigned int n = 0;
for_each_populated_zone(zone) {
if (is_highmem(zone))
continue;
mark_free_pages(zone);
max_zone_pfn = zone->zone_start_pfn + zone->spanned_pages;
for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
if (saveable_page(zone, pfn))
n++;
}
return n;
}
/* This is needed because copy_page and memcpy are not usable for copying
* task structs.
*/
static inline void do_copy_page(long *dst, long *src)
{
int n;
for (n = PAGE_SIZE / sizeof(long); n; n--)
*dst++ = *src++;
}
/**
* safe_copy_page - check if the page we are going to copy is marked as
* present in the kernel page tables (this always is the case if
* CONFIG_DEBUG_PAGEALLOC is not set and in that case
* kernel_page_present() always returns 'true').
*/
static void safe_copy_page(void *dst, struct page *s_page)
{
if (kernel_page_present(s_page)) {
do_copy_page(dst, page_address(s_page));
} else {
kernel_map_pages(s_page, 1, 1);
do_copy_page(dst, page_address(s_page));
kernel_map_pages(s_page, 1, 0);
}
}
#ifdef CONFIG_HIGHMEM
static inline struct page *
page_is_saveable(struct zone *zone, unsigned long pfn)
{
return is_highmem(zone) ?
saveable_highmem_page(zone, pfn) : saveable_page(zone, pfn);
}
static void copy_data_page(unsigned long dst_pfn, unsigned long src_pfn)
{
struct page *s_page, *d_page;
void *src, *dst;
s_page = pfn_to_page(src_pfn);
d_page = pfn_to_page(dst_pfn);
if (PageHighMem(s_page)) {
src = kmap_atomic(s_page);
dst = kmap_atomic(d_page);
do_copy_page(dst, src);
kunmap_atomic(dst);
kunmap_atomic(src);
} else {
if (PageHighMem(d_page)) {
/* Page pointed to by src may contain some kernel
* data modified by kmap_atomic()
*/
safe_copy_page(buffer, s_page);
dst = kmap_atomic(d_page);
copy_page(dst, buffer);
kunmap_atomic(dst);
} else {
safe_copy_page(page_address(d_page), s_page);
}
}
}
#else
#define page_is_saveable(zone, pfn) saveable_page(zone, pfn)
static inline void copy_data_page(unsigned long dst_pfn, unsigned long src_pfn)
{
safe_copy_page(page_address(pfn_to_page(dst_pfn)),
pfn_to_page(src_pfn));
}
#endif /* CONFIG_HIGHMEM */
static void
copy_data_pages(struct memory_bitmap *copy_bm, struct memory_bitmap *orig_bm)
{
struct zone *zone;
unsigned long pfn;
for_each_populated_zone(zone) {
unsigned long max_zone_pfn;
mark_free_pages(zone);
max_zone_pfn = zone->zone_start_pfn + zone->spanned_pages;
for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
if (page_is_saveable(zone, pfn))
memory_bm_set_bit(orig_bm, pfn);
}
memory_bm_position_reset(orig_bm);
memory_bm_position_reset(copy_bm);
for(;;) {
pfn = memory_bm_next_pfn(orig_bm);
if (unlikely(pfn == BM_END_OF_MAP))
break;
copy_data_page(memory_bm_next_pfn(copy_bm), pfn);
}
}
/* Total number of image pages */
static unsigned int nr_copy_pages;
/* Number of pages needed for saving the original pfns of the image pages */
static unsigned int nr_meta_pages;
/*
* Numbers of normal and highmem page frames allocated for hibernation image
* before suspending devices.
*/
unsigned int alloc_normal, alloc_highmem;
/*
* Memory bitmap used for marking saveable pages (during hibernation) or
* hibernation image pages (during restore)
*/
static struct memory_bitmap orig_bm;
/*
* Memory bitmap used during hibernation for marking allocated page frames that
* will contain copies of saveable pages. During restore it is initially used
* for marking hibernation image pages, but then the set bits from it are
* duplicated in @orig_bm and it is released. On highmem systems it is next
* used for marking "safe" highmem pages, but it has to be reinitialized for
* this purpose.
*/
static struct memory_bitmap copy_bm;
/**
* swsusp_free - free pages allocated for the suspend.
*
* Suspend pages are allocated before the atomic copy is made, so we
* need to release them after the resume.
*/
void swsusp_free(void)
{
struct zone *zone;
unsigned long pfn, max_zone_pfn;
for_each_populated_zone(zone) {
max_zone_pfn = zone->zone_start_pfn + zone->spanned_pages;
for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
if (pfn_valid(pfn)) {
struct page *page = pfn_to_page(pfn);
if (swsusp_page_is_forbidden(page) &&
swsusp_page_is_free(page)) {
swsusp_unset_page_forbidden(page);
swsusp_unset_page_free(page);
__free_page(page);
}
}
}
nr_copy_pages = 0;
nr_meta_pages = 0;
restore_pblist = NULL;
buffer = NULL;
alloc_normal = 0;
alloc_highmem = 0;
}
/* Helper functions used for the shrinking of memory. */
#define GFP_IMAGE (GFP_KERNEL | __GFP_NOWARN)
/**
* preallocate_image_pages - Allocate a number of pages for hibernation image
* @nr_pages: Number of page frames to allocate.
* @mask: GFP flags to use for the allocation.
*
* Return value: Number of page frames actually allocated
*/
static unsigned long preallocate_image_pages(unsigned long nr_pages, gfp_t mask)
{
unsigned long nr_alloc = 0;
while (nr_pages > 0) {
struct page *page;
page = alloc_image_page(mask);
if (!page)
break;
memory_bm_set_bit(©_bm, page_to_pfn(page));
if (PageHighMem(page))
alloc_highmem++;
else
alloc_normal++;
nr_pages--;
nr_alloc++;
}
return nr_alloc;
}
static unsigned long preallocate_image_memory(unsigned long nr_pages,
unsigned long avail_normal)
{
unsigned long alloc;
if (avail_normal <= alloc_normal)
return 0;
alloc = avail_normal - alloc_normal;
if (nr_pages < alloc)
alloc = nr_pages;
return preallocate_image_pages(alloc, GFP_IMAGE);
}
#ifdef CONFIG_HIGHMEM
static unsigned long preallocate_image_highmem(unsigned long nr_pages)
{
return preallocate_image_pages(nr_pages, GFP_IMAGE | __GFP_HIGHMEM);
}
/**
* __fraction - Compute (an approximation of) x * (multiplier / base)
*/
static unsigned long __fraction(u64 x, u64 multiplier, u64 base)
{
x *= multiplier;
do_div(x, base);
return (unsigned long)x;
}
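/*
 * Example: preallocate_highmem_fraction() below uses this to spread an
 * allocation proportionally, so with nr_pages = 120000, highmem = 60000
 * and total = 240000 page frames, __fraction() yields 120000 * 60000 /
 * 240000 = 30000 pages, i.e. a quarter of the request comes from
 * highmem.
 */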
static unsigned long preallocate_highmem_fraction(unsigned long nr_pages,
unsigned long highmem,
unsigned long total)
{
unsigned long alloc = __fraction(nr_pages, highmem, total);
return preallocate_image_pages(alloc, GFP_IMAGE | __GFP_HIGHMEM);
}
#else /* CONFIG_HIGHMEM */
static inline unsigned long preallocate_image_highmem(unsigned long nr_pages)
{
return 0;
}
static inline unsigned long preallocate_highmem_fraction(unsigned long nr_pages,
unsigned long highmem,
unsigned long total)
{
return 0;
}
#endif /* CONFIG_HIGHMEM */
/**
* free_unnecessary_pages - Release preallocated pages not needed for the image
*/
static void free_unnecessary_pages(void)
{
unsigned long save, to_free_normal, to_free_highmem;
save = count_data_pages();
if (alloc_normal >= save) {
to_free_normal = alloc_normal - save;
save = 0;
} else {
to_free_normal = 0;
save -= alloc_normal;
}
save += count_highmem_pages();
if (alloc_highmem >= save) {
to_free_highmem = alloc_highmem - save;
} else {
to_free_highmem = 0;
save -= alloc_highmem;
if (to_free_normal > save)
to_free_normal -= save;
else
to_free_normal = 0;
}
memory_bm_position_reset(©_bm);
while (to_free_normal > 0 || to_free_highmem > 0) {
unsigned long pfn = memory_bm_next_pfn(©_bm);
struct page *page = pfn_to_page(pfn);
if (PageHighMem(page)) {
if (!to_free_highmem)
continue;
to_free_highmem--;
alloc_highmem--;
} else {
if (!to_free_normal)
continue;
to_free_normal--;
alloc_normal--;
}
memory_bm_clear_bit(©_bm, pfn);
swsusp_unset_page_forbidden(page);
swsusp_unset_page_free(page);
__free_page(page);
}
}
/**
* minimum_image_size - Estimate the minimum acceptable size of an image
* @saveable: Number of saveable pages in the system.
*
* We don't want to try to free memory too aggressively, so estimate the
* minimum acceptable size of a hibernation image to use as the lower limit for
* preallocating memory.
*
* We assume that the minimum image size should be proportional to
*
* [number of saveable pages] - [number of pages that can be freed in theory]
*
* where the second term is the sum of (1) reclaimable slab pages, (2) active
* and (3) inactive anonymous pages, (4) active and (5) inactive file pages,
* minus mapped file pages.
*/
static unsigned long minimum_image_size(unsigned long saveable)
{
unsigned long size;
size = global_page_state(NR_SLAB_RECLAIMABLE)
+ global_page_state(NR_ACTIVE_ANON)
+ global_page_state(NR_INACTIVE_ANON)
+ global_page_state(NR_ACTIVE_FILE)
+ global_page_state(NR_INACTIVE_FILE)
- global_page_state(NR_FILE_MAPPED);
return saveable <= size ? 0 : saveable - size;
}
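/*
 * Example: with 200000 saveable pages and 150000 pages in the
 * reclaimable categories above, the minimum image size is 50000 pages;
 * if the reclaimable sum meets or exceeds the number of saveable pages,
 * the minimum is 0 and preallocation is unconstrained from below.
 */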
/**
* hibernate_preallocate_memory - Preallocate memory for hibernation image
*
* To create a hibernation image it is necessary to make a copy of every page
* frame in use. We also need a number of page frames to be free during
* hibernation for allocations made while saving the image and for device
* drivers, in case they need to allocate memory from their hibernation
* callbacks (these two numbers are given by PAGES_FOR_IO (which is a rough
* estimate) and reserved_size divided by PAGE_SIZE (which is tunable through
* /sys/power/reserved_size), respectively). To make this happen, we compute the
* total number of available page frames and allocate at least
*
* ([page frames total] + PAGES_FOR_IO + [metadata pages]) / 2
* + 2 * DIV_ROUND_UP(reserved_size, PAGE_SIZE)
*
* of them, which corresponds to the maximum size of a hibernation image.
*
* If image_size is set below the number following from the above formula,
* the preallocation of memory is continued until the total number of saveable
* pages in the system is below the requested image size or the minimum
* acceptable image size returned by minimum_image_size(), whichever is greater.
*/
int hibernate_preallocate_memory(void)
{
struct zone *zone;
unsigned long saveable, size, max_size, count, highmem, pages = 0;
unsigned long alloc, save_highmem, pages_highmem, avail_normal;
struct timeval start, stop;
int error;
printk(KERN_INFO "PM: Preallocating image memory... ");
do_gettimeofday(&start);
error = memory_bm_create(&orig_bm, GFP_IMAGE, PG_ANY);
if (error)
goto err_out;
error = memory_bm_create(©_bm, GFP_IMAGE, PG_ANY);
if (error)
goto err_out;
alloc_normal = 0;
alloc_highmem = 0;
/* Count the number of saveable data pages. */
save_highmem = count_highmem_pages();
saveable = count_data_pages();
/*
* Compute the total number of page frames we can use (count) and the
* number of pages needed for image metadata (size).
*/
count = saveable;
saveable += save_highmem;
highmem = save_highmem;
size = 0;
for_each_populated_zone(zone) {
size += snapshot_additional_pages(zone);
if (is_highmem(zone))
highmem += zone_page_state(zone, NR_FREE_PAGES);
else
count += zone_page_state(zone, NR_FREE_PAGES);
}
avail_normal = count;
count += highmem;
count -= totalreserve_pages;
/* Add number of pages required for page keys (s390 only). */
size += page_key_additional_pages(saveable);
/* Compute the maximum number of saveable pages to leave in memory. */
max_size = (count - (size + PAGES_FOR_IO)) / 2
- 2 * DIV_ROUND_UP(reserved_size, PAGE_SIZE);
/* Compute the desired number of image pages specified by image_size. */
size = DIV_ROUND_UP(image_size, PAGE_SIZE);
if (size > max_size)
size = max_size;
/*
* If the desired number of image pages is at least as large as the
* current number of saveable pages in memory, allocate page frames for
* the image and we're done.
*/
if (size >= saveable) {
pages = preallocate_image_highmem(save_highmem);
pages += preallocate_image_memory(saveable - pages, avail_normal);
goto out;
}
/* Estimate the minimum size of the image. */
pages = minimum_image_size(saveable);
/*
* To avoid excessive pressure on the normal zone, leave room in it to
* accommodate an image of the minimum size (unless it's already too
* small, in which case don't preallocate pages from it at all).
*/
if (avail_normal > pages)
avail_normal -= pages;
else
avail_normal = 0;
if (size < pages)
size = min_t(unsigned long, pages, max_size);
/*
* Let the memory management subsystem know that we're going to need a
* large number of page frames to allocate and make it free some memory.
* NOTE: If this is not done, performance will be hurt badly in some
* test cases.
*/
shrink_all_memory(saveable - size);
/*
* The number of saveable pages in memory was too high, so apply some
* pressure to decrease it. First, make room for the largest possible
* image and fail if that doesn't work. Next, try to decrease the size
* of the image as much as indicated by 'size' using allocations from
* highmem and non-highmem zones separately.
*/
pages_highmem = preallocate_image_highmem(highmem / 2);
alloc = count - max_size;
if (alloc > pages_highmem)
alloc -= pages_highmem;
else
alloc = 0;
pages = preallocate_image_memory(alloc, avail_normal);
if (pages < alloc) {
/* We have exhausted non-highmem pages, try highmem. */
alloc -= pages;
pages += pages_highmem;
pages_highmem = preallocate_image_highmem(alloc);
if (pages_highmem < alloc)
goto err_out;
pages += pages_highmem;
/*
* size is the desired number of saveable pages to leave in
* memory, so try to preallocate (all memory - size) pages.
*/
alloc = (count - pages) - size;
pages += preallocate_image_highmem(alloc);
} else {
/*
* There are approximately max_size saveable pages at this point
* and we want to reduce this number down to size.
*/
alloc = max_size - size;
size = preallocate_highmem_fraction(alloc, highmem, count);
pages_highmem += size;
alloc -= size;
size = preallocate_image_memory(alloc, avail_normal);
pages_highmem += preallocate_image_highmem(alloc - size);
pages += pages_highmem + size;
}
/*
* We only need as many page frames for the image as there are saveable
* pages in memory, but we have allocated more. Release the excessive
* ones now.
*/
free_unnecessary_pages();
out:
do_gettimeofday(&stop);
printk(KERN_CONT "done (allocated %lu pages)\n", pages);
swsusp_show_speed(&start, &stop, pages, "Allocated");
return 0;
err_out:
printk(KERN_CONT "\n");
swsusp_free();
return -ENOMEM;
}
#ifdef CONFIG_HIGHMEM
/**
* count_pages_for_highmem - compute the number of non-highmem pages
* that will be necessary for creating copies of highmem pages.
*/
static unsigned int count_pages_for_highmem(unsigned int nr_highmem)
{
unsigned int free_highmem = count_free_highmem_pages() + alloc_highmem;
if (free_highmem >= nr_highmem)
nr_highmem = 0;
else
nr_highmem -= free_highmem;
return nr_highmem;
}
#else
static unsigned int
count_pages_for_highmem(unsigned int nr_highmem) { return 0; }
#endif /* CONFIG_HIGHMEM */
/**
* enough_free_mem - Make sure we have enough free memory for the
* snapshot image.
*/
static int enough_free_mem(unsigned int nr_pages, unsigned int nr_highmem)
{
struct zone *zone;
unsigned int free = alloc_normal;
for_each_populated_zone(zone)
if (!is_highmem(zone))
free += zone_page_state(zone, NR_FREE_PAGES);
nr_pages += count_pages_for_highmem(nr_highmem);
pr_debug("PM: Normal pages needed: %u + %u, available pages: %u\n",
nr_pages, PAGES_FOR_IO, free);
return free > nr_pages + PAGES_FOR_IO;
}
#ifdef CONFIG_HIGHMEM
/**
* get_highmem_buffer - if there are some highmem pages in the suspend
* image, we may need the buffer to copy them and/or load their data.
*/
static inline int get_highmem_buffer(int safe_needed)
{
buffer = get_image_page(GFP_ATOMIC | __GFP_COLD, safe_needed);
return buffer ? 0 : -ENOMEM;
}
/**
* alloc_highmem_pages - allocate some highmem pages for the image.
* Try to allocate as many pages as needed, but if the number of free
* highmem pages is less than that, allocate them all.
*/
static inline unsigned int
alloc_highmem_pages(struct memory_bitmap *bm, unsigned int nr_highmem)
{
unsigned int to_alloc = count_free_highmem_pages();
if (to_alloc > nr_highmem)
to_alloc = nr_highmem;
nr_highmem -= to_alloc;
while (to_alloc-- > 0) {
struct page *page;
page = alloc_image_page(__GFP_HIGHMEM);
memory_bm_set_bit(bm, page_to_pfn(page));
}
return nr_highmem;
}
#else
static inline int get_highmem_buffer(int safe_needed) { return 0; }
static inline unsigned int
alloc_highmem_pages(struct memory_bitmap *bm, unsigned int n) { return 0; }
#endif /* CONFIG_HIGHMEM */
/**
* swsusp_alloc - allocate memory for the suspend image
*
* We first try to allocate as many highmem pages as there are
* saveable highmem pages in the system. If that fails, we allocate
* non-highmem pages for the copies of the remaining highmem ones.
*
* In this approach it is likely that the copies of highmem pages will
* also be located in the high memory, because of the way in which
* copy_data_pages() works.
*/
static int
swsusp_alloc(struct memory_bitmap *orig_bm, struct memory_bitmap *copy_bm,
unsigned int nr_pages, unsigned int nr_highmem)
{
if (nr_highmem > 0) {
if (get_highmem_buffer(PG_ANY))
goto err_out;
if (nr_highmem > alloc_highmem) {
nr_highmem -= alloc_highmem;
nr_pages += alloc_highmem_pages(copy_bm, nr_highmem);
}
}
if (nr_pages > alloc_normal) {
nr_pages -= alloc_normal;
while (nr_pages-- > 0) {
struct page *page;
page = alloc_image_page(GFP_ATOMIC | __GFP_COLD);
if (!page)
goto err_out;
memory_bm_set_bit(copy_bm, page_to_pfn(page));
}
}
return 0;
err_out:
swsusp_free();
return -ENOMEM;
}
asmlinkage int swsusp_save(void)
{
unsigned int nr_pages, nr_highmem;
printk(KERN_INFO "PM: Creating hibernation image:\n");
drain_local_pages(NULL);
nr_pages = count_data_pages();
nr_highmem = count_highmem_pages();
printk(KERN_INFO "PM: Need to copy %u pages\n", nr_pages + nr_highmem);
if (!enough_free_mem(nr_pages, nr_highmem)) {
printk(KERN_ERR "PM: Not enough free memory\n");
return -ENOMEM;
}
if (swsusp_alloc(&orig_bm, &copy_bm, nr_pages, nr_highmem)) {
printk(KERN_ERR "PM: Memory allocation failed\n");
return -ENOMEM;
}
/* During allocating of suspend pagedir, new cold pages may appear.
* Kill them.
*/
drain_local_pages(NULL);
copy_data_pages(&copy_bm, &orig_bm);
/*
* End of critical section. From now on, we can write to memory,
* but we should not touch disk. This especially means we must _not_
* touch swap space! Except we must write out our image of course.
*/
nr_pages += nr_highmem;
nr_copy_pages = nr_pages;
nr_meta_pages = DIV_ROUND_UP(nr_pages * sizeof(long), PAGE_SIZE);
printk(KERN_INFO "PM: Hibernation image created (%d pages copied)\n",
nr_pages);
return 0;
}
#ifndef CONFIG_ARCH_HIBERNATION_HEADER
static int init_header_complete(struct swsusp_info *info)
{
memcpy(&info->uts, init_utsname(), sizeof(struct new_utsname));
info->version_code = LINUX_VERSION_CODE;
return 0;
}
static char *check_image_kernel(struct swsusp_info *info)
{
if (info->version_code != LINUX_VERSION_CODE)
return "kernel version";
if (strcmp(info->uts.sysname,init_utsname()->sysname))
return "system type";
if (strcmp(info->uts.release,init_utsname()->release))
return "kernel release";
if (strcmp(info->uts.version,init_utsname()->version))
return "version";
if (strcmp(info->uts.machine,init_utsname()->machine))
return "machine";
return NULL;
}
#endif /* CONFIG_ARCH_HIBERNATION_HEADER */
unsigned long snapshot_get_image_size(void)
{
return nr_copy_pages + nr_meta_pages + 1;
}
static int init_header(struct swsusp_info *info)
{
memset(info, 0, sizeof(struct swsusp_info));
info->num_physpages = num_physpages;
info->image_pages = nr_copy_pages;
info->pages = snapshot_get_image_size();
info->size = info->pages;
info->size <<= PAGE_SHIFT;
return init_header_complete(info);
}
/**
* pack_pfns - pfns corresponding to the set bits found in the bitmap @bm
* are stored in the array @buf[] (1 page at a time)
*/
static inline void
pack_pfns(unsigned long *buf, struct memory_bitmap *bm)
{
int j;
for (j = 0; j < PAGE_SIZE / sizeof(long); j++) {
buf[j] = memory_bm_next_pfn(bm);
if (unlikely(buf[j] == BM_END_OF_MAP))
break;
/* Save page key for data page (s390 only). */
page_key_read(buf + j);
}
}
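/*
* A sketch of the metadata page produced above (assuming 4 KB pages and
* 8-byte longs, i.e. 512 entries per page):
*
*	buf[0] = pfn of the first image page
*	buf[1] = pfn of the second image page
*	...
*	buf[n] = BM_END_OF_MAP once the bitmap is exhausted
*
* unpack_orig_pfns() below performs the inverse operation on resume.
*/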
/**
* snapshot_read_next - used for reading the system memory snapshot.
*
* On the first call to it @handle should point to a zeroed
* snapshot_handle structure. The structure gets updated and a pointer
* to it should be passed to this function every next time.
*
* On success the function returns a positive number. Then, the caller
* is allowed to read up to the returned number of bytes from the memory
* location computed by the data_of() macro.
*
* The function returns 0 to indicate the end of data stream condition,
* and a negative number is returned on error. In such cases the
* structure pointed to by @handle is not updated and should not be used
* any more.
*/
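/*
* A minimal caller-side sketch (store_page() is a hypothetical helper
* assumed to persist PAGE_SIZE bytes, e.g. to a swap area):
*
*	int n;
*	while ((n = snapshot_read_next(&handle)) > 0) {
*		if (store_page(data_of(handle)))
*			break;
*	}
*/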
int snapshot_read_next(struct snapshot_handle *handle)
{
if (handle->cur > nr_meta_pages + nr_copy_pages)
return 0;
if (!buffer) {
/* This makes the buffer be freed by swsusp_free() */
buffer = get_image_page(GFP_ATOMIC, PG_ANY);
if (!buffer)
return -ENOMEM;
}
if (!handle->cur) {
int error;
error = init_header((struct swsusp_info *)buffer);
if (error)
return error;
handle->buffer = buffer;
memory_bm_position_reset(&orig_bm);
memory_bm_position_reset(&copy_bm);
} else if (handle->cur <= nr_meta_pages) {
clear_page(buffer);
pack_pfns(buffer, &orig_bm);
} else {
struct page *page;
page = pfn_to_page(memory_bm_next_pfn(&copy_bm));
if (PageHighMem(page)) {
/* Highmem pages are copied to the buffer,
* because we can't return with a kmapped
* highmem page (we may not be called again).
*/
void *kaddr;
kaddr = kmap_atomic(page);
copy_page(buffer, kaddr);
kunmap_atomic(kaddr);
handle->buffer = buffer;
} else {
handle->buffer = page_address(page);
}
}
handle->cur++;
return PAGE_SIZE;
}
/**
* mark_unsafe_pages - mark the pages that cannot be used for storing
* the image during resume, because they conflict with the pages that
* had been used before suspend
*/
static int mark_unsafe_pages(struct memory_bitmap *bm)
{
struct zone *zone;
unsigned long pfn, max_zone_pfn;
/* Clear page flags */
for_each_populated_zone(zone) {
max_zone_pfn = zone->zone_start_pfn + zone->spanned_pages;
for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
if (pfn_valid(pfn))
swsusp_unset_page_free(pfn_to_page(pfn));
}
/* Mark pages that correspond to the "original" pfns as "unsafe" */
memory_bm_position_reset(bm);
do {
pfn = memory_bm_next_pfn(bm);
if (likely(pfn != BM_END_OF_MAP)) {
if (likely(pfn_valid(pfn)))
swsusp_set_page_free(pfn_to_page(pfn));
else
return -EFAULT;
}
} while (pfn != BM_END_OF_MAP);
allocated_unsafe_pages = 0;
return 0;
}
static void
duplicate_memory_bitmap(struct memory_bitmap *dst, struct memory_bitmap *src)
{
unsigned long pfn;
memory_bm_position_reset(src);
pfn = memory_bm_next_pfn(src);
while (pfn != BM_END_OF_MAP) {
memory_bm_set_bit(dst, pfn);
pfn = memory_bm_next_pfn(src);
}
}
static int check_header(struct swsusp_info *info)
{
char *reason;
reason = check_image_kernel(info);
if (!reason && info->num_physpages != num_physpages)
reason = "memory size";
if (reason) {
printk(KERN_ERR "PM: Image mismatch: %s\n", reason);
return -EPERM;
}
return 0;
}
/**
* load_header - check the image header and copy data from it
*/
static int
load_header(struct swsusp_info *info)
{
int error;
restore_pblist = NULL;
error = check_header(info);
if (!error) {
nr_copy_pages = info->image_pages;
nr_meta_pages = info->pages - info->image_pages - 1;
}
return error;
}
/**
* unpack_orig_pfns - for each element of @buf[] (1 page at a time) set
* the corresponding bit in the memory bitmap @bm
*/
static int unpack_orig_pfns(unsigned long *buf, struct memory_bitmap *bm)
{
int j;
for (j = 0; j < PAGE_SIZE / sizeof(long); j++) {
if (unlikely(buf[j] == BM_END_OF_MAP))
break;
/* Extract and buffer page key for data page (s390 only). */
page_key_memorize(buf + j);
if (memory_bm_pfn_present(bm, buf[j]))
memory_bm_set_bit(bm, buf[j]);
else
return -EFAULT;
}
return 0;
}
/* List of "safe" pages that may be used to store data loaded from the suspend
* image
*/
static struct linked_page *safe_pages_list;
#ifdef CONFIG_HIGHMEM
/* struct highmem_pbe is used for creating the list of highmem pages that
* should be restored atomically during the resume from disk, because the page
* frames they have occupied before the suspend are in use.
*/
struct highmem_pbe {
struct page *copy_page; /* data is here now */
struct page *orig_page; /* data was here before the suspend */
struct highmem_pbe *next;
};
/* List of highmem PBEs needed for restoring the highmem pages that were
* allocated before the suspend and included in the suspend image, but have
* also been allocated by the "resume" kernel, so their contents cannot be
* written directly to their "original" page frames.
*/
static struct highmem_pbe *highmem_pblist;
/**
* count_highmem_image_pages - compute the number of highmem pages in the
* suspend image. The bits in the memory bitmap @bm that correspond to the
* image pages are assumed to be set.
*/
static unsigned int count_highmem_image_pages(struct memory_bitmap *bm)
{
unsigned long pfn;
unsigned int cnt = 0;
memory_bm_position_reset(bm);
pfn = memory_bm_next_pfn(bm);
while (pfn != BM_END_OF_MAP) {
if (PageHighMem(pfn_to_page(pfn)))
cnt++;
pfn = memory_bm_next_pfn(bm);
}
return cnt;
}
/**
* prepare_highmem_image - try to allocate as many highmem pages as
* there are highmem image pages (@nr_highmem_p points to the variable
* containing the number of highmem image pages). The pages that are
* "safe" (ie. will not be overwritten when the suspend image is
* restored) have the corresponding bits set in @bm (it must be
* uninitialized).
*
* NOTE: This function should not be called if there are no highmem
* image pages.
*/
static unsigned int safe_highmem_pages;
static struct memory_bitmap *safe_highmem_bm;
static int
prepare_highmem_image(struct memory_bitmap *bm, unsigned int *nr_highmem_p)
{
unsigned int to_alloc;
if (memory_bm_create(bm, GFP_ATOMIC, PG_SAFE))
return -ENOMEM;
if (get_highmem_buffer(PG_SAFE))
return -ENOMEM;
to_alloc = count_free_highmem_pages();
if (to_alloc > *nr_highmem_p)
to_alloc = *nr_highmem_p;
else
*nr_highmem_p = to_alloc;
safe_highmem_pages = 0;
while (to_alloc-- > 0) {
struct page *page;
page = alloc_page(__GFP_HIGHMEM);
if (!swsusp_page_is_free(page)) {
/* The page is "safe", set its bit the bitmap */
memory_bm_set_bit(bm, page_to_pfn(page));
safe_highmem_pages++;
}
/* Mark the page as allocated */
swsusp_set_page_forbidden(page);
swsusp_set_page_free(page);
}
memory_bm_position_reset(bm);
safe_highmem_bm = bm;
return 0;
}
/**
* get_highmem_page_buffer - for given highmem image page find the buffer
* that suspend_write_next() should set for its caller to write to.
*
* If the page is to be saved to its "original" page frame or a copy of
* the page is to be made in the highmem, @buffer is returned. Otherwise,
* the copy of the page is to be made in normal memory, so the address of
* the copy is returned.
*
* If @buffer is returned, the caller of suspend_write_next() will write
* the page's contents to @buffer, so they will have to be copied to the
* right location on the next call to suspend_write_next() and it is done
* with the help of copy_last_highmem_page(). For this purpose, if
* @buffer is returned, @last_highmem_page is set to the page to which
* the data will have to be copied from @buffer.
*/
static struct page *last_highmem_page;
static void *
get_highmem_page_buffer(struct page *page, struct chain_allocator *ca)
{
struct highmem_pbe *pbe;
void *kaddr;
if (swsusp_page_is_forbidden(page) && swsusp_page_is_free(page)) {
/* We have allocated the "original" page frame and we can
* use it directly to store the loaded page.
*/
last_highmem_page = page;
return buffer;
}
/* The "original" page frame has not been allocated and we have to
* use a "safe" page frame to store the loaded page.
*/
pbe = chain_alloc(ca, sizeof(struct highmem_pbe));
if (!pbe) {
swsusp_free();
return ERR_PTR(-ENOMEM);
}
pbe->orig_page = page;
if (safe_highmem_pages > 0) {
struct page *tmp;
/* Copy of the page will be stored in high memory */
kaddr = buffer;
tmp = pfn_to_page(memory_bm_next_pfn(safe_highmem_bm));
safe_highmem_pages--;
last_highmem_page = tmp;
pbe->copy_page = tmp;
} else {
/* Copy of the page will be stored in normal memory */
kaddr = safe_pages_list;
safe_pages_list = safe_pages_list->next;
pbe->copy_page = virt_to_page(kaddr);
}
pbe->next = highmem_pblist;
highmem_pblist = pbe;
return kaddr;
}
/**
* copy_last_highmem_page - copy the contents of a highmem image from
* @buffer, where the caller of snapshot_write_next() has placed them,
* to the right location represented by @last_highmem_page.
*/
static void copy_last_highmem_page(void)
{
if (last_highmem_page) {
void *dst;
dst = kmap_atomic(last_highmem_page);
copy_page(dst, buffer);
kunmap_atomic(dst);
last_highmem_page = NULL;
}
}
static inline int last_highmem_page_copied(void)
{
return !last_highmem_page;
}
static inline void free_highmem_data(void)
{
if (safe_highmem_bm)
memory_bm_free(safe_highmem_bm, PG_UNSAFE_CLEAR);
if (buffer)
free_image_page(buffer, PG_UNSAFE_CLEAR);
}
#else
static inline int get_safe_write_buffer(void) { return 0; }
static unsigned int
count_highmem_image_pages(struct memory_bitmap *bm) { return 0; }
static inline int
prepare_highmem_image(struct memory_bitmap *bm, unsigned int *nr_highmem_p)
{
return 0;
}
static inline void *
get_highmem_page_buffer(struct page *page, struct chain_allocator *ca)
{
return ERR_PTR(-EINVAL);
}
static inline void copy_last_highmem_page(void) {}
static inline int last_highmem_page_copied(void) { return 1; }
static inline void free_highmem_data(void) {}
#endif /* CONFIG_HIGHMEM */
/**
* prepare_image - use the memory bitmap @bm to mark the pages that will
* be overwritten in the process of restoring the system memory state
* from the suspend image ("unsafe" pages) and allocate memory for the
* image.
*
* The idea is to allocate a new memory bitmap first and then allocate
* as many pages as needed for the image data, but not to assign these
* pages to specific tasks initially. Instead, we just mark them as
* allocated and create a list of "safe" pages that will be used
* later. On systems with high memory a list of "safe" highmem pages is
* also created.
*/
#define PBES_PER_LINKED_PAGE (LINKED_PAGE_DATA_SIZE / sizeof(struct pbe))
static int
prepare_image(struct memory_bitmap *new_bm, struct memory_bitmap *bm)
{
unsigned int nr_pages, nr_highmem;
struct linked_page *sp_list, *lp;
int error;
/* If there is no highmem, the buffer will not be necessary */
free_image_page(buffer, PG_UNSAFE_CLEAR);
buffer = NULL;
nr_highmem = count_highmem_image_pages(bm);
error = mark_unsafe_pages(bm);
if (error)
goto Free;
error = memory_bm_create(new_bm, GFP_ATOMIC, PG_SAFE);
if (error)
goto Free;
duplicate_memory_bitmap(new_bm, bm);
memory_bm_free(bm, PG_UNSAFE_KEEP);
if (nr_highmem > 0) {
error = prepare_highmem_image(bm, &nr_highmem);
if (error)
goto Free;
}
/* Reserve some safe pages for potential later use.
*
* NOTE: This way we make sure there will be enough safe pages for the
* chain_alloc() in get_buffer(). It is a bit wasteful, but
* nr_copy_pages cannot be greater than 50% of the memory anyway.
*/
sp_list = NULL;
/* nr_copy_pages cannot be less than allocated_unsafe_pages */
nr_pages = nr_copy_pages - nr_highmem - allocated_unsafe_pages;
nr_pages = DIV_ROUND_UP(nr_pages, PBES_PER_LINKED_PAGE);
while (nr_pages > 0) {
lp = get_image_page(GFP_ATOMIC, PG_SAFE);
if (!lp) {
error = -ENOMEM;
goto Free;
}
lp->next = sp_list;
sp_list = lp;
nr_pages--;
}
/* Preallocate memory for the image */
safe_pages_list = NULL;
nr_pages = nr_copy_pages - nr_highmem - allocated_unsafe_pages;
while (nr_pages > 0) {
lp = (struct linked_page *)get_zeroed_page(GFP_ATOMIC);
if (!lp) {
error = -ENOMEM;
goto Free;
}
if (!swsusp_page_is_free(virt_to_page(lp))) {
/* The page is "safe", add it to the list */
lp->next = safe_pages_list;
safe_pages_list = lp;
}
/* Mark the page as allocated */
swsusp_set_page_forbidden(virt_to_page(lp));
swsusp_set_page_free(virt_to_page(lp));
nr_pages--;
}
/* Free the reserved safe pages so that chain_alloc() can use them */
while (sp_list) {
lp = sp_list->next;
free_image_page(sp_list, PG_UNSAFE_CLEAR);
sp_list = lp;
}
return 0;
Free:
swsusp_free();
return error;
}
/**
* get_buffer - compute the address that snapshot_write_next() should
* set for its caller to write to.
*/
static void *get_buffer(struct memory_bitmap *bm, struct chain_allocator *ca)
{
struct pbe *pbe;
struct page *page;
unsigned long pfn = memory_bm_next_pfn(bm);
if (pfn == BM_END_OF_MAP)
return ERR_PTR(-EFAULT);
page = pfn_to_page(pfn);
if (PageHighMem(page))
return get_highmem_page_buffer(page, ca);
if (swsusp_page_is_forbidden(page) && swsusp_page_is_free(page))
/* We have allocated the "original" page frame and we can
* use it directly to store the loaded page.
*/
return page_address(page);
/* The "original" page frame has not been allocated and we have to
* use a "safe" page frame to store the loaded page.
*/
pbe = chain_alloc(ca, sizeof(struct pbe));
if (!pbe) {
swsusp_free();
return ERR_PTR(-ENOMEM);
}
pbe->orig_address = page_address(page);
pbe->address = safe_pages_list;
safe_pages_list = safe_pages_list->next;
pbe->next = restore_pblist;
restore_pblist = pbe;
return pbe->address;
}
/**
* snapshot_write_next - used for writing the system memory snapshot.
*
* On the first call to it @handle should point to a zeroed
* snapshot_handle structure. The structure gets updated and a pointer
* to it should be passed to this function every next time.
*
* On success the function returns a positive number. Then, the caller
* is allowed to write up to the returned number of bytes to the memory
* location computed by the data_of() macro.
*
* The function returns 0 to indicate the "end of file" condition,
* and a negative number is returned on error. In such cases the
* structure pointed to by @handle is not updated and should not be used
* any more.
*/
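/*
* A minimal caller-side sketch, mirroring the read path (load_page() is
* a hypothetical helper assumed to fetch the next PAGE_SIZE bytes of
* the image):
*
*	int n;
*	while ((n = snapshot_write_next(&handle)) > 0) {
*		if (load_page(data_of(handle)))
*			break;
*	}
*	snapshot_write_finalize(&handle);
*/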
int snapshot_write_next(struct snapshot_handle *handle)
{
static struct chain_allocator ca;
int error = 0;
/* Check if we have already loaded the entire image */
if (handle->cur > 1 && handle->cur > nr_meta_pages + nr_copy_pages)
return 0;
handle->sync_read = 1;
if (!handle->cur) {
if (!buffer)
/* This makes the buffer be freed by swsusp_free() */
buffer = get_image_page(GFP_ATOMIC, PG_ANY);
if (!buffer)
return -ENOMEM;
handle->buffer = buffer;
} else if (handle->cur == 1) {
error = load_header(buffer);
if (error)
return error;
error = memory_bm_create(&copy_bm, GFP_ATOMIC, PG_ANY);
if (error)
return error;
/* Allocate buffer for page keys. */
error = page_key_alloc(nr_copy_pages);
if (error)
return error;
} else if (handle->cur <= nr_meta_pages + 1) {
error = unpack_orig_pfns(buffer, &copy_bm);
if (error)
return error;
if (handle->cur == nr_meta_pages + 1) {
error = prepare_image(&orig_bm, ©_bm);
if (error)
return error;
chain_init(&ca, GFP_ATOMIC, PG_SAFE);
memory_bm_position_reset(&orig_bm);
restore_pblist = NULL;
handle->buffer = get_buffer(&orig_bm, &ca);
handle->sync_read = 0;
if (IS_ERR(handle->buffer))
return PTR_ERR(handle->buffer);
}
} else {
copy_last_highmem_page();
/* Restore page key for data page (s390 only). */
page_key_write(handle->buffer);
handle->buffer = get_buffer(&orig_bm, &ca);
if (IS_ERR(handle->buffer))
return PTR_ERR(handle->buffer);
if (handle->buffer != buffer)
handle->sync_read = 0;
}
handle->cur++;
return PAGE_SIZE;
}
/**
* snapshot_write_finalize - must be called after the last call to
* snapshot_write_next() in case the last page in the image happens
* to be a highmem page and its contents should be stored in the
* highmem. Additionally, it releases the memory that will not be
* used any more.
*/
void snapshot_write_finalize(struct snapshot_handle *handle)
{
copy_last_highmem_page();
/* Restore page key for data page (s390 only). */
page_key_write(handle->buffer);
page_key_free();
/* Free only if we have loaded the image entirely */
if (handle->cur > 1 && handle->cur > nr_meta_pages + nr_copy_pages) {
memory_bm_free(&orig_bm, PG_UNSAFE_CLEAR);
free_highmem_data();
}
}
int snapshot_image_loaded(struct snapshot_handle *handle)
{
return !(!nr_copy_pages || !last_highmem_page_copied() ||
handle->cur <= nr_meta_pages + nr_copy_pages);
}
#ifdef CONFIG_HIGHMEM
/* Assumes that @buf is ready and points to a "safe" page */
static inline void
swap_two_pages_data(struct page *p1, struct page *p2, void *buf)
{
void *kaddr1, *kaddr2;
kaddr1 = kmap_atomic(p1);
kaddr2 = kmap_atomic(p2);
copy_page(buf, kaddr1);
copy_page(kaddr1, kaddr2);
copy_page(kaddr2, buf);
kunmap_atomic(kaddr2);
kunmap_atomic(kaddr1);
}
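/*
* The three copy_page() calls above are the page-sized analogue of the
* classic swap via a temporary:
*
*	tmp = a; a = b; b = tmp;
*
* with @buf playing the role of tmp; kmap_atomic() is required because
* either page may live in high memory and lack a permanent mapping.
*/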
/**
* restore_highmem - for each highmem page that was allocated before
* the suspend and included in the suspend image, and also has been
* allocated by the "resume" kernel swap its current (ie. "before
* resume") contents with the previous (ie. "before suspend") one.
*
* If the resume eventually fails, we can call this function once
* again and restore the "before resume" highmem state.
*/
int restore_highmem(void)
{
struct highmem_pbe *pbe = highmem_pblist;
void *buf;
if (!pbe)
return 0;
buf = get_image_page(GFP_ATOMIC, PG_SAFE);
if (!buf)
return -ENOMEM;
while (pbe) {
swap_two_pages_data(pbe->copy_page, pbe->orig_page, buf);
pbe = pbe->next;
}
free_image_page(buf, PG_UNSAFE_CLEAR);
return 0;
}
#endif /* CONFIG_HIGHMEM */
| gpl-2.0 |
mcgi5sr2/kernel_samsung_smdk4412 | drivers/gpu/drm/nouveau/nouveau_irq.c | 2213 | 3977 | /*
* Copyright (C) 2006 Ben Skeggs.
*
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining
* a copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sublicense, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial
* portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
* IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
* LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
* OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
* WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
*/
/*
* Authors:
* Ben Skeggs <darktama@iinet.net.au>
*/
#include "drmP.h"
#include "drm.h"
#include "nouveau_drm.h"
#include "nouveau_drv.h"
#include "nouveau_reg.h"
#include "nouveau_ramht.h"
#include "nouveau_util.h"
void
nouveau_irq_preinstall(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
/* Master disable */
nv_wr32(dev, NV03_PMC_INTR_EN_0, 0);
INIT_LIST_HEAD(&dev_priv->vbl_waiting);
}
int
nouveau_irq_postinstall(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
/* Master enable */
nv_wr32(dev, NV03_PMC_INTR_EN_0, NV_PMC_INTR_EN_0_MASTER_ENABLE);
if (dev_priv->msi_enabled)
nv_wr08(dev, 0x00088068, 0xff);
return 0;
}
void
nouveau_irq_uninstall(struct drm_device *dev)
{
/* Master disable */
nv_wr32(dev, NV03_PMC_INTR_EN_0, 0);
}
irqreturn_t
nouveau_irq_handler(DRM_IRQ_ARGS)
{
struct drm_device *dev = (struct drm_device *)arg;
struct drm_nouveau_private *dev_priv = dev->dev_private;
unsigned long flags;
u32 stat;
int i;
stat = nv_rd32(dev, NV03_PMC_INTR_0);
if (!stat)
return IRQ_NONE;
spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
for (i = 0; i < 32 && stat; i++) {
if (!(stat & (1 << i)) || !dev_priv->irq_handler[i])
continue;
dev_priv->irq_handler[i](dev);
stat &= ~(1 << i);
}
if (dev_priv->msi_enabled)
nv_wr08(dev, 0x00088068, 0xff);
spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
if (stat && nouveau_ratelimit())
NV_ERROR(dev, "PMC - unhandled INTR 0x%08x\n", stat);
return IRQ_HANDLED;
}
int
nouveau_irq_init(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
int ret;
if (nouveau_msi != 0 && dev_priv->card_type >= NV_50) {
ret = pci_enable_msi(dev->pdev);
if (ret == 0) {
NV_INFO(dev, "enabled MSI\n");
dev_priv->msi_enabled = true;
}
}
return drm_irq_install(dev);
}
void
nouveau_irq_fini(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
drm_irq_uninstall(dev);
if (dev_priv->msi_enabled)
pci_disable_msi(dev->pdev);
}
void
nouveau_irq_register(struct drm_device *dev, int status_bit,
void (*handler)(struct drm_device *))
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
unsigned long flags;
spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
dev_priv->irq_handler[status_bit] = handler;
spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
}
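/*
* Typical (hypothetical) usage from an engine's init/teardown paths,
* routing one PMC status bit to that engine's ISR:
*
*	nouveau_irq_register(dev, 12, my_engine_isr);
*	...
*	nouveau_irq_unregister(dev, 12);
*
* The handler then runs from nouveau_irq_handler() whenever bit 12 is
* set in NV03_PMC_INTR_0.
*/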
void
nouveau_irq_unregister(struct drm_device *dev, int status_bit)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
unsigned long flags;
spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
dev_priv->irq_handler[status_bit] = NULL;
spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
}
| gpl-2.0 |
choco81/0E_kernel | arch/tile/kernel/process.c | 2725 | 21750 | /*
* Copyright 2010 Tilera Corporation. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation, version 2.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
* NON INFRINGEMENT. See the GNU General Public License for
* more details.
*/
#include <linux/sched.h>
#include <linux/preempt.h>
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/kprobes.h>
#include <linux/elfcore.h>
#include <linux/tick.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/compat.h>
#include <linux/hardirq.h>
#include <linux/syscalls.h>
#include <linux/kernel.h>
#include <linux/tracehook.h>
#include <linux/signal.h>
#include <asm/system.h>
#include <asm/stack.h>
#include <asm/homecache.h>
#include <asm/syscalls.h>
#include <asm/traps.h>
#ifdef CONFIG_HARDWALL
#include <asm/hardwall.h>
#endif
#include <arch/chip.h>
#include <arch/abi.h>
/*
* Use the (x86) "idle=poll" option to prefer low latency when leaving the
* idle loop over low power while in the idle loop, e.g. if we have
* one thread per core and we want to get threads out of futex waits fast.
*/
static int no_idle_nap;
static int __init idle_setup(char *str)
{
if (!str)
return -EINVAL;
if (!strcmp(str, "poll")) {
pr_info("using polling idle threads.\n");
no_idle_nap = 1;
} else if (!strcmp(str, "halt"))
no_idle_nap = 0;
else
return -1;
return 0;
}
early_param("idle", idle_setup);
/*
* The idle thread. There's no useful work to be
* done, so just try to conserve power and have a
* low exit latency (ie sit in a loop waiting for
* somebody to say that they'd like to reschedule)
*/
void cpu_idle(void)
{
int cpu = smp_processor_id();
current_thread_info()->status |= TS_POLLING;
if (no_idle_nap) {
while (1) {
while (!need_resched())
cpu_relax();
schedule();
}
}
/* endless idle loop with no priority at all */
while (1) {
tick_nohz_stop_sched_tick(1);
while (!need_resched()) {
if (cpu_is_offline(cpu))
BUG(); /* no HOTPLUG_CPU */
local_irq_disable();
__get_cpu_var(irq_stat).idle_timestamp = jiffies;
current_thread_info()->status &= ~TS_POLLING;
/*
* TS_POLLING-cleared state must be visible before we
* test NEED_RESCHED:
*/
smp_mb();
if (!need_resched())
_cpu_idle();
else
local_irq_enable();
current_thread_info()->status |= TS_POLLING;
}
tick_nohz_restart_sched_tick();
preempt_enable_no_resched();
schedule();
preempt_disable();
}
}
struct thread_info *alloc_thread_info_node(struct task_struct *task, int node)
{
struct page *page;
gfp_t flags = GFP_KERNEL;
#ifdef CONFIG_DEBUG_STACK_USAGE
flags |= __GFP_ZERO;
#endif
page = alloc_pages_node(node, flags, THREAD_SIZE_ORDER);
if (!page)
return NULL;
return (struct thread_info *)page_address(page);
}
/*
* Free a thread_info node, and all of its derivative
* data structures.
*/
void free_thread_info(struct thread_info *info)
{
struct single_step_state *step_state = info->step_state;
#ifdef CONFIG_HARDWALL
/*
* We free a thread_info from the context of the task that has
* been scheduled next, so the original task is already dead.
* Calling deactivate here just frees up the data structures.
* If the task we're freeing held the last reference to a
* hardwall fd, it would have been released prior to this point
* anyway via exit_files(), and "hardwall" would be NULL by now.
*/
if (info->task->thread.hardwall)
hardwall_deactivate(info->task);
#endif
if (step_state) {
/*
* FIXME: we don't munmap step_state->buffer
* because the mm_struct for this process (info->task->mm)
* has already been zeroed in exit_mm(). Keeping a
* reference to it here seems like a bad move, so this
* means we can't munmap() the buffer, and therefore if we
* ptrace multiple threads in a process, we will slowly
* leak user memory. (Note that as soon as the last
* thread in a process dies, we will reclaim all user
* memory including single-step buffers in the usual way.)
* We should either assign a kernel VA to this buffer
* somehow, or we should associate the buffer(s) with the
* mm itself so we can clean them up that way.
*/
kfree(step_state);
}
free_pages((unsigned long)info, THREAD_SIZE_ORDER);
}
static void save_arch_state(struct thread_struct *t);
int copy_thread(unsigned long clone_flags, unsigned long sp,
unsigned long stack_size,
struct task_struct *p, struct pt_regs *regs)
{
struct pt_regs *childregs;
unsigned long ksp;
/*
* When creating a new kernel thread we pass sp as zero.
* Assign it to a reasonable value now that we have the stack.
*/
if (sp == 0 && regs->ex1 == PL_ICS_EX1(KERNEL_PL, 0))
sp = KSTK_TOP(p);
/*
* Do not clone step state from the parent; each thread
* must make its own lazily.
*/
task_thread_info(p)->step_state = NULL;
/*
* Start new thread in ret_from_fork so it schedules properly
* and then return from interrupt like the parent.
*/
p->thread.pc = (unsigned long) ret_from_fork;
/* Save user stack top pointer so we can ID the stack vm area later. */
p->thread.usp0 = sp;
/* Record the pid of the process that created this one. */
p->thread.creator_pid = current->pid;
/*
* Copy the registers onto the kernel stack so the
* return-from-interrupt code will reload it into registers.
*/
childregs = task_pt_regs(p);
*childregs = *regs;
childregs->regs[0] = 0; /* return value is zero */
childregs->sp = sp; /* override with new user stack pointer */
/*
* If CLONE_SETTLS is set, set "tp" in the new task to "r4",
* which is passed in as arg #5 to sys_clone().
*/
if (clone_flags & CLONE_SETTLS)
childregs->tp = regs->regs[4];
/*
* Copy the callee-saved registers from the passed pt_regs struct
* into the context-switch callee-saved registers area.
* This way when we start the interrupt-return sequence, the
* callee-save registers will be correctly in registers, which
* is how we assume the compiler leaves them as we start doing
* the normal return-from-interrupt path after calling C code.
* Zero out the C ABI save area to mark the top of the stack.
*/
ksp = (unsigned long) childregs;
ksp -= C_ABI_SAVE_AREA_SIZE; /* interrupt-entry save area */
((long *)ksp)[0] = ((long *)ksp)[1] = 0;
ksp -= CALLEE_SAVED_REGS_COUNT * sizeof(unsigned long);
memcpy((void *)ksp, &regs->regs[CALLEE_SAVED_FIRST_REG],
CALLEE_SAVED_REGS_COUNT * sizeof(unsigned long));
ksp -= C_ABI_SAVE_AREA_SIZE; /* __switch_to() save area */
((long *)ksp)[0] = ((long *)ksp)[1] = 0;
p->thread.ksp = ksp;
#if CHIP_HAS_TILE_DMA()
/*
* No DMA in the new thread. We model this on the fact that
* fork() clears the pending signals, alarms, and aio for the child.
*/
memset(&p->thread.tile_dma_state, 0, sizeof(struct tile_dma_state));
memset(&p->thread.dma_async_tlb, 0, sizeof(struct async_tlb));
#endif
#if CHIP_HAS_SN_PROC()
/* Likewise, the new thread is not running static processor code. */
p->thread.sn_proc_running = 0;
memset(&p->thread.sn_async_tlb, 0, sizeof(struct async_tlb));
#endif
#if CHIP_HAS_PROC_STATUS_SPR()
/* New thread has its miscellaneous processor state bits clear. */
p->thread.proc_status = 0;
#endif
#ifdef CONFIG_HARDWALL
/* New thread does not own any networks. */
p->thread.hardwall = NULL;
#endif
/*
* Start the new thread with the current architecture state
* (user interrupt masks, etc.).
*/
save_arch_state(&p->thread);
return 0;
}
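/*
* A sketch of the child's kernel stack as laid out above, addresses
* decreasing downwards:
*
*	task_pt_regs(p):	struct pt_regs (childregs)
*				C ABI save area (interrupt entry, zeroed)
*				callee-saved registers copied from regs
*	p->thread.ksp:		C ABI save area (__switch_to, zeroed)
*
* so the first __switch_to() into the child restores the callee-saved
* registers and "returns" to ret_from_fork (p->thread.pc).
*/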
/*
* Return "current" if it looks plausible, or else a pointer to a dummy.
* This can be helpful if we are just trying to emit a clean panic.
*/
struct task_struct *validate_current(void)
{
static struct task_struct corrupt = { .comm = "<corrupt>" };
struct task_struct *tsk = current;
if (unlikely((unsigned long)tsk < PAGE_OFFSET ||
(void *)tsk > high_memory ||
((unsigned long)tsk & (__alignof__(*tsk) - 1)) != 0)) {
pr_err("Corrupt 'current' %p (sp %#lx)\n", tsk, stack_pointer);
tsk = &corrupt;
}
return tsk;
}
/* Take and return the pointer to the previous task, for schedule_tail(). */
struct task_struct *sim_notify_fork(struct task_struct *prev)
{
struct task_struct *tsk = current;
__insn_mtspr(SPR_SIM_CONTROL, SIM_CONTROL_OS_FORK_PARENT |
(tsk->thread.creator_pid << _SIM_CONTROL_OPERATOR_BITS));
__insn_mtspr(SPR_SIM_CONTROL, SIM_CONTROL_OS_FORK |
(tsk->pid << _SIM_CONTROL_OPERATOR_BITS));
return prev;
}
int dump_task_regs(struct task_struct *tsk, elf_gregset_t *regs)
{
struct pt_regs *ptregs = task_pt_regs(tsk);
elf_core_copy_regs(regs, ptregs);
return 1;
}
#if CHIP_HAS_TILE_DMA()
/* Allow user processes to access the DMA SPRs */
void grant_dma_mpls(void)
{
#if CONFIG_KERNEL_PL == 2
__insn_mtspr(SPR_MPL_DMA_CPL_SET_1, 1);
__insn_mtspr(SPR_MPL_DMA_NOTIFY_SET_1, 1);
#else
__insn_mtspr(SPR_MPL_DMA_CPL_SET_0, 1);
__insn_mtspr(SPR_MPL_DMA_NOTIFY_SET_0, 1);
#endif
}
/* Forbid user processes from accessing the DMA SPRs */
void restrict_dma_mpls(void)
{
#if CONFIG_KERNEL_PL == 2
__insn_mtspr(SPR_MPL_DMA_CPL_SET_2, 1);
__insn_mtspr(SPR_MPL_DMA_NOTIFY_SET_2, 1);
#else
__insn_mtspr(SPR_MPL_DMA_CPL_SET_1, 1);
__insn_mtspr(SPR_MPL_DMA_NOTIFY_SET_1, 1);
#endif
}
/* Pause the DMA engine, then save off its state registers. */
static void save_tile_dma_state(struct tile_dma_state *dma)
{
unsigned long state = __insn_mfspr(SPR_DMA_USER_STATUS);
unsigned long post_suspend_state;
/* If we're running, suspend the engine. */
if ((state & DMA_STATUS_MASK) == SPR_DMA_STATUS__RUNNING_MASK)
__insn_mtspr(SPR_DMA_CTR, SPR_DMA_CTR__SUSPEND_MASK);
/*
* Wait for the engine to idle, then save regs. Note that we
* want to record the "running" bit from before suspension,
* and the "done" bit from after, so that we can properly
* distinguish a case where the user suspended the engine from
* the case where the kernel suspended as part of the context
* swap.
*/
do {
post_suspend_state = __insn_mfspr(SPR_DMA_USER_STATUS);
} while (post_suspend_state & SPR_DMA_STATUS__BUSY_MASK);
dma->src = __insn_mfspr(SPR_DMA_SRC_ADDR);
dma->src_chunk = __insn_mfspr(SPR_DMA_SRC_CHUNK_ADDR);
dma->dest = __insn_mfspr(SPR_DMA_DST_ADDR);
dma->dest_chunk = __insn_mfspr(SPR_DMA_DST_CHUNK_ADDR);
dma->strides = __insn_mfspr(SPR_DMA_STRIDE);
dma->chunk_size = __insn_mfspr(SPR_DMA_CHUNK_SIZE);
dma->byte = __insn_mfspr(SPR_DMA_BYTE);
dma->status = (state & SPR_DMA_STATUS__RUNNING_MASK) |
(post_suspend_state & SPR_DMA_STATUS__DONE_MASK);
}
/* Restart a DMA that was running before we were context-switched out. */
static void restore_tile_dma_state(struct thread_struct *t)
{
const struct tile_dma_state *dma = &t->tile_dma_state;
/*
* The only way to restore the done bit is to run a zero
* length transaction.
*/
if ((dma->status & SPR_DMA_STATUS__DONE_MASK) &&
!(__insn_mfspr(SPR_DMA_USER_STATUS) & SPR_DMA_STATUS__DONE_MASK)) {
__insn_mtspr(SPR_DMA_BYTE, 0);
__insn_mtspr(SPR_DMA_CTR, SPR_DMA_CTR__REQUEST_MASK);
while (__insn_mfspr(SPR_DMA_USER_STATUS) &
SPR_DMA_STATUS__BUSY_MASK)
;
}
__insn_mtspr(SPR_DMA_SRC_ADDR, dma->src);
__insn_mtspr(SPR_DMA_SRC_CHUNK_ADDR, dma->src_chunk);
__insn_mtspr(SPR_DMA_DST_ADDR, dma->dest);
__insn_mtspr(SPR_DMA_DST_CHUNK_ADDR, dma->dest_chunk);
__insn_mtspr(SPR_DMA_STRIDE, dma->strides);
__insn_mtspr(SPR_DMA_CHUNK_SIZE, dma->chunk_size);
__insn_mtspr(SPR_DMA_BYTE, dma->byte);
/*
* Restart the engine if we were running and not done.
* Clear a pending async DMA fault that we were waiting on return
* to user space to execute, since we expect the DMA engine
* to regenerate those faults for us now. Note that we don't
* try to clear the TIF_ASYNC_TLB flag, since it's relatively
* harmless if set, and it covers both DMA and the SN processor.
*/
if ((dma->status & DMA_STATUS_MASK) == SPR_DMA_STATUS__RUNNING_MASK) {
t->dma_async_tlb.fault_num = 0;
__insn_mtspr(SPR_DMA_CTR, SPR_DMA_CTR__REQUEST_MASK);
}
}
#endif
static void save_arch_state(struct thread_struct *t)
{
#if CHIP_HAS_SPLIT_INTR_MASK()
t->interrupt_mask = __insn_mfspr(SPR_INTERRUPT_MASK_0_0) |
((u64)__insn_mfspr(SPR_INTERRUPT_MASK_0_1) << 32);
#else
t->interrupt_mask = __insn_mfspr(SPR_INTERRUPT_MASK_0);
#endif
t->ex_context[0] = __insn_mfspr(SPR_EX_CONTEXT_0_0);
t->ex_context[1] = __insn_mfspr(SPR_EX_CONTEXT_0_1);
t->system_save[0] = __insn_mfspr(SPR_SYSTEM_SAVE_0_0);
t->system_save[1] = __insn_mfspr(SPR_SYSTEM_SAVE_0_1);
t->system_save[2] = __insn_mfspr(SPR_SYSTEM_SAVE_0_2);
t->system_save[3] = __insn_mfspr(SPR_SYSTEM_SAVE_0_3);
t->intctrl_0 = __insn_mfspr(SPR_INTCTRL_0_STATUS);
#if CHIP_HAS_PROC_STATUS_SPR()
t->proc_status = __insn_mfspr(SPR_PROC_STATUS);
#endif
#if !CHIP_HAS_FIXED_INTVEC_BASE()
t->interrupt_vector_base = __insn_mfspr(SPR_INTERRUPT_VECTOR_BASE_0);
#endif
#if CHIP_HAS_TILE_RTF_HWM()
t->tile_rtf_hwm = __insn_mfspr(SPR_TILE_RTF_HWM);
#endif
#if CHIP_HAS_DSTREAM_PF()
t->dstream_pf = __insn_mfspr(SPR_DSTREAM_PF);
#endif
}
static void restore_arch_state(const struct thread_struct *t)
{
#if CHIP_HAS_SPLIT_INTR_MASK()
__insn_mtspr(SPR_INTERRUPT_MASK_0_0, (u32) t->interrupt_mask);
__insn_mtspr(SPR_INTERRUPT_MASK_0_1, t->interrupt_mask >> 32);
#else
__insn_mtspr(SPR_INTERRUPT_MASK_0, t->interrupt_mask);
#endif
__insn_mtspr(SPR_EX_CONTEXT_0_0, t->ex_context[0]);
__insn_mtspr(SPR_EX_CONTEXT_0_1, t->ex_context[1]);
__insn_mtspr(SPR_SYSTEM_SAVE_0_0, t->system_save[0]);
__insn_mtspr(SPR_SYSTEM_SAVE_0_1, t->system_save[1]);
__insn_mtspr(SPR_SYSTEM_SAVE_0_2, t->system_save[2]);
__insn_mtspr(SPR_SYSTEM_SAVE_0_3, t->system_save[3]);
__insn_mtspr(SPR_INTCTRL_0_STATUS, t->intctrl_0);
#if CHIP_HAS_PROC_STATUS_SPR()
__insn_mtspr(SPR_PROC_STATUS, t->proc_status);
#endif
#if !CHIP_HAS_FIXED_INTVEC_BASE()
__insn_mtspr(SPR_INTERRUPT_VECTOR_BASE_0, t->interrupt_vector_base);
#endif
#if CHIP_HAS_TILE_RTF_HWM()
__insn_mtspr(SPR_TILE_RTF_HWM, t->tile_rtf_hwm);
#endif
#if CHIP_HAS_DSTREAM_PF()
__insn_mtspr(SPR_DSTREAM_PF, t->dstream_pf);
#endif
}
void _prepare_arch_switch(struct task_struct *next)
{
#if CHIP_HAS_SN_PROC()
int snctl;
#endif
#if CHIP_HAS_TILE_DMA()
struct tile_dma_state *dma = &current->thread.tile_dma_state;
if (dma->enabled)
save_tile_dma_state(dma);
#endif
#if CHIP_HAS_SN_PROC()
/*
* Suspend the static network processor if it was running.
* We do not suspend the fabric itself, just like we don't
* try to suspend the UDN.
*/
snctl = __insn_mfspr(SPR_SNCTL);
current->thread.sn_proc_running =
(snctl & SPR_SNCTL__FRZPROC_MASK) == 0;
if (current->thread.sn_proc_running)
__insn_mtspr(SPR_SNCTL, snctl | SPR_SNCTL__FRZPROC_MASK);
#endif
}
struct task_struct *__sched _switch_to(struct task_struct *prev,
struct task_struct *next)
{
/* DMA state is already saved; save off other arch state. */
save_arch_state(&prev->thread);
#if CHIP_HAS_TILE_DMA()
/*
* Restore DMA in new task if desired.
* Note that it is only safe to restart here since interrupts
* are disabled, so we can't take any DMATLB miss or access
* interrupts before we have finished switching stacks.
*/
if (next->thread.tile_dma_state.enabled) {
restore_tile_dma_state(&next->thread);
grant_dma_mpls();
} else {
restrict_dma_mpls();
}
#endif
/* Restore other arch state. */
restore_arch_state(&next->thread);
#if CHIP_HAS_SN_PROC()
/*
* Restart static network processor in the new process
* if it was running before.
*/
if (next->thread.sn_proc_running) {
int snctl = __insn_mfspr(SPR_SNCTL);
__insn_mtspr(SPR_SNCTL, snctl & ~SPR_SNCTL__FRZPROC_MASK);
}
#endif
#ifdef CONFIG_HARDWALL
/* Enable or disable access to the network registers appropriately. */
if (prev->thread.hardwall != NULL) {
if (next->thread.hardwall == NULL)
restrict_network_mpls();
} else if (next->thread.hardwall != NULL) {
grant_network_mpls();
}
#endif
/*
* Switch kernel SP, PC, and callee-saved registers.
* In the context of the new task, return the old task pointer
* (i.e. the task that actually called __switch_to).
* Pass the value to use for SYSTEM_SAVE_K_0 when we reset our sp.
*/
return __switch_to(prev, next, next_current_ksp0(next));
}
/*
* This routine is called on return from interrupt if any of the
* TIF_WORK_MASK flags are set in thread_info->flags. It is
* entered with interrupts disabled so we don't miss an event
* that modified the thread_info flags. If any flag is set, we
* handle it and return, and the calling assembly code will
* re-disable interrupts, reload the thread flags, and call back
* if more flags need to be handled.
*
* We return whether we need to check the thread_info flags again
* or not. Note that we don't clear TIF_SINGLESTEP here, so it's
* important that it be tested last, and then claim that we don't
* need to recheck the flags.
*/
int do_work_pending(struct pt_regs *regs, u32 thread_info_flags)
{
if (thread_info_flags & _TIF_NEED_RESCHED) {
schedule();
return 1;
}
#if CHIP_HAS_TILE_DMA() || CHIP_HAS_SN_PROC()
if (thread_info_flags & _TIF_ASYNC_TLB) {
do_async_page_fault(regs);
return 1;
}
#endif
if (thread_info_flags & _TIF_SIGPENDING) {
do_signal(regs);
return 1;
}
if (thread_info_flags & _TIF_NOTIFY_RESUME) {
clear_thread_flag(TIF_NOTIFY_RESUME);
tracehook_notify_resume(regs);
if (current->replacement_session_keyring)
key_replace_session_keyring();
return 1;
}
if (thread_info_flags & _TIF_SINGLESTEP) {
if ((regs->ex1 & SPR_EX_CONTEXT_1_1__PL_MASK) == 0)
single_step_once(regs);
return 0;
}
panic("work_pending: bad flags %#x\n", thread_info_flags);
}
/* Note there is an implicit fifth argument if (clone_flags & CLONE_SETTLS). */
SYSCALL_DEFINE5(clone, unsigned long, clone_flags, unsigned long, newsp,
void __user *, parent_tidptr, void __user *, child_tidptr,
struct pt_regs *, regs)
{
if (!newsp)
newsp = regs->sp;
return do_fork(clone_flags, newsp, regs, 0,
parent_tidptr, child_tidptr);
}
/*
* sys_execve() executes a new program.
*/
SYSCALL_DEFINE4(execve, const char __user *, path,
const char __user *const __user *, argv,
const char __user *const __user *, envp,
struct pt_regs *, regs)
{
long error;
char *filename;
filename = getname(path);
error = PTR_ERR(filename);
if (IS_ERR(filename))
goto out;
error = do_execve(filename, argv, envp, regs);
putname(filename);
if (error == 0)
single_step_execve();
out:
return error;
}
#ifdef CONFIG_COMPAT
long compat_sys_execve(const char __user *path,
compat_uptr_t __user *argv,
compat_uptr_t __user *envp,
struct pt_regs *regs)
{
long error;
char *filename;
filename = getname(path);
error = PTR_ERR(filename);
if (IS_ERR(filename))
goto out;
error = compat_do_execve(filename, argv, envp, regs);
putname(filename);
if (error == 0)
single_step_execve();
out:
return error;
}
#endif
unsigned long get_wchan(struct task_struct *p)
{
struct KBacktraceIterator kbt;
if (!p || p == current || p->state == TASK_RUNNING)
return 0;
for (KBacktraceIterator_init(&kbt, p, NULL);
!KBacktraceIterator_end(&kbt);
KBacktraceIterator_next(&kbt)) {
if (!in_sched_functions(kbt.it.pc))
return kbt.it.pc;
}
return 0;
}
/*
* We pass in lr as zero (cleared in kernel_thread) and the caller
* part of the backtrace ABI on the stack also zeroed (in copy_thread)
* so that backtraces will stop with this function.
* Note that we don't use r0, since copy_thread() clears it.
*/
static void start_kernel_thread(int dummy, int (*fn)(int), int arg)
{
do_exit(fn(arg));
}
/*
* Create a kernel thread
*/
int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags)
{
struct pt_regs regs;
memset(&regs, 0, sizeof(regs));
regs.ex1 = PL_ICS_EX1(KERNEL_PL, 0); /* run at kernel PL, no ICS */
regs.pc = (long) start_kernel_thread;
regs.flags = PT_FLAGS_CALLER_SAVES; /* need to restore r1 and r2 */
regs.regs[1] = (long) fn; /* function pointer */
regs.regs[2] = (long) arg; /* parameter register */
/* Ok, create the new process.. */
return do_fork(flags | CLONE_VM | CLONE_UNTRACED, 0, &regs,
0, NULL, NULL);
}
EXPORT_SYMBOL(kernel_thread);
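/*
* Hypothetical usage sketch (worker_fn() and ctx are placeholders): the
* new thread runs worker_fn(&ctx) and exits with its return value via
* start_kernel_thread()/do_exit():
*
*	pid = kernel_thread(worker_fn, &ctx, CLONE_FS | CLONE_FILES);
*	if (pid < 0)
*		pr_err("kernel_thread failed: %d\n", pid);
*/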
/* Flush thread state. */
void flush_thread(void)
{
/* Nothing */
}
/*
* Free current thread data structures etc..
*/
void exit_thread(void)
{
/* Nothing */
}
void show_regs(struct pt_regs *regs)
{
struct task_struct *tsk = validate_current();
int i;
pr_err("\n");
pr_err(" Pid: %d, comm: %20s, CPU: %d\n",
tsk->pid, tsk->comm, smp_processor_id());
#ifdef __tilegx__
for (i = 0; i < 51; i += 3)
pr_err(" r%-2d: "REGFMT" r%-2d: "REGFMT" r%-2d: "REGFMT"\n",
i, regs->regs[i], i+1, regs->regs[i+1],
i+2, regs->regs[i+2]);
pr_err(" r51: "REGFMT" r52: "REGFMT" tp : "REGFMT"\n",
regs->regs[51], regs->regs[52], regs->tp);
pr_err(" sp : "REGFMT" lr : "REGFMT"\n", regs->sp, regs->lr);
#else
for (i = 0; i < 52; i += 4)
pr_err(" r%-2d: "REGFMT" r%-2d: "REGFMT
" r%-2d: "REGFMT" r%-2d: "REGFMT"\n",
i, regs->regs[i], i+1, regs->regs[i+1],
i+2, regs->regs[i+2], i+3, regs->regs[i+3]);
pr_err(" r52: "REGFMT" tp : "REGFMT" sp : "REGFMT" lr : "REGFMT"\n",
regs->regs[52], regs->tp, regs->sp, regs->lr);
#endif
pr_err(" pc : "REGFMT" ex1: %ld faultnum: %ld\n",
regs->pc, regs->ex1, regs->faultnum);
dump_stack_regs(regs);
}
| gpl-2.0 |
jfmcbrayer/grouper-kernel | arch/ia64/hp/sim/simserial.c | 2981 | 24227 | /*
* Simulated Serial Driver (fake serial)
*
* This driver is mostly used for bringup purposes and will go away.
* It has a strong dependency on the system console. All outputs
* are rerouted to the same facility as the one used by printk which, in our
* case means sys_sim.c console (goes via the simulator). The code hereafter
* is completely leveraged from the serial.c driver.
*
* Copyright (C) 1999-2000, 2002-2003 Hewlett-Packard Co
* Stephane Eranian <eranian@hpl.hp.com>
* David Mosberger-Tang <davidm@hpl.hp.com>
*
* 02/04/00 D. Mosberger Merged in serial.c bug fixes in rs_close().
* 02/25/00 D. Mosberger Synced up with 2.3.99pre-5 version of serial.c.
* 07/30/02 D. Mosberger Replace sti()/cli() with explicit spinlocks & local irq masking
*/
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/tty.h>
#include <linux/tty_flip.h>
#include <linux/major.h>
#include <linux/fcntl.h>
#include <linux/mm.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/capability.h>
#include <linux/console.h>
#include <linux/module.h>
#include <linux/serial.h>
#include <linux/serialP.h>
#include <linux/sysrq.h>
#include <asm/irq.h>
#include <asm/hw_irq.h>
#include <asm/uaccess.h>
#undef SIMSERIAL_DEBUG /* define this to get some debug information */
#define KEYBOARD_INTR 3 /* must match with simulator! */
#define NR_PORTS 1 /* only one port for now */
#define IRQ_T(info) ((info->flags & ASYNC_SHARE_IRQ) ? IRQF_SHARED : IRQF_DISABLED)
#define SSC_GETCHAR 21
extern long ia64_ssc (long, long, long, long, int);
extern void ia64_ssc_connect_irq (long intr, long irq);
static char *serial_name = "SimSerial driver";
static char *serial_version = "0.6";
/*
* This has been extracted from asm/serial.h. We need one eventually but
* I don't know exactly what we're going to put in it so just fake one
* for now.
*/
#define BASE_BAUD ( 1843200 / 16 )
#define STD_COM_FLAGS (ASYNC_BOOT_AUTOCONF | ASYNC_SKIP_TEST)
/*
* Most of the values here are meaningless to this particular driver.
* However, some values must be preserved for the code (leveraged from
* serial.c) to work correctly.
* port must not be 0
* type must not be UNKNOWN
* So I picked arbitrary (guess from where?) values instead
*/
static struct serial_state rs_table[NR_PORTS]={
/* UART CLK PORT IRQ FLAGS */
{ 0, BASE_BAUD, 0x3F8, 0, STD_COM_FLAGS,0,PORT_16550 } /* ttyS0 */
};
/*
* Just for the fun of it !
*/
static struct serial_uart_config uart_config[] = {
{ "unknown", 1, 0 },
{ "8250", 1, 0 },
{ "16450", 1, 0 },
{ "16550", 1, 0 },
{ "16550A", 16, UART_CLEAR_FIFO | UART_USE_FIFO },
{ "cirrus", 1, 0 },
{ "ST16650", 1, UART_CLEAR_FIFO | UART_STARTECH },
{ "ST16650V2", 32, UART_CLEAR_FIFO | UART_USE_FIFO |
UART_STARTECH },
{ "TI16750", 64, UART_CLEAR_FIFO | UART_USE_FIFO},
{ NULL, 0}
};
struct tty_driver *hp_simserial_driver;
static struct async_struct *IRQ_ports[NR_IRQS];
static struct console *console;
static unsigned char *tmp_buf;
extern struct console *console_drivers; /* from kernel/printk.c */
/*
* ------------------------------------------------------------
* rs_stop() and rs_start()
*
* These routines are called before setting or resetting tty->stopped.
* They enable or disable transmitter interrupts, as necessary.
* ------------------------------------------------------------
*/
static void rs_stop(struct tty_struct *tty)
{
#ifdef SIMSERIAL_DEBUG
printk("rs_stop: tty->stopped=%d tty->hw_stopped=%d tty->flow_stopped=%d\n",
tty->stopped, tty->hw_stopped, tty->flow_stopped);
#endif
}
static void rs_start(struct tty_struct *tty)
{
#ifdef SIMSERIAL_DEBUG
printk("rs_start: tty->stopped=%d tty->hw_stopped=%d tty->flow_stopped=%d\n",
tty->stopped, tty->hw_stopped, tty->flow_stopped);
#endif
}
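/*
* Poll characters from the simulator and decode VT100-style function-key
* escapes: "ESC O P" (F1) dumps task state via show_state(), and with
* CONFIG_MAGIC_SYSRQ "ESC O S" (F4) reads one more character and hands
* it to handle_sysrq(). Anything else goes straight to the flip buffer.
*/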
static void receive_chars(struct tty_struct *tty)
{
unsigned char ch;
static unsigned char seen_esc = 0;
while ( (ch = ia64_ssc(0, 0, 0, 0, SSC_GETCHAR)) ) {
if ( ch == 27 && seen_esc == 0 ) {
seen_esc = 1;
continue;
} else {
if ( seen_esc==1 && ch == 'O' ) {
seen_esc = 2;
continue;
} else if ( seen_esc == 2 ) {
if ( ch == 'P' ) /* F1 */
show_state();
#ifdef CONFIG_MAGIC_SYSRQ
if ( ch == 'S' ) { /* F4 */
do
ch = ia64_ssc(0, 0, 0, 0,
SSC_GETCHAR);
while (!ch);
handle_sysrq(ch);
}
#endif
seen_esc = 0;
continue;
}
}
seen_esc = 0;
if (tty_insert_flip_char(tty, ch, TTY_NORMAL) == 0)
break;
}
tty_flip_buffer_push(tty);
}
/*
* This is the serial driver's interrupt routine for a single port
*/
static irqreturn_t rs_interrupt_single(int irq, void *dev_id)
{
struct async_struct * info;
/*
* I don't know exactly why they don't use the dev_id opaque data
* pointer instead of this extra lookup table
*/
info = IRQ_ports[irq];
if (!info || !info->tty) {
printk(KERN_INFO "simrs_interrupt_single: info|tty=0 info=%p problem\n", info);
return IRQ_NONE;
}
/*
* pretty simple in our case, because we only get interrupts
* on inbound traffic
*/
receive_chars(info->tty);
return IRQ_HANDLED;
}
/*
* -------------------------------------------------------------------
* Here ends the serial interrupt routines.
* -------------------------------------------------------------------
*/
static void do_softint(struct work_struct *private_)
{
printk(KERN_ERR "simserial: do_softint called\n");
}
static int rs_put_char(struct tty_struct *tty, unsigned char ch)
{
struct async_struct *info = (struct async_struct *)tty->driver_data;
unsigned long flags;
if (!tty || !info->xmit.buf)
return 0;
local_irq_save(flags);
if (CIRC_SPACE(info->xmit.head, info->xmit.tail, SERIAL_XMIT_SIZE) == 0) {
local_irq_restore(flags);
return 0;
}
info->xmit.buf[info->xmit.head] = ch;
info->xmit.head = (info->xmit.head + 1) & (SERIAL_XMIT_SIZE-1);
local_irq_restore(flags);
return 1;
}
static void transmit_chars(struct async_struct *info, int *intr_done)
{
int count;
unsigned long flags;
local_irq_save(flags);
if (info->x_char) {
char c = info->x_char;
console->write(console, &c, 1);
info->state->icount.tx++;
info->x_char = 0;
goto out;
}
if (info->xmit.head == info->xmit.tail || info->tty->stopped || info->tty->hw_stopped) {
#ifdef SIMSERIAL_DEBUG
printk("transmit_chars: head=%d, tail=%d, stopped=%d\n",
info->xmit.head, info->xmit.tail, info->tty->stopped);
#endif
goto out;
}
/*
* We removed the loop and do it in two chunks instead; at most two
* operations are needed because it's a ring buffer.
*
* First drain from the tail to the end of the buffer if possible,
* then from the beginning of the buffer as far as necessary.
*/
count = min(CIRC_CNT(info->xmit.head, info->xmit.tail, SERIAL_XMIT_SIZE),
SERIAL_XMIT_SIZE - info->xmit.tail);
console->write(console, info->xmit.buf+info->xmit.tail, count);
info->xmit.tail = (info->xmit.tail+count) & (SERIAL_XMIT_SIZE-1);
/*
* We have more at the beginning of the buffer
*/
count = CIRC_CNT(info->xmit.head, info->xmit.tail, SERIAL_XMIT_SIZE);
if (count) {
console->write(console, info->xmit.buf, count);
info->xmit.tail += count;
}
out:
local_irq_restore(flags);
}
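/*
* Worked example of the two-chunk drain above, with SERIAL_XMIT_SIZE =
* 4096, head = 10 and tail = 4090: the first console->write() sends the
* 6 bytes from the tail to the end of the buffer, tail wraps to 0, and
* the second CIRC_CNT() yields the remaining 10 bytes at the start.
*/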
static void rs_flush_chars(struct tty_struct *tty)
{
struct async_struct *info = (struct async_struct *)tty->driver_data;
if (info->xmit.head == info->xmit.tail || tty->stopped || tty->hw_stopped ||
!info->xmit.buf)
return;
transmit_chars(info, NULL);
}
static int rs_write(struct tty_struct * tty,
const unsigned char *buf, int count)
{
int c, ret = 0;
struct async_struct *info = (struct async_struct *)tty->driver_data;
unsigned long flags;
if (!tty || !info->xmit.buf || !tmp_buf) return 0;
local_irq_save(flags);
while (1) {
c = CIRC_SPACE_TO_END(info->xmit.head, info->xmit.tail, SERIAL_XMIT_SIZE);
if (count < c)
c = count;
if (c <= 0) {
break;
}
memcpy(info->xmit.buf + info->xmit.head, buf, c);
info->xmit.head = ((info->xmit.head + c) &
(SERIAL_XMIT_SIZE-1));
buf += c;
count -= c;
ret += c;
}
local_irq_restore(flags);
/*
* Hey, we transmit directly from here in our case
*/
if (CIRC_CNT(info->xmit.head, info->xmit.tail, SERIAL_XMIT_SIZE)
&& !tty->stopped && !tty->hw_stopped) {
transmit_chars(info, NULL);
}
return ret;
}
static int rs_write_room(struct tty_struct *tty)
{
struct async_struct *info = (struct async_struct *)tty->driver_data;
return CIRC_SPACE(info->xmit.head, info->xmit.tail, SERIAL_XMIT_SIZE);
}
static int rs_chars_in_buffer(struct tty_struct *tty)
{
struct async_struct *info = (struct async_struct *)tty->driver_data;
return CIRC_CNT(info->xmit.head, info->xmit.tail, SERIAL_XMIT_SIZE);
}
static void rs_flush_buffer(struct tty_struct *tty)
{
struct async_struct *info = (struct async_struct *)tty->driver_data;
unsigned long flags;
local_irq_save(flags);
info->xmit.head = info->xmit.tail = 0;
local_irq_restore(flags);
tty_wakeup(tty);
}
/*
* This function is used to send a high-priority XON/XOFF character to
* the device
*/
static void rs_send_xchar(struct tty_struct *tty, char ch)
{
struct async_struct *info = (struct async_struct *)tty->driver_data;
info->x_char = ch;
if (ch) {
/*
 * We could call console->write() directly, but routing through
 * transmit_chars() keeps all output on a single path for now.
 */
transmit_chars(info, NULL);
}
}
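/*
 * The priority comes from transmit_chars() above: info->x_char is
 * checked and sent before anything queued in the ring buffer, so the
 * flow-control byte always jumps the queue.
 */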
/*
* ------------------------------------------------------------
* rs_throttle()
*
* This routine is called by the upper-layer tty layer to signal that
* incoming characters should be throttled.
* ------------------------------------------------------------
*/
static void rs_throttle(struct tty_struct * tty)
{
if (I_IXOFF(tty)) rs_send_xchar(tty, STOP_CHAR(tty));
printk(KERN_INFO "simrs_throttle called\n");
}
static void rs_unthrottle(struct tty_struct * tty)
{
struct async_struct *info = (struct async_struct *)tty->driver_data;
if (I_IXOFF(tty)) {
if (info->x_char)
info->x_char = 0;
else
rs_send_xchar(tty, START_CHAR(tty));
}
printk(KERN_INFO "simrs_unthrottle called\n");
}
static int rs_ioctl(struct tty_struct *tty, unsigned int cmd, unsigned long arg)
{
if ((cmd != TIOCGSERIAL) && (cmd != TIOCSSERIAL) &&
(cmd != TIOCSERCONFIG) && (cmd != TIOCSERGSTRUCT) &&
(cmd != TIOCMIWAIT)) {
if (tty->flags & (1 << TTY_IO_ERROR))
return -EIO;
}
switch (cmd) {
case TIOCGSERIAL:
printk(KERN_INFO "simrs_ioctl TIOCGSERIAL called\n");
return 0;
case TIOCSSERIAL:
printk(KERN_INFO "simrs_ioctl TIOCSSERIAL called\n");
return 0;
case TIOCSERCONFIG:
printk(KERN_INFO "rs_ioctl: TIOCSERCONFIG called\n");
return -EINVAL;
case TIOCSERGETLSR: /* Get line status register */
printk(KERN_INFO "rs_ioctl: TIOCSERGETLSR called\n");
return -EINVAL;
case TIOCSERGSTRUCT:
printk(KERN_INFO "rs_ioctl: TIOCSERGSTRUCT called\n");
#if 0
if (copy_to_user((struct async_struct *) arg,
info, sizeof(struct async_struct)))
return -EFAULT;
#endif
return 0;
/*
* Wait for any of the 4 modem inputs (DCD,RI,DSR,CTS) to change
* - mask passed in arg for lines of interest
* (use |'ed TIOCM_RNG/DSR/CD/CTS for masking)
* Caller should use TIOCGICOUNT to see which one it was
*/
case TIOCMIWAIT:
printk(KERN_INFO "rs_ioctl: TIOCMIWAIT: called\n");
return 0;
case TIOCSERGWILD:
case TIOCSERSWILD:
/* "setserial -W" is called in Debian boot */
printk (KERN_INFO "TIOCSER?WILD ioctl obsolete, ignored.\n");
return 0;
default:
return -ENOIOCTLCMD;
}
return 0;
}
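/*
 * Hypothetical userspace sequence for TIOCMIWAIT (illustration only;
 * this driver merely logs the call and returns immediately):
 *
 *	ioctl(fd, TIOCMIWAIT, TIOCM_CD | TIOCM_RNG);	block on DCD/RI change
 *	ioctl(fd, TIOCGICOUNT, &icount);		then see which counter moved
 */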
#define RELEVANT_IFLAG(iflag) (iflag & (IGNBRK|BRKINT|IGNPAR|PARMRK|INPCK))
static void rs_set_termios(struct tty_struct *tty, struct ktermios *old_termios)
{
/* Handle turning off CRTSCTS */
if ((old_termios->c_cflag & CRTSCTS) &&
!(tty->termios->c_cflag & CRTSCTS)) {
tty->hw_stopped = 0;
rs_start(tty);
}
}
/*
* This routine will shutdown a serial port; interrupts are disabled, and
* DTR is dropped if the hangup on close termio flag is on.
*/
static void shutdown(struct async_struct * info)
{
unsigned long flags;
struct serial_state *state;
int retval;
if (!(info->flags & ASYNC_INITIALIZED)) return;
state = info->state;
#ifdef SIMSERIAL_DEBUG
printk("Shutting down serial port %d (irq %d)....", info->line,
state->irq);
#endif
local_irq_save(flags);
{
/*
* First unlink the serial port from the IRQ chain...
*/
if (info->next_port)
info->next_port->prev_port = info->prev_port;
if (info->prev_port)
info->prev_port->next_port = info->next_port;
else
IRQ_ports[state->irq] = info->next_port;
/*
* Free the IRQ, if necessary
*/
if (state->irq && (!IRQ_ports[state->irq] ||
!IRQ_ports[state->irq]->next_port)) {
if (IRQ_ports[state->irq]) {
free_irq(state->irq, NULL);
retval = request_irq(state->irq, rs_interrupt_single,
IRQ_T(info), "serial", NULL);
if (retval)
printk(KERN_ERR "serial shutdown: request_irq: error %d"
" Couldn't reacquire IRQ.\n", retval);
} else
free_irq(state->irq, NULL);
}
if (info->xmit.buf) {
free_page((unsigned long) info->xmit.buf);
info->xmit.buf = NULL;
}
if (info->tty) set_bit(TTY_IO_ERROR, &info->tty->flags);
info->flags &= ~ASYNC_INITIALIZED;
}
local_irq_restore(flags);
}
/*
* ------------------------------------------------------------
* rs_close()
*
* This routine is called when the serial port gets closed. First, we
* wait for the last remaining data to be sent. Then, we unlink its
* async structure from the interrupt chain if necessary, and we free
* that IRQ if nothing is left in the chain.
* ------------------------------------------------------------
*/
static void rs_close(struct tty_struct *tty, struct file * filp)
{
struct async_struct * info = (struct async_struct *)tty->driver_data;
struct serial_state *state;
unsigned long flags;
if (!info) return;
state = info->state;
local_irq_save(flags);
if (tty_hung_up_p(filp)) {
#ifdef SIMSERIAL_DEBUG
printk("rs_close: hung_up\n");
#endif
local_irq_restore(flags);
return;
}
#ifdef SIMSERIAL_DEBUG
printk("rs_close ttys%d, count = %d\n", info->line, state->count);
#endif
if ((tty->count == 1) && (state->count != 1)) {
/*
* Uh, oh. tty->count is 1, which means that the tty
* structure will be freed. state->count should always
* be one in these conditions. If it's greater than
* one, we've got real problems, since it means the
* serial port won't be shutdown.
*/
printk(KERN_ERR "rs_close: bad serial port count; tty->count is 1, "
"state->count is %d\n", state->count);
state->count = 1;
}
if (--state->count < 0) {
printk(KERN_ERR "rs_close: bad serial port count for ttys%d: %d\n",
info->line, state->count);
state->count = 0;
}
if (state->count) {
local_irq_restore(flags);
return;
}
info->flags |= ASYNC_CLOSING;
local_irq_restore(flags);
/*
* Now we wait for the transmit buffer to clear; and we notify
* the line discipline to only process XON/XOFF characters.
*/
shutdown(info);
rs_flush_buffer(tty);
tty_ldisc_flush(tty);
info->event = 0;
info->tty = NULL;
if (info->blocked_open) {
if (info->close_delay)
schedule_timeout_interruptible(info->close_delay);
wake_up_interruptible(&info->open_wait);
}
info->flags &= ~(ASYNC_NORMAL_ACTIVE|ASYNC_CLOSING);
wake_up_interruptible(&info->close_wait);
}
/*
* rs_wait_until_sent() --- wait until the transmitter is empty
*/
static void rs_wait_until_sent(struct tty_struct *tty, int timeout)
{
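/* nothing to wait for here: console->write() pushes output out synchronously */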
}
/*
* rs_hangup() --- called by tty_hangup() when a hangup is signaled.
*/
static void rs_hangup(struct tty_struct *tty)
{
struct async_struct * info = (struct async_struct *)tty->driver_data;
struct serial_state *state = info->state;
#ifdef SIMSERIAL_DEBUG
printk("rs_hangup: called\n");
#endif
rs_flush_buffer(tty);
if (info->flags & ASYNC_CLOSING)
return;
shutdown(info);
info->event = 0;
state->count = 0;
info->flags &= ~ASYNC_NORMAL_ACTIVE;
info->tty = NULL;
wake_up_interruptible(&info->open_wait);
}
static int get_async_struct(int line, struct async_struct **ret_info)
{
struct async_struct *info;
struct serial_state *sstate;
sstate = rs_table + line;
sstate->count++;
if (sstate->info) {
*ret_info = sstate->info;
return 0;
}
info = kzalloc(sizeof(struct async_struct), GFP_KERNEL);
if (!info) {
sstate->count--;
return -ENOMEM;
}
init_waitqueue_head(&info->open_wait);
init_waitqueue_head(&info->close_wait);
init_waitqueue_head(&info->delta_msr_wait);
info->magic = SERIAL_MAGIC;
info->port = sstate->port;
info->flags = sstate->flags;
info->xmit_fifo_size = sstate->xmit_fifo_size;
info->line = line;
INIT_WORK(&info->work, do_softint);
info->state = sstate;
if (sstate->info) {
kfree(info);
*ret_info = sstate->info;
return 0;
}
*ret_info = sstate->info = info;
return 0;
}
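/*
 * Note the second sstate->info test above: if another opener raced in
 * and installed its own async_struct while we were in kzalloc(), ours
 * is thrown away and theirs reused, so all opens of a line end up
 * sharing one structure.
 */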
static int
startup(struct async_struct *info)
{
unsigned long flags;
int retval=0;
irq_handler_t handler;
struct serial_state *state= info->state;
unsigned long page;
page = get_zeroed_page(GFP_KERNEL);
if (!page)
return -ENOMEM;
local_irq_save(flags);
if (info->flags & ASYNC_INITIALIZED) {
free_page(page);
goto errout;
}
if (!state->port || !state->type) {
if (info->tty) set_bit(TTY_IO_ERROR, &info->tty->flags);
free_page(page);
goto errout;
}
if (info->xmit.buf)
free_page(page);
else
info->xmit.buf = (unsigned char *) page;
#ifdef SIMSERIAL_DEBUG
printk("startup: ttys%d (irq %d)...", info->line, state->irq);
#endif
/*
* Allocate the IRQ if necessary
*/
if (state->irq && (!IRQ_ports[state->irq] ||
!IRQ_ports[state->irq]->next_port)) {
if (IRQ_ports[state->irq]) {
retval = -EBUSY;
goto errout;
} else
handler = rs_interrupt_single;
retval = request_irq(state->irq, handler, IRQ_T(info), "simserial", NULL);
if (retval) {
if (capable(CAP_SYS_ADMIN)) {
if (info->tty)
set_bit(TTY_IO_ERROR,
&info->tty->flags);
retval = 0;
}
goto errout;
}
}
/*
* Insert serial port into IRQ chain.
*/
info->prev_port = NULL;
info->next_port = IRQ_ports[state->irq];
if (info->next_port)
info->next_port->prev_port = info;
IRQ_ports[state->irq] = info;
if (info->tty) clear_bit(TTY_IO_ERROR, &info->tty->flags);
info->xmit.head = info->xmit.tail = 0;
#if 0
/*
* Set up serial timers...
*/
timer_table[RS_TIMER].expires = jiffies + 2*HZ/100;
timer_active |= 1 << RS_TIMER;
#endif
/*
* Set up the tty->alt_speed kludge
*/
if (info->tty) {
if ((info->flags & ASYNC_SPD_MASK) == ASYNC_SPD_HI)
info->tty->alt_speed = 57600;
if ((info->flags & ASYNC_SPD_MASK) == ASYNC_SPD_VHI)
info->tty->alt_speed = 115200;
if ((info->flags & ASYNC_SPD_MASK) == ASYNC_SPD_SHI)
info->tty->alt_speed = 230400;
if ((info->flags & ASYNC_SPD_MASK) == ASYNC_SPD_WARP)
info->tty->alt_speed = 460800;
}
info->flags |= ASYNC_INITIALIZED;
local_irq_restore(flags);
return 0;
errout:
local_irq_restore(flags);
return retval;
}
/*
* This routine is called whenever a serial port is opened. It
* enables interrupts for a serial port, linking in its async structure into
* the IRQ chain. It also performs the serial-specific
* initialization for the tty structure.
*/
static int rs_open(struct tty_struct *tty, struct file * filp)
{
struct async_struct *info;
int retval, line;
unsigned long page;
line = tty->index;
if ((line < 0) || (line >= NR_PORTS))
return -ENODEV;
retval = get_async_struct(line, &info);
if (retval)
return retval;
tty->driver_data = info;
info->tty = tty;
#ifdef SIMSERIAL_DEBUG
printk("rs_open %s, count = %d\n", tty->name, info->state->count);
#endif
info->tty->low_latency = (info->flags & ASYNC_LOW_LATENCY) ? 1 : 0;
if (!tmp_buf) {
page = get_zeroed_page(GFP_KERNEL);
if (!page)
return -ENOMEM;
if (tmp_buf)
free_page(page);
else
tmp_buf = (unsigned char *) page;
}
/*
* If the port is in the middle of closing, bail out now
*/
if (tty_hung_up_p(filp) ||
(info->flags & ASYNC_CLOSING)) {
if (info->flags & ASYNC_CLOSING)
interruptible_sleep_on(&info->close_wait);
#ifdef SERIAL_DO_RESTART
return ((info->flags & ASYNC_HUP_NOTIFY) ?
-EAGAIN : -ERESTARTSYS);
#else
return -EAGAIN;
#endif
}
/*
* Start up serial port
*/
retval = startup(info);
if (retval) {
return retval;
}
/*
* figure out which console to use (should be one already)
*/
console = console_drivers;
while (console) {
if ((console->flags & CON_ENABLED) && console->write) break;
console = console->next;
}
#ifdef SIMSERIAL_DEBUG
printk("rs_open ttys%d successful\n", info->line);
#endif
return 0;
}
/*
* /proc fs routines....
*/
static inline void line_info(struct seq_file *m, struct serial_state *state)
{
seq_printf(m, "%d: uart:%s port:%lX irq:%d\n",
state->line, uart_config[state->type].name,
state->port, state->irq);
}
static int rs_proc_show(struct seq_file *m, void *v)
{
int i;
seq_printf(m, "simserinfo:1.0 driver:%s\n", serial_version);
for (i = 0; i < NR_PORTS; i++)
line_info(m, &rs_table[i]);
return 0;
}
static int rs_proc_open(struct inode *inode, struct file *file)
{
return single_open(file, rs_proc_show, NULL);
}
static const struct file_operations rs_proc_fops = {
.owner = THIS_MODULE,
.open = rs_proc_open,
.read = seq_read,
.llseek = seq_lseek,
.release = single_release,
};
/*
* ---------------------------------------------------------------------
* rs_init() and friends
*
* rs_init() is called at boot-time to initialize the serial driver.
* ---------------------------------------------------------------------
*/
/*
* This routine prints out the appropriate serial driver version
* number, and identifies which options were configured into this
* driver.
*/
static inline void show_serial_version(void)
{
printk(KERN_INFO "%s version %s with", serial_name, serial_version);
printk(KERN_INFO " no serial options enabled\n");
}
static const struct tty_operations hp_ops = {
.open = rs_open,
.close = rs_close,
.write = rs_write,
.put_char = rs_put_char,
.flush_chars = rs_flush_chars,
.write_room = rs_write_room,
.chars_in_buffer = rs_chars_in_buffer,
.flush_buffer = rs_flush_buffer,
.ioctl = rs_ioctl,
.throttle = rs_throttle,
.unthrottle = rs_unthrottle,
.send_xchar = rs_send_xchar,
.set_termios = rs_set_termios,
.stop = rs_stop,
.start = rs_start,
.hangup = rs_hangup,
.wait_until_sent = rs_wait_until_sent,
.proc_fops = &rs_proc_fops,
};
/*
* The serial driver boot-time initialization code!
*/
static int __init
simrs_init (void)
{
int i, rc;
struct serial_state *state;
if (!ia64_platform_is("hpsim"))
return -ENODEV;
hp_simserial_driver = alloc_tty_driver(1);
if (!hp_simserial_driver)
return -ENOMEM;
show_serial_version();
/* Initialize the tty_driver structure */
hp_simserial_driver->owner = THIS_MODULE;
hp_simserial_driver->driver_name = "simserial";
hp_simserial_driver->name = "ttyS";
hp_simserial_driver->major = TTY_MAJOR;
hp_simserial_driver->minor_start = 64;
hp_simserial_driver->type = TTY_DRIVER_TYPE_SERIAL;
hp_simserial_driver->subtype = SERIAL_TYPE_NORMAL;
hp_simserial_driver->init_termios = tty_std_termios;
hp_simserial_driver->init_termios.c_cflag =
B9600 | CS8 | CREAD | HUPCL | CLOCAL;
hp_simserial_driver->flags = TTY_DRIVER_REAL_RAW;
tty_set_operations(hp_simserial_driver, &hp_ops);
/*
* Let's have a little bit of fun!
*/
for (i = 0, state = rs_table; i < NR_PORTS; i++,state++) {
if (state->type == PORT_UNKNOWN) continue;
if (!state->irq) {
if ((rc = assign_irq_vector(AUTO_ASSIGN)) < 0)
panic("%s: out of interrupt vectors!\n",
__func__);
state->irq = rc;
ia64_ssc_connect_irq(KEYBOARD_INTR, state->irq);
}
printk(KERN_INFO "ttyS%d at 0x%04lx (irq = %d) is a %s\n",
state->line,
state->port, state->irq,
uart_config[state->type].name);
}
if (tty_register_driver(hp_simserial_driver))
panic("Couldn't register simserial driver\n");
return 0;
}
#ifndef MODULE
__initcall(simrs_init);
#endif
| gpl-2.0 |
kozmikkick/tripndroid-endeavoru-3.0 | arch/arm/mach-ks8695/irq.c | 2981 | 4284 | /*
* arch/arm/mach-ks8695/irq.c
*
* Copyright (C) 2006 Ben Dooks <ben@simtec.co.uk>
* Copyright (C) 2006 Simtec Electronics
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/init.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/sysdev.h>
#include <linux/io.h>
#include <mach/hardware.h>
#include <asm/irq.h>
#include <asm/mach/irq.h>
#include <mach/regs-irq.h>
#include <mach/regs-gpio.h>
static void ks8695_irq_mask(struct irq_data *d)
{
unsigned long inten;
inten = __raw_readl(KS8695_IRQ_VA + KS8695_INTEN);
inten &= ~(1 << d->irq);
__raw_writel(inten, KS8695_IRQ_VA + KS8695_INTEN);
}
static void ks8695_irq_unmask(struct irq_data *d)
{
unsigned long inten;
inten = __raw_readl(KS8695_IRQ_VA + KS8695_INTEN);
inten |= (1 << d->irq);
__raw_writel(inten, KS8695_IRQ_VA + KS8695_INTEN);
}
static void ks8695_irq_ack(struct irq_data *d)
{
__raw_writel((1 << d->irq), KS8695_IRQ_VA + KS8695_INTST);
}
static struct irq_chip ks8695_irq_level_chip;
static struct irq_chip ks8695_irq_edge_chip;
static int ks8695_irq_set_type(struct irq_data *d, unsigned int type)
{
unsigned long ctrl, mode;
unsigned short level_triggered = 0;
ctrl = __raw_readl(KS8695_GPIO_VA + KS8695_IOPC);
switch (type) {
case IRQ_TYPE_LEVEL_HIGH:
mode = IOPC_TM_HIGH;
level_triggered = 1;
break;
case IRQ_TYPE_LEVEL_LOW:
mode = IOPC_TM_LOW;
level_triggered = 1;
break;
case IRQ_TYPE_EDGE_RISING:
mode = IOPC_TM_RISING;
break;
case IRQ_TYPE_EDGE_FALLING:
mode = IOPC_TM_FALLING;
break;
case IRQ_TYPE_EDGE_BOTH:
mode = IOPC_TM_EDGE;
break;
default:
return -EINVAL;
}
switch (d->irq) {
case KS8695_IRQ_EXTERN0:
ctrl &= ~IOPC_IOEINT0TM;
ctrl |= IOPC_IOEINT0_MODE(mode);
break;
case KS8695_IRQ_EXTERN1:
ctrl &= ~IOPC_IOEINT1TM;
ctrl |= IOPC_IOEINT1_MODE(mode);
break;
case KS8695_IRQ_EXTERN2:
ctrl &= ~IOPC_IOEINT2TM;
ctrl |= IOPC_IOEINT2_MODE(mode);
break;
case KS8695_IRQ_EXTERN3:
ctrl &= ~IOPC_IOEINT3TM;
ctrl |= IOPC_IOEINT3_MODE(mode);
break;
default:
return -EINVAL;
}
if (level_triggered) {
irq_set_chip_and_handler(d->irq, &ks8695_irq_level_chip,
handle_level_irq);
} else {
irq_set_chip_and_handler(d->irq, &ks8695_irq_edge_chip,
handle_edge_irq);
}
__raw_writel(ctrl, KS8695_GPIO_VA + KS8695_IOPC);
return 0;
}
static struct irq_chip ks8695_irq_level_chip = {
.irq_ack = ks8695_irq_mask,
.irq_mask = ks8695_irq_mask,
.irq_unmask = ks8695_irq_unmask,
.irq_set_type = ks8695_irq_set_type,
};
static struct irq_chip ks8695_irq_edge_chip = {
.irq_ack = ks8695_irq_ack,
.irq_mask = ks8695_irq_mask,
.irq_unmask = ks8695_irq_unmask,
.irq_set_type = ks8695_irq_set_type,
};
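/*
 * The two chips differ only in .irq_ack: a level-triggered line stays
 * asserted until the device is serviced, so the level chip "acks" by
 * masking the source, while the edge chip can simply clear the latched
 * bit in INTST via ks8695_irq_ack().
 */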
void __init ks8695_init_irq(void)
{
unsigned int irq;
/* Disable all interrupts initially */
__raw_writel(0, KS8695_IRQ_VA + KS8695_INTMC);
__raw_writel(0, KS8695_IRQ_VA + KS8695_INTEN);
for (irq = 0; irq < NR_IRQS; irq++) {
switch (irq) {
/* Level-triggered interrupts */
case KS8695_IRQ_BUS_ERROR:
case KS8695_IRQ_UART_MODEM_STATUS:
case KS8695_IRQ_UART_LINE_STATUS:
case KS8695_IRQ_UART_RX:
case KS8695_IRQ_COMM_TX:
case KS8695_IRQ_COMM_RX:
irq_set_chip_and_handler(irq,
&ks8695_irq_level_chip,
handle_level_irq);
break;
/* Edge-triggered interrupts */
default:
/* clear pending bit */
ks8695_irq_ack(irq_get_irq_data(irq));
irq_set_chip_and_handler(irq,
&ks8695_irq_edge_chip,
handle_edge_irq);
}
set_irq_flags(irq, IRQF_VALID);
}
}
| gpl-2.0 |
rutvik95/speedx_kernel_i9082 | arch/ia64/hp/sim/simserial.c | 2981 | 24227 | /*
* Simulated Serial Driver (fake serial)
*
* This driver is mostly used for bringup purposes and will go away.
* It has a strong dependency on the system console. All outputs
* are rerouted to the same facility as the one used by printk, which in our
* case means the sys_sim.c console (it goes via the simulator). The code hereafter
* is completely leveraged from the serial.c driver.
*
* Copyright (C) 1999-2000, 2002-2003 Hewlett-Packard Co
* Stephane Eranian <eranian@hpl.hp.com>
* David Mosberger-Tang <davidm@hpl.hp.com>
*
* 02/04/00 D. Mosberger Merged in serial.c bug fixes in rs_close().
* 02/25/00 D. Mosberger Synced up with 2.3.99pre-5 version of serial.c.
* 07/30/02 D. Mosberger Replace sti()/cli() with explicit spinlocks & local irq masking
*/
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/tty.h>
#include <linux/tty_flip.h>
#include <linux/major.h>
#include <linux/fcntl.h>
#include <linux/mm.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/capability.h>
#include <linux/console.h>
#include <linux/module.h>
#include <linux/serial.h>
#include <linux/serialP.h>
#include <linux/sysrq.h>
#include <asm/irq.h>
#include <asm/hw_irq.h>
#include <asm/uaccess.h>
#undef SIMSERIAL_DEBUG /* define this to get some debug information */
#define KEYBOARD_INTR 3 /* must match with simulator! */
#define NR_PORTS 1 /* only one port for now */
#define IRQ_T(info) ((info->flags & ASYNC_SHARE_IRQ) ? IRQF_SHARED : IRQF_DISABLED)
#define SSC_GETCHAR 21
extern long ia64_ssc (long, long, long, long, int);
extern void ia64_ssc_connect_irq (long intr, long irq);
static char *serial_name = "SimSerial driver";
static char *serial_version = "0.6";
/*
* This has been extracted from asm/serial.h. We will need a real one
* eventually, but it isn't yet clear what should go in it, so just fake
* one for now.
*/
#define BASE_BAUD ( 1843200 / 16 )
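/* 1843200 / 16 above: 1.8432 MHz is the classic PC UART crystal, and
 * with the 16x oversampling divider it yields the familiar 115200
 * baud maximum. */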
#define STD_COM_FLAGS (ASYNC_BOOT_AUTOCONF | ASYNC_SKIP_TEST)
/*
* Most of the values here are meaningless to this particular driver.
* However, some values must be preserved for the code (leveraged from
* serial.c) to work correctly:
* port must not be 0
* type must not be UNKNOWN
* So I picked arbitrary (guess from where?) values instead
*/
static struct serial_state rs_table[NR_PORTS]={
/* UART CLK PORT IRQ FLAGS */
{ 0, BASE_BAUD, 0x3F8, 0, STD_COM_FLAGS,0,PORT_16550 } /* ttyS0 */
};
/*
* Just for the fun of it!
*/
static struct serial_uart_config uart_config[] = {
{ "unknown", 1, 0 },
{ "8250", 1, 0 },
{ "16450", 1, 0 },
{ "16550", 1, 0 },
{ "16550A", 16, UART_CLEAR_FIFO | UART_USE_FIFO },
{ "cirrus", 1, 0 },
{ "ST16650", 1, UART_CLEAR_FIFO | UART_STARTECH },
{ "ST16650V2", 32, UART_CLEAR_FIFO | UART_USE_FIFO |
UART_STARTECH },
{ "TI16750", 64, UART_CLEAR_FIFO | UART_USE_FIFO},
{ NULL, 0}
};
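/*
 * Only the .name field of this table is actually consumed here (by the
 * boot banner and the /proc output); it is indexed by state->type,
 * which rs_table above pins to PORT_16550.
 */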
struct tty_driver *hp_simserial_driver;
static struct async_struct *IRQ_ports[NR_IRQS];
static struct console *console;
static unsigned char *tmp_buf;
extern struct console *console_drivers; /* from kernel/printk.c */
/*
* ------------------------------------------------------------
* rs_stop() and rs_start()
*
* These routines are called before setting or resetting tty->stopped.
* They enable or disable transmitter interrupts, as necessary.
* ------------------------------------------------------------
*/
static void rs_stop(struct tty_struct *tty)
{
#ifdef SIMSERIAL_DEBUG
printk("rs_stop: tty->stopped=%d tty->hw_stopped=%d tty->flow_stopped=%d\n",
tty->stopped, tty->hw_stopped, tty->flow_stopped);
#endif
}
static void rs_start(struct tty_struct *tty)
{
#ifdef SIMSERIAL_DEBUG
printk("rs_start: tty->stopped=%d tty->hw_stopped=%d tty->flow_stopped=%d\n",
tty->stopped, tty->hw_stopped, tty->flow_stopped);
#endif
}
static void receive_chars(struct tty_struct *tty)
{
unsigned char ch;
static unsigned char seen_esc = 0;
while ( (ch = ia64_ssc(0, 0, 0, 0, SSC_GETCHAR)) ) {
if ( ch == 27 && seen_esc == 0 ) {
seen_esc = 1;
continue;
} else {
if ( seen_esc==1 && ch == 'O' ) {
seen_esc = 2;
continue;
} else if ( seen_esc == 2 ) {
if ( ch == 'P' ) /* F1 */
show_state();
#ifdef CONFIG_MAGIC_SYSRQ
if ( ch == 'S' ) { /* F4 */
do
ch = ia64_ssc(0, 0, 0, 0,
SSC_GETCHAR);
while (!ch);
handle_sysrq(ch);
}
#endif
seen_esc = 0;
continue;
}
}
seen_esc = 0;
if (tty_insert_flip_char(tty, ch, TTY_NORMAL) == 0)
break;
}
tty_flip_buffer_push(tty);
}
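/*
 * The seen_esc variable above is a small state machine decoding the
 * simulator's function keys: ESC (27) moves to state 1 and a following
 * 'O' to state 2; the third byte is then consumed and picks the action
 * -- 'P' (F1) dumps task state, 'S' (F4) reads one more character and
 * feeds it to the sysrq handler. A non-'O' byte after ESC resets the
 * state and is pushed to the tty as ordinary input.
 */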
/*
* This is the serial driver's interrupt routine for a single port
*/
static irqreturn_t rs_interrupt_single(int irq, void *dev_id)
{
struct async_struct * info;
/*
* I don't know exactly why they don't use the dev_id opaque data
* pointer instead of this extra lookup table
*/
info = IRQ_ports[irq];
if (!info || !info->tty) {
printk(KERN_INFO "simrs_interrupt_single: info|tty=0 info=%p problem\n", info);
return IRQ_NONE;
}
/*
* pretty simple in our case, because we only get interrupts
* on inbound traffic
*/
receive_chars(info->tty);
return IRQ_HANDLED;
}
/*
* -------------------------------------------------------------------
* Here ends the serial interrupt routines.
* -------------------------------------------------------------------
*/
static void do_softint(struct work_struct *private_)
{
printk(KERN_ERR "simserial: do_softint called\n");
}
static int rs_put_char(struct tty_struct *tty, unsigned char ch)
{
struct async_struct *info;
unsigned long flags;
if (!tty)
return 0;
info = (struct async_struct *)tty->driver_data;
if (!info || !info->xmit.buf)
return 0;
local_irq_save(flags);
if (CIRC_SPACE(info->xmit.head, info->xmit.tail, SERIAL_XMIT_SIZE) == 0) {
local_irq_restore(flags);
return 0;
}
info->xmit.buf[info->xmit.head] = ch;
info->xmit.head = (info->xmit.head + 1) & (SERIAL_XMIT_SIZE-1);
local_irq_restore(flags);
return 1;
}
static void transmit_chars(struct async_struct *info, int *intr_done)
{
int count;
unsigned long flags;
local_irq_save(flags);
if (info->x_char) {
char c = info->x_char;
console->write(console, &c, 1);
info->state->icount.tx++;
info->x_char = 0;
goto out;
}
if (info->xmit.head == info->xmit.tail || info->tty->stopped || info->tty->hw_stopped) {
#ifdef SIMSERIAL_DEBUG
printk("transmit_chars: head=%d, tail=%d, stopped=%d\n",
info->xmit.head, info->xmit.tail, info->tty->stopped);
#endif
goto out;
}
/*
 * We removed the loop and do it in at most two chunks, which is all a
 * ring buffer ever needs:
 *
 * first from the tail to the end of the buffer if possible,
 * then from the beginning of the buffer for whatever wrapped around.
 */
count = min(CIRC_CNT(info->xmit.head, info->xmit.tail, SERIAL_XMIT_SIZE),
SERIAL_XMIT_SIZE - info->xmit.tail);
console->write(console, info->xmit.buf+info->xmit.tail, count);
info->xmit.tail = (info->xmit.tail+count) & (SERIAL_XMIT_SIZE-1);
/*
* We have more at the beginning of the buffer
*/
count = CIRC_CNT(info->xmit.head, info->xmit.tail, SERIAL_XMIT_SIZE);
if (count) {
console->write(console, info->xmit.buf, count);
info->xmit.tail += count;
}
out:
local_irq_restore(flags);
}
static void rs_flush_chars(struct tty_struct *tty)
{
struct async_struct *info = (struct async_struct *)tty->driver_data;
if (info->xmit.head == info->xmit.tail || tty->stopped || tty->hw_stopped ||
!info->xmit.buf)
return;
transmit_chars(info, NULL);
}
static int rs_write(struct tty_struct * tty,
const unsigned char *buf, int count)
{
int c, ret = 0;
struct async_struct *info;
unsigned long flags;
if (!tty)
return 0;
info = (struct async_struct *)tty->driver_data;
if (!info || !info->xmit.buf || !tmp_buf)
return 0;
local_irq_save(flags);
while (1) {
c = CIRC_SPACE_TO_END(info->xmit.head, info->xmit.tail, SERIAL_XMIT_SIZE);
if (count < c)
c = count;
if (c <= 0) {
break;
}
memcpy(info->xmit.buf + info->xmit.head, buf, c);
info->xmit.head = ((info->xmit.head + c) &
(SERIAL_XMIT_SIZE-1));
buf += c;
count -= c;
ret += c;
}
local_irq_restore(flags);
/*
* Hey, we transmit directly from here in our case
*/
if (CIRC_CNT(info->xmit.head, info->xmit.tail, SERIAL_XMIT_SIZE)
&& !tty->stopped && !tty->hw_stopped) {
transmit_chars(info, NULL);
}
return ret;
}
static int rs_write_room(struct tty_struct *tty)
{
struct async_struct *info = (struct async_struct *)tty->driver_data;
return CIRC_SPACE(info->xmit.head, info->xmit.tail, SERIAL_XMIT_SIZE);
}
static int rs_chars_in_buffer(struct tty_struct *tty)
{
struct async_struct *info = (struct async_struct *)tty->driver_data;
return CIRC_CNT(info->xmit.head, info->xmit.tail, SERIAL_XMIT_SIZE);
}
static void rs_flush_buffer(struct tty_struct *tty)
{
struct async_struct *info = (struct async_struct *)tty->driver_data;
unsigned long flags;
local_irq_save(flags);
info->xmit.head = info->xmit.tail = 0;
local_irq_restore(flags);
tty_wakeup(tty);
}
/*
* This function is used to send a high-priority XON/XOFF character to
* the device
*/
static void rs_send_xchar(struct tty_struct *tty, char ch)
{
struct async_struct *info = (struct async_struct *)tty->driver_data;
info->x_char = ch;
if (ch) {
/*
 * We could call console->write() directly, but routing through
 * transmit_chars() keeps all output on a single path for now.
 */
transmit_chars(info, NULL);
}
}
/*
* ------------------------------------------------------------
* rs_throttle()
*
* This routine is called by the upper-layer tty layer to signal that
* incoming characters should be throttled.
* ------------------------------------------------------------
*/
static void rs_throttle(struct tty_struct * tty)
{
if (I_IXOFF(tty)) rs_send_xchar(tty, STOP_CHAR(tty));
printk(KERN_INFO "simrs_throttle called\n");
}
static void rs_unthrottle(struct tty_struct * tty)
{
struct async_struct *info = (struct async_struct *)tty->driver_data;
if (I_IXOFF(tty)) {
if (info->x_char)
info->x_char = 0;
else
rs_send_xchar(tty, START_CHAR(tty));
}
printk(KERN_INFO "simrs_unthrottle called\n");
}
static int rs_ioctl(struct tty_struct *tty, unsigned int cmd, unsigned long arg)
{
if ((cmd != TIOCGSERIAL) && (cmd != TIOCSSERIAL) &&
(cmd != TIOCSERCONFIG) && (cmd != TIOCSERGSTRUCT) &&
(cmd != TIOCMIWAIT)) {
if (tty->flags & (1 << TTY_IO_ERROR))
return -EIO;
}
switch (cmd) {
case TIOCGSERIAL:
printk(KERN_INFO "simrs_ioctl TIOCGSERIAL called\n");
return 0;
case TIOCSSERIAL:
printk(KERN_INFO "simrs_ioctl TIOCSSERIAL called\n");
return 0;
case TIOCSERCONFIG:
printk(KERN_INFO "rs_ioctl: TIOCSERCONFIG called\n");
return -EINVAL;
case TIOCSERGETLSR: /* Get line status register */
printk(KERN_INFO "rs_ioctl: TIOCSERGETLSR called\n");
return -EINVAL;
case TIOCSERGSTRUCT:
printk(KERN_INFO "rs_ioctl: TIOCSERGSTRUCT called\n");
#if 0
if (copy_to_user((struct async_struct *) arg,
info, sizeof(struct async_struct)))
return -EFAULT;
#endif
return 0;
/*
* Wait for any of the 4 modem inputs (DCD,RI,DSR,CTS) to change
* - mask passed in arg for lines of interest
* (use |'ed TIOCM_RNG/DSR/CD/CTS for masking)
* Caller should use TIOCGICOUNT to see which one it was
*/
case TIOCMIWAIT:
printk(KERN_INFO "rs_ioctl: TIOCMIWAIT: called\n");
return 0;
case TIOCSERGWILD:
case TIOCSERSWILD:
/* "setserial -W" is called in Debian boot */
printk (KERN_INFO "TIOCSER?WILD ioctl obsolete, ignored.\n");
return 0;
default:
return -ENOIOCTLCMD;
}
return 0;
}
#define RELEVANT_IFLAG(iflag) (iflag & (IGNBRK|BRKINT|IGNPAR|PARMRK|INPCK))
static void rs_set_termios(struct tty_struct *tty, struct ktermios *old_termios)
{
/* Handle turning off CRTSCTS */
if ((old_termios->c_cflag & CRTSCTS) &&
!(tty->termios->c_cflag & CRTSCTS)) {
tty->hw_stopped = 0;
rs_start(tty);
}
}
/*
* This routine will shutdown a serial port; interrupts are disabled, and
* DTR is dropped if the hangup on close termio flag is on.
*/
static void shutdown(struct async_struct * info)
{
unsigned long flags;
struct serial_state *state;
int retval;
if (!(info->flags & ASYNC_INITIALIZED)) return;
state = info->state;
#ifdef SIMSERIAL_DEBUG
printk("Shutting down serial port %d (irq %d)....", info->line,
state->irq);
#endif
local_irq_save(flags);
{
/*
* First unlink the serial port from the IRQ chain...
*/
if (info->next_port)
info->next_port->prev_port = info->prev_port;
if (info->prev_port)
info->prev_port->next_port = info->next_port;
else
IRQ_ports[state->irq] = info->next_port;
/*
* Free the IRQ, if necessary
*/
if (state->irq && (!IRQ_ports[state->irq] ||
!IRQ_ports[state->irq]->next_port)) {
if (IRQ_ports[state->irq]) {
free_irq(state->irq, NULL);
retval = request_irq(state->irq, rs_interrupt_single,
IRQ_T(info), "serial", NULL);
if (retval)
printk(KERN_ERR "serial shutdown: request_irq: error %d"
" Couldn't reacquire IRQ.\n", retval);
} else
free_irq(state->irq, NULL);
}
if (info->xmit.buf) {
free_page((unsigned long) info->xmit.buf);
info->xmit.buf = NULL;
}
if (info->tty) set_bit(TTY_IO_ERROR, &info->tty->flags);
info->flags &= ~ASYNC_INITIALIZED;
}
local_irq_restore(flags);
}
/*
* ------------------------------------------------------------
* rs_close()
*
* This routine is called when the serial port gets closed. First, we
* wait for the last remaining data to be sent. Then, we unlink its
* async structure from the interrupt chain if necessary, and we free
* that IRQ if nothing is left in the chain.
* ------------------------------------------------------------
*/
static void rs_close(struct tty_struct *tty, struct file * filp)
{
struct async_struct * info = (struct async_struct *)tty->driver_data;
struct serial_state *state;
unsigned long flags;
if (!info) return;
state = info->state;
local_irq_save(flags);
if (tty_hung_up_p(filp)) {
#ifdef SIMSERIAL_DEBUG
printk("rs_close: hung_up\n");
#endif
local_irq_restore(flags);
return;
}
#ifdef SIMSERIAL_DEBUG
printk("rs_close ttys%d, count = %d\n", info->line, state->count);
#endif
if ((tty->count == 1) && (state->count != 1)) {
/*
* Uh, oh. tty->count is 1, which means that the tty
* structure will be freed. state->count should always
* be one in these conditions. If it's greater than
* one, we've got real problems, since it means the
* serial port won't be shutdown.
*/
printk(KERN_ERR "rs_close: bad serial port count; tty->count is 1, "
"state->count is %d\n", state->count);
state->count = 1;
}
if (--state->count < 0) {
printk(KERN_ERR "rs_close: bad serial port count for ttys%d: %d\n",
info->line, state->count);
state->count = 0;
}
if (state->count) {
local_irq_restore(flags);
return;
}
info->flags |= ASYNC_CLOSING;
local_irq_restore(flags);
/*
* Now we wait for the transmit buffer to clear; and we notify
* the line discipline to only process XON/XOFF characters.
*/
shutdown(info);
rs_flush_buffer(tty);
tty_ldisc_flush(tty);
info->event = 0;
info->tty = NULL;
if (info->blocked_open) {
if (info->close_delay)
schedule_timeout_interruptible(info->close_delay);
wake_up_interruptible(&info->open_wait);
}
info->flags &= ~(ASYNC_NORMAL_ACTIVE|ASYNC_CLOSING);
wake_up_interruptible(&info->close_wait);
}
/*
* rs_wait_until_sent() --- wait until the transmitter is empty
*/
static void rs_wait_until_sent(struct tty_struct *tty, int timeout)
{
}
/*
* rs_hangup() --- called by tty_hangup() when a hangup is signaled.
*/
static void rs_hangup(struct tty_struct *tty)
{
struct async_struct * info = (struct async_struct *)tty->driver_data;
struct serial_state *state = info->state;
#ifdef SIMSERIAL_DEBUG
printk("rs_hangup: called\n");
#endif
rs_flush_buffer(tty);
if (info->flags & ASYNC_CLOSING)
return;
shutdown(info);
info->event = 0;
state->count = 0;
info->flags &= ~ASYNC_NORMAL_ACTIVE;
info->tty = NULL;
wake_up_interruptible(&info->open_wait);
}
static int get_async_struct(int line, struct async_struct **ret_info)
{
struct async_struct *info;
struct serial_state *sstate;
sstate = rs_table + line;
sstate->count++;
if (sstate->info) {
*ret_info = sstate->info;
return 0;
}
info = kzalloc(sizeof(struct async_struct), GFP_KERNEL);
if (!info) {
sstate->count--;
return -ENOMEM;
}
init_waitqueue_head(&info->open_wait);
init_waitqueue_head(&info->close_wait);
init_waitqueue_head(&info->delta_msr_wait);
info->magic = SERIAL_MAGIC;
info->port = sstate->port;
info->flags = sstate->flags;
info->xmit_fifo_size = sstate->xmit_fifo_size;
info->line = line;
INIT_WORK(&info->work, do_softint);
info->state = sstate;
if (sstate->info) {
kfree(info);
*ret_info = sstate->info;
return 0;
}
*ret_info = sstate->info = info;
return 0;
}
static int
startup(struct async_struct *info)
{
unsigned long flags;
int retval=0;
irq_handler_t handler;
struct serial_state *state= info->state;
unsigned long page;
page = get_zeroed_page(GFP_KERNEL);
if (!page)
return -ENOMEM;
local_irq_save(flags);
if (info->flags & ASYNC_INITIALIZED) {
free_page(page);
goto errout;
}
if (!state->port || !state->type) {
if (info->tty) set_bit(TTY_IO_ERROR, &info->tty->flags);
free_page(page);
goto errout;
}
if (info->xmit.buf)
free_page(page);
else
info->xmit.buf = (unsigned char *) page;
#ifdef SIMSERIAL_DEBUG
printk("startup: ttys%d (irq %d)...", info->line, state->irq);
#endif
/*
* Allocate the IRQ if necessary
*/
if (state->irq && (!IRQ_ports[state->irq] ||
!IRQ_ports[state->irq]->next_port)) {
if (IRQ_ports[state->irq]) {
retval = -EBUSY;
goto errout;
} else
handler = rs_interrupt_single;
retval = request_irq(state->irq, handler, IRQ_T(info), "simserial", NULL);
if (retval) {
if (capable(CAP_SYS_ADMIN)) {
if (info->tty)
set_bit(TTY_IO_ERROR,
&info->tty->flags);
retval = 0;
}
goto errout;
}
}
/*
* Insert serial port into IRQ chain.
*/
info->prev_port = NULL;
info->next_port = IRQ_ports[state->irq];
if (info->next_port)
info->next_port->prev_port = info;
IRQ_ports[state->irq] = info;
if (info->tty) clear_bit(TTY_IO_ERROR, &info->tty->flags);
info->xmit.head = info->xmit.tail = 0;
#if 0
/*
* Set up serial timers...
*/
timer_table[RS_TIMER].expires = jiffies + 2*HZ/100;
timer_active |= 1 << RS_TIMER;
#endif
/*
* Set up the tty->alt_speed kludge
*/
if (info->tty) {
if ((info->flags & ASYNC_SPD_MASK) == ASYNC_SPD_HI)
info->tty->alt_speed = 57600;
if ((info->flags & ASYNC_SPD_MASK) == ASYNC_SPD_VHI)
info->tty->alt_speed = 115200;
if ((info->flags & ASYNC_SPD_MASK) == ASYNC_SPD_SHI)
info->tty->alt_speed = 230400;
if ((info->flags & ASYNC_SPD_MASK) == ASYNC_SPD_WARP)
info->tty->alt_speed = 460800;
}
info->flags |= ASYNC_INITIALIZED;
local_irq_restore(flags);
return 0;
errout:
local_irq_restore(flags);
return retval;
}
/*
* This routine is called whenever a serial port is opened. It
* enables interrupts for a serial port, linking in its async structure into
* the IRQ chain. It also performs the serial-specific
* initialization for the tty structure.
*/
static int rs_open(struct tty_struct *tty, struct file * filp)
{
struct async_struct *info;
int retval, line;
unsigned long page;
line = tty->index;
if ((line < 0) || (line >= NR_PORTS))
return -ENODEV;
retval = get_async_struct(line, &info);
if (retval)
return retval;
tty->driver_data = info;
info->tty = tty;
#ifdef SIMSERIAL_DEBUG
printk("rs_open %s, count = %d\n", tty->name, info->state->count);
#endif
info->tty->low_latency = (info->flags & ASYNC_LOW_LATENCY) ? 1 : 0;
if (!tmp_buf) {
page = get_zeroed_page(GFP_KERNEL);
if (!page)
return -ENOMEM;
if (tmp_buf)
free_page(page);
else
tmp_buf = (unsigned char *) page;
}
/*
* If the port is in the middle of closing, bail out now
*/
if (tty_hung_up_p(filp) ||
(info->flags & ASYNC_CLOSING)) {
if (info->flags & ASYNC_CLOSING)
interruptible_sleep_on(&info->close_wait);
#ifdef SERIAL_DO_RESTART
return ((info->flags & ASYNC_HUP_NOTIFY) ?
-EAGAIN : -ERESTARTSYS);
#else
return -EAGAIN;
#endif
}
/*
* Start up serial port
*/
retval = startup(info);
if (retval) {
return retval;
}
/*
* figure out which console to use (should be one already)
*/
console = console_drivers;
while (console) {
if ((console->flags & CON_ENABLED) && console->write) break;
console = console->next;
}
#ifdef SIMSERIAL_DEBUG
printk("rs_open ttys%d successful\n", info->line);
#endif
return 0;
}
/*
* /proc fs routines....
*/
static inline void line_info(struct seq_file *m, struct serial_state *state)
{
seq_printf(m, "%d: uart:%s port:%lX irq:%d\n",
state->line, uart_config[state->type].name,
state->port, state->irq);
}
static int rs_proc_show(struct seq_file *m, void *v)
{
int i;
seq_printf(m, "simserinfo:1.0 driver:%s\n", serial_version);
for (i = 0; i < NR_PORTS; i++)
line_info(m, &rs_table[i]);
return 0;
}
static int rs_proc_open(struct inode *inode, struct file *file)
{
return single_open(file, rs_proc_show, NULL);
}
static const struct file_operations rs_proc_fops = {
.owner = THIS_MODULE,
.open = rs_proc_open,
.read = seq_read,
.llseek = seq_lseek,
.release = single_release,
};
/*
* ---------------------------------------------------------------------
* rs_init() and friends
*
* rs_init() is called at boot-time to initialize the serial driver.
* ---------------------------------------------------------------------
*/
/*
* This routine prints out the appropriate serial driver version
* number, and identifies which options were configured into this
* driver.
*/
static inline void show_serial_version(void)
{
printk(KERN_INFO "%s version %s with", serial_name, serial_version);
printk(KERN_INFO " no serial options enabled\n");
}
static const struct tty_operations hp_ops = {
.open = rs_open,
.close = rs_close,
.write = rs_write,
.put_char = rs_put_char,
.flush_chars = rs_flush_chars,
.write_room = rs_write_room,
.chars_in_buffer = rs_chars_in_buffer,
.flush_buffer = rs_flush_buffer,
.ioctl = rs_ioctl,
.throttle = rs_throttle,
.unthrottle = rs_unthrottle,
.send_xchar = rs_send_xchar,
.set_termios = rs_set_termios,
.stop = rs_stop,
.start = rs_start,
.hangup = rs_hangup,
.wait_until_sent = rs_wait_until_sent,
.proc_fops = &rs_proc_fops,
};
/*
* The serial driver boot-time initialization code!
*/
static int __init
simrs_init (void)
{
int i, rc;
struct serial_state *state;
if (!ia64_platform_is("hpsim"))
return -ENODEV;
hp_simserial_driver = alloc_tty_driver(1);
if (!hp_simserial_driver)
return -ENOMEM;
show_serial_version();
/* Initialize the tty_driver structure */
hp_simserial_driver->owner = THIS_MODULE;
hp_simserial_driver->driver_name = "simserial";
hp_simserial_driver->name = "ttyS";
hp_simserial_driver->major = TTY_MAJOR;
hp_simserial_driver->minor_start = 64;
hp_simserial_driver->type = TTY_DRIVER_TYPE_SERIAL;
hp_simserial_driver->subtype = SERIAL_TYPE_NORMAL;
hp_simserial_driver->init_termios = tty_std_termios;
hp_simserial_driver->init_termios.c_cflag =
B9600 | CS8 | CREAD | HUPCL | CLOCAL;
hp_simserial_driver->flags = TTY_DRIVER_REAL_RAW;
tty_set_operations(hp_simserial_driver, &hp_ops);
/*
* Let's have a little bit of fun!
*/
for (i = 0, state = rs_table; i < NR_PORTS; i++,state++) {
if (state->type == PORT_UNKNOWN) continue;
if (!state->irq) {
if ((rc = assign_irq_vector(AUTO_ASSIGN)) < 0)
panic("%s: out of interrupt vectors!\n",
__func__);
state->irq = rc;
ia64_ssc_connect_irq(KEYBOARD_INTR, state->irq);
}
printk(KERN_INFO "ttyS%d at 0x%04lx (irq = %d) is a %s\n",
state->line,
state->port, state->irq,
uart_config[state->type].name);
}
if (tty_register_driver(hp_simserial_driver))
panic("Couldn't register simserial driver\n");
return 0;
}
#ifndef MODULE
__initcall(simrs_init);
#endif
| gpl-2.0 |
lgs3137/android_kernel_samsung_msm8660-common | arch/arm/mach-s3c2440/mach-rx3715.c | 2981 | 5098 | /* linux/arch/arm/mach-s3c2440/mach-rx3715.c
*
* Copyright (c) 2003-2004 Simtec Electronics
* Ben Dooks <ben@simtec.co.uk>
*
* http://www.handhelds.org/projects/rx3715.html
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
*/
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/memblock.h>
#include <linux/timer.h>
#include <linux/init.h>
#include <linux/tty.h>
#include <linux/console.h>
#include <linux/sysdev.h>
#include <linux/platform_device.h>
#include <linux/serial_core.h>
#include <linux/serial.h>
#include <linux/io.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/nand.h>
#include <linux/mtd/nand_ecc.h>
#include <linux/mtd/partitions.h>
#include <asm/mach/arch.h>
#include <asm/mach/map.h>
#include <asm/mach/irq.h>
#include <mach/hardware.h>
#include <asm/irq.h>
#include <asm/mach-types.h>
#include <plat/regs-serial.h>
#include <mach/regs-gpio.h>
#include <mach/regs-lcd.h>
#include <mach/h1940.h>
#include <plat/nand.h>
#include <mach/fb.h>
#include <plat/clock.h>
#include <plat/devs.h>
#include <plat/cpu.h>
#include <plat/pm.h>
static struct map_desc rx3715_iodesc[] __initdata = {
/* dump ISA space somewhere unused */
{
.virtual = (u32)S3C24XX_VA_ISA_WORD,
.pfn = __phys_to_pfn(S3C2410_CS3),
.length = SZ_1M,
.type = MT_DEVICE,
}, {
.virtual = (u32)S3C24XX_VA_ISA_BYTE,
.pfn = __phys_to_pfn(S3C2410_CS3),
.length = SZ_1M,
.type = MT_DEVICE,
},
};
static struct s3c24xx_uart_clksrc rx3715_serial_clocks[] = {
[0] = {
.name = "fclk",
.divisor = 0,
.min_baud = 0,
.max_baud = 0,
}
};
static struct s3c2410_uartcfg rx3715_uartcfgs[] = {
[0] = {
.hwport = 0,
.flags = 0,
.ucon = 0x3c5,
.ulcon = 0x03,
.ufcon = 0x51,
.clocks = rx3715_serial_clocks,
.clocks_size = ARRAY_SIZE(rx3715_serial_clocks),
},
[1] = {
.hwport = 1,
.flags = 0,
.ucon = 0x3c5,
.ulcon = 0x03,
.ufcon = 0x00,
.clocks = rx3715_serial_clocks,
.clocks_size = ARRAY_SIZE(rx3715_serial_clocks),
},
/* IR port */
[2] = {
.hwport = 2,
.uart_flags = UPF_CONS_FLOW,
.ucon = 0x3c5,
.ulcon = 0x43,
.ufcon = 0x51,
.clocks = rx3715_serial_clocks,
.clocks_size = ARRAY_SIZE(rx3715_serial_clocks),
}
};
/* framebuffer lcd controller information */
static struct s3c2410fb_display rx3715_lcdcfg __initdata = {
.lcdcon5 = S3C2410_LCDCON5_INVVLINE |
S3C2410_LCDCON5_FRM565 |
S3C2410_LCDCON5_HWSWP,
.type = S3C2410_LCDCON1_TFT,
.width = 240,
.height = 320,
.pixclock = 260000,
.xres = 240,
.yres = 320,
.bpp = 16,
.left_margin = 36,
.right_margin = 36,
.hsync_len = 8,
.upper_margin = 6,
.lower_margin = 7,
.vsync_len = 3,
};
static struct s3c2410fb_mach_info rx3715_fb_info __initdata = {
.displays = &rx3715_lcdcfg,
.num_displays = 1,
.default_display = 0,
.lpcsel = 0xf82,
.gpccon = 0xaa955699,
.gpccon_mask = 0xffc003cc,
.gpcup = 0x0000ffff,
.gpcup_mask = 0xffffffff,
.gpdcon = 0xaa95aaa1,
.gpdcon_mask = 0xffc0fff0,
.gpdup = 0x0000faff,
.gpdup_mask = 0xffffffff,
};
static struct mtd_partition __initdata rx3715_nand_part[] = {
[0] = {
.name = "Whole Flash",
.offset = 0,
.size = MTDPART_SIZ_FULL,
.mask_flags = MTD_WRITEABLE,
}
};
static struct s3c2410_nand_set __initdata rx3715_nand_sets[] = {
[0] = {
.name = "Internal",
.nr_chips = 1,
.nr_partitions = ARRAY_SIZE(rx3715_nand_part),
.partitions = rx3715_nand_part,
},
};
static struct s3c2410_platform_nand __initdata rx3715_nand_info = {
.tacls = 25,
.twrph0 = 50,
.twrph1 = 15,
.nr_sets = ARRAY_SIZE(rx3715_nand_sets),
.sets = rx3715_nand_sets,
};
static struct platform_device *rx3715_devices[] __initdata = {
&s3c_device_ohci,
&s3c_device_lcd,
&s3c_device_wdt,
&s3c_device_i2c0,
&s3c_device_iis,
&s3c_device_nand,
};
static void __init rx3715_map_io(void)
{
s3c24xx_init_io(rx3715_iodesc, ARRAY_SIZE(rx3715_iodesc));
s3c24xx_init_clocks(16934000);
s3c24xx_init_uarts(rx3715_uartcfgs, ARRAY_SIZE(rx3715_uartcfgs));
}
/* H1940 and RX3715 need to reserve this for suspend */
static void __init rx3715_reserve(void)
{
memblock_reserve(0x30003000, 0x1000);
memblock_reserve(0x30081000, 0x1000);
}
static void __init rx3715_init_irq(void)
{
s3c24xx_init_irq();
}
static void __init rx3715_init_machine(void)
{
#ifdef CONFIG_PM_H1940
memcpy(phys_to_virt(H1940_SUSPEND_RESUMEAT), h1940_pm_return, 1024);
#endif
s3c_pm_init();
s3c_nand_set_platdata(&rx3715_nand_info);
s3c24xx_fb_set_platdata(&rx3715_fb_info);
platform_add_devices(rx3715_devices, ARRAY_SIZE(rx3715_devices));
}
MACHINE_START(RX3715, "IPAQ-RX3715")
/* Maintainer: Ben Dooks <ben-linux@fluff.org> */
.boot_params = S3C2410_SDRAM_PA + 0x100,
.map_io = rx3715_map_io,
.reserve = rx3715_reserve,
.init_irq = rx3715_init_irq,
.init_machine = rx3715_init_machine,
.timer = &s3c24xx_timer,
MACHINE_END
| gpl-2.0 |
dsb9938/HTC_One_max | arch/mips/mm/fault.c | 4005 | 8083 | /*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 1995 - 2000 by Ralf Baechle
*/
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/module.h>
#include <linux/kprobes.h>
#include <linux/perf_event.h>
#include <asm/branch.h>
#include <asm/mmu_context.h>
#include <asm/uaccess.h>
#include <asm/ptrace.h>
#include <asm/highmem.h> /* For VMALLOC_END */
#include <linux/kdebug.h>
/*
* This routine handles page faults. It determines the address,
* and the problem, and then passes it off to one of the appropriate
* routines.
*/
asmlinkage void __kprobes do_page_fault(struct pt_regs *regs, unsigned long write,
unsigned long address)
{
struct vm_area_struct * vma = NULL;
struct task_struct *tsk = current;
struct mm_struct *mm = tsk->mm;
const int field = sizeof(unsigned long) * 2;
siginfo_t info;
int fault;
unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE |
(write ? FAULT_FLAG_WRITE : 0);
#if 0
printk("Cpu%d[%s:%d:%0*lx:%ld:%0*lx]\n", raw_smp_processor_id(),
current->comm, current->pid, field, address, write,
field, regs->cp0_epc);
#endif
#ifdef CONFIG_KPROBES
/*
* This is to notify the fault handler of the kprobes. The
* exception code is redundant as it is also carried in REGS,
* but we pass it anyhow.
*/
if (notify_die(DIE_PAGE_FAULT, "page fault", regs, -1,
(regs->cp0_cause >> 2) & 0x1f, SIGSEGV) == NOTIFY_STOP)
return;
#endif
info.si_code = SEGV_MAPERR;
/*
* We fault-in kernel-space virtual memory on-demand. The
* 'reference' page table is init_mm.pgd.
*
* NOTE! We MUST NOT take any locks for this case. We may
* be in an interrupt or a critical region, and should
* only copy the information from the master page table,
* nothing more.
*/
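/*
 * Concretely: on 32-bit kernels a fault on a vmalloc address is fixed
 * up by copying the matching init_mm.pgd entries into the current
 * CPU's page table (see vmalloc_fault at the bottom of this file);
 * on 64-bit the target is no_context instead, since such a fault
 * should never reach this handler there.
 */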
#ifdef CONFIG_64BIT
# define VMALLOC_FAULT_TARGET no_context
#else
# define VMALLOC_FAULT_TARGET vmalloc_fault
#endif
if (unlikely(address >= VMALLOC_START && address <= VMALLOC_END))
goto VMALLOC_FAULT_TARGET;
#ifdef MODULE_START
if (unlikely(address >= MODULE_START && address < MODULE_END))
goto VMALLOC_FAULT_TARGET;
#endif
/*
* If we're in an interrupt or have no user
* context, we must not take the fault..
*/
if (in_atomic() || !mm)
goto bad_area_nosemaphore;
retry:
down_read(&mm->mmap_sem);
vma = find_vma(mm, address);
if (!vma)
goto bad_area;
if (vma->vm_start <= address)
goto good_area;
if (!(vma->vm_flags & VM_GROWSDOWN))
goto bad_area;
if (expand_stack(vma, address))
goto bad_area;
/*
* Ok, we have a good vm_area for this memory access, so
* we can handle it..
*/
good_area:
info.si_code = SEGV_ACCERR;
if (write) {
if (!(vma->vm_flags & VM_WRITE))
goto bad_area;
} else {
if (kernel_uses_smartmips_rixi) {
if (address == regs->cp0_epc && !(vma->vm_flags & VM_EXEC)) {
#if 0
pr_notice("Cpu%d[%s:%d:%0*lx:%ld:%0*lx] XI violation\n",
raw_smp_processor_id(),
current->comm, current->pid,
field, address, write,
field, regs->cp0_epc);
#endif
goto bad_area;
}
if (!(vma->vm_flags & VM_READ)) {
#if 0
pr_notice("Cpu%d[%s:%d:%0*lx:%ld:%0*lx] RI violation\n",
raw_smp_processor_id(),
current->comm, current->pid,
field, address, write,
field, regs->cp0_epc);
#endif
goto bad_area;
}
} else {
if (!(vma->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)))
goto bad_area;
}
}
/*
* If for any reason at all we couldn't handle the fault,
* make sure we exit gracefully rather than endlessly redo
* the fault.
*/
fault = handle_mm_fault(mm, vma, address, flags);
if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
return;
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
if (unlikely(fault & VM_FAULT_ERROR)) {
if (fault & VM_FAULT_OOM)
goto out_of_memory;
else if (fault & VM_FAULT_SIGBUS)
goto do_sigbus;
BUG();
}
if (flags & FAULT_FLAG_ALLOW_RETRY) {
if (fault & VM_FAULT_MAJOR) {
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1,
regs, address);
tsk->maj_flt++;
} else {
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1,
regs, address);
tsk->min_flt++;
}
if (fault & VM_FAULT_RETRY) {
flags &= ~FAULT_FLAG_ALLOW_RETRY;
/*
* No need to up_read(&mm->mmap_sem) as we would
* have already released it in __lock_page_or_retry
* in mm/filemap.c.
*/
goto retry;
}
}
up_read(&mm->mmap_sem);
return;
/*
* Something tried to access memory that isn't in our memory map..
* Fix it, but check if it's kernel or user first..
*/
bad_area:
up_read(&mm->mmap_sem);
bad_area_nosemaphore:
/* User mode accesses just cause a SIGSEGV */
if (user_mode(regs)) {
tsk->thread.cp0_badvaddr = address;
tsk->thread.error_code = write;
#if 0
printk("do_page_fault() #2: sending SIGSEGV to %s for "
"invalid %s\n%0*lx (epc == %0*lx, ra == %0*lx)\n",
tsk->comm,
write ? "write access to" : "read access from",
field, address,
field, (unsigned long) regs->cp0_epc,
field, (unsigned long) regs->regs[31]);
#endif
info.si_signo = SIGSEGV;
info.si_errno = 0;
/* info.si_code has been set above */
info.si_addr = (void __user *) address;
force_sig_info(SIGSEGV, &info, tsk);
return;
}
no_context:
/* Are we prepared to handle this kernel fault? */
if (fixup_exception(regs)) {
current->thread.cp0_baduaddr = address;
return;
}
/*
* Oops. The kernel tried to access some bad page. We'll have to
* terminate things with extreme prejudice.
*/
bust_spinlocks(1);
printk(KERN_ALERT "CPU %d Unable to handle kernel paging request at "
"virtual address %0*lx, epc == %0*lx, ra == %0*lx\n",
raw_smp_processor_id(), field, address, field, regs->cp0_epc,
field, regs->regs[31]);
die("Oops", regs);
out_of_memory:
/*
* We ran out of memory, call the OOM killer, and return to userspace
* (which will retry the fault, or kill us if we got oom-killed).
*/
up_read(&mm->mmap_sem);
pagefault_out_of_memory();
return;
do_sigbus:
up_read(&mm->mmap_sem);
/* Kernel mode? Handle exceptions or die */
if (!user_mode(regs))
goto no_context;
/*
* Send a sigbus, regardless of whether we were in kernel
* or user mode.
*/
#if 0
printk("do_page_fault() #3: sending SIGBUS to %s for "
"invalid %s\n%0*lx (epc == %0*lx, ra == %0*lx)\n",
tsk->comm,
write ? "write access to" : "read access from",
field, address,
field, (unsigned long) regs->cp0_epc,
field, (unsigned long) regs->regs[31]);
#endif
tsk->thread.cp0_badvaddr = address;
info.si_signo = SIGBUS;
info.si_errno = 0;
info.si_code = BUS_ADRERR;
info.si_addr = (void __user *) address;
force_sig_info(SIGBUS, &info, tsk);
return;
#ifndef CONFIG_64BIT
vmalloc_fault:
{
/*
* Synchronize this task's top level page-table
* with the 'reference' page table.
*
* Do _not_ use "tsk" here. We might be inside
* an interrupt in the middle of a task switch..
*/
int offset = __pgd_offset(address);
pgd_t *pgd, *pgd_k;
pud_t *pud, *pud_k;
pmd_t *pmd, *pmd_k;
pte_t *pte_k;
pgd = (pgd_t *) pgd_current[raw_smp_processor_id()] + offset;
pgd_k = init_mm.pgd + offset;
if (!pgd_present(*pgd_k))
goto no_context;
set_pgd(pgd, *pgd_k);
pud = pud_offset(pgd, address);
pud_k = pud_offset(pgd_k, address);
if (!pud_present(*pud_k))
goto no_context;
pmd = pmd_offset(pud, address);
pmd_k = pmd_offset(pud_k, address);
if (!pmd_present(*pmd_k))
goto no_context;
set_pmd(pmd, *pmd_k);
pte_k = pte_offset_kernel(pmd_k, address);
if (!pte_present(*pte_k))
goto no_context;
return;
}
#endif
}
| gpl-2.0 |
tudorsirb/lge_kernel_p700 | arch/arm/mach-omap1/timer.c | 4773 | 4073 | /**
* OMAP1 Dual-Mode Timers - platform device registration
*
* Contains first-level initialization routines which internally
* generate timer device information and register it with the Linux
* device model. It also has a low-level function to change the timer
* input clock source.
*
* Copyright (C) 2011 Texas Instruments Incorporated - http://www.ti.com/
* Tarun Kanti DebBarma <tarun.kanti@ti.com>
* Thara Gopinath <thara@ti.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* This program is distributed "as is" WITHOUT ANY WARRANTY of any
* kind, whether express or implied; without even the implied warranty
* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/platform_device.h>
#include <mach/irqs.h>
#include <plat/dmtimer.h>
#define OMAP1610_GPTIMER1_BASE 0xfffb1400
#define OMAP1610_GPTIMER2_BASE 0xfffb1c00
#define OMAP1610_GPTIMER3_BASE 0xfffb2400
#define OMAP1610_GPTIMER4_BASE 0xfffb2c00
#define OMAP1610_GPTIMER5_BASE 0xfffb3400
#define OMAP1610_GPTIMER6_BASE 0xfffb3c00
#define OMAP1610_GPTIMER7_BASE 0xfffb7400
#define OMAP1610_GPTIMER8_BASE 0xfffbd400
#define OMAP1_DM_TIMER_COUNT 8
static int omap1_dm_timer_set_src(struct platform_device *pdev,
int source)
{
int n = (pdev->id - 1) << 1;
u32 l;
l = omap_readl(MOD_CONF_CTRL_1) & ~(0x03 << n);
l |= source << n;
omap_writel(l, MOD_CONF_CTRL_1);
return 0;
}
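/*
 * MOD_CONF_CTRL_1 packs one 2-bit clock-source field per dual-mode
 * timer, so timer id N lives at bit offset (N - 1) * 2. Worked
 * example: for dmtimer3, n = (3 - 1) << 1 = 4, and a source value of
 * 2 lands in bits 5:4 of the register.
 */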
int __init omap1_dm_timer_init(void)
{
int i;
int ret;
struct dmtimer_platform_data *pdata;
struct platform_device *pdev;
if (!cpu_is_omap16xx())
return 0;
for (i = 1; i <= OMAP1_DM_TIMER_COUNT; i++) {
struct resource res[2];
u32 base, irq;
switch (i) {
case 1:
base = OMAP1610_GPTIMER1_BASE;
irq = INT_1610_GPTIMER1;
break;
case 2:
base = OMAP1610_GPTIMER2_BASE;
irq = INT_1610_GPTIMER2;
break;
case 3:
base = OMAP1610_GPTIMER3_BASE;
irq = INT_1610_GPTIMER3;
break;
case 4:
base = OMAP1610_GPTIMER4_BASE;
irq = INT_1610_GPTIMER4;
break;
case 5:
base = OMAP1610_GPTIMER5_BASE;
irq = INT_1610_GPTIMER5;
break;
case 6:
base = OMAP1610_GPTIMER6_BASE;
irq = INT_1610_GPTIMER6;
break;
case 7:
base = OMAP1610_GPTIMER7_BASE;
irq = INT_1610_GPTIMER7;
break;
case 8:
base = OMAP1610_GPTIMER8_BASE;
irq = INT_1610_GPTIMER8;
break;
default:
/*
* not supposed to reach here.
* this is to remove warning.
*/
return -EINVAL;
}
pdev = platform_device_alloc("omap_timer", i);
if (!pdev) {
pr_err("%s: Failed to device alloc for dmtimer%d\n",
__func__, i);
return -ENOMEM;
}
memset(res, 0, 2 * sizeof(struct resource));
res[0].start = base;
res[0].end = base + 0x46;
res[0].flags = IORESOURCE_MEM;
res[1].start = irq;
res[1].end = irq;
res[1].flags = IORESOURCE_IRQ;
ret = platform_device_add_resources(pdev, res,
ARRAY_SIZE(res));
if (ret) {
dev_err(&pdev->dev, "%s: Failed to add resources.\n",
__func__);
goto err_free_pdev;
}
pdata = kzalloc(sizeof(*pdata), GFP_KERNEL);
if (!pdata) {
dev_err(&pdev->dev, "%s: Failed to allocate pdata.\n",
__func__);
ret = -ENOMEM;
goto err_free_pdev; /* pdata was never allocated */
}
pdata->set_timer_src = omap1_dm_timer_set_src;
pdata->needs_manual_reset = 1;
ret = platform_device_add_data(pdev, pdata, sizeof(*pdata));
if (ret) {
dev_err(&pdev->dev, "%s: Failed to add platform data.\n",
__func__);
goto err_free_pdata;
}
ret = platform_device_add(pdev);
if (ret) {
dev_err(&pdev->dev, "%s: Failed to add platform device.\n",
__func__);
goto err_free_pdata;
}
dev_dbg(&pdev->dev, " Registered.\n");
}
return 0;
err_free_pdata:
kfree(pdata);
err_free_pdev:
/* pdev was never added, so drop the reference rather than unregister */
platform_device_put(pdev);
return ret;
}
arch_initcall(omap1_dm_timer_init);
| gpl-2.0 |
mdeejay/android_kernel_monarudo | drivers/staging/olpc_dcon/olpc_dcon_xo_1_5.c | 5029 | 4701 | /*
* Copyright (c) 2009,2010 One Laptop per Child
*
* This program is free software. You can redistribute it and/or
* modify it under the terms of version 2 of the GNU General Public
* License as published by the Free Software Foundation.
*/
#include <linux/acpi.h>
#include <linux/delay.h>
#include <linux/pci.h>
#include <linux/gpio.h>
#include <asm/olpc.h>
/* TODO: this eventually belongs in linux/vx855.h */
#define NR_VX855_GPI 14
#define NR_VX855_GPO 13
#define NR_VX855_GPIO 15
#define VX855_GPI(n) (n)
#define VX855_GPO(n) (NR_VX855_GPI + (n))
#define VX855_GPIO(n) (NR_VX855_GPI + NR_VX855_GPO + (n))
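/*
 * Example of the resulting flat numbering (pure arithmetic from the
 * macros above): VX855_GPI(10) == 10, VX855_GPO(2) == 14 + 2 == 16,
 * and VX855_GPIO(1) == 14 + 13 + 1 == 28.
 */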
#include "olpc_dcon.h"
/* Hardware setup on the XO 1.5:
* DCONLOAD connects to VX855_GPIO1 (not SMBCK2)
* DCONBLANK connects to VX855_GPIO8 (not SSPICLK) unused in driver
* DCONSTAT0 connects to VX855_GPI10 (not SSPISDI)
* DCONSTAT1 connects to VX855_GPI11 (not nSSPISS)
* DCONIRQ connects to VX855_GPIO12
* DCONSMBDATA connects to VX855 graphics CRTSPD
* DCONSMBCLK connects to VX855 graphics CRTSPCLK
*/
#define VX855_GENL_PURPOSE_OUTPUT 0x44c /* PMIO_Rx4c-4f */
#define VX855_GPI_STATUS_CHG 0x450 /* PMIO_Rx50 */
#define VX855_GPI_SCI_SMI 0x452 /* PMIO_Rx52 */
#define BIT_GPIO12 0x40
#define PREFIX "OLPC DCON: "
static void dcon_clear_irq(void)
{
/* irq status will appear in PMIO_Rx50[6] (RW1C) on gpio12 */
outb(BIT_GPIO12, VX855_GPI_STATUS_CHG);
}
static int dcon_was_irq(void)
{
u_int8_t tmp;
/* irq status will appear in PMIO_Rx50[6] on gpio12 */
tmp = inb(VX855_GPI_STATUS_CHG);
return !!(tmp & BIT_GPIO12);
}
static int dcon_init_xo_1_5(struct dcon_priv *dcon)
{
unsigned int irq;
u_int8_t tmp;
struct pci_dev *pdev;
pdev = pci_get_device(PCI_VENDOR_ID_VIA,
PCI_DEVICE_ID_VIA_VX855, NULL);
if (!pdev) {
printk(KERN_ERR "cannot find VX855 PCI ID\n");
return 1;
}
pci_read_config_byte(pdev, 0x95, &tmp);
pci_write_config_byte(pdev, 0x95, tmp|0x0c);
/* Set GPIO8 to GPIO mode, not SSPICLK */
pci_read_config_byte(pdev, 0xe3, &tmp);
pci_write_config_byte(pdev, 0xe3, tmp | 0x04);
/* Set GPI10/GPI11 to GPI mode, not SSPISDI/SSPISS */
pci_read_config_byte(pdev, 0xe4, &tmp);
pci_write_config_byte(pdev, 0xe4, tmp|0x08);
/* clear PMU_RxE1[6] to select SCI on GPIO12 */
/* clear PMU_RxE0[6] to choose falling edge */
pci_read_config_byte(pdev, 0xe1, &tmp);
pci_write_config_byte(pdev, 0xe1, tmp & ~BIT_GPIO12);
pci_read_config_byte(pdev, 0xe0, &tmp);
pci_write_config_byte(pdev, 0xe0, tmp & ~BIT_GPIO12);
dcon_clear_irq();
/* set PMIO_Rx52[6] to enable SCI/SMI on gpio12 */
outb(inb(VX855_GPI_SCI_SMI)|BIT_GPIO12, VX855_GPI_SCI_SMI);
/* Determine the current state of DCONLOAD, likely set by firmware */
/* GPIO1 */
dcon->curr_src = (inl(VX855_GENL_PURPOSE_OUTPUT) & 0x1000) ?
DCON_SOURCE_CPU : DCON_SOURCE_DCON;
dcon->pending_src = dcon->curr_src;
pci_dev_put(pdev);
/* we're sharing the IRQ with ACPI */
irq = acpi_gbl_FADT.sci_interrupt;
if (request_irq(irq, &dcon_interrupt, IRQF_SHARED, "DCON", dcon)) {
printk(KERN_ERR PREFIX "DCON (IRQ%d) allocation failed\n", irq);
return 1;
}
return 0;
}
static void set_i2c_line(int sda, int scl)
{
unsigned char tmp;
unsigned int port = 0x26;
/* FIXME: This directly accesses the CRT GPIO controller !!! */
outb(port, 0x3c4);
tmp = inb(0x3c5);
if (scl)
tmp |= 0x20;
else
tmp &= ~0x20;
if (sda)
tmp |= 0x10;
else
tmp &= ~0x10;
tmp |= 0x01;
outb(port, 0x3c4);
outb(tmp, 0x3c5);
}
static void dcon_wiggle_xo_1_5(void)
{
int x;
/*
* According to HiMax, when powering the DCON up we should hold
* SMB_DATA high for 8 SMB_CLK cycles. This will force the DCON
* state machine to reset to a (sane) initial state. Mitch Bradley
* did some testing and discovered that holding for 16 SMB_CLK cycles
* worked a lot more reliably, so that's what we do here.
*/
set_i2c_line(1, 1);
for (x = 0; x < 16; x++) {
udelay(5);
set_i2c_line(1, 0);
udelay(5);
set_i2c_line(1, 1);
}
udelay(5);
/* set PMIO_Rx52[6] to enable SCI/SMI on gpio12 */
outb(inb(VX855_GPI_SCI_SMI)|BIT_GPIO12, VX855_GPI_SCI_SMI);
}
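/*
 * Timing sketch (assuming udelay() is reasonably accurate at this
 * scale): the loop above holds SDA high while toggling SCL with ~5us
 * half-periods, i.e. sixteen clock cycles at roughly 100 kHz.
 */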
static void dcon_set_dconload_xo_1_5(int val)
{
gpio_set_value(VX855_GPIO(1), val);
}
static int dcon_read_status_xo_1_5(u8 *status)
{
if (!dcon_was_irq())
return -1;
/* I believe this is the same as "inb(0x44b) & 3" */
*status = gpio_get_value(VX855_GPI(10));
*status |= gpio_get_value(VX855_GPI(11)) << 1;
dcon_clear_irq();
return 0;
}
struct dcon_platform_data dcon_pdata_xo_1_5 = {
.init = dcon_init_xo_1_5,
.bus_stabilize_wiggle = dcon_wiggle_xo_1_5,
.set_dconload = dcon_set_dconload_xo_1_5,
.read_status = dcon_read_status_xo_1_5,
};
| gpl-2.0 |
bhargz/kernel_pico_3.0.16 | drivers/input/mouse/sermouse.c | 9893 | 9069 | /*
* Copyright (c) 1999-2001 Vojtech Pavlik
*/
/*
* Serial mouse driver for Linux
*/
/*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
* Should you need to contact me, the author, you can do so either by
* e-mail - mail your message to <vojtech@ucw.cz>, or by paper mail:
* Vojtech Pavlik, Simunkova 1594, Prague 8, 182 00 Czech Republic
*/
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/input.h>
#include <linux/serio.h>
#include <linux/init.h>
#define DRIVER_DESC "Serial mouse driver"
MODULE_AUTHOR("Vojtech Pavlik <vojtech@ucw.cz>");
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");
static const char *sermouse_protocols[] = { "None", "Mouse Systems Mouse", "Sun Mouse", "Microsoft Mouse",
"Logitech M+ Mouse", "Microsoft MZ Mouse", "Logitech MZ+ Mouse",
"Logitech MZ++ Mouse"};
struct sermouse {
struct input_dev *dev;
signed char buf[8];
unsigned char count;
unsigned char type;
unsigned long last;
char phys[32];
};
/*
* sermouse_process_msc() analyzes the incoming MSC/Sun bytestream and
* applies some prediction to the data, resulting in 96 updates per
* second, which is as good as a PS/2 or USB mouse.
*/
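/*
 * For reference, a Mouse Systems (MSC) packet as decoded below is five
 * bytes (layout inferred from the switch cases, matching the usual MSC
 * protocol descriptions):
 *
 * byte 0: 1 0 0 0 0 lb mb rb (sync pattern plus button bits, active low)
 * byte 1: first dx byte
 * byte 2: first dy byte
 * byte 3: second dx byte
 * byte 4: second dy byte
 *
 * The driver splits each delta and reports it in two steps, which is
 * where the "prediction" and the doubled update rate come from.
 */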
static void sermouse_process_msc(struct sermouse *sermouse, signed char data)
{
struct input_dev *dev = sermouse->dev;
signed char *buf = sermouse->buf;
switch (sermouse->count) {
case 0:
if ((data & 0xf8) != 0x80)
return;
input_report_key(dev, BTN_LEFT, !(data & 4));
input_report_key(dev, BTN_RIGHT, !(data & 1));
input_report_key(dev, BTN_MIDDLE, !(data & 2));
break;
case 1:
case 3:
input_report_rel(dev, REL_X, data / 2);
input_report_rel(dev, REL_Y, -buf[1]);
buf[0] = data - data / 2;
break;
case 2:
case 4:
input_report_rel(dev, REL_X, buf[0]);
input_report_rel(dev, REL_Y, buf[1] - data);
buf[1] = data / 2;
break;
}
input_sync(dev);
if (++sermouse->count == 5)
sermouse->count = 0;
}
/*
* sermouse_process_ms() analyzes the incoming MS(Z/+/++) bytestream and
* generates events. With prediction it gets 80 updates/sec, assuming
* standard 3-byte packets and 1200 bps.
*/
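/*
 * For reference, the basic 3-byte Microsoft packet as decoded below
 * (bit layout inferred from the masks and shifts in the switch,
 * matching common descriptions of the MS protocol):
 *
 * byte 0: 0 1 lb rb dy7 dy6 dx7 dx6
 * byte 1: 0 0 dx5 dx4 dx3 dx2 dx1 dx0
 * byte 2: 0 0 dy5 dy4 dy3 dy2 dy1 dy0
 *
 * A fourth byte carries the extra buttons/wheel on the M+/MZ variants.
 */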
static void sermouse_process_ms(struct sermouse *sermouse, signed char data)
{
struct input_dev *dev = sermouse->dev;
signed char *buf = sermouse->buf;
if (data & 0x40)
sermouse->count = 0;
else if (sermouse->count == 0)
return;
switch (sermouse->count) {
case 0:
buf[1] = data;
input_report_key(dev, BTN_LEFT, (data >> 5) & 1);
input_report_key(dev, BTN_RIGHT, (data >> 4) & 1);
break;
case 1:
buf[2] = data;
data = (signed char) (((buf[1] << 6) & 0xc0) | (data & 0x3f));
input_report_rel(dev, REL_X, data / 2);
input_report_rel(dev, REL_Y, buf[4]);
buf[3] = data - data / 2;
break;
case 2:
/* Guessing the state of the middle button on 3-button MS-protocol mice - ugly. */
if ((sermouse->type == SERIO_MS) && !data && !buf[2] && !((buf[0] & 0xf0) ^ buf[1]))
input_report_key(dev, BTN_MIDDLE, !test_bit(BTN_MIDDLE, dev->key));
buf[0] = buf[1];
data = (signed char) (((buf[1] << 4) & 0xc0) | (data & 0x3f));
input_report_rel(dev, REL_X, buf[3]);
input_report_rel(dev, REL_Y, data - buf[4]);
buf[4] = data / 2;
break;
case 3:
switch (sermouse->type) {
case SERIO_MS:
sermouse->type = SERIO_MP;
/* fall through */
case SERIO_MP:
if ((data >> 2) & 3) break; /* M++ Wireless Extension packet. */
input_report_key(dev, BTN_MIDDLE, (data >> 5) & 1);
input_report_key(dev, BTN_SIDE, (data >> 4) & 1);
break;
case SERIO_MZP:
case SERIO_MZPP:
input_report_key(dev, BTN_SIDE, (data >> 5) & 1);
/* fall through */
case SERIO_MZ:
input_report_key(dev, BTN_MIDDLE, (data >> 4) & 1);
input_report_rel(dev, REL_WHEEL, (data & 8) - (data & 7));
break;
}
break;
case 4:
case 6: /* MZ++ packet type. We can get these bytes for M++ too but we ignore them later. */
buf[1] = (data >> 2) & 0x0f;
break;
case 5:
case 7: /* Ignore anything besides MZ++ */
if (sermouse->type != SERIO_MZPP)
break;
switch (buf[1]) {
case 1: /* Extra mouse info */
input_report_key(dev, BTN_SIDE, (data >> 4) & 1);
input_report_key(dev, BTN_EXTRA, (data >> 5) & 1);
input_report_rel(dev, data & 0x80 ? REL_HWHEEL : REL_WHEEL, (data & 7) - (data & 8));
break;
default: /* We don't decode anything else yet. */
printk(KERN_WARNING
"sermouse.c: Received MZ++ packet %x, don't know how to handle.\n", buf[1]);
break;
}
break;
}
input_sync(dev);
sermouse->count++;
}
/*
* sermouse_interrupt() handles incoming characters, either gathering them into
* packets or passing them to the command routine as command output.
*/
static irqreturn_t sermouse_interrupt(struct serio *serio,
unsigned char data, unsigned int flags)
{
struct sermouse *sermouse = serio_get_drvdata(serio);
if (time_after(jiffies, sermouse->last + HZ/10))
sermouse->count = 0;
sermouse->last = jiffies;
if (sermouse->type > SERIO_SUN)
sermouse_process_ms(sermouse, data);
else
sermouse_process_msc(sermouse, data);
return IRQ_HANDLED;
}
/*
* sermouse_disconnect() cleans up when we no longer want to talk
* to the mouse.
*/
static void sermouse_disconnect(struct serio *serio)
{
struct sermouse *sermouse = serio_get_drvdata(serio);
serio_close(serio);
serio_set_drvdata(serio, NULL);
input_unregister_device(sermouse->dev);
kfree(sermouse);
}
/*
* sermouse_connect() is a callback from the serio module when
* an unhandled serio port is found.
*/
static int sermouse_connect(struct serio *serio, struct serio_driver *drv)
{
struct sermouse *sermouse;
struct input_dev *input_dev;
unsigned char c = serio->id.extra;
int err = -ENOMEM;
sermouse = kzalloc(sizeof(struct sermouse), GFP_KERNEL);
input_dev = input_allocate_device();
if (!sermouse || !input_dev)
goto fail1;
sermouse->dev = input_dev;
snprintf(sermouse->phys, sizeof(sermouse->phys), "%s/input0", serio->phys);
sermouse->type = serio->id.proto;
input_dev->name = sermouse_protocols[sermouse->type];
input_dev->phys = sermouse->phys;
input_dev->id.bustype = BUS_RS232;
input_dev->id.vendor = sermouse->type;
input_dev->id.product = c;
input_dev->id.version = 0x0100;
input_dev->dev.parent = &serio->dev;
input_dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_REL);
input_dev->keybit[BIT_WORD(BTN_MOUSE)] = BIT_MASK(BTN_LEFT) |
BIT_MASK(BTN_RIGHT);
input_dev->relbit[0] = BIT_MASK(REL_X) | BIT_MASK(REL_Y);
if (c & 0x01) set_bit(BTN_MIDDLE, input_dev->keybit);
if (c & 0x02) set_bit(BTN_SIDE, input_dev->keybit);
if (c & 0x04) set_bit(BTN_EXTRA, input_dev->keybit);
if (c & 0x10) set_bit(REL_WHEEL, input_dev->relbit);
if (c & 0x20) set_bit(REL_HWHEEL, input_dev->relbit);
serio_set_drvdata(serio, sermouse);
err = serio_open(serio, drv);
if (err)
goto fail2;
err = input_register_device(sermouse->dev);
if (err)
goto fail3;
return 0;
fail3: serio_close(serio);
fail2: serio_set_drvdata(serio, NULL);
fail1: input_free_device(input_dev);
kfree(sermouse);
return err;
}
static struct serio_device_id sermouse_serio_ids[] = {
{
.type = SERIO_RS232,
.proto = SERIO_MSC,
.id = SERIO_ANY,
.extra = SERIO_ANY,
},
{
.type = SERIO_RS232,
.proto = SERIO_SUN,
.id = SERIO_ANY,
.extra = SERIO_ANY,
},
{
.type = SERIO_RS232,
.proto = SERIO_MS,
.id = SERIO_ANY,
.extra = SERIO_ANY,
},
{
.type = SERIO_RS232,
.proto = SERIO_MP,
.id = SERIO_ANY,
.extra = SERIO_ANY,
},
{
.type = SERIO_RS232,
.proto = SERIO_MZ,
.id = SERIO_ANY,
.extra = SERIO_ANY,
},
{
.type = SERIO_RS232,
.proto = SERIO_MZP,
.id = SERIO_ANY,
.extra = SERIO_ANY,
},
{
.type = SERIO_RS232,
.proto = SERIO_MZPP,
.id = SERIO_ANY,
.extra = SERIO_ANY,
},
{ 0 }
};
MODULE_DEVICE_TABLE(serio, sermouse_serio_ids);
static struct serio_driver sermouse_drv = {
.driver = {
.name = "sermouse",
},
.description = DRIVER_DESC,
.id_table = sermouse_serio_ids,
.interrupt = sermouse_interrupt,
.connect = sermouse_connect,
.disconnect = sermouse_disconnect,
};
static int __init sermouse_init(void)
{
return serio_register_driver(&sermouse_drv);
}
static void __exit sermouse_exit(void)
{
serio_unregister_driver(&sermouse_drv);
}
module_init(sermouse_init);
module_exit(sermouse_exit);
| gpl-2.0 |
invisiblek/android_kernel_oneplus_msm8974 | drivers/input/joystick/twidjoy.c | 9893 | 7696 | /*
* Copyright (c) 2001 Arndt Schoenewald
* Copyright (c) 2000-2001 Vojtech Pavlik
* Copyright (c) 2000 Mark Fletcher
*
* Sponsored by Quelltext AG (http://www.quelltext-ag.de), Dortmund, Germany
*/
/*
* Driver to use Handykey's Twiddler (the first edition, i.e. the one with
* the RS232 interface) as a joystick under Linux
*
* The Twiddler is a one-handed chording keyboard featuring twelve buttons on
* the front, six buttons on the top, and a built-in tilt sensor. The buttons
* on the front, which are grouped as four rows of three buttons, are pressed
* by the four fingers (this implies only one button per row can be held down
* at the same time) and the buttons on the top are for the thumb. The tilt
* sensor delivers X and Y axis data depending on how the Twiddler is held.
* Additional information can be found at http://www.handykey.com.
*
* This driver does not use the Twiddler for its intended purpose, i.e. as
* a chording keyboard, but as a joystick: pressing and releasing a button
* immediately sends a corresponding button event, and tilting it generates
* corresponding ABS_X and ABS_Y events. This turns the Twiddler into a game
* controller with amazing 18 buttons :-)
*
* Note: The Twiddler2 (the successor of the Twiddler that connects directly
* to the PS/2 keyboard and mouse ports) is NOT supported by this driver!
*
* For questions or feedback regarding this driver module please contact:
* Arndt Schoenewald <arndt@quelltext.com>
*/
/*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/input.h>
#include <linux/serio.h>
#include <linux/init.h>
#define DRIVER_DESC "Handykey Twiddler keyboard as a joystick driver"
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");
/*
* Constants.
*/
#define TWIDJOY_MAX_LENGTH 5
static struct twidjoy_button_spec {
int bitshift;
int bitmask;
int buttons[3];
}
twidjoy_buttons[] = {
{ 0, 3, { BTN_A, BTN_B, BTN_C } },
{ 2, 3, { BTN_X, BTN_Y, BTN_Z } },
{ 4, 3, { BTN_TL, BTN_TR, BTN_TR2 } },
{ 6, 3, { BTN_SELECT, BTN_START, BTN_MODE } },
{ 8, 1, { BTN_BASE5 } },
{ 9, 1, { BTN_BASE } },
{ 10, 1, { BTN_BASE3 } },
{ 11, 1, { BTN_BASE4 } },
{ 12, 1, { BTN_BASE2 } },
{ 13, 1, { BTN_BASE6 } },
{ 0, 0, { 0 } }
};
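/*
 * Decoding sketch, derived from how twidjoy_process_packet() below uses
 * bitshift/bitmask: an entry with bitmask 3 covers a 2-bit field where
 * value 0 means "no button in this row" and 1..3 select one of the three
 * buttons. E.g. button_bits == 0x0005 yields value 1 in the first row
 * (BTN_A down) and value 1 in the second row (BTN_X down).
 */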
/*
* Per-Twiddler data.
*/
struct twidjoy {
struct input_dev *dev;
int idx;
unsigned char data[TWIDJOY_MAX_LENGTH];
char phys[32];
};
/*
* twidjoy_process_packet() decodes packets the driver receives from the
* Twiddler. It updates the input device state accordingly.
*/
static void twidjoy_process_packet(struct twidjoy *twidjoy)
{
struct input_dev *dev = twidjoy->dev;
unsigned char *data = twidjoy->data;
struct twidjoy_button_spec *bp;
int button_bits, abs_x, abs_y;
button_bits = ((data[1] & 0x7f) << 7) | (data[0] & 0x7f);
for (bp = twidjoy_buttons; bp->bitmask; bp++) {
int value = (button_bits & (bp->bitmask << bp->bitshift)) >> bp->bitshift;
int i;
for (i = 0; i < bp->bitmask; i++)
input_report_key(dev, bp->buttons[i], i+1 == value);
}
abs_x = ((data[4] & 0x07) << 5) | ((data[3] & 0x7C) >> 2);
if (data[4] & 0x08) abs_x -= 256;
abs_y = ((data[3] & 0x01) << 7) | ((data[2] & 0x7F) >> 0);
if (data[3] & 0x02) abs_y -= 256;
input_report_abs(dev, ABS_X, -abs_x);
input_report_abs(dev, ABS_Y, +abs_y);
input_sync(dev);
}
/*
* twidjoy_interrupt() is called by the low level driver when characters
* are ready for us. We then buffer them for further processing, or call the
* packet processing routine.
*/
static irqreturn_t twidjoy_interrupt(struct serio *serio, unsigned char data, unsigned int flags)
{
struct twidjoy *twidjoy = serio_get_drvdata(serio);
/* All Twiddler packets are 5 bytes. The fact that the first byte
* has a MSB of 0 and all other bytes have a MSB of 1 can be used
* to check and regain sync. */
if ((data & 0x80) == 0)
twidjoy->idx = 0; /* this byte starts a new packet */
else if (twidjoy->idx == 0)
return IRQ_HANDLED; /* wrong MSB -- ignore this byte */
if (twidjoy->idx < TWIDJOY_MAX_LENGTH)
twidjoy->data[twidjoy->idx++] = data;
if (twidjoy->idx == TWIDJOY_MAX_LENGTH) {
twidjoy_process_packet(twidjoy);
twidjoy->idx = 0;
}
return IRQ_HANDLED;
}
/*
* twidjoy_disconnect() is the opposite of twidjoy_connect()
*/
static void twidjoy_disconnect(struct serio *serio)
{
struct twidjoy *twidjoy = serio_get_drvdata(serio);
serio_close(serio);
serio_set_drvdata(serio, NULL);
input_unregister_device(twidjoy->dev);
kfree(twidjoy);
}
/*
* twidjoy_connect() is the routine that is called when someone adds a
* new serio device. It looks for the Twiddler, and if found, registers
* it as an input device.
*/
static int twidjoy_connect(struct serio *serio, struct serio_driver *drv)
{
struct twidjoy_button_spec *bp;
struct twidjoy *twidjoy;
struct input_dev *input_dev;
int err = -ENOMEM;
int i;
twidjoy = kzalloc(sizeof(struct twidjoy), GFP_KERNEL);
input_dev = input_allocate_device();
if (!twidjoy || !input_dev)
goto fail1;
twidjoy->dev = input_dev;
snprintf(twidjoy->phys, sizeof(twidjoy->phys), "%s/input0", serio->phys);
input_dev->name = "Handykey Twiddler";
input_dev->phys = twidjoy->phys;
input_dev->id.bustype = BUS_RS232;
input_dev->id.vendor = SERIO_TWIDJOY;
input_dev->id.product = 0x0001;
input_dev->id.version = 0x0100;
input_dev->dev.parent = &serio->dev;
input_dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS);
input_set_abs_params(input_dev, ABS_X, -50, 50, 4, 4);
input_set_abs_params(input_dev, ABS_Y, -50, 50, 4, 4);
for (bp = twidjoy_buttons; bp->bitmask; bp++)
for (i = 0; i < bp->bitmask; i++)
set_bit(bp->buttons[i], input_dev->keybit);
serio_set_drvdata(serio, twidjoy);
err = serio_open(serio, drv);
if (err)
goto fail2;
err = input_register_device(twidjoy->dev);
if (err)
goto fail3;
return 0;
fail3: serio_close(serio);
fail2: serio_set_drvdata(serio, NULL);
fail1: input_free_device(input_dev);
kfree(twidjoy);
return err;
}
/*
* The serio driver structure.
*/
static struct serio_device_id twidjoy_serio_ids[] = {
{
.type = SERIO_RS232,
.proto = SERIO_TWIDJOY,
.id = SERIO_ANY,
.extra = SERIO_ANY,
},
{ 0 }
};
MODULE_DEVICE_TABLE(serio, twidjoy_serio_ids);
static struct serio_driver twidjoy_drv = {
.driver = {
.name = "twidjoy",
},
.description = DRIVER_DESC,
.id_table = twidjoy_serio_ids,
.interrupt = twidjoy_interrupt,
.connect = twidjoy_connect,
.disconnect = twidjoy_disconnect,
};
/*
* The functions for inserting/removing us as a module.
*/
static int __init twidjoy_init(void)
{
return serio_register_driver(&twidjoy_drv);
}
static void __exit twidjoy_exit(void)
{
serio_unregister_driver(&twidjoy_drv);
}
module_init(twidjoy_init);
module_exit(twidjoy_exit);
| gpl-2.0 |
MoKee/android_kernel_htc_flounder | drivers/ptp/ptp_sysfs.c | 10405 | 5915 | /*
* PTP 1588 clock support - sysfs interface.
*
* Copyright (C) 2010 OMICRON electronics GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/capability.h>
#include "ptp_private.h"
static ssize_t clock_name_show(struct device *dev,
struct device_attribute *attr, char *page)
{
struct ptp_clock *ptp = dev_get_drvdata(dev);
return snprintf(page, PAGE_SIZE-1, "%s\n", ptp->info->name);
}
#define PTP_SHOW_INT(name) \
static ssize_t name##_show(struct device *dev, \
struct device_attribute *attr, char *page) \
{ \
struct ptp_clock *ptp = dev_get_drvdata(dev); \
return snprintf(page, PAGE_SIZE-1, "%d\n", ptp->info->name); \
}
PTP_SHOW_INT(max_adj);
PTP_SHOW_INT(n_alarm);
PTP_SHOW_INT(n_ext_ts);
PTP_SHOW_INT(n_per_out);
PTP_SHOW_INT(pps);
#define PTP_RO_ATTR(_var, _name) { \
.attr = { .name = __stringify(_name), .mode = 0444 }, \
.show = _var##_show, \
}
struct device_attribute ptp_dev_attrs[] = {
PTP_RO_ATTR(clock_name, clock_name),
PTP_RO_ATTR(max_adj, max_adjustment),
PTP_RO_ATTR(n_alarm, n_alarms),
PTP_RO_ATTR(n_ext_ts, n_external_timestamps),
PTP_RO_ATTR(n_per_out, n_periodic_outputs),
PTP_RO_ATTR(pps, pps_available),
__ATTR_NULL,
};
static ssize_t extts_enable_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
struct ptp_clock *ptp = dev_get_drvdata(dev);
struct ptp_clock_info *ops = ptp->info;
struct ptp_clock_request req = { .type = PTP_CLK_REQ_EXTTS };
int cnt, enable;
int err = -EINVAL;
cnt = sscanf(buf, "%u %d", &req.extts.index, &enable);
if (cnt != 2)
goto out;
if (req.extts.index >= ops->n_ext_ts)
goto out;
err = ops->enable(ops, &req, enable ? 1 : 0);
if (err)
goto out;
return count;
out:
return err;
}
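/*
 * Example usage (a sketch, assuming the usual /sys/class/ptp/ptpN layout;
 * the two fields match the sscanf above): enable external timestamping
 * on channel 0 with
 *
 * echo "0 1" > /sys/class/ptp/ptp0/extts_enable
 */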
static ssize_t extts_fifo_show(struct device *dev,
struct device_attribute *attr, char *page)
{
struct ptp_clock *ptp = dev_get_drvdata(dev);
struct timestamp_event_queue *queue = &ptp->tsevq;
struct ptp_extts_event event;
unsigned long flags;
size_t qcnt;
int cnt = 0;
memset(&event, 0, sizeof(event));
if (mutex_lock_interruptible(&ptp->tsevq_mux))
return -ERESTARTSYS;
spin_lock_irqsave(&queue->lock, flags);
qcnt = queue_cnt(queue);
if (qcnt) {
event = queue->buf[queue->head];
queue->head = (queue->head + 1) % PTP_MAX_TIMESTAMPS;
}
spin_unlock_irqrestore(&queue->lock, flags);
if (!qcnt)
goto out;
cnt = snprintf(page, PAGE_SIZE, "%u %lld %u\n",
event.index, event.t.sec, event.t.nsec);
out:
mutex_unlock(&ptp->tsevq_mux);
return cnt;
}
static ssize_t period_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
struct ptp_clock *ptp = dev_get_drvdata(dev);
struct ptp_clock_info *ops = ptp->info;
struct ptp_clock_request req = { .type = PTP_CLK_REQ_PEROUT };
int cnt, enable, err = -EINVAL;
cnt = sscanf(buf, "%u %lld %u %lld %u", &req.perout.index,
&req.perout.start.sec, &req.perout.start.nsec,
&req.perout.period.sec, &req.perout.period.nsec);
if (cnt != 5)
goto out;
if (req.perout.index >= ops->n_per_out)
goto out;
enable = req.perout.period.sec || req.perout.period.nsec;
err = ops->enable(ops, &req, enable);
if (err)
goto out;
return count;
out:
return err;
}
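/*
 * Example usage (a sketch, fields as in the five-field sscanf above:
 * index, start.sec, start.nsec, period.sec, period.nsec): a 1 Hz
 * periodic output on channel 0 starting at second 100 would be
 *
 * echo "0 100 0 1 0" > /sys/class/ptp/ptp0/period
 */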
static ssize_t pps_enable_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
struct ptp_clock *ptp = dev_get_drvdata(dev);
struct ptp_clock_info *ops = ptp->info;
struct ptp_clock_request req = { .type = PTP_CLK_REQ_PPS };
int cnt, enable;
int err = -EINVAL;
if (!capable(CAP_SYS_TIME))
return -EPERM;
cnt = sscanf(buf, "%d", &enable);
if (cnt != 1)
goto out;
err = ops->enable(ops, &req, enable ? 1 : 0);
if (err)
goto out;
return count;
out:
return err;
}
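/*
 * Example usage (a sketch): any non-zero value enables PPS event
 * delivery, and the writer needs CAP_SYS_TIME as checked above:
 *
 * echo 1 > /sys/class/ptp/ptp0/pps_enable
 */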
static DEVICE_ATTR(extts_enable, 0220, NULL, extts_enable_store);
static DEVICE_ATTR(fifo, 0444, extts_fifo_show, NULL);
static DEVICE_ATTR(period, 0220, NULL, period_store);
static DEVICE_ATTR(pps_enable, 0220, NULL, pps_enable_store);
int ptp_cleanup_sysfs(struct ptp_clock *ptp)
{
struct device *dev = ptp->dev;
struct ptp_clock_info *info = ptp->info;
if (info->n_ext_ts) {
device_remove_file(dev, &dev_attr_extts_enable);
device_remove_file(dev, &dev_attr_fifo);
}
if (info->n_per_out)
device_remove_file(dev, &dev_attr_period);
if (info->pps)
device_remove_file(dev, &dev_attr_pps_enable);
return 0;
}
int ptp_populate_sysfs(struct ptp_clock *ptp)
{
struct device *dev = ptp->dev;
struct ptp_clock_info *info = ptp->info;
int err;
if (info->n_ext_ts) {
err = device_create_file(dev, &dev_attr_extts_enable);
if (err)
goto out1;
err = device_create_file(dev, &dev_attr_fifo);
if (err)
goto out2;
}
if (info->n_per_out) {
err = device_create_file(dev, &dev_attr_period);
if (err)
goto out3;
}
if (info->pps) {
err = device_create_file(dev, &dev_attr_pps_enable);
if (err)
goto out4;
}
return 0;
out4:
if (info->n_per_out)
device_remove_file(dev, &dev_attr_period);
out3:
if (info->n_ext_ts)
device_remove_file(dev, &dev_attr_fifo);
out2:
if (info->n_ext_ts)
device_remove_file(dev, &dev_attr_extts_enable);
out1:
return err;
}
| gpl-2.0 |
chaostic/Hima-M9 | tools/perf/util/run-command.c | 12197 | 4121 | #include "cache.h"
#include "run-command.h"
#include "exec_cmd.h"
static inline void close_pair(int fd[2])
{
close(fd[0]);
close(fd[1]);
}
static inline void dup_devnull(int to)
{
int fd = open("/dev/null", O_RDWR);
dup2(fd, to);
close(fd);
}
int start_command(struct child_process *cmd)
{
int need_in, need_out, need_err;
int fdin[2], fdout[2], fderr[2];
/*
* In case of errors we must keep the promise to close FDs
* that have been passed in via ->in and ->out.
*/
need_in = !cmd->no_stdin && cmd->in < 0;
if (need_in) {
if (pipe(fdin) < 0) {
if (cmd->out > 0)
close(cmd->out);
return -ERR_RUN_COMMAND_PIPE;
}
cmd->in = fdin[1];
}
need_out = !cmd->no_stdout
&& !cmd->stdout_to_stderr
&& cmd->out < 0;
if (need_out) {
if (pipe(fdout) < 0) {
if (need_in)
close_pair(fdin);
else if (cmd->in)
close(cmd->in);
return -ERR_RUN_COMMAND_PIPE;
}
cmd->out = fdout[0];
}
need_err = !cmd->no_stderr && cmd->err < 0;
if (need_err) {
if (pipe(fderr) < 0) {
if (need_in)
close_pair(fdin);
else if (cmd->in)
close(cmd->in);
if (need_out)
close_pair(fdout);
else if (cmd->out)
close(cmd->out);
return -ERR_RUN_COMMAND_PIPE;
}
cmd->err = fderr[0];
}
fflush(NULL);
cmd->pid = fork();
if (!cmd->pid) {
if (cmd->no_stdin)
dup_devnull(0);
else if (need_in) {
dup2(fdin[0], 0);
close_pair(fdin);
} else if (cmd->in) {
dup2(cmd->in, 0);
close(cmd->in);
}
if (cmd->no_stderr)
dup_devnull(2);
else if (need_err) {
dup2(fderr[1], 2);
close_pair(fderr);
}
if (cmd->no_stdout)
dup_devnull(1);
else if (cmd->stdout_to_stderr)
dup2(2, 1);
else if (need_out) {
dup2(fdout[1], 1);
close_pair(fdout);
} else if (cmd->out > 1) {
dup2(cmd->out, 1);
close(cmd->out);
}
if (cmd->dir && chdir(cmd->dir))
die("exec %s: cd to %s failed (%s)", cmd->argv[0],
cmd->dir, strerror(errno));
if (cmd->env) {
for (; *cmd->env; cmd->env++) {
if (strchr(*cmd->env, '='))
putenv((char*)*cmd->env);
else
unsetenv(*cmd->env);
}
}
if (cmd->preexec_cb)
cmd->preexec_cb();
if (cmd->perf_cmd) {
execv_perf_cmd(cmd->argv);
} else {
execvp(cmd->argv[0], (char *const*) cmd->argv);
}
exit(127);
}
if (cmd->pid < 0) {
int err = errno;
if (need_in)
close_pair(fdin);
else if (cmd->in)
close(cmd->in);
if (need_out)
close_pair(fdout);
else if (cmd->out)
close(cmd->out);
if (need_err)
close_pair(fderr);
return err == ENOENT ?
-ERR_RUN_COMMAND_EXEC :
-ERR_RUN_COMMAND_FORK;
}
if (need_in)
close(fdin[0]);
else if (cmd->in)
close(cmd->in);
if (need_out)
close(fdout[1]);
else if (cmd->out)
close(cmd->out);
if (need_err)
close(fderr[1]);
return 0;
}
static int wait_or_whine(pid_t pid)
{
for (;;) {
int status, code;
pid_t waiting = waitpid(pid, &status, 0);
if (waiting < 0) {
if (errno == EINTR)
continue;
error("waitpid failed (%s)", strerror(errno));
return -ERR_RUN_COMMAND_WAITPID;
}
if (waiting != pid)
return -ERR_RUN_COMMAND_WAITPID_WRONG_PID;
if (WIFSIGNALED(status))
return -ERR_RUN_COMMAND_WAITPID_SIGNAL;
if (!WIFEXITED(status))
return -ERR_RUN_COMMAND_WAITPID_NOEXIT;
code = WEXITSTATUS(status);
switch (code) {
case 127:
return -ERR_RUN_COMMAND_EXEC;
case 0:
return 0;
default:
return -code;
}
}
}
int finish_command(struct child_process *cmd)
{
return wait_or_whine(cmd->pid);
}
int run_command(struct child_process *cmd)
{
int code = start_command(cmd);
if (code)
return code;
return finish_command(cmd);
}
static void prepare_run_command_v_opt(struct child_process *cmd,
const char **argv,
int opt)
{
memset(cmd, 0, sizeof(*cmd));
cmd->argv = argv;
cmd->no_stdin = opt & RUN_COMMAND_NO_STDIN ? 1 : 0;
cmd->perf_cmd = opt & RUN_PERF_CMD ? 1 : 0;
cmd->stdout_to_stderr = opt & RUN_COMMAND_STDOUT_TO_STDERR ? 1 : 0;
}
int run_command_v_opt(const char **argv, int opt)
{
struct child_process cmd;
prepare_run_command_v_opt(&cmd, argv, opt);
return run_command(&cmd);
}
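/*
 * Usage sketch (hypothetical caller, built only from the helpers above):
 *
 * const char *argv[] = { "uname", "-r", NULL };
 * int err = run_command_v_opt(argv, RUN_COMMAND_NO_STDIN);
 *
 * Zero means the child ran and exited with status 0; failures come back
 * as one of the negative -ERR_RUN_COMMAND_* codes or the negated exit code.
 */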
| gpl-2.0 |
stargo/android_kernel_amazon_ford | drivers/ata/pata_isapnp.c | 12197 | 3469 |
/*
* pata-isapnp.c - ISA PnP PATA controller driver.
* Copyright 2005/2006 Red Hat Inc, all rights reserved.
*
* Based in part on ide-pnp.c by Andrey Panin <pazke@donpac.ru>
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/isapnp.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <scsi/scsi_host.h>
#include <linux/ata.h>
#include <linux/libata.h>
#define DRV_NAME "pata_isapnp"
#define DRV_VERSION "0.2.5"
static struct scsi_host_template isapnp_sht = {
ATA_PIO_SHT(DRV_NAME),
};
static struct ata_port_operations isapnp_port_ops = {
.inherits = &ata_sff_port_ops,
.cable_detect = ata_cable_40wire,
};
static struct ata_port_operations isapnp_noalt_port_ops = {
.inherits = &ata_sff_port_ops,
.cable_detect = ata_cable_40wire,
/* No altstatus so we don't want to use the lost interrupt poll */
.lost_interrupt = ATA_OP_NULL,
};
/**
* isapnp_init_one - attach an isapnp interface
* @idev: PnP device
* @dev_id: matching detect line
*
* Register an ISA bus IDE interface. Such interfaces are PIO 0 and
* use a non-shared IRQ.
*/
static int isapnp_init_one(struct pnp_dev *idev, const struct pnp_device_id *dev_id)
{
struct ata_host *host;
struct ata_port *ap;
void __iomem *cmd_addr, *ctl_addr;
int irq = 0;
irq_handler_t handler = NULL;
if (pnp_port_valid(idev, 0) == 0)
return -ENODEV;
if (pnp_irq_valid(idev, 0)) {
irq = pnp_irq(idev, 0);
handler = ata_sff_interrupt;
}
/* allocate host */
host = ata_host_alloc(&idev->dev, 1);
if (!host)
return -ENOMEM;
/* acquire resources and fill host */
cmd_addr = devm_ioport_map(&idev->dev, pnp_port_start(idev, 0), 8);
if (!cmd_addr)
return -ENOMEM;
ap = host->ports[0];
ap->ops = &isapnp_noalt_port_ops;
ap->pio_mask = ATA_PIO0;
ap->flags |= ATA_FLAG_SLAVE_POSS;
ap->ioaddr.cmd_addr = cmd_addr;
/* Only set up the control port if PnP actually reports one */
if (pnp_port_valid(idev, 1)) {
ctl_addr = devm_ioport_map(&idev->dev,
pnp_port_start(idev, 1), 1);
ap->ioaddr.altstatus_addr = ctl_addr;
ap->ioaddr.ctl_addr = ctl_addr;
ap->ops = &isapnp_port_ops;
}
ata_sff_std_ports(&ap->ioaddr);
ata_port_desc(ap, "cmd 0x%llx ctl 0x%llx",
(unsigned long long)pnp_port_start(idev, 0),
(unsigned long long)pnp_port_start(idev, 1));
/* activate */
return ata_host_activate(host, irq, handler, 0,
&isapnp_sht);
}
/**
* isapnp_remove_one - unplug an isapnp interface
* @idev: PnP device
*
* Remove a previously configured PnP ATA port. Called only on module
* unload events as the core does not currently deal with ISAPnP docking.
*/
static void isapnp_remove_one(struct pnp_dev *idev)
{
struct device *dev = &idev->dev;
struct ata_host *host = dev_get_drvdata(dev);
ata_host_detach(host);
}
static struct pnp_device_id isapnp_devices[] = {
/* Generic ESDI/IDE/ATA compatible hard disk controller */
{.id = "PNP0600", .driver_data = 0},
{.id = ""}
};
MODULE_DEVICE_TABLE(pnp, isapnp_devices);
static struct pnp_driver isapnp_driver = {
.name = DRV_NAME,
.id_table = isapnp_devices,
.probe = isapnp_init_one,
.remove = isapnp_remove_one,
};
static int __init isapnp_init(void)
{
return pnp_register_driver(&isapnp_driver);
}
static void __exit isapnp_exit(void)
{
pnp_unregister_driver(&isapnp_driver);
}
MODULE_AUTHOR("Alan Cox");
MODULE_DESCRIPTION("low-level driver for ISA PnP ATA");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
module_init(isapnp_init);
module_exit(isapnp_exit);
| gpl-2.0 |
mina86/linux | drivers/devfreq/devfreq-event.c | 166 | 12305 | /*
* devfreq-event: a framework to provide raw data and events of devfreq devices
*
* Copyright (C) 2015 Samsung Electronics
* Author: Chanwoo Choi <cw00.choi@samsung.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* This driver is based on drivers/devfreq/devfreq.c.
*/
#include <linux/devfreq-event.h>
#include <linux/kernel.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/of.h>
static struct class *devfreq_event_class;
/* The list of all devfreq-event devices */
static LIST_HEAD(devfreq_event_list);
static DEFINE_MUTEX(devfreq_event_list_lock);
#define to_devfreq_event(DEV) container_of(DEV, struct devfreq_event_dev, dev)
/**
* devfreq_event_enable_edev() - Enable the devfreq-event dev and increase
* the enable_count of devfreq-event dev.
* @edev : the devfreq-event device
*
* Note that this function increases the enable_count and enables the
* devfreq-event device. The devfreq-event device should be enabled before
* it is used by a devfreq device.
*/
int devfreq_event_enable_edev(struct devfreq_event_dev *edev)
{
int ret = 0;
if (!edev || !edev->desc)
return -EINVAL;
mutex_lock(&edev->lock);
if (edev->desc->ops && edev->desc->ops->enable
&& edev->enable_count == 0) {
ret = edev->desc->ops->enable(edev);
if (ret < 0)
goto err;
}
edev->enable_count++;
err:
mutex_unlock(&edev->lock);
return ret;
}
EXPORT_SYMBOL_GPL(devfreq_event_enable_edev);
/**
* devfreq_event_disable_edev() - Disable the devfreq-event dev and decrease
* the enable_count of the devfreq-event dev.
* @edev : the devfreq-event device
*
* Note that this function decreases the enable_count and disables the
* devfreq-event device. After the devfreq-event device is disabled,
* a devfreq device can't use the devfreq-event device for get/set/reset
* operations.
*/
int devfreq_event_disable_edev(struct devfreq_event_dev *edev)
{
int ret = 0;
if (!edev || !edev->desc)
return -EINVAL;
mutex_lock(&edev->lock);
if (edev->enable_count <= 0) {
dev_warn(&edev->dev, "unbalanced enable_count\n");
ret = -EIO;
goto err;
}
if (edev->desc->ops && edev->desc->ops->disable
&& edev->enable_count == 1) {
ret = edev->desc->ops->disable(edev);
if (ret < 0)
goto err;
}
edev->enable_count--;
err:
mutex_unlock(&edev->lock);
return ret;
}
EXPORT_SYMBOL_GPL(devfreq_event_disable_edev);
/**
* devfreq_event_is_enabled() - Check whether devfreq-event dev is enabled or
* not.
* @edev : the devfreq-event device
*
* Note that this function checks whether the devfreq-event dev is enabled or
* not: it returns true if the devfreq-event dev is enabled and false if it
* is disabled.
*/
bool devfreq_event_is_enabled(struct devfreq_event_dev *edev)
{
bool enabled = false;
if (!edev || !edev->desc)
return enabled;
mutex_lock(&edev->lock);
if (edev->enable_count > 0)
enabled = true;
mutex_unlock(&edev->lock);
return enabled;
}
EXPORT_SYMBOL_GPL(devfreq_event_is_enabled);
/**
* devfreq_event_set_event() - Set event to devfreq-event dev to start.
* @edev : the devfreq-event device
*
* Note that this function sets the event on the devfreq-event device to start
* gathering the event data, which may be of various event types.
*/
int devfreq_event_set_event(struct devfreq_event_dev *edev)
{
int ret;
if (!edev || !edev->desc)
return -EINVAL;
if (!edev->desc->ops || !edev->desc->ops->set_event)
return -EINVAL;
if (!devfreq_event_is_enabled(edev))
return -EPERM;
mutex_lock(&edev->lock);
ret = edev->desc->ops->set_event(edev);
mutex_unlock(&edev->lock);
return ret;
}
EXPORT_SYMBOL_GPL(devfreq_event_set_event);
/**
* devfreq_event_get_event() - Get {load|total}_count from devfreq-event dev.
* @edev : the devfreq-event device
* @edata : the calculated data of devfreq-event device
*
* Note that this function gets the calculated event data from the
* devfreq-event dev after stopping the whole measurement sequence.
*/
int devfreq_event_get_event(struct devfreq_event_dev *edev,
struct devfreq_event_data *edata)
{
int ret;
if (!edev || !edev->desc)
return -EINVAL;
if (!edev->desc->ops || !edev->desc->ops->get_event)
return -EINVAL;
if (!devfreq_event_is_enabled(edev))
return -EINVAL;
edata->total_count = edata->load_count = 0;
mutex_lock(&edev->lock);
ret = edev->desc->ops->get_event(edev, edata);
if (ret < 0)
edata->total_count = edata->load_count = 0;
mutex_unlock(&edev->lock);
return ret;
}
EXPORT_SYMBOL_GPL(devfreq_event_get_event);
/**
* devfreq_event_reset_event() - Reset all operations of the devfreq-event dev.
* @edev : the devfreq-event device
*
* Note that this function stops all operations of the devfreq-event dev and
* resets the current event data, returning the devfreq-event device to its
* initial state.
*/
int devfreq_event_reset_event(struct devfreq_event_dev *edev)
{
int ret = 0;
if (!edev || !edev->desc)
return -EINVAL;
if (!devfreq_event_is_enabled(edev))
return -EPERM;
mutex_lock(&edev->lock);
if (edev->desc->ops && edev->desc->ops->reset)
ret = edev->desc->ops->reset(edev);
mutex_unlock(&edev->lock);
return ret;
}
EXPORT_SYMBOL_GPL(devfreq_event_reset_event);
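/*
 * Typical consumer flow, as a sketch stitched together from the helpers
 * in this file (error handling elided; not a verbatim in-tree user):
 *
 * struct devfreq_event_data edata;
 * struct devfreq_event_dev *edev;
 *
 * edev = devfreq_event_get_edev_by_phandle(dev, 0);
 * devfreq_event_enable_edev(edev);
 * devfreq_event_set_event(edev);
 * ... let the counters run ...
 * devfreq_event_get_event(edev, &edata);
 * devfreq_event_disable_edev(edev);
 */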
/**
* devfreq_event_get_edev_by_phandle() - Get the devfreq-event dev from
* devicetree.
* @dev : the pointer to the given device
* @index : the index into list of devfreq-event device
*
* Note that this function returns a pointer to the matching devfreq-event
* device.
*/
struct devfreq_event_dev *devfreq_event_get_edev_by_phandle(struct device *dev,
int index)
{
struct device_node *node;
struct devfreq_event_dev *edev;
if (!dev->of_node)
return ERR_PTR(-EINVAL);
node = of_parse_phandle(dev->of_node, "devfreq-events", index);
if (!node)
return ERR_PTR(-ENODEV);
mutex_lock(&devfreq_event_list_lock);
list_for_each_entry(edev, &devfreq_event_list, node) {
if (edev->dev.parent && edev->dev.parent->of_node == node)
goto out;
}
list_for_each_entry(edev, &devfreq_event_list, node) {
if (!strcmp(edev->desc->name, node->name))
goto out;
}
edev = NULL;
out:
mutex_unlock(&devfreq_event_list_lock);
of_node_put(node);
if (!edev)
return ERR_PTR(-ENODEV);
return edev;
}
EXPORT_SYMBOL_GPL(devfreq_event_get_edev_by_phandle);
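/*
* Consumer-side sketch (illustrative; names invented): resolving the first
* "devfreq-events" phandle of a device node and enabling the resulting
* event device.
*/
static int __maybe_unused example_get_first_edev(struct device *dev)
{
struct devfreq_event_dev *edev;
edev = devfreq_event_get_edev_by_phandle(dev, 0);
if (IS_ERR(edev))
return PTR_ERR(edev);
return devfreq_event_enable_edev(edev);
}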
/**
* devfreq_event_get_edev_count() - Get the count of devfreq-event dev
* @dev : the pointer to the given device
*
* Note that this function returns the number of devfreq-event devices
* referenced by the given device.
*/
int devfreq_event_get_edev_count(struct device *dev)
{
int count;
if (!dev->of_node) {
dev_err(dev, "device does not have a device node entry\n");
return -EINVAL;
}
count = of_property_count_elems_of_size(dev->of_node, "devfreq-events",
sizeof(u32));
if (count < 0) {
dev_err(dev,
"failed to get the count of devfreq-event in %s node\n",
dev->of_node->full_name);
return count;
}
return count;
}
EXPORT_SYMBOL_GPL(devfreq_event_get_edev_count);
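/*
* Illustrative sketch (names invented): combining the count helper above with
* the phandle lookup to enable every event device listed in a consumer's
* "devfreq-events" property.
*/
static int __maybe_unused example_enable_all_edevs(struct device *dev)
{
int i, ret, count = devfreq_event_get_edev_count(dev);
if (count < 0)
return count;
for (i = 0; i < count; i++) {
struct devfreq_event_dev *edev =
devfreq_event_get_edev_by_phandle(dev, i);
if (IS_ERR(edev))
return PTR_ERR(edev);
ret = devfreq_event_enable_edev(edev);
if (ret < 0)
return ret;
}
return 0;
}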
static void devfreq_event_release_edev(struct device *dev)
{
struct devfreq_event_dev *edev = to_devfreq_event(dev);
kfree(edev);
}
/**
* devfreq_event_add_edev() - Add new devfreq-event device.
* @dev : the device owning the devfreq-event device being created
* @desc : the devfreq-event device's descriptor which includes essential
* data for devfreq-event device.
*
* Note that this function adds a new devfreq-event device to the
* devfreq-event class list and registers its struct device.
*/
struct devfreq_event_dev *devfreq_event_add_edev(struct device *dev,
struct devfreq_event_desc *desc)
{
struct devfreq_event_dev *edev;
static atomic_t event_no = ATOMIC_INIT(0);
int ret;
if (!dev || !desc)
return ERR_PTR(-EINVAL);
if (!desc->name || !desc->ops)
return ERR_PTR(-EINVAL);
if (!desc->ops->set_event || !desc->ops->get_event)
return ERR_PTR(-EINVAL);
edev = kzalloc(sizeof(struct devfreq_event_dev), GFP_KERNEL);
if (!edev)
return ERR_PTR(-ENOMEM);
mutex_init(&edev->lock);
edev->desc = desc;
edev->enable_count = 0;
edev->dev.parent = dev;
edev->dev.class = devfreq_event_class;
edev->dev.release = devfreq_event_release_edev;
dev_set_name(&edev->dev, "event.%d", atomic_inc_return(&event_no) - 1);
ret = device_register(&edev->dev);
if (ret < 0) {
put_device(&edev->dev);
return ERR_PTR(ret);
}
dev_set_drvdata(&edev->dev, edev);
INIT_LIST_HEAD(&edev->node);
mutex_lock(&devfreq_event_list_lock);
list_add(&edev->node, &devfreq_event_list);
mutex_unlock(&devfreq_event_list_lock);
return edev;
}
EXPORT_SYMBOL_GPL(devfreq_event_add_edev);
/**
* devfreq_event_remove_edev() - Remove the devfreq-event device registered.
* @dev : the devfreq-event device
*
* Note that this function removes the registered devfreq-event device.
*/
int devfreq_event_remove_edev(struct devfreq_event_dev *edev)
{
if (!edev)
return -EINVAL;
WARN_ON(edev->enable_count);
mutex_lock(&devfreq_event_list_lock);
list_del(&edev->node);
mutex_unlock(&devfreq_event_list_lock);
device_unregister(&edev->dev);
return 0;
}
EXPORT_SYMBOL_GPL(devfreq_event_remove_edev);
static int devm_devfreq_event_match(struct device *dev, void *res, void *data)
{
struct devfreq_event_dev **r = res;
if (WARN_ON(!r || !*r))
return 0;
return *r == data;
}
static void devm_devfreq_event_release(struct device *dev, void *res)
{
devfreq_event_remove_edev(*(struct devfreq_event_dev **)res);
}
/**
* devm_devfreq_event_add_edev() - Resource-managed devfreq_event_add_edev()
* @dev : the device owning the devfreq-event device being created
* @desc : the devfreq-event device's descriptor which includes essential
* data for devfreq-event device.
*
* Note that this function automatically manages the memory of the
* devfreq-event device using device resource management, which simplifies
* freeing the devfreq-event device.
*/
struct devfreq_event_dev *devm_devfreq_event_add_edev(struct device *dev,
struct devfreq_event_desc *desc)
{
struct devfreq_event_dev **ptr, *edev;
ptr = devres_alloc(devm_devfreq_event_release, sizeof(*ptr),
GFP_KERNEL);
if (!ptr)
return ERR_PTR(-ENOMEM);
edev = devfreq_event_add_edev(dev, desc);
if (IS_ERR(edev)) {
devres_free(ptr);
/* propagate the real error instead of masking it as -ENOMEM */
return ERR_CAST(edev);
}
*ptr = edev;
devres_add(dev, ptr);
return edev;
}
EXPORT_SYMBOL_GPL(devm_devfreq_event_add_edev);
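/*
* Provider-side sketch (illustrative only; every name below is invented):
* a minimal driver registering an event device through the resource-managed
* helper. Real drivers would program counter hardware in these ops; note
* that .set_event and .get_event are mandatory, as enforced by
* devfreq_event_add_edev() above. platform_device assumes
* <linux/platform_device.h>.
*/
static int example_set_event(struct devfreq_event_dev *edev)
{
return 0; /* would start the hardware counters here */
}
static int example_get_event(struct devfreq_event_dev *edev,
struct devfreq_event_data *edata)
{
edata->load_count = 0; /* would read the hardware counters here */
edata->total_count = 1;
return 0;
}
static struct devfreq_event_ops example_ops = {
.set_event = example_set_event,
.get_event = example_get_event,
};
static struct devfreq_event_desc example_desc = {
.name = "example-event",
.ops = &example_ops,
};
static int __maybe_unused example_provider_probe(struct platform_device *pdev)
{
struct devfreq_event_dev *edev;
edev = devm_devfreq_event_add_edev(&pdev->dev, &example_desc);
return PTR_ERR_OR_ZERO(edev);
}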
/**
* devm_devfreq_event_remove_edev()- Resource-managed devfreq_event_remove_edev()
* @dev : the device owning the devfreq-event device being removed
* @edev : the devfreq-event device
*
* Note that this function automatically manages the memory of the
* devfreq-event device using device resource management.
*/
void devm_devfreq_event_remove_edev(struct device *dev,
struct devfreq_event_dev *edev)
{
WARN_ON(devres_release(dev, devm_devfreq_event_release,
devm_devfreq_event_match, edev));
}
EXPORT_SYMBOL_GPL(devm_devfreq_event_remove_edev);
/*
* Device attributes for devfreq-event class.
*/
static ssize_t name_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct devfreq_event_dev *edev = to_devfreq_event(dev);
if (!edev || !edev->desc)
return -EINVAL;
return sprintf(buf, "%s\n", edev->desc->name);
}
static DEVICE_ATTR_RO(name);
static ssize_t enable_count_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct devfreq_event_dev *edev = to_devfreq_event(dev);
if (!edev || !edev->desc)
return -EINVAL;
return sprintf(buf, "%d\n", edev->enable_count);
}
static DEVICE_ATTR_RO(enable_count);
static struct attribute *devfreq_event_attrs[] = {
&dev_attr_name.attr,
&dev_attr_enable_count.attr,
NULL,
};
ATTRIBUTE_GROUPS(devfreq_event);
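/*
* Illustrative note (added): once devfreq_event_init() below registers the
* class, each event device appears under /sys/class/devfreq-event/event.N
* with the read-only "name" and "enable_count" attributes defined above,
* e.g. (device index and value are examples only):
*
* $ cat /sys/class/devfreq-event/event.0/enable_count
* 1
*/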
static int __init devfreq_event_init(void)
{
devfreq_event_class = class_create(THIS_MODULE, "devfreq-event");
if (IS_ERR(devfreq_event_class)) {
pr_err("%s: couldn't create class\n", __FILE__);
return PTR_ERR(devfreq_event_class);
}
devfreq_event_class->dev_groups = devfreq_event_groups;
return 0;
}
subsys_initcall(devfreq_event_init);
| gpl-2.0 |
matthew-l-weber/linux-3-10-rc1-moxart | arch/mips/kernel/crash_dump.c | 166 | 1863 | #include <linux/highmem.h>
#include <linux/bootmem.h>
#include <linux/crash_dump.h>
#include <asm/uaccess.h>
static int __init parse_savemaxmem(char *p)
{
if (p)
saved_max_pfn = (memparse(p, &p) >> PAGE_SHIFT) - 1;
return 1;
}
__setup("savemaxmem=", parse_savemaxmem);
static void *kdump_buf_page;
/**
* copy_oldmem_page - copy one page from "oldmem"
* @pfn: page frame number to be copied
* @buf: target memory address for the copy; this can be in kernel address
* space or user address space (see @userbuf)
* @csize: number of bytes to copy
* @offset: offset in bytes into the page (based on pfn) to begin the copy
* @userbuf: if set, @buf is in user address space, use copy_to_user(),
* otherwise @buf is in kernel address space, use memcpy().
*
* Copy a page from "oldmem". For this page, there is no pte mapped
* in the current kernel.
*
* Calling copy_to_user() in atomic context is not desirable. Hence we first
* copy the data to a pre-allocated kernel page and then copy it to user
* space in non-atomic context.
*/
ssize_t copy_oldmem_page(unsigned long pfn, char *buf,
size_t csize, unsigned long offset, int userbuf)
{
void *vaddr;
if (!csize)
return 0;
vaddr = kmap_atomic_pfn(pfn);
if (!userbuf) {
memcpy(buf, (vaddr + offset), csize);
kunmap_atomic(vaddr);
} else {
if (!kdump_buf_page) {
pr_warning("Kdump: Kdump buffer page not allocated\n");
kunmap_atomic(vaddr); /* don't leak the atomic kmap */
return -EFAULT;
}
copy_page(kdump_buf_page, vaddr);
kunmap_atomic(vaddr);
if (copy_to_user(buf, (kdump_buf_page + offset), csize))
return -EFAULT;
}
return csize;
}
static int __init kdump_buf_page_init(void)
{
int ret = 0;
kdump_buf_page = kmalloc(PAGE_SIZE, GFP_KERNEL);
if (!kdump_buf_page) {
pr_warning("Kdump: Failed to allocate kdump buffer page\n");
ret = -ENOMEM;
}
return ret;
}
arch_initcall(kdump_buf_page_init);
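/*
* Illustrative sketch only (invented helper; not kernel API): how a caller
* such as fs/proc/vmcore might drain one full old-memory page through
* copy_oldmem_page() into a kernel buffer (userbuf = 0).
*/
static ssize_t __maybe_unused example_read_oldmem_page(unsigned long pfn,
char *kbuf)
{
return copy_oldmem_page(pfn, kbuf, PAGE_SIZE, 0, 0);
}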
| gpl-2.0 |
itgb/opwrt_ap | target/linux/ar71xx/files/arch/mips/ath79/mach-tl-wr841n.c | 678 | 3548 | /*
* TP-LINK TL-WR841N/ND v1 board support
*
* Copyright (C) 2009-2012 Gabor Juhos <juhosg@openwrt.org>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
* by the Free Software Foundation.
*/
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>
#include <linux/platform_device.h>
#include <asm/mach-ath79/ath79.h>
#include "dev-dsa.h"
#include "dev-eth.h"
#include "dev-gpio-buttons.h"
#include "dev-leds-gpio.h"
#include "dev-m25p80.h"
#include "machtypes.h"
#include "pci.h"
#define TL_WR841ND_V1_GPIO_LED_SYSTEM 2
#define TL_WR841ND_V1_GPIO_LED_QSS_GREEN 4
#define TL_WR841ND_V1_GPIO_LED_QSS_RED 5
#define TL_WR841ND_V1_GPIO_BTN_RESET 3
#define TL_WR841ND_V1_GPIO_BTN_QSS 7
#define TL_WR841ND_V1_KEYS_POLL_INTERVAL 20 /* msecs */
#define TL_WR841ND_V1_KEYS_DEBOUNCE_INTERVAL \
(3 * TL_WR841ND_V1_KEYS_POLL_INTERVAL)
static struct mtd_partition tl_wr841n_v1_partitions[] = {
{
.name = "redboot",
.offset = 0,
.size = 0x020000,
.mask_flags = MTD_WRITEABLE,
}, {
.name = "kernel",
.offset = 0x020000,
.size = 0x140000,
}, {
.name = "rootfs",
.offset = 0x160000,
.size = 0x280000,
}, {
.name = "config",
.offset = 0x3e0000,
.size = 0x020000,
.mask_flags = MTD_WRITEABLE,
}, {
.name = "firmware",
.offset = 0x020000,
.size = 0x3c0000,
}
};
static struct flash_platform_data tl_wr841n_v1_flash_data = {
.parts = tl_wr841n_v1_partitions,
.nr_parts = ARRAY_SIZE(tl_wr841n_v1_partitions),
};
static struct gpio_led tl_wr841n_v1_leds_gpio[] __initdata = {
{
.name = "tp-link:green:system",
.gpio = TL_WR841ND_V1_GPIO_LED_SYSTEM,
.active_low = 1,
}, {
.name = "tp-link:red:qss",
.gpio = TL_WR841ND_V1_GPIO_LED_QSS_RED,
}, {
.name = "tp-link:green:qss",
.gpio = TL_WR841ND_V1_GPIO_LED_QSS_GREEN,
}
};
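/*
* Illustrative only: a further LED on this board would be exposed by adding
* one more entry of the same shape to the table above, e.g. (the name and
* GPIO line below are invented):
*
* { .name = "tp-link:green:wlan", .gpio = 9, .active_low = 1 },
*/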
static struct gpio_keys_button tl_wr841n_v1_gpio_keys[] __initdata = {
{
.desc = "reset",
.type = EV_KEY,
.code = KEY_RESTART,
.debounce_interval = TL_WR841ND_V1_KEYS_DEBOUNCE_INTERVAL,
.gpio = TL_WR841ND_V1_GPIO_BTN_RESET,
.active_low = 1,
}, {
.desc = "qss",
.type = EV_KEY,
.code = KEY_WPS_BUTTON,
.debounce_interval = TL_WR841ND_V1_KEYS_DEBOUNCE_INTERVAL,
.gpio = TL_WR841ND_V1_GPIO_BTN_QSS,
.active_low = 1,
}
};
static struct dsa_chip_data tl_wr841n_v1_dsa_chip = {
.port_names[0] = "wan",
.port_names[1] = "lan1",
.port_names[2] = "lan2",
.port_names[3] = "lan3",
.port_names[4] = "lan4",
.port_names[5] = "cpu",
};
static struct dsa_platform_data tl_wr841n_v1_dsa_data = {
.nr_chips = 1,
.chip = &tl_wr841n_v1_dsa_chip,
};
static void __init tl_wr841n_v1_setup(void)
{
u8 *mac = (u8 *) KSEG1ADDR(0x1f01fc00);
ath79_register_mdio(0, 0x0);
ath79_init_mac(ath79_eth0_data.mac_addr, mac, 0);
ath79_eth0_data.phy_if_mode = PHY_INTERFACE_MODE_RMII;
ath79_eth0_data.speed = SPEED_100;
ath79_eth0_data.duplex = DUPLEX_FULL;
ath79_register_eth(0);
ath79_register_dsa(&ath79_eth0_device.dev, &ath79_mdio0_device.dev,
&tl_wr841n_v1_dsa_data);
ath79_register_m25p80(&tl_wr841n_v1_flash_data);
ath79_register_leds_gpio(-1, ARRAY_SIZE(tl_wr841n_v1_leds_gpio),
tl_wr841n_v1_leds_gpio);
ath79_register_gpio_keys_polled(-1, TL_WR841ND_V1_KEYS_POLL_INTERVAL,
ARRAY_SIZE(tl_wr841n_v1_gpio_keys),
tl_wr841n_v1_gpio_keys);
ath79_register_pci();
}
MIPS_MACHINE(ATH79_MACH_TL_WR841N_V1, "TL-WR841N-v1.5", "TP-LINK TL-WR841N v1",
tl_wr841n_v1_setup);
| gpl-2.0 |
ArtemTeleshev/raspberrypi-linux | crypto/crypto_null.c | 678 | 4414 | /*
* Cryptographic API.
*
* Null algorithms, aka Much Ado About Nothing.
*
* These are needed for IPsec, and may be useful in general for
* testing & debugging.
*
* The null cipher is compliant with RFC2410.
*
* Copyright (c) 2002 James Morris <jmorris@intercode.com.au>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
*/
#include <crypto/null.h>
#include <crypto/internal/hash.h>
#include <crypto/internal/skcipher.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/string.h>
static int null_compress(struct crypto_tfm *tfm, const u8 *src,
unsigned int slen, u8 *dst, unsigned int *dlen)
{
if (slen > *dlen)
return -EINVAL;
memcpy(dst, src, slen);
*dlen = slen;
return 0;
}
static int null_init(struct shash_desc *desc)
{
return 0;
}
static int null_update(struct shash_desc *desc, const u8 *data,
unsigned int len)
{
return 0;
}
static int null_final(struct shash_desc *desc, u8 *out)
{
return 0;
}
static int null_digest(struct shash_desc *desc, const u8 *data,
unsigned int len, u8 *out)
{
return 0;
}
static int null_hash_setkey(struct crypto_shash *tfm, const u8 *key,
unsigned int keylen)
{ return 0; }
static int null_setkey(struct crypto_tfm *tfm, const u8 *key,
unsigned int keylen)
{ return 0; }
static void null_crypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
memcpy(dst, src, NULL_BLOCK_SIZE);
}
static int skcipher_null_crypt(struct blkcipher_desc *desc,
struct scatterlist *dst,
struct scatterlist *src, unsigned int nbytes)
{
struct blkcipher_walk walk;
int err;
blkcipher_walk_init(&walk, dst, src, nbytes);
err = blkcipher_walk_virt(desc, &walk);
while (walk.nbytes) {
if (walk.src.virt.addr != walk.dst.virt.addr)
memcpy(walk.dst.virt.addr, walk.src.virt.addr,
walk.nbytes);
err = blkcipher_walk_done(desc, &walk, 0);
}
return err;
}
static struct shash_alg digest_null = {
.digestsize = NULL_DIGEST_SIZE,
.setkey = null_hash_setkey,
.init = null_init,
.update = null_update,
.finup = null_digest,
.digest = null_digest,
.final = null_final,
.base = {
.cra_name = "digest_null",
.cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = NULL_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
};
static struct crypto_alg null_algs[3] = { {
.cra_name = "cipher_null",
.cra_flags = CRYPTO_ALG_TYPE_CIPHER,
.cra_blocksize = NULL_BLOCK_SIZE,
.cra_ctxsize = 0,
.cra_module = THIS_MODULE,
.cra_u = { .cipher = {
.cia_min_keysize = NULL_KEY_SIZE,
.cia_max_keysize = NULL_KEY_SIZE,
.cia_setkey = null_setkey,
.cia_encrypt = null_crypt,
.cia_decrypt = null_crypt } }
}, {
.cra_name = "ecb(cipher_null)",
.cra_driver_name = "ecb-cipher_null",
.cra_priority = 100,
.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
.cra_blocksize = NULL_BLOCK_SIZE,
.cra_type = &crypto_blkcipher_type,
.cra_ctxsize = 0,
.cra_module = THIS_MODULE,
.cra_u = { .blkcipher = {
.min_keysize = NULL_KEY_SIZE,
.max_keysize = NULL_KEY_SIZE,
.ivsize = NULL_IV_SIZE,
.setkey = null_setkey,
.encrypt = skcipher_null_crypt,
.decrypt = skcipher_null_crypt } }
}, {
.cra_name = "compress_null",
.cra_flags = CRYPTO_ALG_TYPE_COMPRESS,
.cra_blocksize = NULL_BLOCK_SIZE,
.cra_ctxsize = 0,
.cra_module = THIS_MODULE,
.cra_u = { .compress = {
.coa_compress = null_compress,
.coa_decompress = null_compress } }
} };
MODULE_ALIAS_CRYPTO("compress_null");
MODULE_ALIAS_CRYPTO("digest_null");
MODULE_ALIAS_CRYPTO("cipher_null");
static int __init crypto_null_mod_init(void)
{
int ret = 0;
ret = crypto_register_algs(null_algs, ARRAY_SIZE(null_algs));
if (ret < 0)
goto out;
ret = crypto_register_shash(&digest_null);
if (ret < 0)
goto out_unregister_algs;
return 0;
out_unregister_algs:
crypto_unregister_algs(null_algs, ARRAY_SIZE(null_algs));
out:
return ret;
}
static void __exit crypto_null_mod_fini(void)
{
crypto_unregister_shash(&digest_null);
crypto_unregister_algs(null_algs, ARRAY_SIZE(null_algs));
}
module_init(crypto_null_mod_init);
module_exit(crypto_null_mod_fini);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Null Cryptographic Algorithms");
| gpl-2.0 |
jose51197/Infernal | sound/pci/hda/hda_intel.c | 1190 | 80867 | /*
*
* hda_intel.c - Implementation of primary alsa driver code base
* for Intel HD Audio.
*
* Copyright(c) 2004 Intel Corporation. All rights reserved.
*
* Copyright (c) 2004 Takashi Iwai <tiwai@suse.de>
* PeiSen Hou <pshou@realtek.com.tw>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
* Software Foundation; either version 2 of the License, or (at your option)
* any later version.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program; if not, write to the Free Software Foundation, Inc., 59
* Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*
* CONTACTS:
*
* Matt Jared matt.jared@intel.com
* Andy Kopp andy.kopp@intel.com
* Dan Kogan dan.d.kogan@intel.com
*
* CHANGES:
*
* 2004.12.01 Major rewrite by tiwai, merged the work of pshou
*
*/
#include <asm/io.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/dma-mapping.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/mutex.h>
#include <linux/reboot.h>
#include <sound/core.h>
#include <sound/initval.h>
#include "hda_codec.h"
static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX;
static char *id[SNDRV_CARDS] = SNDRV_DEFAULT_STR;
static int enable[SNDRV_CARDS] = SNDRV_DEFAULT_ENABLE_PNP;
static char *model[SNDRV_CARDS];
static int position_fix[SNDRV_CARDS];
static int bdl_pos_adj[SNDRV_CARDS] = {[0 ... (SNDRV_CARDS-1)] = -1};
static int probe_mask[SNDRV_CARDS] = {[0 ... (SNDRV_CARDS-1)] = -1};
static int probe_only[SNDRV_CARDS];
static int single_cmd;
static int enable_msi = -1;
#ifdef CONFIG_SND_HDA_PATCH_LOADER
static char *patch[SNDRV_CARDS];
#endif
#ifdef CONFIG_SND_HDA_INPUT_BEEP
static int beep_mode[SNDRV_CARDS] = {[0 ... (SNDRV_CARDS-1)] =
CONFIG_SND_HDA_INPUT_BEEP_MODE};
#endif
module_param_array(index, int, NULL, 0444);
MODULE_PARM_DESC(index, "Index value for Intel HD audio interface.");
module_param_array(id, charp, NULL, 0444);
MODULE_PARM_DESC(id, "ID string for Intel HD audio interface.");
module_param_array(enable, bool, NULL, 0444);
MODULE_PARM_DESC(enable, "Enable Intel HD audio interface.");
module_param_array(model, charp, NULL, 0444);
MODULE_PARM_DESC(model, "Use the given board model.");
module_param_array(position_fix, int, NULL, 0444);
MODULE_PARM_DESC(position_fix, "DMA pointer read method "
"(0 = auto, 1 = LPIB, 2 = POSBUF, 3 = VIACOMBO).");
module_param_array(bdl_pos_adj, int, NULL, 0644);
MODULE_PARM_DESC(bdl_pos_adj, "BDL position adjustment offset.");
module_param_array(probe_mask, int, NULL, 0444);
MODULE_PARM_DESC(probe_mask, "Bitmask to probe codecs (default = -1).");
module_param_array(probe_only, int, NULL, 0444);
MODULE_PARM_DESC(probe_only, "Only probing and no codec initialization.");
module_param(single_cmd, bool, 0444);
MODULE_PARM_DESC(single_cmd, "Use single command to communicate with codecs "
"(for debugging only).");
module_param(enable_msi, int, 0444);
MODULE_PARM_DESC(enable_msi, "Enable Message Signaled Interrupt (MSI)");
#ifdef CONFIG_SND_HDA_PATCH_LOADER
module_param_array(patch, charp, NULL, 0444);
MODULE_PARM_DESC(patch, "Patch file for Intel HD audio interface.");
#endif
#ifdef CONFIG_SND_HDA_INPUT_BEEP
module_param_array(beep_mode, int, NULL, 0444);
MODULE_PARM_DESC(beep_mode, "Select HDA Beep registration mode "
"(0=off, 1=on, 2=mute switch on/off) (default=1).");
#endif
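/*
* Illustrative note (added): the options above are typically set on a
* modprobe command line or in /etc/modprobe.d/, e.g. (values are examples
* only):
*
* options snd-hda-intel model=auto position_fix=1 enable_msi=0
*/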
#ifdef CONFIG_SND_HDA_POWER_SAVE
static int power_save = CONFIG_SND_HDA_POWER_SAVE_DEFAULT;
module_param(power_save, int, 0644);
MODULE_PARM_DESC(power_save, "Automatic power-saving timeout "
"(in seconds, 0 = disable).");
/* reset the HD-audio controller in power save mode.
* this may give more power-saving, but will take longer time to
* wake up.
*/
static int power_save_controller = 1;
module_param(power_save_controller, bool, 0644);
MODULE_PARM_DESC(power_save_controller, "Reset controller in power save mode.");
#endif
MODULE_LICENSE("GPL");
MODULE_SUPPORTED_DEVICE("{{Intel, ICH6},"
"{Intel, ICH6M},"
"{Intel, ICH7},"
"{Intel, ESB2},"
"{Intel, ICH8},"
"{Intel, ICH9},"
"{Intel, ICH10},"
"{Intel, PCH},"
"{Intel, CPT},"
"{Intel, PPT},"
"{Intel, PBG},"
"{Intel, SCH},"
"{ATI, SB450},"
"{ATI, SB600},"
"{ATI, RS600},"
"{ATI, RS690},"
"{ATI, RS780},"
"{ATI, R600},"
"{ATI, RV630},"
"{ATI, RV610},"
"{ATI, RV670},"
"{ATI, RV635},"
"{ATI, RV620},"
"{ATI, RV770},"
"{VIA, VT8251},"
"{VIA, VT8237A},"
"{SiS, SIS966},"
"{ULI, M5461}}");
MODULE_DESCRIPTION("Intel HDA driver");
#ifdef CONFIG_SND_VERBOSE_PRINTK
#define SFX /* nop */
#else
#define SFX "hda-intel: "
#endif
/*
* registers
*/
#define ICH6_REG_GCAP 0x00
#define ICH6_GCAP_64OK (1 << 0) /* 64bit address support */
#define ICH6_GCAP_NSDO (3 << 1) /* # of serial data out signals */
#define ICH6_GCAP_BSS (31 << 3) /* # of bidirectional streams */
#define ICH6_GCAP_ISS (15 << 8) /* # of input streams */
#define ICH6_GCAP_OSS (15 << 12) /* # of output streams */
#define ICH6_REG_VMIN 0x02
#define ICH6_REG_VMAJ 0x03
#define ICH6_REG_OUTPAY 0x04
#define ICH6_REG_INPAY 0x06
#define ICH6_REG_GCTL 0x08
#define ICH6_GCTL_RESET (1 << 0) /* controller reset */
#define ICH6_GCTL_FCNTRL (1 << 1) /* flush control */
#define ICH6_GCTL_UNSOL (1 << 8) /* accept unsol. response enable */
#define ICH6_REG_WAKEEN 0x0c
#define ICH6_REG_STATESTS 0x0e
#define ICH6_REG_GSTS 0x10
#define ICH6_GSTS_FSTS (1 << 1) /* flush status */
#define ICH6_REG_INTCTL 0x20
#define ICH6_REG_INTSTS 0x24
#define ICH6_REG_WALLCLK 0x30 /* 24MHz source */
#define ICH6_REG_SYNC 0x34
#define ICH6_REG_CORBLBASE 0x40
#define ICH6_REG_CORBUBASE 0x44
#define ICH6_REG_CORBWP 0x48
#define ICH6_REG_CORBRP 0x4a
#define ICH6_CORBRP_RST (1 << 15) /* read pointer reset */
#define ICH6_REG_CORBCTL 0x4c
#define ICH6_CORBCTL_RUN (1 << 1) /* enable DMA */
#define ICH6_CORBCTL_CMEIE (1 << 0) /* enable memory error irq */
#define ICH6_REG_CORBSTS 0x4d
#define ICH6_CORBSTS_CMEI (1 << 0) /* memory error indication */
#define ICH6_REG_CORBSIZE 0x4e
#define ICH6_REG_RIRBLBASE 0x50
#define ICH6_REG_RIRBUBASE 0x54
#define ICH6_REG_RIRBWP 0x58
#define ICH6_RIRBWP_RST (1 << 15) /* write pointer reset */
#define ICH6_REG_RINTCNT 0x5a
#define ICH6_REG_RIRBCTL 0x5c
#define ICH6_RBCTL_IRQ_EN (1 << 0) /* enable IRQ */
#define ICH6_RBCTL_DMA_EN (1 << 1) /* enable DMA */
#define ICH6_RBCTL_OVERRUN_EN (1 << 2) /* enable overrun irq */
#define ICH6_REG_RIRBSTS 0x5d
#define ICH6_RBSTS_IRQ (1 << 0) /* response irq */
#define ICH6_RBSTS_OVERRUN (1 << 2) /* overrun irq */
#define ICH6_REG_RIRBSIZE 0x5e
#define ICH6_REG_IC 0x60
#define ICH6_REG_IR 0x64
#define ICH6_REG_IRS 0x68
#define ICH6_IRS_VALID (1<<1)
#define ICH6_IRS_BUSY (1<<0)
#define ICH6_REG_DPLBASE 0x70
#define ICH6_REG_DPUBASE 0x74
#define ICH6_DPLBASE_ENABLE 0x1 /* Enable position buffer */
/* SD offset: SDI0=0x80, SDI1=0xa0, ... SDO3=0x160 */
enum { SDI0, SDI1, SDI2, SDI3, SDO0, SDO1, SDO2, SDO3 };
/* stream register offsets from stream base */
#define ICH6_REG_SD_CTL 0x00
#define ICH6_REG_SD_STS 0x03
#define ICH6_REG_SD_LPIB 0x04
#define ICH6_REG_SD_CBL 0x08
#define ICH6_REG_SD_LVI 0x0c
#define ICH6_REG_SD_FIFOW 0x0e
#define ICH6_REG_SD_FIFOSIZE 0x10
#define ICH6_REG_SD_FORMAT 0x12
#define ICH6_REG_SD_BDLPL 0x18
#define ICH6_REG_SD_BDLPU 0x1c
/* PCI space */
#define ICH6_PCIREG_TCSEL 0x44
/*
* other constants
*/
/* max number of SDs */
/* ICH, ATI and VIA have 4 playback and 4 capture */
#define ICH6_NUM_CAPTURE 4
#define ICH6_NUM_PLAYBACK 4
/* ULI has 6 playback and 5 capture */
#define ULI_NUM_CAPTURE 5
#define ULI_NUM_PLAYBACK 6
/* ATI HDMI has 1 playback and 0 capture */
#define ATIHDMI_NUM_CAPTURE 0
#define ATIHDMI_NUM_PLAYBACK 1
/* TERA has 4 playback and 3 capture */
#define TERA_NUM_CAPTURE 3
#define TERA_NUM_PLAYBACK 4
/* this number is statically defined for simplicity */
#define MAX_AZX_DEV 16
/* max number of fragments - we may use more if allocating more pages for BDL */
#define BDL_SIZE 4096
#define AZX_MAX_BDL_ENTRIES (BDL_SIZE / 16)
#define AZX_MAX_FRAG 32
/* max buffer size - no h/w limit, you can increase as you like */
#define AZX_MAX_BUF_SIZE (1024*1024*1024)
/* RIRB int mask: overrun[2], response[0] */
#define RIRB_INT_RESPONSE 0x01
#define RIRB_INT_OVERRUN 0x04
#define RIRB_INT_MASK 0x05
/* STATESTS int mask: S3,SD2,SD1,SD0 */
#define AZX_MAX_CODECS 8
#define AZX_DEFAULT_CODECS 4
#define STATESTS_INT_MASK ((1 << AZX_MAX_CODECS) - 1)
/* SD_CTL bits */
#define SD_CTL_STREAM_RESET 0x01 /* stream reset bit */
#define SD_CTL_DMA_START 0x02 /* stream DMA start bit */
#define SD_CTL_STRIPE (3 << 16) /* stripe control */
#define SD_CTL_TRAFFIC_PRIO (1 << 18) /* traffic priority */
#define SD_CTL_DIR (1 << 19) /* bi-directional stream */
#define SD_CTL_STREAM_TAG_MASK (0xf << 20)
#define SD_CTL_STREAM_TAG_SHIFT 20
/* SD_CTL and SD_STS */
#define SD_INT_DESC_ERR 0x10 /* descriptor error interrupt */
#define SD_INT_FIFO_ERR 0x08 /* FIFO error interrupt */
#define SD_INT_COMPLETE 0x04 /* completion interrupt */
#define SD_INT_MASK (SD_INT_DESC_ERR|SD_INT_FIFO_ERR|\
SD_INT_COMPLETE)
/* SD_STS */
#define SD_STS_FIFO_READY 0x20 /* FIFO ready */
/* INTCTL and INTSTS */
#define ICH6_INT_ALL_STREAM 0xff /* all stream interrupts */
#define ICH6_INT_CTRL_EN 0x40000000 /* controller interrupt enable bit */
#define ICH6_INT_GLOBAL_EN 0x80000000 /* global interrupt enable bit */
/* below are so far hardcoded - should read registers in future */
#define ICH6_MAX_CORB_ENTRIES 256
#define ICH6_MAX_RIRB_ENTRIES 256
/* position fix mode */
enum {
POS_FIX_AUTO,
POS_FIX_LPIB,
POS_FIX_POSBUF,
POS_FIX_VIACOMBO,
};
/* Defines for ATI HD Audio support in SB450 south bridge */
#define ATI_SB450_HDAUDIO_MISC_CNTR2_ADDR 0x42
#define ATI_SB450_HDAUDIO_ENABLE_SNOOP 0x02
/* Defines for Nvidia HDA support */
#define NVIDIA_HDA_TRANSREG_ADDR 0x4e
#define NVIDIA_HDA_ENABLE_COHBITS 0x0f
#define NVIDIA_HDA_ISTRM_COH 0x4d
#define NVIDIA_HDA_OSTRM_COH 0x4c
#define NVIDIA_HDA_ENABLE_COHBIT 0x01
/* Defines for Intel SCH HDA snoop control */
#define INTEL_SCH_HDA_DEVC 0x78
#define INTEL_SCH_HDA_DEVC_NOSNOOP (0x1<<11)
/* Define IN stream 0 FIFO size offset in VIA controller */
#define VIA_IN_STREAM0_FIFO_SIZE_OFFSET 0x90
/* Define VIA HD Audio Device ID*/
#define VIA_HDAC_DEVICE_ID 0x3288
/* HD Audio class code */
#define PCI_CLASS_MULTIMEDIA_HD_AUDIO 0x0403
/*
*/
struct azx_dev {
struct snd_dma_buffer bdl; /* BDL buffer */
u32 *posbuf; /* position buffer pointer */
unsigned int bufsize; /* size of the play buffer in bytes */
unsigned int period_bytes; /* size of the period in bytes */
unsigned int frags; /* number for period in the play buffer */
unsigned int fifo_size; /* FIFO size */
unsigned long start_wallclk; /* start + minimum wallclk */
unsigned long period_wallclk; /* wallclk for period */
void __iomem *sd_addr; /* stream descriptor pointer */
u32 sd_int_sta_mask; /* stream int status mask */
/* pcm support */
struct snd_pcm_substream *substream; /* assigned substream,
* set in PCM open
*/
unsigned int format_val; /* format value to be set in the
* controller and the codec
*/
unsigned char stream_tag; /* assigned stream */
unsigned char index; /* stream index */
int device; /* last device number assigned to */
unsigned int opened :1;
unsigned int running :1;
unsigned int irq_pending :1;
/*
* For VIA:
* A flag to ensure DMA position is 0
* when link position is not greater than FIFO size
*/
unsigned int insufficient :1;
};
/* CORB/RIRB */
struct azx_rb {
u32 *buf; /* CORB/RIRB buffer
* Each CORB entry is 4byte, RIRB is 8byte
*/
dma_addr_t addr; /* physical address of CORB/RIRB buffer */
/* for RIRB */
unsigned short rp, wp; /* read/write pointers */
int cmds[AZX_MAX_CODECS]; /* number of pending requests */
u32 res[AZX_MAX_CODECS]; /* last read value */
};
struct azx {
struct snd_card *card;
struct pci_dev *pci;
int dev_index;
/* chip type specific */
int driver_type;
unsigned int driver_caps;
int playback_streams;
int playback_index_offset;
int capture_streams;
int capture_index_offset;
int num_streams;
/* pci resources */
unsigned long addr;
void __iomem *remap_addr;
int irq;
/* locks */
spinlock_t reg_lock;
struct mutex open_mutex;
/* streams (x num_streams) */
struct azx_dev *azx_dev;
/* PCM */
struct snd_pcm *pcm[HDA_MAX_PCMS];
/* HD codec */
unsigned short codec_mask;
int codec_probe_mask; /* copied from probe_mask option */
struct hda_bus *bus;
unsigned int beep_mode;
/* CORB/RIRB */
struct azx_rb corb;
struct azx_rb rirb;
/* CORB/RIRB and position buffers */
struct snd_dma_buffer rb;
struct snd_dma_buffer posbuf;
/* flags */
int position_fix[2]; /* for both playback/capture streams */
int poll_count;
unsigned int running :1;
unsigned int initialized :1;
unsigned int single_cmd :1;
unsigned int polling_mode :1;
unsigned int msi :1;
unsigned int irq_pending_warned :1;
unsigned int probing :1; /* codec probing phase */
/* for debugging */
unsigned int last_cmd[AZX_MAX_CODECS];
/* for pending irqs */
struct work_struct irq_pending_work;
/* reboot notifier (for mysterious hangup problem at power-down) */
struct notifier_block reboot_notifier;
};
/* driver types */
enum {
AZX_DRIVER_ICH,
AZX_DRIVER_PCH,
AZX_DRIVER_SCH,
AZX_DRIVER_ATI,
AZX_DRIVER_ATIHDMI,
AZX_DRIVER_VIA,
AZX_DRIVER_SIS,
AZX_DRIVER_ULI,
AZX_DRIVER_NVIDIA,
AZX_DRIVER_TERA,
AZX_DRIVER_CTX,
AZX_DRIVER_GENERIC,
AZX_NUM_DRIVERS, /* keep this as last entry */
};
/* driver quirks (capabilities) */
/* bits 0-7 are used for indicating driver type */
#define AZX_DCAPS_NO_TCSEL (1 << 8) /* No Intel TCSEL bit */
#define AZX_DCAPS_NO_MSI (1 << 9) /* No MSI support */
#define AZX_DCAPS_ATI_SNOOP (1 << 10) /* ATI snoop enable */
#define AZX_DCAPS_NVIDIA_SNOOP (1 << 11) /* Nvidia snoop enable */
#define AZX_DCAPS_SCH_SNOOP (1 << 12) /* SCH/PCH snoop enable */
#define AZX_DCAPS_RIRB_DELAY (1 << 13) /* Long delay in read loop */
#define AZX_DCAPS_RIRB_PRE_DELAY (1 << 14) /* Put a delay before read */
#define AZX_DCAPS_CTX_WORKAROUND (1 << 15) /* X-Fi workaround */
#define AZX_DCAPS_POSFIX_LPIB (1 << 16) /* Use LPIB as default */
#define AZX_DCAPS_POSFIX_VIA (1 << 17) /* Use VIACOMBO as default */
#define AZX_DCAPS_NO_64BIT (1 << 18) /* No 64bit address */
#define AZX_DCAPS_SYNC_WRITE (1 << 19) /* sync each cmd write */
/* quirks for ATI SB / AMD Hudson */
#define AZX_DCAPS_PRESET_ATI_SB \
(AZX_DCAPS_ATI_SNOOP | AZX_DCAPS_NO_TCSEL | \
AZX_DCAPS_SYNC_WRITE | AZX_DCAPS_POSFIX_LPIB)
/* quirks for ATI/AMD HDMI */
#define AZX_DCAPS_PRESET_ATI_HDMI \
(AZX_DCAPS_NO_TCSEL | AZX_DCAPS_SYNC_WRITE | AZX_DCAPS_POSFIX_LPIB)
/* quirks for Nvidia */
#define AZX_DCAPS_PRESET_NVIDIA \
(AZX_DCAPS_NVIDIA_SNOOP | AZX_DCAPS_RIRB_DELAY | AZX_DCAPS_NO_MSI)
static char *driver_short_names[] __devinitdata = {
[AZX_DRIVER_ICH] = "HDA Intel",
[AZX_DRIVER_PCH] = "HDA Intel PCH",
[AZX_DRIVER_SCH] = "HDA Intel MID",
[AZX_DRIVER_ATI] = "HDA ATI SB",
[AZX_DRIVER_ATIHDMI] = "HDA ATI HDMI",
[AZX_DRIVER_VIA] = "HDA VIA VT82xx",
[AZX_DRIVER_SIS] = "HDA SIS966",
[AZX_DRIVER_ULI] = "HDA ULI M5461",
[AZX_DRIVER_NVIDIA] = "HDA NVidia",
[AZX_DRIVER_TERA] = "HDA Teradici",
[AZX_DRIVER_CTX] = "HDA Creative",
[AZX_DRIVER_GENERIC] = "HD-Audio Generic",
};
/*
* macros for easy use
*/
#define azx_writel(chip,reg,value) \
writel(value, (chip)->remap_addr + ICH6_REG_##reg)
#define azx_readl(chip,reg) \
readl((chip)->remap_addr + ICH6_REG_##reg)
#define azx_writew(chip,reg,value) \
writew(value, (chip)->remap_addr + ICH6_REG_##reg)
#define azx_readw(chip,reg) \
readw((chip)->remap_addr + ICH6_REG_##reg)
#define azx_writeb(chip,reg,value) \
writeb(value, (chip)->remap_addr + ICH6_REG_##reg)
#define azx_readb(chip,reg) \
readb((chip)->remap_addr + ICH6_REG_##reg)
#define azx_sd_writel(dev,reg,value) \
writel(value, (dev)->sd_addr + ICH6_REG_##reg)
#define azx_sd_readl(dev,reg) \
readl((dev)->sd_addr + ICH6_REG_##reg)
#define azx_sd_writew(dev,reg,value) \
writew(value, (dev)->sd_addr + ICH6_REG_##reg)
#define azx_sd_readw(dev,reg) \
readw((dev)->sd_addr + ICH6_REG_##reg)
#define azx_sd_writeb(dev,reg,value) \
writeb(value, (dev)->sd_addr + ICH6_REG_##reg)
#define azx_sd_readb(dev,reg) \
readb((dev)->sd_addr + ICH6_REG_##reg)
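/*
* A minimal sketch (added for illustration): the helpers above expand to
* plain MMIO accessors, e.g. azx_readl(chip, INTSTS) becomes
* readl((chip)->remap_addr + ICH6_REG_INTSTS).
*/
static inline u32 example_read_intsts(struct azx *chip)
{
return azx_readl(chip, INTSTS);
}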
/* for pcm support */
#define get_azx_dev(substream) (substream->runtime->private_data)
static int azx_acquire_irq(struct azx *chip, int do_disconnect);
static int azx_send_cmd(struct hda_bus *bus, unsigned int val);
/*
* Interface for HD codec
*/
/*
* CORB / RIRB interface
*/
static int azx_alloc_cmd_io(struct azx *chip)
{
int err;
/* a single page (at least 4096 bytes) must suffice for both ring buffers */
err = snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV,
snd_dma_pci_data(chip->pci),
PAGE_SIZE, &chip->rb);
if (err < 0) {
snd_printk(KERN_ERR SFX "cannot allocate CORB/RIRB\n");
return err;
}
return 0;
}
static void azx_init_cmd_io(struct azx *chip)
{
spin_lock_irq(&chip->reg_lock);
/* CORB set up */
chip->corb.addr = chip->rb.addr;
chip->corb.buf = (u32 *)chip->rb.area;
azx_writel(chip, CORBLBASE, (u32)chip->corb.addr);
azx_writel(chip, CORBUBASE, upper_32_bits(chip->corb.addr));
/* set the corb size to 256 entries (ULI requires explicitly) */
azx_writeb(chip, CORBSIZE, 0x02);
/* set the corb write pointer to 0 */
azx_writew(chip, CORBWP, 0);
/* reset the corb hw read pointer */
azx_writew(chip, CORBRP, ICH6_CORBRP_RST);
/* enable corb dma */
azx_writeb(chip, CORBCTL, ICH6_CORBCTL_RUN);
/* RIRB set up */
chip->rirb.addr = chip->rb.addr + 2048;
chip->rirb.buf = (u32 *)(chip->rb.area + 2048);
chip->rirb.wp = chip->rirb.rp = 0;
memset(chip->rirb.cmds, 0, sizeof(chip->rirb.cmds));
azx_writel(chip, RIRBLBASE, (u32)chip->rirb.addr);
azx_writel(chip, RIRBUBASE, upper_32_bits(chip->rirb.addr));
/* set the rirb size to 256 entries (ULI requires explicitly) */
azx_writeb(chip, RIRBSIZE, 0x02);
/* reset the rirb hw write pointer */
azx_writew(chip, RIRBWP, ICH6_RIRBWP_RST);
/* set N=1, get RIRB response interrupt for new entry */
if (chip->driver_caps & AZX_DCAPS_CTX_WORKAROUND)
azx_writew(chip, RINTCNT, 0xc0);
else
azx_writew(chip, RINTCNT, 1);
/* enable rirb dma and response irq */
azx_writeb(chip, RIRBCTL, ICH6_RBCTL_DMA_EN | ICH6_RBCTL_IRQ_EN);
spin_unlock_irq(&chip->reg_lock);
}
static void azx_free_cmd_io(struct azx *chip)
{
spin_lock_irq(&chip->reg_lock);
/* disable ringbuffer DMAs */
azx_writeb(chip, RIRBCTL, 0);
azx_writeb(chip, CORBCTL, 0);
spin_unlock_irq(&chip->reg_lock);
}
static unsigned int azx_command_addr(u32 cmd)
{
unsigned int addr = cmd >> 28;
if (addr >= AZX_MAX_CODECS) {
snd_BUG();
addr = 0;
}
return addr;
}
static unsigned int azx_response_addr(u32 res)
{
unsigned int addr = res & 0xf;
if (addr >= AZX_MAX_CODECS) {
snd_BUG();
addr = 0;
}
return addr;
}
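/*
* Worked example (comment added for clarity): a HD-audio verb carries the
* codec address in bits 31:28, so for val = 0x200F0000 azx_command_addr()
* returns 0x200F0000 >> 28 = 2; in the matching RIRB extended response the
* address sits in the low nibble, so azx_response_addr(0x2) also yields
* codec #2.
*/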
/* send a command */
static int azx_corb_send_cmd(struct hda_bus *bus, u32 val)
{
struct azx *chip = bus->private_data;
unsigned int addr = azx_command_addr(val);
unsigned int wp;
spin_lock_irq(&chip->reg_lock);
/* add command to corb */
wp = azx_readb(chip, CORBWP);
wp++;
wp %= ICH6_MAX_CORB_ENTRIES;
chip->rirb.cmds[addr]++;
chip->corb.buf[wp] = cpu_to_le32(val);
azx_writel(chip, CORBWP, wp);
spin_unlock_irq(&chip->reg_lock);
return 0;
}
#define ICH6_RIRB_EX_UNSOL_EV (1<<4)
/* retrieve RIRB entry - called from interrupt handler */
static void azx_update_rirb(struct azx *chip)
{
unsigned int rp, wp;
unsigned int addr;
u32 res, res_ex;
wp = azx_readb(chip, RIRBWP);
if (wp == chip->rirb.wp)
return;
chip->rirb.wp = wp;
while (chip->rirb.rp != wp) {
chip->rirb.rp++;
chip->rirb.rp %= ICH6_MAX_RIRB_ENTRIES;
rp = chip->rirb.rp << 1; /* an RIRB entry is 8-bytes */
res_ex = le32_to_cpu(chip->rirb.buf[rp + 1]);
res = le32_to_cpu(chip->rirb.buf[rp]);
addr = azx_response_addr(res_ex);
if (res_ex & ICH6_RIRB_EX_UNSOL_EV)
snd_hda_queue_unsol_event(chip->bus, res, res_ex);
else if (chip->rirb.cmds[addr]) {
chip->rirb.res[addr] = res;
smp_wmb();
chip->rirb.cmds[addr]--;
} else
snd_printk(KERN_ERR SFX "spurious response %#x:%#x, "
"last cmd=%#08x\n",
res, res_ex,
chip->last_cmd[addr]);
}
}
/* receive a response */
static unsigned int azx_rirb_get_response(struct hda_bus *bus,
unsigned int addr)
{
struct azx *chip = bus->private_data;
unsigned long timeout;
int do_poll = 0;
again:
timeout = jiffies + msecs_to_jiffies(1000);
for (;;) {
if (chip->polling_mode || do_poll) {
spin_lock_irq(&chip->reg_lock);
azx_update_rirb(chip);
spin_unlock_irq(&chip->reg_lock);
}
if (!chip->rirb.cmds[addr]) {
smp_rmb();
bus->rirb_error = 0;
if (!do_poll)
chip->poll_count = 0;
return chip->rirb.res[addr]; /* the last value */
}
if (time_after(jiffies, timeout))
break;
if (bus->needs_damn_long_delay)
msleep(2); /* temporary workaround */
else {
udelay(10);
cond_resched();
}
}
if (!chip->polling_mode && chip->poll_count < 2) {
snd_printdd(SFX "azx_get_response timeout, "
"polling the codec once: last cmd=0x%08x\n",
chip->last_cmd[addr]);
do_poll = 1;
chip->poll_count++;
goto again;
}
if (!chip->polling_mode) {
snd_printk(KERN_WARNING SFX "azx_get_response timeout, "
"switching to polling mode: last cmd=0x%08x\n",
chip->last_cmd[addr]);
chip->polling_mode = 1;
goto again;
}
if (chip->msi) {
snd_printk(KERN_WARNING SFX "No response from codec, "
"disabling MSI: last cmd=0x%08x\n",
chip->last_cmd[addr]);
free_irq(chip->irq, chip);
chip->irq = -1;
pci_disable_msi(chip->pci);
chip->msi = 0;
if (azx_acquire_irq(chip, 1) < 0) {
bus->rirb_error = 1;
return -1;
}
goto again;
}
if (chip->probing) {
/* If this critical timeout happens during the codec probing
* phase, this is likely an access to a non-existing codec
* slot. Better to return an error and reset the system.
*/
return -1;
}
/* a fatal communication error; need either to reset or to fallback
* to the single_cmd mode
*/
bus->rirb_error = 1;
if (bus->allow_bus_reset && !bus->response_reset && !bus->in_reset) {
bus->response_reset = 1;
return -1; /* give a chance to retry */
}
snd_printk(KERN_ERR "hda_intel: azx_get_response timeout, "
"switching to single_cmd mode: last cmd=0x%08x\n",
chip->last_cmd[addr]);
chip->single_cmd = 1;
bus->response_reset = 0;
/* release CORB/RIRB */
azx_free_cmd_io(chip);
/* disable unsolicited responses */
azx_writel(chip, GCTL, azx_readl(chip, GCTL) & ~ICH6_GCTL_UNSOL);
return -1;
}
/*
* Use the single immediate command instead of CORB/RIRB for simplicity
*
* Note: according to Intel, this is not the preferred use. The command was
* intended for the BIOS only, and may get confused with unsolicited
* responses. So, we shouldn't use it for normal operation from the
* driver.
* I left the code, however, for debugging/testing purposes.
*/
/* receive a response */
static int azx_single_wait_for_response(struct azx *chip, unsigned int addr)
{
int timeout = 50;
while (timeout--) {
/* check IRV busy bit */
if (azx_readw(chip, IRS) & ICH6_IRS_VALID) {
/* reuse rirb.res as the response return value */
chip->rirb.res[addr] = azx_readl(chip, IR);
return 0;
}
udelay(1);
}
if (printk_ratelimit())
snd_printd(SFX "get_response timeout: IRS=0x%x\n",
azx_readw(chip, IRS));
chip->rirb.res[addr] = -1;
return -EIO;
}
/* send a command */
static int azx_single_send_cmd(struct hda_bus *bus, u32 val)
{
struct azx *chip = bus->private_data;
unsigned int addr = azx_command_addr(val);
int timeout = 50;
bus->rirb_error = 0;
while (timeout--) {
/* check ICB busy bit */
if (!((azx_readw(chip, IRS) & ICH6_IRS_BUSY))) {
/* Clear IRV valid bit */
azx_writew(chip, IRS, azx_readw(chip, IRS) |
ICH6_IRS_VALID);
azx_writel(chip, IC, val);
azx_writew(chip, IRS, azx_readw(chip, IRS) |
ICH6_IRS_BUSY);
return azx_single_wait_for_response(chip, addr);
}
udelay(1);
}
if (printk_ratelimit())
snd_printd(SFX "send_cmd timeout: IRS=0x%x, val=0x%x\n",
azx_readw(chip, IRS), val);
return -EIO;
}
/* receive a response */
static unsigned int azx_single_get_response(struct hda_bus *bus,
unsigned int addr)
{
struct azx *chip = bus->private_data;
return chip->rirb.res[addr];
}
/*
* The below are the main callbacks from hda_codec.
*
* They are just the skeleton to call sub-callbacks according to the
* current setting of chip->single_cmd.
*/
/* send a command */
static int azx_send_cmd(struct hda_bus *bus, unsigned int val)
{
struct azx *chip = bus->private_data;
chip->last_cmd[azx_command_addr(val)] = val;
if (chip->single_cmd)
return azx_single_send_cmd(bus, val);
else
return azx_corb_send_cmd(bus, val);
}
/* get a response */
static unsigned int azx_get_response(struct hda_bus *bus,
unsigned int addr)
{
struct azx *chip = bus->private_data;
if (chip->single_cmd)
return azx_single_get_response(bus, addr);
else
return azx_rirb_get_response(bus, addr);
}
#ifdef CONFIG_SND_HDA_POWER_SAVE
static void azx_power_notify(struct hda_bus *bus);
#endif
/* reset codec link */
static int azx_reset(struct azx *chip, int full_reset)
{
int count;
if (!full_reset)
goto __skip;
/* clear STATESTS */
azx_writeb(chip, STATESTS, STATESTS_INT_MASK);
/* reset controller */
azx_writel(chip, GCTL, azx_readl(chip, GCTL) & ~ICH6_GCTL_RESET);
count = 50;
while (azx_readb(chip, GCTL) && --count)
msleep(1);
/* delay for >= 100us for codec PLL to settle per spec
* Rev 0.9 section 5.5.1
*/
msleep(1);
/* Bring controller out of reset */
azx_writeb(chip, GCTL, azx_readb(chip, GCTL) | ICH6_GCTL_RESET);
count = 50;
while (!azx_readb(chip, GCTL) && --count)
msleep(1);
/* Brent Chartrand said to wait >= 540us for codecs to initialize */
msleep(1);
__skip:
/* check to see if controller is ready */
if (!azx_readb(chip, GCTL)) {
snd_printd(SFX "azx_reset: controller not ready!\n");
return -EBUSY;
}
/* Accept unsolicited responses */
if (!chip->single_cmd)
azx_writel(chip, GCTL, azx_readl(chip, GCTL) |
ICH6_GCTL_UNSOL);
/* detect codecs */
if (!chip->codec_mask) {
chip->codec_mask = azx_readw(chip, STATESTS);
snd_printdd(SFX "codec_mask = 0x%x\n", chip->codec_mask);
}
return 0;
}
/*
* Lowlevel interface
*/
/* enable interrupts */
static void azx_int_enable(struct azx *chip)
{
/* enable controller CIE and GIE */
azx_writel(chip, INTCTL, azx_readl(chip, INTCTL) |
ICH6_INT_CTRL_EN | ICH6_INT_GLOBAL_EN);
}
/* disable interrupts */
static void azx_int_disable(struct azx *chip)
{
int i;
/* disable interrupts in stream descriptor */
for (i = 0; i < chip->num_streams; i++) {
struct azx_dev *azx_dev = &chip->azx_dev[i];
azx_sd_writeb(azx_dev, SD_CTL,
azx_sd_readb(azx_dev, SD_CTL) & ~SD_INT_MASK);
}
/* disable SIE for all streams */
azx_writeb(chip, INTCTL, 0);
/* disable controller CIE and GIE */
azx_writel(chip, INTCTL, azx_readl(chip, INTCTL) &
~(ICH6_INT_CTRL_EN | ICH6_INT_GLOBAL_EN));
}
/* clear interrupts */
static void azx_int_clear(struct azx *chip)
{
int i;
/* clear stream status */
for (i = 0; i < chip->num_streams; i++) {
struct azx_dev *azx_dev = &chip->azx_dev[i];
azx_sd_writeb(azx_dev, SD_STS, SD_INT_MASK);
}
/* clear STATESTS */
azx_writeb(chip, STATESTS, STATESTS_INT_MASK);
/* clear rirb status */
azx_writeb(chip, RIRBSTS, RIRB_INT_MASK);
/* clear int status */
azx_writel(chip, INTSTS, ICH6_INT_CTRL_EN | ICH6_INT_ALL_STREAM);
}
/* start a stream */
static void azx_stream_start(struct azx *chip, struct azx_dev *azx_dev)
{
/*
* Before stream start, initialize parameters
*/
azx_dev->insufficient = 1;
/* enable SIE */
azx_writel(chip, INTCTL,
azx_readl(chip, INTCTL) | (1 << azx_dev->index));
/* set DMA start and interrupt mask */
azx_sd_writeb(azx_dev, SD_CTL, azx_sd_readb(azx_dev, SD_CTL) |
SD_CTL_DMA_START | SD_INT_MASK);
}
/* stop DMA */
static void azx_stream_clear(struct azx *chip, struct azx_dev *azx_dev)
{
azx_sd_writeb(azx_dev, SD_CTL, azx_sd_readb(azx_dev, SD_CTL) &
~(SD_CTL_DMA_START | SD_INT_MASK));
azx_sd_writeb(azx_dev, SD_STS, SD_INT_MASK); /* to be sure */
}
/* stop a stream */
static void azx_stream_stop(struct azx *chip, struct azx_dev *azx_dev)
{
azx_stream_clear(chip, azx_dev);
/* disable SIE */
azx_writel(chip, INTCTL,
azx_readl(chip, INTCTL) & ~(1 << azx_dev->index));
}
/*
* reset and start the controller registers
*/
static void azx_init_chip(struct azx *chip, int full_reset)
{
if (chip->initialized)
return;
/* reset controller */
azx_reset(chip, full_reset);
/* initialize interrupts */
azx_int_clear(chip);
azx_int_enable(chip);
/* initialize the codec command I/O */
if (!chip->single_cmd)
azx_init_cmd_io(chip);
/* program the position buffer */
azx_writel(chip, DPLBASE, (u32)chip->posbuf.addr);
azx_writel(chip, DPUBASE, upper_32_bits(chip->posbuf.addr));
chip->initialized = 1;
}
/*
* initialize the PCI registers
*/
/* update bits in a PCI register byte */
static void update_pci_byte(struct pci_dev *pci, unsigned int reg,
unsigned char mask, unsigned char val)
{
unsigned char data;
pci_read_config_byte(pci, reg, &data);
data &= ~mask;
data |= (val & mask);
pci_write_config_byte(pci, reg, data);
}
static void azx_init_pci(struct azx *chip)
{
unsigned short snoop;
/* Clear bits 0-2 of PCI register TCSEL (at offset 0x44)
* TCSEL == Traffic Class Select Register, which sets PCI express QOS
* Ensuring these bits are 0 clears playback static on some HD Audio
* codecs.
* The PCI register TCSEL is defined in the Intel manuals.
*/
if (!(chip->driver_caps & AZX_DCAPS_NO_TCSEL)) {
snd_printdd(SFX "Clearing TCSEL\n");
update_pci_byte(chip->pci, ICH6_PCIREG_TCSEL, 0x07, 0);
}
/* For ATI SB450/600/700/800/900 and AMD Hudson azalia HD audio,
* we need to enable snoop.
*/
if (chip->driver_caps & AZX_DCAPS_ATI_SNOOP) {
snd_printdd(SFX "Enabling ATI snoop\n");
update_pci_byte(chip->pci,
ATI_SB450_HDAUDIO_MISC_CNTR2_ADDR,
0x07, ATI_SB450_HDAUDIO_ENABLE_SNOOP);
}
/* For NVIDIA HDA, enable snoop */
if (chip->driver_caps & AZX_DCAPS_NVIDIA_SNOOP) {
snd_printdd(SFX "Enabling Nvidia snoop\n");
update_pci_byte(chip->pci,
NVIDIA_HDA_TRANSREG_ADDR,
0x0f, NVIDIA_HDA_ENABLE_COHBITS);
update_pci_byte(chip->pci,
NVIDIA_HDA_ISTRM_COH,
0x01, NVIDIA_HDA_ENABLE_COHBIT);
update_pci_byte(chip->pci,
NVIDIA_HDA_OSTRM_COH,
0x01, NVIDIA_HDA_ENABLE_COHBIT);
}
/* Enable SCH/PCH snoop if needed */
if (chip->driver_caps & AZX_DCAPS_SCH_SNOOP) {
pci_read_config_word(chip->pci, INTEL_SCH_HDA_DEVC, &snoop);
if (snoop & INTEL_SCH_HDA_DEVC_NOSNOOP) {
pci_write_config_word(chip->pci, INTEL_SCH_HDA_DEVC,
snoop & (~INTEL_SCH_HDA_DEVC_NOSNOOP));
pci_read_config_word(chip->pci,
INTEL_SCH_HDA_DEVC, &snoop);
snd_printdd(SFX "HDA snoop disabled, enabling ... %s\n",
(snoop & INTEL_SCH_HDA_DEVC_NOSNOOP)
? "Failed" : "OK");
}
}
}
static int azx_position_ok(struct azx *chip, struct azx_dev *azx_dev);
/*
* interrupt handler
*/
static irqreturn_t azx_interrupt(int irq, void *dev_id)
{
struct azx *chip = dev_id;
struct azx_dev *azx_dev;
u32 status;
u8 sd_status;
int i, ok;
spin_lock(&chip->reg_lock);
status = azx_readl(chip, INTSTS);
if (status == 0) {
spin_unlock(&chip->reg_lock);
return IRQ_NONE;
}
for (i = 0; i < chip->num_streams; i++) {
azx_dev = &chip->azx_dev[i];
if (status & azx_dev->sd_int_sta_mask) {
sd_status = azx_sd_readb(azx_dev, SD_STS);
azx_sd_writeb(azx_dev, SD_STS, SD_INT_MASK);
if (!azx_dev->substream || !azx_dev->running ||
!(sd_status & SD_INT_COMPLETE))
continue;
/* check whether this IRQ is really acceptable */
ok = azx_position_ok(chip, azx_dev);
if (ok == 1) {
azx_dev->irq_pending = 0;
spin_unlock(&chip->reg_lock);
snd_pcm_period_elapsed(azx_dev->substream);
spin_lock(&chip->reg_lock);
} else if (ok == 0 && chip->bus && chip->bus->workq) {
/* bogus IRQ, process it later */
azx_dev->irq_pending = 1;
queue_work(chip->bus->workq,
&chip->irq_pending_work);
}
}
}
/* clear rirb int */
status = azx_readb(chip, RIRBSTS);
if (status & RIRB_INT_MASK) {
if (status & RIRB_INT_RESPONSE) {
if (chip->driver_caps & AZX_DCAPS_RIRB_PRE_DELAY)
udelay(80);
azx_update_rirb(chip);
}
azx_writeb(chip, RIRBSTS, RIRB_INT_MASK);
}
#if 0
/* clear state status int */
if (azx_readb(chip, STATESTS) & 0x04)
azx_writeb(chip, STATESTS, 0x04);
#endif
spin_unlock(&chip->reg_lock);
return IRQ_HANDLED;
}
/*
* set up a BDL entry
*/
static int setup_bdle(struct snd_pcm_substream *substream,
struct azx_dev *azx_dev, u32 **bdlp,
int ofs, int size, int with_ioc)
{
u32 *bdl = *bdlp;
while (size > 0) {
dma_addr_t addr;
int chunk;
if (azx_dev->frags >= AZX_MAX_BDL_ENTRIES)
return -EINVAL;
addr = snd_pcm_sgbuf_get_addr(substream, ofs);
/* program the address field of the BDL entry */
bdl[0] = cpu_to_le32((u32)addr);
bdl[1] = cpu_to_le32(upper_32_bits(addr));
/* program the size field of the BDL entry */
chunk = snd_pcm_sgbuf_get_chunk_size(substream, ofs, size);
bdl[2] = cpu_to_le32(chunk);
/* program the IOC to enable interrupt
* only when the whole fragment is processed
*/
size -= chunk;
bdl[3] = (size || !with_ioc) ? 0 : cpu_to_le32(0x01);
bdl += 4;
azx_dev->frags++;
ofs += chunk;
}
*bdlp = bdl;
return ofs;
}
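/*
* Worked example (comment added for clarity): with a 16 KiB period split by
* the s/g buffer into two 8 KiB chunks, setup_bdle() emits two 16-byte BDL
* entries and sets the IOC flag (bdl[3]) only on the entry that completes
* the fragment, so at most one interrupt fires per period.
*/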
/*
* set up BDL entries
*/
static int azx_setup_periods(struct azx *chip,
struct snd_pcm_substream *substream,
struct azx_dev *azx_dev)
{
u32 *bdl;
int i, ofs, periods, period_bytes;
int pos_adj;
/* reset BDL address */
azx_sd_writel(azx_dev, SD_BDLPL, 0);
azx_sd_writel(azx_dev, SD_BDLPU, 0);
period_bytes = azx_dev->period_bytes;
periods = azx_dev->bufsize / period_bytes;
/* program the initial BDL entries */
bdl = (u32 *)azx_dev->bdl.area;
ofs = 0;
azx_dev->frags = 0;
pos_adj = bdl_pos_adj[chip->dev_index];
if (pos_adj > 0) {
struct snd_pcm_runtime *runtime = substream->runtime;
int pos_align = pos_adj;
pos_adj = (pos_adj * runtime->rate + 47999) / 48000;
if (!pos_adj)
pos_adj = pos_align;
else
pos_adj = ((pos_adj + pos_align - 1) / pos_align) *
pos_align;
pos_adj = frames_to_bytes(runtime, pos_adj);
if (pos_adj >= period_bytes) {
snd_printk(KERN_WARNING SFX "Too big adjustment %d\n",
bdl_pos_adj[chip->dev_index]);
pos_adj = 0;
} else {
ofs = setup_bdle(substream, azx_dev,
&bdl, ofs, pos_adj,
!substream->runtime->no_period_wakeup);
if (ofs < 0)
goto error;
}
} else
pos_adj = 0;
for (i = 0; i < periods; i++) {
if (i == periods - 1 && pos_adj)
ofs = setup_bdle(substream, azx_dev, &bdl, ofs,
period_bytes - pos_adj, 0);
else
ofs = setup_bdle(substream, azx_dev, &bdl, ofs,
period_bytes,
!substream->runtime->no_period_wakeup);
if (ofs < 0)
goto error;
}
return 0;
error:
snd_printk(KERN_ERR SFX "Too many BDL entries: buffer=%d, period=%d\n",
azx_dev->bufsize, period_bytes);
return -EINVAL;
}
/* reset stream */
static void azx_stream_reset(struct azx *chip, struct azx_dev *azx_dev)
{
unsigned char val;
int timeout;
azx_stream_clear(chip, azx_dev);
azx_sd_writeb(azx_dev, SD_CTL, azx_sd_readb(azx_dev, SD_CTL) |
SD_CTL_STREAM_RESET);
udelay(3);
timeout = 300;
while (!((val = azx_sd_readb(azx_dev, SD_CTL)) & SD_CTL_STREAM_RESET) &&
--timeout)
;
val &= ~SD_CTL_STREAM_RESET;
azx_sd_writeb(azx_dev, SD_CTL, val);
udelay(3);
timeout = 300;
/* waiting for hardware to report that the stream is out of reset */
while (((val = azx_sd_readb(azx_dev, SD_CTL)) & SD_CTL_STREAM_RESET) &&
--timeout)
;
/* reset first position - may not be synced with hw at this time */
*azx_dev->posbuf = 0;
}
/*
* set up the SD for streaming
*/
static int azx_setup_controller(struct azx *chip, struct azx_dev *azx_dev)
{
/* make sure the run bit is zero for SD */
azx_stream_clear(chip, azx_dev);
/* program the stream_tag */
azx_sd_writel(azx_dev, SD_CTL,
(azx_sd_readl(azx_dev, SD_CTL) & ~SD_CTL_STREAM_TAG_MASK)|
(azx_dev->stream_tag << SD_CTL_STREAM_TAG_SHIFT));
/* program the length of samples in cyclic buffer */
azx_sd_writel(azx_dev, SD_CBL, azx_dev->bufsize);
/* program the stream format */
/* this value needs to be the same as the one programmed */
azx_sd_writew(azx_dev, SD_FORMAT, azx_dev->format_val);
/* program the stream LVI (last valid index) of the BDL */
azx_sd_writew(azx_dev, SD_LVI, azx_dev->frags - 1);
/* program the BDL address */
/* lower BDL address */
azx_sd_writel(azx_dev, SD_BDLPL, (u32)azx_dev->bdl.addr);
/* upper BDL address */
azx_sd_writel(azx_dev, SD_BDLPU, upper_32_bits(azx_dev->bdl.addr));
/* enable the position buffer */
if (chip->position_fix[0] != POS_FIX_LPIB ||
chip->position_fix[1] != POS_FIX_LPIB) {
if (!(azx_readl(chip, DPLBASE) & ICH6_DPLBASE_ENABLE))
azx_writel(chip, DPLBASE,
(u32)chip->posbuf.addr | ICH6_DPLBASE_ENABLE);
}
/* set the interrupt enable bits in the descriptor control register */
azx_sd_writel(azx_dev, SD_CTL,
azx_sd_readl(azx_dev, SD_CTL) | SD_INT_MASK);
return 0;
}
/*
* Probe the given codec address
*/
static int probe_codec(struct azx *chip, int addr)
{
unsigned int cmd = (addr << 28) | (AC_NODE_ROOT << 20) |
(AC_VERB_PARAMETERS << 8) | AC_PAR_VENDOR_ID;
unsigned int res;
mutex_lock(&chip->bus->cmd_mutex);
chip->probing = 1;
azx_send_cmd(chip->bus, cmd);
res = azx_get_response(chip->bus, addr);
chip->probing = 0;
mutex_unlock(&chip->bus->cmd_mutex);
if (res == -1)
return -EIO;
snd_printdd(SFX "codec #%d probed OK\n", addr);
return 0;
}
static int azx_attach_pcm_stream(struct hda_bus *bus, struct hda_codec *codec,
struct hda_pcm *cpcm);
static void azx_stop_chip(struct azx *chip);
static void azx_bus_reset(struct hda_bus *bus)
{
struct azx *chip = bus->private_data;
bus->in_reset = 1;
azx_stop_chip(chip);
azx_init_chip(chip, 1);
#ifdef CONFIG_PM
if (chip->initialized) {
int i;
for (i = 0; i < HDA_MAX_PCMS; i++)
snd_pcm_suspend_all(chip->pcm[i]);
snd_hda_suspend(chip->bus);
snd_hda_resume(chip->bus);
}
#endif
bus->in_reset = 0;
}
/*
* Codec initialization
*/
/* number of codec slots for each chipset: 0 = default slots (i.e. 4) */
static unsigned int azx_max_codecs[AZX_NUM_DRIVERS] __devinitdata = {
[AZX_DRIVER_NVIDIA] = 8,
[AZX_DRIVER_TERA] = 1,
};
static int __devinit azx_codec_create(struct azx *chip, const char *model)
{
struct hda_bus_template bus_temp;
int c, codecs, err;
int max_slots;
memset(&bus_temp, 0, sizeof(bus_temp));
bus_temp.private_data = chip;
bus_temp.modelname = model;
bus_temp.pci = chip->pci;
bus_temp.ops.command = azx_send_cmd;
bus_temp.ops.get_response = azx_get_response;
bus_temp.ops.attach_pcm = azx_attach_pcm_stream;
bus_temp.ops.bus_reset = azx_bus_reset;
#ifdef CONFIG_SND_HDA_POWER_SAVE
bus_temp.power_save = &power_save;
bus_temp.ops.pm_notify = azx_power_notify;
#endif
err = snd_hda_bus_new(chip->card, &bus_temp, &chip->bus);
if (err < 0)
return err;
if (chip->driver_caps & AZX_DCAPS_RIRB_DELAY) {
snd_printd(SFX "Enable delay in RIRB handling\n");
chip->bus->needs_damn_long_delay = 1;
}
codecs = 0;
max_slots = azx_max_codecs[chip->driver_type];
if (!max_slots)
max_slots = AZX_DEFAULT_CODECS;
/* First try to probe all given codec slots */
for (c = 0; c < max_slots; c++) {
if ((chip->codec_mask & (1 << c)) & chip->codec_probe_mask) {
if (probe_codec(chip, c) < 0) {
/* Some BIOSen give you wrong codec addresses
* that don't exist
*/
snd_printk(KERN_WARNING SFX
"Codec #%d probe error; "
"disabling it...\n", c);
chip->codec_mask &= ~(1 << c);
/* Worse, accessing a non-existing codec often
* screws up the controller chip and disturbs
* further communication. Thus if an error occurs
* during probing, it's better to reset the
* controller chip and get back to a sane state.
*/
azx_stop_chip(chip);
azx_init_chip(chip, 1);
}
}
}
/* AMD chipsets often cause communication stalls upon certain
* sequences such as pin detection. It seems that forcing synced
* access works around the stall. Grrr...
*/
if (chip->driver_caps & AZX_DCAPS_SYNC_WRITE) {
snd_printd(SFX "Enable sync_write for stable communication\n");
chip->bus->sync_write = 1;
chip->bus->allow_bus_reset = 1;
}
/* Then create codec instances */
for (c = 0; c < max_slots; c++) {
if ((chip->codec_mask & (1 << c)) & chip->codec_probe_mask) {
struct hda_codec *codec;
err = snd_hda_codec_new(chip->bus, c, &codec);
if (err < 0)
continue;
codec->beep_mode = chip->beep_mode;
codecs++;
}
}
if (!codecs) {
snd_printk(KERN_ERR SFX "no codecs initialized\n");
return -ENXIO;
}
return 0;
}
/* configure each codec instance */
static int __devinit azx_codec_configure(struct azx *chip)
{
struct hda_codec *codec;
list_for_each_entry(codec, &chip->bus->codec_list, list) {
snd_hda_codec_configure(codec);
}
return 0;
}
/*
* PCM support
*/
/* assign a stream for the PCM */
static inline struct azx_dev *
azx_assign_device(struct azx *chip, struct snd_pcm_substream *substream)
{
int dev, i, nums;
struct azx_dev *res = NULL;
if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
dev = chip->playback_index_offset;
nums = chip->playback_streams;
} else {
dev = chip->capture_index_offset;
nums = chip->capture_streams;
}
for (i = 0; i < nums; i++, dev++)
if (!chip->azx_dev[dev].opened) {
res = &chip->azx_dev[dev];
if (res->device == substream->pcm->device)
break;
}
if (res) {
res->opened = 1;
res->device = substream->pcm->device;
}
return res;
}
/* release the assigned stream */
static inline void azx_release_device(struct azx_dev *azx_dev)
{
azx_dev->opened = 0;
}
static struct snd_pcm_hardware azx_pcm_hw = {
.info = (SNDRV_PCM_INFO_MMAP |
SNDRV_PCM_INFO_INTERLEAVED |
SNDRV_PCM_INFO_BLOCK_TRANSFER |
SNDRV_PCM_INFO_MMAP_VALID |
/* No full-resume yet implemented */
/* SNDRV_PCM_INFO_RESUME |*/
SNDRV_PCM_INFO_PAUSE |
SNDRV_PCM_INFO_SYNC_START |
SNDRV_PCM_INFO_NO_PERIOD_WAKEUP),
.formats = SNDRV_PCM_FMTBIT_S16_LE,
.rates = SNDRV_PCM_RATE_48000,
.rate_min = 48000,
.rate_max = 48000,
.channels_min = 2,
.channels_max = 2,
.buffer_bytes_max = AZX_MAX_BUF_SIZE,
.period_bytes_min = 128,
.period_bytes_max = AZX_MAX_BUF_SIZE / 2,
.periods_min = 2,
.periods_max = AZX_MAX_FRAG,
.fifo_size = 0,
};
struct azx_pcm {
struct azx *chip;
struct hda_codec *codec;
struct hda_pcm_stream *hinfo[2];
};
static int azx_pcm_open(struct snd_pcm_substream *substream)
{
struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
struct hda_pcm_stream *hinfo = apcm->hinfo[substream->stream];
struct azx *chip = apcm->chip;
struct azx_dev *azx_dev;
struct snd_pcm_runtime *runtime = substream->runtime;
unsigned long flags;
int err;
mutex_lock(&chip->open_mutex);
azx_dev = azx_assign_device(chip, substream);
if (azx_dev == NULL) {
mutex_unlock(&chip->open_mutex);
return -EBUSY;
}
runtime->hw = azx_pcm_hw;
runtime->hw.channels_min = hinfo->channels_min;
runtime->hw.channels_max = hinfo->channels_max;
runtime->hw.formats = hinfo->formats;
runtime->hw.rates = hinfo->rates;
snd_pcm_limit_hw_rates(runtime);
snd_pcm_hw_constraint_integer(runtime, SNDRV_PCM_HW_PARAM_PERIODS);
snd_pcm_hw_constraint_step(runtime, 0, SNDRV_PCM_HW_PARAM_BUFFER_BYTES,
128);
snd_pcm_hw_constraint_step(runtime, 0, SNDRV_PCM_HW_PARAM_PERIOD_BYTES,
128);
snd_hda_power_up(apcm->codec);
err = hinfo->ops.open(hinfo, apcm->codec, substream);
if (err < 0) {
azx_release_device(azx_dev);
snd_hda_power_down(apcm->codec);
mutex_unlock(&chip->open_mutex);
return err;
}
snd_pcm_limit_hw_rates(runtime);
/* sanity check */
if (snd_BUG_ON(!runtime->hw.channels_min) ||
snd_BUG_ON(!runtime->hw.channels_max) ||
snd_BUG_ON(!runtime->hw.formats) ||
snd_BUG_ON(!runtime->hw.rates)) {
azx_release_device(azx_dev);
hinfo->ops.close(hinfo, apcm->codec, substream);
snd_hda_power_down(apcm->codec);
mutex_unlock(&chip->open_mutex);
return -EINVAL;
}
spin_lock_irqsave(&chip->reg_lock, flags);
azx_dev->substream = substream;
azx_dev->running = 0;
spin_unlock_irqrestore(&chip->reg_lock, flags);
runtime->private_data = azx_dev;
snd_pcm_set_sync(substream);
mutex_unlock(&chip->open_mutex);
return 0;
}
static int azx_pcm_close(struct snd_pcm_substream *substream)
{
struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
struct hda_pcm_stream *hinfo = apcm->hinfo[substream->stream];
struct azx *chip = apcm->chip;
struct azx_dev *azx_dev = get_azx_dev(substream);
unsigned long flags;
mutex_lock(&chip->open_mutex);
spin_lock_irqsave(&chip->reg_lock, flags);
azx_dev->substream = NULL;
azx_dev->running = 0;
spin_unlock_irqrestore(&chip->reg_lock, flags);
azx_release_device(azx_dev);
hinfo->ops.close(hinfo, apcm->codec, substream);
snd_hda_power_down(apcm->codec);
mutex_unlock(&chip->open_mutex);
return 0;
}
static int azx_pcm_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *hw_params)
{
struct azx_dev *azx_dev = get_azx_dev(substream);
azx_dev->bufsize = 0;
azx_dev->period_bytes = 0;
azx_dev->format_val = 0;
return snd_pcm_lib_malloc_pages(substream,
params_buffer_bytes(hw_params));
}
static int azx_pcm_hw_free(struct snd_pcm_substream *substream)
{
struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
struct azx_dev *azx_dev = get_azx_dev(substream);
struct hda_pcm_stream *hinfo = apcm->hinfo[substream->stream];
/* reset BDL address */
azx_sd_writel(azx_dev, SD_BDLPL, 0);
azx_sd_writel(azx_dev, SD_BDLPU, 0);
azx_sd_writel(azx_dev, SD_CTL, 0);
azx_dev->bufsize = 0;
azx_dev->period_bytes = 0;
azx_dev->format_val = 0;
snd_hda_codec_cleanup(apcm->codec, hinfo, substream);
return snd_pcm_lib_free_pages(substream);
}
static int azx_pcm_prepare(struct snd_pcm_substream *substream)
{
struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
struct azx *chip = apcm->chip;
struct azx_dev *azx_dev = get_azx_dev(substream);
struct hda_pcm_stream *hinfo = apcm->hinfo[substream->stream];
struct snd_pcm_runtime *runtime = substream->runtime;
unsigned int bufsize, period_bytes, format_val, stream_tag;
int err;
azx_stream_reset(chip, azx_dev);
format_val = snd_hda_calc_stream_format(runtime->rate,
runtime->channels,
runtime->format,
hinfo->maxbps,
apcm->codec->spdif_ctls);
if (!format_val) {
snd_printk(KERN_ERR SFX
"invalid format_val, rate=%d, ch=%d, format=%d\n",
runtime->rate, runtime->channels, runtime->format);
return -EINVAL;
}
bufsize = snd_pcm_lib_buffer_bytes(substream);
period_bytes = snd_pcm_lib_period_bytes(substream);
snd_printdd(SFX "azx_pcm_prepare: bufsize=0x%x, format=0x%x\n",
bufsize, format_val);
if (bufsize != azx_dev->bufsize ||
period_bytes != azx_dev->period_bytes ||
format_val != azx_dev->format_val) {
azx_dev->bufsize = bufsize;
azx_dev->period_bytes = period_bytes;
azx_dev->format_val = format_val;
err = azx_setup_periods(chip, substream, azx_dev);
if (err < 0)
return err;
}
/* wallclk has a 24MHz clock source */
azx_dev->period_wallclk = (((runtime->period_size * 24000) /
runtime->rate) * 1000);
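/* Worked example: at rate = 48000 and period_size = 1024 frames,
* period_wallclk = ((1024 * 24000) / 48000) * 1000 = 512000 ticks,
* i.e. ~21.3 ms at the 24MHz wall clock.
*/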
azx_setup_controller(chip, azx_dev);
if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
azx_dev->fifo_size = azx_sd_readw(azx_dev, SD_FIFOSIZE) + 1;
else
azx_dev->fifo_size = 0;
stream_tag = azx_dev->stream_tag;
/* CA-IBG chips need the playback stream starting from 1 */
if ((chip->driver_caps & AZX_DCAPS_CTX_WORKAROUND) &&
stream_tag > chip->capture_streams)
stream_tag -= chip->capture_streams;
return snd_hda_codec_prepare(apcm->codec, hinfo, stream_tag,
azx_dev->format_val, substream);
}
static int azx_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
{
struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
struct azx *chip = apcm->chip;
struct azx_dev *azx_dev;
struct snd_pcm_substream *s;
int rstart = 0, start, nsync = 0, sbits = 0;
int nwait, timeout;
switch (cmd) {
case SNDRV_PCM_TRIGGER_START:
rstart = 1;
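/* fall through */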
case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
case SNDRV_PCM_TRIGGER_RESUME:
start = 1;
break;
case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
case SNDRV_PCM_TRIGGER_SUSPEND:
case SNDRV_PCM_TRIGGER_STOP:
start = 0;
break;
default:
return -EINVAL;
}
snd_pcm_group_for_each_entry(s, substream) {
if (s->pcm->card != substream->pcm->card)
continue;
azx_dev = get_azx_dev(s);
sbits |= 1 << azx_dev->index;
nsync++;
snd_pcm_trigger_done(s, substream);
}
spin_lock(&chip->reg_lock);
if (nsync > 1) {
/* first, set SYNC bits of corresponding streams */
azx_writel(chip, SYNC, azx_readl(chip, SYNC) | sbits);
}
snd_pcm_group_for_each_entry(s, substream) {
if (s->pcm->card != substream->pcm->card)
continue;
azx_dev = get_azx_dev(s);
if (start) {
azx_dev->start_wallclk = azx_readl(chip, WALLCLK);
if (!rstart)
azx_dev->start_wallclk -=
azx_dev->period_wallclk;
azx_stream_start(chip, azx_dev);
} else {
azx_stream_stop(chip, azx_dev);
}
azx_dev->running = start;
}
spin_unlock(&chip->reg_lock);
if (start) {
if (nsync == 1)
return 0;
/* wait until all FIFOs get ready */
for (timeout = 5000; timeout; timeout--) {
nwait = 0;
snd_pcm_group_for_each_entry(s, substream) {
if (s->pcm->card != substream->pcm->card)
continue;
azx_dev = get_azx_dev(s);
if (!(azx_sd_readb(azx_dev, SD_STS) &
SD_STS_FIFO_READY))
nwait++;
}
if (!nwait)
break;
cpu_relax();
}
} else {
/* wait until all RUN bits are cleared */
for (timeout = 5000; timeout; timeout--) {
nwait = 0;
snd_pcm_group_for_each_entry(s, substream) {
if (s->pcm->card != substream->pcm->card)
continue;
azx_dev = get_azx_dev(s);
if (azx_sd_readb(azx_dev, SD_CTL) &
SD_CTL_DMA_START)
nwait++;
}
if (!nwait)
break;
cpu_relax();
}
}
if (nsync > 1) {
spin_lock(&chip->reg_lock);
/* reset SYNC bits */
azx_writel(chip, SYNC, azx_readl(chip, SYNC) & ~sbits);
spin_unlock(&chip->reg_lock);
}
return 0;
}
/* get the current DMA position with correction on VIA chips */
static unsigned int azx_via_get_position(struct azx *chip,
struct azx_dev *azx_dev)
{
unsigned int link_pos, mini_pos, bound_pos;
unsigned int mod_link_pos, mod_dma_pos, mod_mini_pos;
unsigned int fifo_size;
link_pos = azx_sd_readl(azx_dev, SD_LPIB);
if (azx_dev->index >= 4) {
/* Playback, no problem using link position */
return link_pos;
}
/* Capture */
/* For new chipsets,
* use mod to get the DMA position just like old chipsets
*/
mod_dma_pos = le32_to_cpu(*azx_dev->posbuf);
mod_dma_pos %= azx_dev->period_bytes;
/* azx_dev->fifo_size doesn't hold the FIFO size of the input stream.
* Read it from base address + offset instead.
*/
fifo_size = readw(chip->remap_addr + VIA_IN_STREAM0_FIFO_SIZE_OFFSET);
if (azx_dev->insufficient) {
/* Link position is never greater than FIFO size */
if (link_pos <= fifo_size)
return 0;
azx_dev->insufficient = 0;
}
if (link_pos <= fifo_size)
mini_pos = azx_dev->bufsize + link_pos - fifo_size;
else
mini_pos = link_pos - fifo_size;
/* Find nearest previous boundary */
mod_mini_pos = mini_pos % azx_dev->period_bytes;
mod_link_pos = link_pos % azx_dev->period_bytes;
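/* Choose the period boundary to add mod_dma_pos onto: roughly, the
* boundary below the raw link position when the FIFO slack doesn't
* straddle it, otherwise the boundary below the FIFO-corrected
* position, or the following boundary when the DMA position has
* already wrapped into the next period.
*/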
if (mod_link_pos >= fifo_size)
bound_pos = link_pos - mod_link_pos;
else if (mod_dma_pos >= mod_mini_pos)
bound_pos = mini_pos - mod_mini_pos;
else {
bound_pos = mini_pos - mod_mini_pos + azx_dev->period_bytes;
if (bound_pos >= azx_dev->bufsize)
bound_pos = 0;
}
/* Calculate the real DMA position we want */
return bound_pos + mod_dma_pos;
}
static unsigned int azx_get_position(struct azx *chip,
struct azx_dev *azx_dev)
{
unsigned int pos;
int stream = azx_dev->substream->stream;
switch (chip->position_fix[stream]) {
case POS_FIX_LPIB:
/* read LPIB */
pos = azx_sd_readl(azx_dev, SD_LPIB);
break;
case POS_FIX_VIACOMBO:
pos = azx_via_get_position(chip, azx_dev);
break;
default:
/* use the position buffer */
pos = le32_to_cpu(*azx_dev->posbuf);
}
if (pos >= azx_dev->bufsize)
pos = 0;
return pos;
}
static snd_pcm_uframes_t azx_pcm_pointer(struct snd_pcm_substream *substream)
{
struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
struct azx *chip = apcm->chip;
struct azx_dev *azx_dev = get_azx_dev(substream);
return bytes_to_frames(substream->runtime,
azx_get_position(chip, azx_dev));
}
/*
* Check whether the current DMA position is acceptable for updating
* periods. Returns non-zero if it's OK.
*
* Many HD-audio controllers appear pretty inaccurate about
* the update-IRQ timing. The IRQ is issued before the data is
* actually processed. So, we need to process it afterwards in a
* workqueue.
*/
static int azx_position_ok(struct azx *chip, struct azx_dev *azx_dev)
{
u32 wallclk;
unsigned int pos;
int stream;
wallclk = azx_readl(chip, WALLCLK) - azx_dev->start_wallclk;
if (wallclk < (azx_dev->period_wallclk * 2) / 3)
return -1; /* bogus (too early) interrupt */
stream = azx_dev->substream->stream;
pos = azx_get_position(chip, azx_dev);
if (chip->position_fix[stream] == POS_FIX_AUTO) {
if (!pos) {
printk(KERN_WARNING
"hda-intel: Invalid position buffer, "
"using LPIB read method instead.\n");
chip->position_fix[stream] = POS_FIX_LPIB;
pos = azx_get_position(chip, azx_dev);
} else
chip->position_fix[stream] = POS_FIX_POSBUF;
}
if (WARN_ONCE(!azx_dev->period_bytes,
"hda-intel: zero azx_dev->period_bytes"))
return -1; /* this shouldn't happen! */
if (wallclk < (azx_dev->period_wallclk * 5) / 4 &&
pos % azx_dev->period_bytes > azx_dev->period_bytes / 2)
/* not OK - the position is still below the next period boundary */
return bdl_pos_adj[chip->dev_index] ? 0 : -1;
azx_dev->start_wallclk += wallclk;
return 1; /* OK, it's fine */
}
/*
* The work for pending PCM period updates.
*/
static void azx_irq_pending_work(struct work_struct *work)
{
struct azx *chip = container_of(work, struct azx, irq_pending_work);
int i, pending, ok;
if (!chip->irq_pending_warned) {
printk(KERN_WARNING
"hda-intel: IRQ timing workaround is activated "
"for card #%d. Suggest a bigger bdl_pos_adj.\n",
chip->card->number);
chip->irq_pending_warned = 1;
}
for (;;) {
pending = 0;
spin_lock_irq(&chip->reg_lock);
for (i = 0; i < chip->num_streams; i++) {
struct azx_dev *azx_dev = &chip->azx_dev[i];
if (!azx_dev->irq_pending ||
!azx_dev->substream ||
!azx_dev->running)
continue;
ok = azx_position_ok(chip, azx_dev);
if (ok > 0) {
azx_dev->irq_pending = 0;
spin_unlock(&chip->reg_lock);
snd_pcm_period_elapsed(azx_dev->substream);
spin_lock(&chip->reg_lock);
} else if (ok < 0) {
pending = 0; /* too early */
} else
pending++;
}
spin_unlock_irq(&chip->reg_lock);
if (!pending)
return;
msleep(1);
}
}
/* clear irq_pending flags and assure no on-going workq */
static void azx_clear_irq_pending(struct azx *chip)
{
int i;
spin_lock_irq(&chip->reg_lock);
for (i = 0; i < chip->num_streams; i++)
chip->azx_dev[i].irq_pending = 0;
spin_unlock_irq(&chip->reg_lock);
}
static struct snd_pcm_ops azx_pcm_ops = {
.open = azx_pcm_open,
.close = azx_pcm_close,
.ioctl = snd_pcm_lib_ioctl,
.hw_params = azx_pcm_hw_params,
.hw_free = azx_pcm_hw_free,
.prepare = azx_pcm_prepare,
.trigger = azx_pcm_trigger,
.pointer = azx_pcm_pointer,
.page = snd_pcm_sgbuf_ops_page,
};
static void azx_pcm_free(struct snd_pcm *pcm)
{
struct azx_pcm *apcm = pcm->private_data;
if (apcm) {
apcm->chip->pcm[pcm->device] = NULL;
kfree(apcm);
}
}
static int
azx_attach_pcm_stream(struct hda_bus *bus, struct hda_codec *codec,
struct hda_pcm *cpcm)
{
struct azx *chip = bus->private_data;
struct snd_pcm *pcm;
struct azx_pcm *apcm;
int pcm_dev = cpcm->device;
int s, err;
if (pcm_dev >= HDA_MAX_PCMS) {
snd_printk(KERN_ERR SFX "Invalid PCM device number %d\n",
pcm_dev);
return -EINVAL;
}
if (chip->pcm[pcm_dev]) {
snd_printk(KERN_ERR SFX "PCM %d already exists\n", pcm_dev);
return -EBUSY;
}
err = snd_pcm_new(chip->card, cpcm->name, pcm_dev,
cpcm->stream[SNDRV_PCM_STREAM_PLAYBACK].substreams,
cpcm->stream[SNDRV_PCM_STREAM_CAPTURE].substreams,
&pcm);
if (err < 0)
return err;
strlcpy(pcm->name, cpcm->name, sizeof(pcm->name));
apcm = kzalloc(sizeof(*apcm), GFP_KERNEL);
if (apcm == NULL)
return -ENOMEM;
apcm->chip = chip;
apcm->codec = codec;
pcm->private_data = apcm;
pcm->private_free = azx_pcm_free;
if (cpcm->pcm_type == HDA_PCM_TYPE_MODEM)
pcm->dev_class = SNDRV_PCM_CLASS_MODEM;
chip->pcm[pcm_dev] = pcm;
cpcm->pcm = pcm;
for (s = 0; s < 2; s++) {
apcm->hinfo[s] = &cpcm->stream[s];
if (cpcm->stream[s].substreams)
snd_pcm_set_ops(pcm, s, &azx_pcm_ops);
}
/* buffer pre-allocation */
snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV_SG,
snd_dma_pci_data(chip->pci),
1024 * 64, 32 * 1024 * 1024);
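/* (64kB is pre-allocated per substream; up to 32MB may be requested
* later through the ALSA prealloc proc interface)
*/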
return 0;
}
/*
* mixer creation - all stuff is implemented in hda module
*/
static int __devinit azx_mixer_create(struct azx *chip)
{
return snd_hda_build_controls(chip->bus);
}
/*
* initialize SD streams
*/
static int __devinit azx_init_stream(struct azx *chip)
{
int i;
/* initialize each stream (aka device):
* assign the starting BDL address to each stream (device)
* and initialize it
*/
for (i = 0; i < chip->num_streams; i++) {
struct azx_dev *azx_dev = &chip->azx_dev[i];
azx_dev->posbuf = (u32 __iomem *)(chip->posbuf.area + i * 8);
/* offset: SDI0=0x80, SDI1=0xa0, ... SDO3=0x160 */
azx_dev->sd_addr = chip->remap_addr + (0x20 * i + 0x80);
/* int mask: SDI0=0x01, SDI1=0x02, ... SDO3=0x80 */
azx_dev->sd_int_sta_mask = 1 << i;
/* stream tag: must be non-zero and unique */
azx_dev->index = i;
azx_dev->stream_tag = i + 1;
}
return 0;
}
static int azx_acquire_irq(struct azx *chip, int do_disconnect)
{
if (request_irq(chip->pci->irq, azx_interrupt,
chip->msi ? 0 : IRQF_SHARED,
"hda_intel", chip)) {
printk(KERN_ERR "hda-intel: unable to grab IRQ %d, "
"disabling device\n", chip->pci->irq);
if (do_disconnect)
snd_card_disconnect(chip->card);
return -1;
}
chip->irq = chip->pci->irq;
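/* disable legacy INTx when MSI is in use, enable it otherwise */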
pci_intx(chip->pci, !chip->msi);
return 0;
}
static void azx_stop_chip(struct azx *chip)
{
if (!chip->initialized)
return;
/* disable interrupts */
azx_int_disable(chip);
azx_int_clear(chip);
/* disable CORB/RIRB */
azx_free_cmd_io(chip);
/* disable position buffer */
azx_writel(chip, DPLBASE, 0);
azx_writel(chip, DPUBASE, 0);
chip->initialized = 0;
}
#ifdef CONFIG_SND_HDA_POWER_SAVE
/* power-up/down the controller */
static void azx_power_notify(struct hda_bus *bus)
{
struct azx *chip = bus->private_data;
struct hda_codec *c;
int power_on = 0;
list_for_each_entry(c, &bus->codec_list, list) {
if (c->power_on) {
power_on = 1;
break;
}
}
if (power_on)
azx_init_chip(chip, 1);
else if (chip->running && power_save_controller &&
!bus->power_keep_link_on)
azx_stop_chip(chip);
}
#endif /* CONFIG_SND_HDA_POWER_SAVE */
#ifdef CONFIG_PM
/*
* power management
*/
static int snd_hda_codecs_inuse(struct hda_bus *bus)
{
struct hda_codec *codec;
list_for_each_entry(codec, &bus->codec_list, list) {
if (snd_hda_codec_needs_resume(codec))
return 1;
}
return 0;
}
static int azx_suspend(struct pci_dev *pci, pm_message_t state)
{
struct snd_card *card = pci_get_drvdata(pci);
struct azx *chip = card->private_data;
int i;
snd_power_change_state(card, SNDRV_CTL_POWER_D3hot);
azx_clear_irq_pending(chip);
for (i = 0; i < HDA_MAX_PCMS; i++)
snd_pcm_suspend_all(chip->pcm[i]);
if (chip->initialized)
snd_hda_suspend(chip->bus);
azx_stop_chip(chip);
if (chip->irq >= 0) {
free_irq(chip->irq, chip);
chip->irq = -1;
}
if (chip->msi)
pci_disable_msi(chip->pci);
pci_disable_device(pci);
pci_save_state(pci);
pci_set_power_state(pci, pci_choose_state(pci, state));
return 0;
}
static int azx_resume(struct pci_dev *pci)
{
struct snd_card *card = pci_get_drvdata(pci);
struct azx *chip = card->private_data;
pci_set_power_state(pci, PCI_D0);
pci_restore_state(pci);
if (pci_enable_device(pci) < 0) {
printk(KERN_ERR "hda-intel: pci_enable_device failed, "
"disabling device\n");
snd_card_disconnect(card);
return -EIO;
}
pci_set_master(pci);
if (chip->msi)
if (pci_enable_msi(pci) < 0)
chip->msi = 0;
if (azx_acquire_irq(chip, 1) < 0)
return -EIO;
azx_init_pci(chip);
if (snd_hda_codecs_inuse(chip->bus))
azx_init_chip(chip, 1);
snd_hda_resume(chip->bus);
snd_power_change_state(card, SNDRV_CTL_POWER_D0);
return 0;
}
#endif /* CONFIG_PM */
/*
* reboot notifier for hang-up problem at power-down
*/
static int azx_halt(struct notifier_block *nb, unsigned long event, void *buf)
{
struct azx *chip = container_of(nb, struct azx, reboot_notifier);
snd_hda_bus_reboot_notify(chip->bus);
azx_stop_chip(chip);
return NOTIFY_OK;
}
static void azx_notifier_register(struct azx *chip)
{
chip->reboot_notifier.notifier_call = azx_halt;
register_reboot_notifier(&chip->reboot_notifier);
}
static void azx_notifier_unregister(struct azx *chip)
{
if (chip->reboot_notifier.notifier_call)
unregister_reboot_notifier(&chip->reboot_notifier);
}
/*
* destructor
*/
static int azx_free(struct azx *chip)
{
int i;
azx_notifier_unregister(chip);
if (chip->initialized) {
azx_clear_irq_pending(chip);
for (i = 0; i < chip->num_streams; i++)
azx_stream_stop(chip, &chip->azx_dev[i]);
azx_stop_chip(chip);
}
if (chip->irq >= 0)
free_irq(chip->irq, (void*)chip);
if (chip->msi)
pci_disable_msi(chip->pci);
if (chip->remap_addr)
iounmap(chip->remap_addr);
if (chip->azx_dev) {
for (i = 0; i < chip->num_streams; i++)
if (chip->azx_dev[i].bdl.area)
snd_dma_free_pages(&chip->azx_dev[i].bdl);
}
if (chip->rb.area)
snd_dma_free_pages(&chip->rb);
if (chip->posbuf.area)
snd_dma_free_pages(&chip->posbuf);
pci_release_regions(chip->pci);
pci_disable_device(chip->pci);
kfree(chip->azx_dev);
kfree(chip);
return 0;
}
static int azx_dev_free(struct snd_device *device)
{
return azx_free(device->device_data);
}
/*
* white/black-listing for position_fix
*/
static struct snd_pci_quirk position_fix_list[] __devinitdata = {
SND_PCI_QUIRK(0x1025, 0x009f, "Acer Aspire 5110", POS_FIX_LPIB),
SND_PCI_QUIRK(0x1025, 0x026f, "Acer Aspire 5538", POS_FIX_LPIB),
SND_PCI_QUIRK(0x1028, 0x01cc, "Dell D820", POS_FIX_LPIB),
SND_PCI_QUIRK(0x1028, 0x01de, "Dell Precision 390", POS_FIX_LPIB),
SND_PCI_QUIRK(0x1028, 0x01f6, "Dell Latitude 131L", POS_FIX_LPIB),
SND_PCI_QUIRK(0x1028, 0x02c6, "Dell Inspiron 1010", POS_FIX_LPIB),
SND_PCI_QUIRK(0x1028, 0x0470, "Dell Inspiron 1120", POS_FIX_LPIB),
SND_PCI_QUIRK(0x103c, 0x306d, "HP dv3", POS_FIX_LPIB),
SND_PCI_QUIRK(0x1043, 0x813d, "ASUS P5AD2", POS_FIX_LPIB),
SND_PCI_QUIRK(0x1043, 0x81b3, "ASUS", POS_FIX_LPIB),
SND_PCI_QUIRK(0x1043, 0x81e7, "ASUS M2V", POS_FIX_LPIB),
SND_PCI_QUIRK(0x1043, 0x8410, "ASUS", POS_FIX_LPIB),
SND_PCI_QUIRK(0x104d, 0x9069, "Sony VPCS11V9E", POS_FIX_LPIB),
SND_PCI_QUIRK(0x1106, 0x3288, "ASUS M2V-MX SE", POS_FIX_LPIB),
SND_PCI_QUIRK(0x1179, 0xff10, "Toshiba A100-259", POS_FIX_LPIB),
SND_PCI_QUIRK(0x1297, 0x3166, "Shuttle", POS_FIX_LPIB),
SND_PCI_QUIRK(0x1458, 0xa022, "ga-ma770-ud3", POS_FIX_LPIB),
SND_PCI_QUIRK(0x1462, 0x1002, "MSI Wind U115", POS_FIX_LPIB),
SND_PCI_QUIRK(0x1565, 0x820f, "Biostar Microtech", POS_FIX_LPIB),
SND_PCI_QUIRK(0x1565, 0x8218, "Biostar Microtech", POS_FIX_LPIB),
SND_PCI_QUIRK(0x1849, 0x0888, "775Dual-VSTA", POS_FIX_LPIB),
SND_PCI_QUIRK(0x8086, 0x2503, "DG965OT AAD63733-203", POS_FIX_LPIB),
SND_PCI_QUIRK(0x8086, 0xd601, "eMachines T5212", POS_FIX_LPIB),
{}
};
static int __devinit check_position_fix(struct azx *chip, int fix)
{
const struct snd_pci_quirk *q;
switch (fix) {
case POS_FIX_LPIB:
case POS_FIX_POSBUF:
case POS_FIX_VIACOMBO:
return fix;
}
q = snd_pci_quirk_lookup(chip->pci, position_fix_list);
if (q) {
printk(KERN_INFO
"hda_intel: position_fix set to %d "
"for device %04x:%04x\n",
q->value, q->subvendor, q->subdevice);
return q->value;
}
/* Check whether a VIA/ATI HD Audio controller exists */
if (chip->driver_caps & AZX_DCAPS_POSFIX_VIA) {
snd_printd(SFX "Using VIACOMBO position fix\n");
return POS_FIX_VIACOMBO;
}
if (chip->driver_caps & AZX_DCAPS_POSFIX_LPIB) {
snd_printd(SFX "Using LPIB position fix\n");
return POS_FIX_LPIB;
}
return POS_FIX_AUTO;
}
/*
* black-lists for probe_mask
*/
static struct snd_pci_quirk probe_mask_list[] __devinitdata = {
/* Thinkpads often break the controller communication when accessing
* the non-working (or non-existing) modem codec slot.
*/
SND_PCI_QUIRK(0x1014, 0x05b7, "Thinkpad Z60", 0x01),
SND_PCI_QUIRK(0x17aa, 0x2010, "Thinkpad X/T/R60", 0x01),
SND_PCI_QUIRK(0x17aa, 0x20ac, "Thinkpad X/T/R61", 0x01),
/* broken BIOS */
SND_PCI_QUIRK(0x1028, 0x20ac, "Dell Studio Desktop", 0x01),
/* including bogus ALC268 in slot#2 that conflicts with ALC888 */
SND_PCI_QUIRK(0x17c0, 0x4085, "Medion MD96630", 0x01),
/* forced codec slots */
SND_PCI_QUIRK(0x1043, 0x1262, "ASUS W5Fm", 0x103),
SND_PCI_QUIRK(0x1046, 0x1262, "ASUS W5F", 0x103),
{}
};
#define AZX_FORCE_CODEC_MASK 0x100
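/* In probe_mask quirk values, bit 8 (AZX_FORCE_CODEC_MASK) means that the
* low byte is applied directly as the codec slot mask; e.g. the 0x103
* entries above force probing of codec slots 0 and 1 only.
*/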
static void __devinit check_probe_mask(struct azx *chip, int dev)
{
const struct snd_pci_quirk *q;
chip->codec_probe_mask = probe_mask[dev];
if (chip->codec_probe_mask == -1) {
q = snd_pci_quirk_lookup(chip->pci, probe_mask_list);
if (q) {
printk(KERN_INFO
"hda_intel: probe_mask set to 0x%x "
"for device %04x:%04x\n",
q->value, q->subvendor, q->subdevice);
chip->codec_probe_mask = q->value;
}
}
/* check forced option */
if (chip->codec_probe_mask != -1 &&
(chip->codec_probe_mask & AZX_FORCE_CODEC_MASK)) {
chip->codec_mask = chip->codec_probe_mask & 0xff;
printk(KERN_INFO "hda_intel: codec_mask forced to 0x%x\n",
chip->codec_mask);
}
}
/*
* white/black-list for enable_msi
*/
static struct snd_pci_quirk msi_black_list[] __devinitdata = {
SND_PCI_QUIRK(0x1043, 0x81f2, "ASUS", 0), /* Athlon64 X2 + nvidia */
SND_PCI_QUIRK(0x1043, 0x81f6, "ASUS", 0), /* nvidia */
SND_PCI_QUIRK(0x1043, 0x822d, "ASUS", 0), /* Athlon64 X2 + nvidia MCP55 */
SND_PCI_QUIRK(0x1849, 0x0888, "ASRock", 0), /* Athlon64 X2 + nvidia */
SND_PCI_QUIRK(0xa0a0, 0x0575, "Aopen MZ915-M", 0), /* ICH6 */
{}
};
static void __devinit check_msi(struct azx *chip)
{
const struct snd_pci_quirk *q;
if (enable_msi >= 0) {
chip->msi = !!enable_msi;
return;
}
chip->msi = 1; /* enable MSI as default */
q = snd_pci_quirk_lookup(chip->pci, msi_black_list);
if (q) {
printk(KERN_INFO
"hda_intel: msi for device %04x:%04x set to %d\n",
q->subvendor, q->subdevice, q->value);
chip->msi = q->value;
return;
}
/* NVIDIA chipsets seem to cause trouble with MSI */
if (chip->driver_caps & AZX_DCAPS_NO_MSI) {
printk(KERN_INFO "hda_intel: Disabling MSI\n");
chip->msi = 0;
}
}
/*
* constructor
*/
static int __devinit azx_create(struct snd_card *card, struct pci_dev *pci,
int dev, unsigned int driver_caps,
struct azx **rchip)
{
struct azx *chip;
int i, err;
unsigned short gcap;
static struct snd_device_ops ops = {
.dev_free = azx_dev_free,
};
*rchip = NULL;
err = pci_enable_device(pci);
if (err < 0)
return err;
chip = kzalloc(sizeof(*chip), GFP_KERNEL);
if (!chip) {
snd_printk(KERN_ERR SFX "cannot allocate chip\n");
pci_disable_device(pci);
return -ENOMEM;
}
spin_lock_init(&chip->reg_lock);
mutex_init(&chip->open_mutex);
chip->card = card;
chip->pci = pci;
chip->irq = -1;
chip->driver_caps = driver_caps;
chip->driver_type = driver_caps & 0xff;
check_msi(chip);
chip->dev_index = dev;
INIT_WORK(&chip->irq_pending_work, azx_irq_pending_work);
chip->position_fix[0] = chip->position_fix[1] =
check_position_fix(chip, position_fix[dev]);
check_probe_mask(chip, dev);
chip->single_cmd = single_cmd;
if (bdl_pos_adj[dev] < 0) {
switch (chip->driver_type) {
case AZX_DRIVER_ICH:
case AZX_DRIVER_PCH:
bdl_pos_adj[dev] = 1;
break;
default:
bdl_pos_adj[dev] = 32;
break;
}
}
#if BITS_PER_LONG != 64
/* Fix up base address on ULI M5461 */
if (chip->driver_type == AZX_DRIVER_ULI) {
u16 tmp3;
pci_read_config_word(pci, 0x40, &tmp3);
pci_write_config_word(pci, 0x40, tmp3 | 0x10);
pci_write_config_dword(pci, PCI_BASE_ADDRESS_1, 0);
}
#endif
err = pci_request_regions(pci, "ICH HD audio");
if (err < 0) {
kfree(chip);
pci_disable_device(pci);
return err;
}
chip->addr = pci_resource_start(pci, 0);
chip->remap_addr = pci_ioremap_bar(pci, 0);
if (chip->remap_addr == NULL) {
snd_printk(KERN_ERR SFX "ioremap error\n");
err = -ENXIO;
goto errout;
}
if (chip->msi)
if (pci_enable_msi(pci) < 0)
chip->msi = 0;
if (azx_acquire_irq(chip, 0) < 0) {
err = -EBUSY;
goto errout;
}
pci_set_master(pci);
synchronize_irq(chip->irq);
gcap = azx_readw(chip, GCAP);
snd_printdd(SFX "chipset global capabilities = 0x%x\n", gcap);
/* disable SB600 64bit support for safety */
if (chip->pci->vendor == PCI_VENDOR_ID_ATI) {
struct pci_dev *p_smbus;
p_smbus = pci_get_device(PCI_VENDOR_ID_ATI,
PCI_DEVICE_ID_ATI_SBX00_SMBUS,
NULL);
if (p_smbus) {
if (p_smbus->revision < 0x30)
gcap &= ~ICH6_GCAP_64OK;
pci_dev_put(p_smbus);
}
}
/* disable 64bit DMA address on some devices */
if (chip->driver_caps & AZX_DCAPS_NO_64BIT) {
snd_printd(SFX "Disabling 64bit DMA\n");
gcap &= ~ICH6_GCAP_64OK;
}
/* allow 64bit DMA address if supported by H/W */
if ((gcap & ICH6_GCAP_64OK) && !pci_set_dma_mask(pci, DMA_BIT_MASK(64)))
pci_set_consistent_dma_mask(pci, DMA_BIT_MASK(64));
else {
pci_set_dma_mask(pci, DMA_BIT_MASK(32));
pci_set_consistent_dma_mask(pci, DMA_BIT_MASK(32));
}
/* read number of streams from GCAP register instead of using
* hardcoded value
*/
chip->capture_streams = (gcap >> 8) & 0x0f;
chip->playback_streams = (gcap >> 12) & 0x0f;
if (!chip->playback_streams && !chip->capture_streams) {
/* gcap didn't give any info, switching to old method */
switch (chip->driver_type) {
case AZX_DRIVER_ULI:
chip->playback_streams = ULI_NUM_PLAYBACK;
chip->capture_streams = ULI_NUM_CAPTURE;
break;
case AZX_DRIVER_ATIHDMI:
chip->playback_streams = ATIHDMI_NUM_PLAYBACK;
chip->capture_streams = ATIHDMI_NUM_CAPTURE;
break;
case AZX_DRIVER_GENERIC:
default:
chip->playback_streams = ICH6_NUM_PLAYBACK;
chip->capture_streams = ICH6_NUM_CAPTURE;
break;
}
}
chip->capture_index_offset = 0;
chip->playback_index_offset = chip->capture_streams;
chip->num_streams = chip->playback_streams + chip->capture_streams;
chip->azx_dev = kcalloc(chip->num_streams, sizeof(*chip->azx_dev),
GFP_KERNEL);
if (!chip->azx_dev) {
snd_printk(KERN_ERR SFX "cannot malloc azx_dev\n");
err = -ENOMEM;
goto errout;
}
for (i = 0; i < chip->num_streams; i++) {
/* allocate memory for the BDL for each stream */
err = snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV,
snd_dma_pci_data(chip->pci),
BDL_SIZE, &chip->azx_dev[i].bdl);
if (err < 0) {
snd_printk(KERN_ERR SFX "cannot allocate BDL\n");
goto errout;
}
}
/* allocate memory for the position buffer */
err = snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV,
snd_dma_pci_data(chip->pci),
chip->num_streams * 8, &chip->posbuf);
if (err < 0) {
snd_printk(KERN_ERR SFX "cannot allocate posbuf\n");
goto errout;
}
/* allocate CORB/RIRB */
err = azx_alloc_cmd_io(chip);
if (err < 0)
goto errout;
/* initialize streams */
azx_init_stream(chip);
/* initialize chip */
azx_init_pci(chip);
azx_init_chip(chip, (probe_only[dev] & 2) == 0);
/* codec detection */
if (!chip->codec_mask) {
snd_printk(KERN_ERR SFX "no codecs found!\n");
err = -ENODEV;
goto errout;
}
err = snd_device_new(card, SNDRV_DEV_LOWLEVEL, chip, &ops);
if (err < 0) {
snd_printk(KERN_ERR SFX "Error creating device [card]!\n");
goto errout;
}
strcpy(card->driver, "HDA-Intel");
strlcpy(card->shortname, driver_short_names[chip->driver_type],
sizeof(card->shortname));
snprintf(card->longname, sizeof(card->longname),
"%s at 0x%lx irq %i",
card->shortname, chip->addr, chip->irq);
*rchip = chip;
return 0;
errout:
azx_free(chip);
return err;
}
static void power_down_all_codecs(struct azx *chip)
{
#ifdef CONFIG_SND_HDA_POWER_SAVE
/* The codecs were powered up in snd_hda_codec_new().
* Now that all initialization is done, power them down if possible
*/
struct hda_codec *codec;
list_for_each_entry(codec, &chip->bus->codec_list, list) {
snd_hda_power_down(codec);
}
#endif
}
static int __devinit azx_probe(struct pci_dev *pci,
const struct pci_device_id *pci_id)
{
static int dev;
struct snd_card *card;
struct azx *chip;
int err;
if (dev >= SNDRV_CARDS)
return -ENODEV;
if (!enable[dev]) {
dev++;
return -ENOENT;
}
err = snd_card_create(index[dev], id[dev], THIS_MODULE, 0, &card);
if (err < 0) {
snd_printk(KERN_ERR SFX "Error creating card!\n");
return err;
}
/* set this here since it's referred to in snd_hda_load_patch() */
snd_card_set_dev(card, &pci->dev);
err = azx_create(card, pci, dev, pci_id->driver_data, &chip);
if (err < 0)
goto out_free;
card->private_data = chip;
#ifdef CONFIG_SND_HDA_INPUT_BEEP
chip->beep_mode = beep_mode[dev];
#endif
/* create codec instances */
err = azx_codec_create(chip, model[dev]);
if (err < 0)
goto out_free;
#ifdef CONFIG_SND_HDA_PATCH_LOADER
if (patch[dev] && *patch[dev]) {
snd_printk(KERN_ERR SFX "Applying patch firmware '%s'\n",
patch[dev]);
err = snd_hda_load_patch(chip->bus, patch[dev]);
if (err < 0)
goto out_free;
}
#endif
if ((probe_only[dev] & 1) == 0) {
err = azx_codec_configure(chip);
if (err < 0)
goto out_free;
}
/* create PCM streams */
err = snd_hda_build_pcms(chip->bus);
if (err < 0)
goto out_free;
/* create mixer controls */
err = azx_mixer_create(chip);
if (err < 0)
goto out_free;
err = snd_card_register(card);
if (err < 0)
goto out_free;
pci_set_drvdata(pci, card);
chip->running = 1;
power_down_all_codecs(chip);
azx_notifier_register(chip);
dev++;
return err;
out_free:
snd_card_free(card);
return err;
}
static void __devexit azx_remove(struct pci_dev *pci)
{
snd_card_free(pci_get_drvdata(pci));
pci_set_drvdata(pci, NULL);
}
/* PCI IDs */
static DEFINE_PCI_DEVICE_TABLE(azx_ids) = {
/* CPT */
{ PCI_DEVICE(0x8086, 0x1c20),
.driver_data = AZX_DRIVER_PCH | AZX_DCAPS_SCH_SNOOP },
/* PBG */
{ PCI_DEVICE(0x8086, 0x1d20),
.driver_data = AZX_DRIVER_PCH | AZX_DCAPS_SCH_SNOOP },
/* Panther Point */
{ PCI_DEVICE(0x8086, 0x1e20),
.driver_data = AZX_DRIVER_PCH | AZX_DCAPS_SCH_SNOOP },
/* SCH */
{ PCI_DEVICE(0x8086, 0x811b),
.driver_data = AZX_DRIVER_SCH | AZX_DCAPS_SCH_SNOOP },
/* Generic Intel */
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_ANY_ID),
.class = PCI_CLASS_MULTIMEDIA_HD_AUDIO << 8,
.class_mask = 0xffffff,
.driver_data = AZX_DRIVER_ICH },
/* ATI SB 450/600/700/800/900 */
{ PCI_DEVICE(0x1002, 0x437b),
.driver_data = AZX_DRIVER_ATI | AZX_DCAPS_PRESET_ATI_SB },
{ PCI_DEVICE(0x1002, 0x4383),
.driver_data = AZX_DRIVER_ATI | AZX_DCAPS_PRESET_ATI_SB },
/* AMD Hudson */
{ PCI_DEVICE(0x1022, 0x780d),
.driver_data = AZX_DRIVER_GENERIC | AZX_DCAPS_PRESET_ATI_SB },
/* ATI HDMI */
{ PCI_DEVICE(0x1002, 0x793b),
.driver_data = AZX_DRIVER_ATIHDMI | AZX_DCAPS_PRESET_ATI_HDMI },
{ PCI_DEVICE(0x1002, 0x7919),
.driver_data = AZX_DRIVER_ATIHDMI | AZX_DCAPS_PRESET_ATI_HDMI },
{ PCI_DEVICE(0x1002, 0x960f),
.driver_data = AZX_DRIVER_ATIHDMI | AZX_DCAPS_PRESET_ATI_HDMI },
{ PCI_DEVICE(0x1002, 0x970f),
.driver_data = AZX_DRIVER_ATIHDMI | AZX_DCAPS_PRESET_ATI_HDMI },
{ PCI_DEVICE(0x1002, 0xaa00),
.driver_data = AZX_DRIVER_ATIHDMI | AZX_DCAPS_PRESET_ATI_HDMI },
{ PCI_DEVICE(0x1002, 0xaa08),
.driver_data = AZX_DRIVER_ATIHDMI | AZX_DCAPS_PRESET_ATI_HDMI },
{ PCI_DEVICE(0x1002, 0xaa10),
.driver_data = AZX_DRIVER_ATIHDMI | AZX_DCAPS_PRESET_ATI_HDMI },
{ PCI_DEVICE(0x1002, 0xaa18),
.driver_data = AZX_DRIVER_ATIHDMI | AZX_DCAPS_PRESET_ATI_HDMI },
{ PCI_DEVICE(0x1002, 0xaa20),
.driver_data = AZX_DRIVER_ATIHDMI | AZX_DCAPS_PRESET_ATI_HDMI },
{ PCI_DEVICE(0x1002, 0xaa28),
.driver_data = AZX_DRIVER_ATIHDMI | AZX_DCAPS_PRESET_ATI_HDMI },
{ PCI_DEVICE(0x1002, 0xaa30),
.driver_data = AZX_DRIVER_ATIHDMI | AZX_DCAPS_PRESET_ATI_HDMI },
{ PCI_DEVICE(0x1002, 0xaa38),
.driver_data = AZX_DRIVER_ATIHDMI | AZX_DCAPS_PRESET_ATI_HDMI },
{ PCI_DEVICE(0x1002, 0xaa40),
.driver_data = AZX_DRIVER_ATIHDMI | AZX_DCAPS_PRESET_ATI_HDMI },
{ PCI_DEVICE(0x1002, 0xaa48),
.driver_data = AZX_DRIVER_ATIHDMI | AZX_DCAPS_PRESET_ATI_HDMI },
/* VIA VT8251/VT8237A */
{ PCI_DEVICE(0x1106, 0x3288),
.driver_data = AZX_DRIVER_VIA | AZX_DCAPS_POSFIX_VIA },
/* SIS966 */
{ PCI_DEVICE(0x1039, 0x7502), .driver_data = AZX_DRIVER_SIS },
/* ULI M5461 */
{ PCI_DEVICE(0x10b9, 0x5461), .driver_data = AZX_DRIVER_ULI },
/* NVIDIA MCP */
{ PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID),
.class = PCI_CLASS_MULTIMEDIA_HD_AUDIO << 8,
.class_mask = 0xffffff,
.driver_data = AZX_DRIVER_NVIDIA | AZX_DCAPS_PRESET_NVIDIA },
/* Teradici */
{ PCI_DEVICE(0x6549, 0x1200),
.driver_data = AZX_DRIVER_TERA | AZX_DCAPS_NO_64BIT },
/* Creative X-Fi (CA0110-IBG) */
#if !defined(CONFIG_SND_CTXFI) && !defined(CONFIG_SND_CTXFI_MODULE)
/* the following entry conflicts with snd-ctxfi driver,
* as ctxfi driver mutates from HD-audio to native mode with
* a special command sequence.
*/
{ PCI_DEVICE(PCI_VENDOR_ID_CREATIVE, PCI_ANY_ID),
.class = PCI_CLASS_MULTIMEDIA_HD_AUDIO << 8,
.class_mask = 0xffffff,
.driver_data = AZX_DRIVER_CTX | AZX_DCAPS_CTX_WORKAROUND |
AZX_DCAPS_RIRB_PRE_DELAY },
#else
/* this entry seems still valid -- i.e. without emu20kx chip */
{ PCI_DEVICE(0x1102, 0x0009),
.driver_data = AZX_DRIVER_CTX | AZX_DCAPS_CTX_WORKAROUND |
AZX_DCAPS_RIRB_PRE_DELAY },
#endif
/* Vortex86MX */
{ PCI_DEVICE(0x17f3, 0x3010), .driver_data = AZX_DRIVER_GENERIC },
/* VMware HDAudio */
{ PCI_DEVICE(0x15ad, 0x1977), .driver_data = AZX_DRIVER_GENERIC },
/* AMD/ATI Generic, PCI class code and Vendor ID for HD Audio */
{ PCI_DEVICE(PCI_VENDOR_ID_ATI, PCI_ANY_ID),
.class = PCI_CLASS_MULTIMEDIA_HD_AUDIO << 8,
.class_mask = 0xffffff,
.driver_data = AZX_DRIVER_GENERIC | AZX_DCAPS_PRESET_ATI_HDMI },
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_ANY_ID),
.class = PCI_CLASS_MULTIMEDIA_HD_AUDIO << 8,
.class_mask = 0xffffff,
.driver_data = AZX_DRIVER_GENERIC | AZX_DCAPS_PRESET_ATI_HDMI },
{ 0, }
};
MODULE_DEVICE_TABLE(pci, azx_ids);
/* pci_driver definition */
static struct pci_driver driver = {
.name = "HDA Intel",
.id_table = azx_ids,
.probe = azx_probe,
.remove = __devexit_p(azx_remove),
#ifdef CONFIG_PM
.suspend = azx_suspend,
.resume = azx_resume,
#endif
};
static int __init alsa_card_azx_init(void)
{
return pci_register_driver(&driver);
}
static void __exit alsa_card_azx_exit(void)
{
pci_unregister_driver(&driver);
}
module_init(alsa_card_azx_init)
module_exit(alsa_card_azx_exit)
| gpl-2.0 |
NETFORCE2/linux | drivers/leds/trigger/ledtrig-cpu.c | 1446 | 3877 | /*
* ledtrig-cpu.c - LED trigger based on CPU activity
*
* This LED trigger will be registered for each possible CPU and named as
* cpu0, cpu1, cpu2, cpu3, etc.
*
* It can be bound to any LED just like other triggers, either from a
* board file or via the sysfs interface.
*
* An API named ledtrig_cpu is exported for any user who wants to add CPU
* activity indication in their code.
*
* Copyright 2011 Linus Walleij <linus.walleij@linaro.org>
* Copyright 2011 - 2012 Bryan Wu <bryan.wu@canonical.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/percpu.h>
#include <linux/syscore_ops.h>
#include <linux/rwsem.h>
#include <linux/cpu.h>
#include "../leds.h"
#define MAX_NAME_LEN 8
struct led_trigger_cpu {
char name[MAX_NAME_LEN];
struct led_trigger *_trig;
};
static DEFINE_PER_CPU(struct led_trigger_cpu, cpu_trig);
/**
* ledtrig_cpu - emit a CPU event as a trigger
* @ledevt: CPU event to be emitted
*
* Emit a CPU event on a CPU core, which will trigger a
* bound LED to turn on or off.
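*
* Illustrative usage from an arch idle loop (caller code elsewhere in
* the kernel, shown here only as an example):
*	ledtrig_cpu(CPU_LED_IDLE_START);
*	cpu_do_idle();
*	ledtrig_cpu(CPU_LED_IDLE_END);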
*/
void ledtrig_cpu(enum cpu_led_event ledevt)
{
struct led_trigger_cpu *trig = this_cpu_ptr(&cpu_trig);
/* Locate the correct CPU LED */
switch (ledevt) {
case CPU_LED_IDLE_END:
case CPU_LED_START:
/* Will turn the LED on, max brightness */
led_trigger_event(trig->_trig, LED_FULL);
break;
case CPU_LED_IDLE_START:
case CPU_LED_STOP:
case CPU_LED_HALTED:
/* Will turn the LED off */
led_trigger_event(trig->_trig, LED_OFF);
break;
default:
/* Will leave the LED as it is */
break;
}
}
EXPORT_SYMBOL(ledtrig_cpu);
static int ledtrig_cpu_syscore_suspend(void)
{
ledtrig_cpu(CPU_LED_STOP);
return 0;
}
static void ledtrig_cpu_syscore_resume(void)
{
ledtrig_cpu(CPU_LED_START);
}
static void ledtrig_cpu_syscore_shutdown(void)
{
ledtrig_cpu(CPU_LED_HALTED);
}
static struct syscore_ops ledtrig_cpu_syscore_ops = {
.shutdown = ledtrig_cpu_syscore_shutdown,
.suspend = ledtrig_cpu_syscore_suspend,
.resume = ledtrig_cpu_syscore_resume,
};
static int ledtrig_cpu_notify(struct notifier_block *self,
unsigned long action, void *hcpu)
{
switch (action & ~CPU_TASKS_FROZEN) {
case CPU_STARTING:
ledtrig_cpu(CPU_LED_START);
break;
case CPU_DYING:
ledtrig_cpu(CPU_LED_STOP);
break;
}
return NOTIFY_OK;
}
static struct notifier_block ledtrig_cpu_nb = {
.notifier_call = ledtrig_cpu_notify,
};
static int __init ledtrig_cpu_init(void)
{
int cpu;
/* Supports up to 9999 cpu cores */
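/* (MAX_NAME_LEN is 8: "cpu" + up to four digits + terminating NUL) */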
BUILD_BUG_ON(CONFIG_NR_CPUS > 9999);
/*
* Registering a CPU LED trigger for each possible CPU core here
* ignores CPU hotplug, but CPU hotplug works fine with this
* trigger afterwards.
*/
for_each_possible_cpu(cpu) {
struct led_trigger_cpu *trig = &per_cpu(cpu_trig, cpu);
snprintf(trig->name, MAX_NAME_LEN, "cpu%d", cpu);
led_trigger_register_simple(trig->name, &trig->_trig);
}
register_syscore_ops(&ledtrig_cpu_syscore_ops);
register_cpu_notifier(&ledtrig_cpu_nb);
pr_info("ledtrig-cpu: registered to indicate activity on CPUs\n");
return 0;
}
module_init(ledtrig_cpu_init);
static void __exit ledtrig_cpu_exit(void)
{
int cpu;
unregister_cpu_notifier(&ledtrig_cpu_nb);
for_each_possible_cpu(cpu) {
struct led_trigger_cpu *trig = &per_cpu(cpu_trig, cpu);
led_trigger_unregister_simple(trig->_trig);
trig->_trig = NULL;
memset(trig->name, 0, MAX_NAME_LEN);
}
unregister_syscore_ops(&ledtrig_cpu_syscore_ops);
}
module_exit(ledtrig_cpu_exit);
MODULE_AUTHOR("Linus Walleij <linus.walleij@linaro.org>");
MODULE_AUTHOR("Bryan Wu <bryan.wu@canonical.com>");
MODULE_DESCRIPTION("CPU LED trigger");
MODULE_LICENSE("GPL");
| gpl-2.0 |
84506232/AK-OnePone | net/ipv6/ip6_flowlabel.c | 4518 | 17813 | /*
* ip6_flowlabel.c IPv6 flowlabel manager.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*
* Authors: Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
*/
#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/net.h>
#include <linux/netdevice.h>
#include <linux/if_arp.h>
#include <linux/in6.h>
#include <linux/route.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <net/ipv6.h>
#include <net/ndisc.h>
#include <net/protocol.h>
#include <net/ip6_route.h>
#include <net/addrconf.h>
#include <net/rawv6.h>
#include <net/icmp.h>
#include <net/transp_v6.h>
#include <asm/uaccess.h>
#define FL_MIN_LINGER 6 /* Minimal linger. It is set to the 6 sec specified
 in the old IPv6 RFC. Well, it was a reasonable value.
*/
#define FL_MAX_LINGER 60 /* Maximal linger timeout */
/* FL hash table */
#define FL_MAX_PER_SOCK 32
#define FL_MAX_SIZE 4096
#define FL_HASH_MASK 255
#define FL_HASH(l) (ntohl(l)&FL_HASH_MASK)
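/* Example: a label of htonl(0x12345) hashes to bucket 0x12345 & 255 = 0x45;
* only the low 8 bits of the host-order label select one of 256 buckets.
*/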
static atomic_t fl_size = ATOMIC_INIT(0);
static struct ip6_flowlabel *fl_ht[FL_HASH_MASK+1];
static void ip6_fl_gc(unsigned long dummy);
static DEFINE_TIMER(ip6_fl_gc_timer, ip6_fl_gc, 0, 0);
/* FL hash table lock: it protects only the GC */
static DEFINE_RWLOCK(ip6_fl_lock);
/* Big socket sock */
static DEFINE_RWLOCK(ip6_sk_fl_lock);
static inline struct ip6_flowlabel *__fl_lookup(struct net *net, __be32 label)
{
struct ip6_flowlabel *fl;
for (fl=fl_ht[FL_HASH(label)]; fl; fl = fl->next) {
if (fl->label == label && net_eq(fl->fl_net, net))
return fl;
}
return NULL;
}
static struct ip6_flowlabel *fl_lookup(struct net *net, __be32 label)
{
struct ip6_flowlabel *fl;
read_lock_bh(&ip6_fl_lock);
fl = __fl_lookup(net, label);
if (fl)
atomic_inc(&fl->users);
read_unlock_bh(&ip6_fl_lock);
return fl;
}
static void fl_free(struct ip6_flowlabel *fl)
{
if (fl) {
release_net(fl->fl_net);
kfree(fl->opt);
}
kfree(fl);
}
static void fl_release(struct ip6_flowlabel *fl)
{
write_lock_bh(&ip6_fl_lock);
fl->lastuse = jiffies;
if (atomic_dec_and_test(&fl->users)) {
unsigned long ttd = fl->lastuse + fl->linger;
if (time_after(ttd, fl->expires))
fl->expires = ttd;
ttd = fl->expires;
if (fl->opt && fl->share == IPV6_FL_S_EXCL) {
struct ipv6_txoptions *opt = fl->opt;
fl->opt = NULL;
kfree(opt);
}
if (!timer_pending(&ip6_fl_gc_timer) ||
time_after(ip6_fl_gc_timer.expires, ttd))
mod_timer(&ip6_fl_gc_timer, ttd);
}
write_unlock_bh(&ip6_fl_lock);
}
static void ip6_fl_gc(unsigned long dummy)
{
int i;
unsigned long now = jiffies;
unsigned long sched = 0;
write_lock(&ip6_fl_lock);
for (i=0; i<=FL_HASH_MASK; i++) {
struct ip6_flowlabel *fl, **flp;
flp = &fl_ht[i];
while ((fl=*flp) != NULL) {
if (atomic_read(&fl->users) == 0) {
unsigned long ttd = fl->lastuse + fl->linger;
if (time_after(ttd, fl->expires))
fl->expires = ttd;
ttd = fl->expires;
if (time_after_eq(now, ttd)) {
*flp = fl->next;
fl_free(fl);
atomic_dec(&fl_size);
continue;
}
if (!sched || time_before(ttd, sched))
sched = ttd;
}
flp = &fl->next;
}
}
if (!sched && atomic_read(&fl_size))
sched = now + FL_MAX_LINGER;
if (sched) {
mod_timer(&ip6_fl_gc_timer, sched);
}
write_unlock(&ip6_fl_lock);
}
static void __net_exit ip6_fl_purge(struct net *net)
{
int i;
write_lock(&ip6_fl_lock);
for (i = 0; i <= FL_HASH_MASK; i++) {
struct ip6_flowlabel *fl, **flp;
flp = &fl_ht[i];
while ((fl = *flp) != NULL) {
if (net_eq(fl->fl_net, net) &&
atomic_read(&fl->users) == 0) {
*flp = fl->next;
fl_free(fl);
atomic_dec(&fl_size);
continue;
}
flp = &fl->next;
}
}
write_unlock(&ip6_fl_lock);
}
static struct ip6_flowlabel *fl_intern(struct net *net,
struct ip6_flowlabel *fl, __be32 label)
{
struct ip6_flowlabel *lfl;
fl->label = label & IPV6_FLOWLABEL_MASK;
write_lock_bh(&ip6_fl_lock);
if (label == 0) {
for (;;) {
fl->label = htonl(net_random())&IPV6_FLOWLABEL_MASK;
if (fl->label) {
lfl = __fl_lookup(net, fl->label);
if (lfl == NULL)
break;
}
}
} else {
/*
* we dropped the ip6_fl_lock, so this entry could reappear
* and we need to recheck with it.
*
* OTOH no need to search the active socket first, like it is
* done in ipv6_flowlabel_opt - sock is locked, so new entry
* with the same label can only appear on another sock
*/
lfl = __fl_lookup(net, fl->label);
if (lfl != NULL) {
atomic_inc(&lfl->users);
write_unlock_bh(&ip6_fl_lock);
return lfl;
}
}
fl->lastuse = jiffies;
fl->next = fl_ht[FL_HASH(fl->label)];
fl_ht[FL_HASH(fl->label)] = fl;
atomic_inc(&fl_size);
write_unlock_bh(&ip6_fl_lock);
return NULL;
}
/* Socket flowlabel lists */
struct ip6_flowlabel * fl6_sock_lookup(struct sock *sk, __be32 label)
{
struct ipv6_fl_socklist *sfl;
struct ipv6_pinfo *np = inet6_sk(sk);
label &= IPV6_FLOWLABEL_MASK;
read_lock_bh(&ip6_sk_fl_lock);
for (sfl=np->ipv6_fl_list; sfl; sfl = sfl->next) {
struct ip6_flowlabel *fl = sfl->fl;
if (fl->label == label) {
fl->lastuse = jiffies;
atomic_inc(&fl->users);
read_unlock_bh(&ip6_sk_fl_lock);
return fl;
}
}
read_unlock_bh(&ip6_sk_fl_lock);
return NULL;
}
EXPORT_SYMBOL_GPL(fl6_sock_lookup);
void fl6_free_socklist(struct sock *sk)
{
struct ipv6_pinfo *np = inet6_sk(sk);
struct ipv6_fl_socklist *sfl;
while ((sfl = np->ipv6_fl_list) != NULL) {
np->ipv6_fl_list = sfl->next;
fl_release(sfl->fl);
kfree(sfl);
}
}
/* Service routines */
/*
This is the only difficult place. The flowlabel enforces equal headers
up to and including the routing header; however, the user may supply
options following the rthdr.
*/
struct ipv6_txoptions *fl6_merge_options(struct ipv6_txoptions * opt_space,
struct ip6_flowlabel * fl,
struct ipv6_txoptions * fopt)
{
struct ipv6_txoptions * fl_opt = fl->opt;
if (fopt == NULL || fopt->opt_flen == 0)
return fl_opt;
if (fl_opt != NULL) {
opt_space->hopopt = fl_opt->hopopt;
opt_space->dst0opt = fl_opt->dst0opt;
opt_space->srcrt = fl_opt->srcrt;
opt_space->opt_nflen = fl_opt->opt_nflen;
} else {
if (fopt->opt_nflen == 0)
return fopt;
opt_space->hopopt = NULL;
opt_space->dst0opt = NULL;
opt_space->srcrt = NULL;
opt_space->opt_nflen = 0;
}
opt_space->dst1opt = fopt->dst1opt;
opt_space->opt_flen = fopt->opt_flen;
return opt_space;
}
static unsigned long check_linger(unsigned long ttl)
{
if (ttl < FL_MIN_LINGER)
return FL_MIN_LINGER*HZ;
if (ttl > FL_MAX_LINGER && !capable(CAP_NET_ADMIN))
return 0;
return ttl*HZ;
}
static int fl6_renew(struct ip6_flowlabel *fl, unsigned long linger, unsigned long expires)
{
linger = check_linger(linger);
if (!linger)
return -EPERM;
expires = check_linger(expires);
if (!expires)
return -EPERM;
fl->lastuse = jiffies;
if (time_before(fl->linger, linger))
fl->linger = linger;
if (time_before(expires, fl->linger))
expires = fl->linger;
if (time_before(fl->expires, fl->lastuse + expires))
fl->expires = fl->lastuse + expires;
return 0;
}
static struct ip6_flowlabel *
fl_create(struct net *net, struct sock *sk, struct in6_flowlabel_req *freq,
char __user *optval, int optlen, int *err_p)
{
struct ip6_flowlabel *fl = NULL;
int olen;
int addr_type;
int err;
olen = optlen - CMSG_ALIGN(sizeof(*freq));
err = -EINVAL;
if (olen > 64 * 1024)
goto done;
err = -ENOMEM;
fl = kzalloc(sizeof(*fl), GFP_KERNEL);
if (fl == NULL)
goto done;
if (olen > 0) {
struct msghdr msg;
struct flowi6 flowi6;
int junk;
err = -ENOMEM;
fl->opt = kmalloc(sizeof(*fl->opt) + olen, GFP_KERNEL);
if (fl->opt == NULL)
goto done;
memset(fl->opt, 0, sizeof(*fl->opt));
fl->opt->tot_len = sizeof(*fl->opt) + olen;
err = -EFAULT;
if (copy_from_user(fl->opt+1, optval+CMSG_ALIGN(sizeof(*freq)), olen))
goto done;
msg.msg_controllen = olen;
msg.msg_control = (void*)(fl->opt+1);
memset(&flowi6, 0, sizeof(flowi6));
err = datagram_send_ctl(net, sk, &msg, &flowi6, fl->opt, &junk,
&junk, &junk);
if (err)
goto done;
err = -EINVAL;
if (fl->opt->opt_flen)
goto done;
if (fl->opt->opt_nflen == 0) {
kfree(fl->opt);
fl->opt = NULL;
}
}
fl->fl_net = hold_net(net);
fl->expires = jiffies;
err = fl6_renew(fl, freq->flr_linger, freq->flr_expires);
if (err)
goto done;
fl->share = freq->flr_share;
addr_type = ipv6_addr_type(&freq->flr_dst);
if ((addr_type & IPV6_ADDR_MAPPED) ||
addr_type == IPV6_ADDR_ANY) {
err = -EINVAL;
goto done;
}
fl->dst = freq->flr_dst;
atomic_set(&fl->users, 1);
switch (fl->share) {
case IPV6_FL_S_EXCL:
case IPV6_FL_S_ANY:
break;
case IPV6_FL_S_PROCESS:
fl->owner = current->pid;
break;
case IPV6_FL_S_USER:
fl->owner = current_euid();
break;
default:
err = -EINVAL;
goto done;
}
return fl;
done:
fl_free(fl);
*err_p = err;
return NULL;
}
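/* Admission control for new flow labels: an unprivileged socket is limited
* to FL_MAX_PER_SOCK labels and is refused while the global table is
* crowded (roughly: any label held and less than half of FL_MAX_SIZE free,
* or less than a quarter free). CAP_NET_ADMIN bypasses these limits as
* long as any room remains.
*/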
static int mem_check(struct sock *sk)
{
struct ipv6_pinfo *np = inet6_sk(sk);
struct ipv6_fl_socklist *sfl;
int room = FL_MAX_SIZE - atomic_read(&fl_size);
int count = 0;
if (room > FL_MAX_SIZE - FL_MAX_PER_SOCK)
return 0;
for (sfl = np->ipv6_fl_list; sfl; sfl = sfl->next)
count++;
if (room <= 0 ||
((count >= FL_MAX_PER_SOCK ||
(count > 0 && room < FL_MAX_SIZE/2) || room < FL_MAX_SIZE/4) &&
!capable(CAP_NET_ADMIN)))
return -ENOBUFS;
return 0;
}
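/* Compare two IPv6 option headers. The on-wire total length of an
* extension header is (hdrlen + 1) * 8 bytes, so the payload compared
* beyond the common struct is ((hdrlen + 1) << 3) - sizeof(*h1) bytes.
*/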
static int ipv6_hdr_cmp(struct ipv6_opt_hdr *h1, struct ipv6_opt_hdr *h2)
{
if (h1 == h2)
return 0;
if (h1 == NULL || h2 == NULL)
return 1;
if (h1->hdrlen != h2->hdrlen)
return 1;
return memcmp(h1+1, h2+1, ((h1->hdrlen+1)<<3) - sizeof(*h1));
}
static int ipv6_opt_cmp(struct ipv6_txoptions *o1, struct ipv6_txoptions *o2)
{
if (o1 == o2)
return 0;
if (o1 == NULL || o2 == NULL)
return 1;
if (o1->opt_nflen != o2->opt_nflen)
return 1;
if (ipv6_hdr_cmp(o1->hopopt, o2->hopopt))
return 1;
if (ipv6_hdr_cmp(o1->dst0opt, o2->dst0opt))
return 1;
if (ipv6_hdr_cmp((struct ipv6_opt_hdr *)o1->srcrt, (struct ipv6_opt_hdr *)o2->srcrt))
return 1;
return 0;
}
static inline void fl_link(struct ipv6_pinfo *np, struct ipv6_fl_socklist *sfl,
struct ip6_flowlabel *fl)
{
write_lock_bh(&ip6_sk_fl_lock);
sfl->fl = fl;
sfl->next = np->ipv6_fl_list;
np->ipv6_fl_list = sfl;
write_unlock_bh(&ip6_sk_fl_lock);
}
int ipv6_flowlabel_opt(struct sock *sk, char __user *optval, int optlen)
{
int uninitialized_var(err);
struct net *net = sock_net(sk);
struct ipv6_pinfo *np = inet6_sk(sk);
struct in6_flowlabel_req freq;
struct ipv6_fl_socklist *sfl1=NULL;
struct ipv6_fl_socklist *sfl, **sflp;
struct ip6_flowlabel *fl, *fl1 = NULL;
if (optlen < sizeof(freq))
return -EINVAL;
if (copy_from_user(&freq, optval, sizeof(freq)))
return -EFAULT;
switch (freq.flr_action) {
case IPV6_FL_A_PUT:
write_lock_bh(&ip6_sk_fl_lock);
for (sflp = &np->ipv6_fl_list; (sfl=*sflp)!=NULL; sflp = &sfl->next) {
if (sfl->fl->label == freq.flr_label) {
if (freq.flr_label == (np->flow_label&IPV6_FLOWLABEL_MASK))
np->flow_label &= ~IPV6_FLOWLABEL_MASK;
*sflp = sfl->next;
write_unlock_bh(&ip6_sk_fl_lock);
fl_release(sfl->fl);
kfree(sfl);
return 0;
}
}
write_unlock_bh(&ip6_sk_fl_lock);
return -ESRCH;
case IPV6_FL_A_RENEW:
read_lock_bh(&ip6_sk_fl_lock);
for (sfl = np->ipv6_fl_list; sfl; sfl = sfl->next) {
if (sfl->fl->label == freq.flr_label) {
err = fl6_renew(sfl->fl, freq.flr_linger, freq.flr_expires);
read_unlock_bh(&ip6_sk_fl_lock);
return err;
}
}
read_unlock_bh(&ip6_sk_fl_lock);
if (freq.flr_share == IPV6_FL_S_NONE && capable(CAP_NET_ADMIN)) {
fl = fl_lookup(net, freq.flr_label);
if (fl) {
err = fl6_renew(fl, freq.flr_linger, freq.flr_expires);
fl_release(fl);
return err;
}
}
return -ESRCH;
case IPV6_FL_A_GET:
if (freq.flr_label & ~IPV6_FLOWLABEL_MASK)
return -EINVAL;
fl = fl_create(net, sk, &freq, optval, optlen, &err);
if (fl == NULL)
return err;
sfl1 = kmalloc(sizeof(*sfl1), GFP_KERNEL);
if (freq.flr_label) {
err = -EEXIST;
read_lock_bh(&ip6_sk_fl_lock);
for (sfl = np->ipv6_fl_list; sfl; sfl = sfl->next) {
if (sfl->fl->label == freq.flr_label) {
if (freq.flr_flags&IPV6_FL_F_EXCL) {
read_unlock_bh(&ip6_sk_fl_lock);
goto done;
}
fl1 = sfl->fl;
atomic_inc(&fl1->users);
break;
}
}
read_unlock_bh(&ip6_sk_fl_lock);
if (fl1 == NULL)
fl1 = fl_lookup(net, freq.flr_label);
if (fl1) {
recheck:
err = -EEXIST;
if (freq.flr_flags&IPV6_FL_F_EXCL)
goto release;
err = -EPERM;
if (fl1->share == IPV6_FL_S_EXCL ||
fl1->share != fl->share ||
fl1->owner != fl->owner)
goto release;
err = -EINVAL;
if (!ipv6_addr_equal(&fl1->dst, &fl->dst) ||
ipv6_opt_cmp(fl1->opt, fl->opt))
goto release;
err = -ENOMEM;
if (sfl1 == NULL)
goto release;
if (fl->linger > fl1->linger)
fl1->linger = fl->linger;
if ((long)(fl->expires - fl1->expires) > 0)
fl1->expires = fl->expires;
fl_link(np, sfl1, fl1);
fl_free(fl);
return 0;
release:
fl_release(fl1);
goto done;
}
}
err = -ENOENT;
if (!(freq.flr_flags&IPV6_FL_F_CREATE))
goto done;
err = -ENOMEM;
if (sfl1 == NULL || (err = mem_check(sk)) != 0)
goto done;
fl1 = fl_intern(net, fl, freq.flr_label);
if (fl1 != NULL)
goto recheck;
if (!freq.flr_label) {
if (copy_to_user(&((struct in6_flowlabel_req __user *) optval)->flr_label,
&fl->label, sizeof(fl->label))) {
/* Intentionally ignore fault. */
}
}
fl_link(np, sfl1, fl);
return 0;
default:
return -EINVAL;
}
done:
fl_free(fl);
kfree(sfl1);
return err;
}
#ifdef CONFIG_PROC_FS
struct ip6fl_iter_state {
struct seq_net_private p;
int bucket;
};
#define ip6fl_seq_private(seq) ((struct ip6fl_iter_state *)(seq)->private)
static struct ip6_flowlabel *ip6fl_get_first(struct seq_file *seq)
{
struct ip6_flowlabel *fl = NULL;
struct ip6fl_iter_state *state = ip6fl_seq_private(seq);
struct net *net = seq_file_net(seq);
for (state->bucket = 0; state->bucket <= FL_HASH_MASK; ++state->bucket) {
fl = fl_ht[state->bucket];
while (fl && !net_eq(fl->fl_net, net))
fl = fl->next;
if (fl)
break;
}
return fl;
}
static struct ip6_flowlabel *ip6fl_get_next(struct seq_file *seq, struct ip6_flowlabel *fl)
{
struct ip6fl_iter_state *state = ip6fl_seq_private(seq);
struct net *net = seq_file_net(seq);
fl = fl->next;
try_again:
while (fl && !net_eq(fl->fl_net, net))
fl = fl->next;
while (!fl) {
if (++state->bucket <= FL_HASH_MASK) {
fl = fl_ht[state->bucket];
goto try_again;
} else
break;
}
return fl;
}
static struct ip6_flowlabel *ip6fl_get_idx(struct seq_file *seq, loff_t pos)
{
struct ip6_flowlabel *fl = ip6fl_get_first(seq);
if (fl)
while (pos && (fl = ip6fl_get_next(seq, fl)) != NULL)
--pos;
return pos ? NULL : fl;
}
static void *ip6fl_seq_start(struct seq_file *seq, loff_t *pos)
__acquires(ip6_fl_lock)
{
read_lock_bh(&ip6_fl_lock);
return *pos ? ip6fl_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
}
static void *ip6fl_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
struct ip6_flowlabel *fl;
if (v == SEQ_START_TOKEN)
fl = ip6fl_get_first(seq);
else
fl = ip6fl_get_next(seq, v);
++*pos;
return fl;
}
static void ip6fl_seq_stop(struct seq_file *seq, void *v)
__releases(ip6_fl_lock)
{
read_unlock_bh(&ip6_fl_lock);
}
static int ip6fl_seq_show(struct seq_file *seq, void *v)
{
if (v == SEQ_START_TOKEN)
seq_printf(seq, "%-5s %-1s %-6s %-6s %-6s %-8s %-32s %s\n",
"Label", "S", "Owner", "Users", "Linger", "Expires", "Dst", "Opt");
else {
struct ip6_flowlabel *fl = v;
seq_printf(seq,
"%05X %-1d %-6d %-6d %-6ld %-8ld %pi6 %-4d\n",
(unsigned)ntohl(fl->label),
fl->share,
(unsigned)fl->owner,
atomic_read(&fl->users),
fl->linger/HZ,
(long)(fl->expires - jiffies)/HZ,
&fl->dst,
fl->opt ? fl->opt->opt_nflen : 0);
}
return 0;
}
static const struct seq_operations ip6fl_seq_ops = {
.start = ip6fl_seq_start,
.next = ip6fl_seq_next,
.stop = ip6fl_seq_stop,
.show = ip6fl_seq_show,
};
static int ip6fl_seq_open(struct inode *inode, struct file *file)
{
return seq_open_net(inode, file, &ip6fl_seq_ops,
sizeof(struct ip6fl_iter_state));
}
static const struct file_operations ip6fl_seq_fops = {
.owner = THIS_MODULE,
.open = ip6fl_seq_open,
.read = seq_read,
.llseek = seq_lseek,
.release = seq_release_net,
};
static int __net_init ip6_flowlabel_proc_init(struct net *net)
{
if (!proc_net_fops_create(net, "ip6_flowlabel",
S_IRUGO, &ip6fl_seq_fops))
return -ENOMEM;
return 0;
}
static void __net_exit ip6_flowlabel_proc_fini(struct net *net)
{
proc_net_remove(net, "ip6_flowlabel");
}
#else
static inline int ip6_flowlabel_proc_init(struct net *net)
{
return 0;
}
static inline void ip6_flowlabel_proc_fini(struct net *net)
{
}
#endif
static void __net_exit ip6_flowlabel_net_exit(struct net *net)
{
ip6_fl_purge(net);
ip6_flowlabel_proc_fini(net);
}
static struct pernet_operations ip6_flowlabel_net_ops = {
.init = ip6_flowlabel_proc_init,
.exit = ip6_flowlabel_net_exit,
};
int ip6_flowlabel_init(void)
{
return register_pernet_subsys(&ip6_flowlabel_net_ops);
}
void ip6_flowlabel_cleanup(void)
{
del_timer(&ip6_fl_gc_timer);
unregister_pernet_subsys(&ip6_flowlabel_net_ops);
}
| gpl-2.0 |
HackerOO7/android_kernel_huawei_u8951 | drivers/media/dvb/b2c2/flexcop.c | 5030 | 9029 | /*
* Linux driver for digital TV devices equipped with B2C2 FlexcopII(b)/III
* flexcop.c - main module part
* Copyright (C) 2004-9 Patrick Boettcher <patrick.boettcher@desy.de>
* based on skystar2-driver Copyright (C) 2003 Vadim Catana, skystar@moldova.cc
*
* Acknowledgements:
* John Jurrius from BBTI, Inc. for extensive support
* with code examples and data books
* Bjarne Steinsbo, bjarne at steinsbo.com (some ideas for rewriting)
*
* Contributions to the skystar2-driver have been done by
* Vincenzo Di Massa, hawk.it at tiscalinet.it (several DiSEqC fixes)
* Roberto Ragusa, r.ragusa at libero.it (polishing, restyling the code)
* Uwe Bugla, uwe.bugla at gmx.de (doing tests, restyling code, writing docu)
* Niklas Peinecke, peinecke at gdv.uni-hannover.de (hardware pid/mac
* filtering)
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public License
* as published by the Free Software Foundation; either version 2.1
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*/
#include "flexcop.h"
#define DRIVER_NAME "B2C2 FlexcopII/II(b)/III digital TV receiver chip"
#define DRIVER_AUTHOR "Patrick Boettcher <patrick.boettcher@desy.de>"
#ifdef CONFIG_DVB_B2C2_FLEXCOP_DEBUG
#define DEBSTATUS ""
#else
#define DEBSTATUS " (debugging is not enabled)"
#endif
int b2c2_flexcop_debug;
module_param_named(debug, b2c2_flexcop_debug, int, 0644);
MODULE_PARM_DESC(debug,
"set debug level (1=info,2=tuner,4=i2c,8=ts,"
"16=sram,32=reg (|-able))."
DEBSTATUS);
#undef DEBSTATUS
DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr);
/* global zero for ibi values */
flexcop_ibi_value ibi_zero;
static int flexcop_dvb_start_feed(struct dvb_demux_feed *dvbdmxfeed)
{
struct flexcop_device *fc = dvbdmxfeed->demux->priv;
return flexcop_pid_feed_control(fc, dvbdmxfeed, 1);
}
static int flexcop_dvb_stop_feed(struct dvb_demux_feed *dvbdmxfeed)
{
struct flexcop_device *fc = dvbdmxfeed->demux->priv;
return flexcop_pid_feed_control(fc, dvbdmxfeed, 0);
}
static int flexcop_dvb_init(struct flexcop_device *fc)
{
int ret = dvb_register_adapter(&fc->dvb_adapter,
"FlexCop Digital TV device", fc->owner,
fc->dev, adapter_nr);
if (ret < 0) {
err("error registering DVB adapter");
return ret;
}
fc->dvb_adapter.priv = fc;
fc->demux.dmx.capabilities = (DMX_TS_FILTERING | DMX_SECTION_FILTERING
| DMX_MEMORY_BASED_FILTERING);
fc->demux.priv = fc;
fc->demux.filternum = fc->demux.feednum = FC_MAX_FEED;
fc->demux.start_feed = flexcop_dvb_start_feed;
fc->demux.stop_feed = flexcop_dvb_stop_feed;
fc->demux.write_to_decoder = NULL;
ret = dvb_dmx_init(&fc->demux);
if (ret < 0) {
err("dvb_dmx failed: error %d", ret);
goto err_dmx;
}
fc->hw_frontend.source = DMX_FRONTEND_0;
fc->dmxdev.filternum = fc->demux.feednum;
fc->dmxdev.demux = &fc->demux.dmx;
fc->dmxdev.capabilities = 0;
ret = dvb_dmxdev_init(&fc->dmxdev, &fc->dvb_adapter);
if (ret < 0) {
err("dvb_dmxdev_init failed: error %d", ret);
goto err_dmx_dev;
}
ret = fc->demux.dmx.add_frontend(&fc->demux.dmx, &fc->hw_frontend);
if (ret < 0) {
err("adding hw_frontend to dmx failed: error %d", ret);
goto err_dmx_add_hw_frontend;
}
fc->mem_frontend.source = DMX_MEMORY_FE;
ret = fc->demux.dmx.add_frontend(&fc->demux.dmx, &fc->mem_frontend);
if (ret < 0) {
err("adding mem_frontend to dmx failed: error %d", ret);
goto err_dmx_add_mem_frontend;
}
ret = fc->demux.dmx.connect_frontend(&fc->demux.dmx, &fc->hw_frontend);
if (ret < 0) {
err("connect frontend failed: error %d", ret);
goto err_connect_frontend;
}
ret = dvb_net_init(&fc->dvb_adapter, &fc->dvbnet, &fc->demux.dmx);
if (ret < 0) {
err("dvb_net_init failed: error %d", ret);
goto err_net;
}
fc->init_state |= FC_STATE_DVB_INIT;
return 0;
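/* Error unwind: each label below undoes exactly the steps that succeeded
 * before the failure, in reverse order of initialization. */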
err_net:
fc->demux.dmx.disconnect_frontend(&fc->demux.dmx);
err_connect_frontend:
fc->demux.dmx.remove_frontend(&fc->demux.dmx, &fc->mem_frontend);
err_dmx_add_mem_frontend:
fc->demux.dmx.remove_frontend(&fc->demux.dmx, &fc->hw_frontend);
err_dmx_add_hw_frontend:
dvb_dmxdev_release(&fc->dmxdev);
err_dmx_dev:
dvb_dmx_release(&fc->demux);
err_dmx:
dvb_unregister_adapter(&fc->dvb_adapter);
return ret;
}
static void flexcop_dvb_exit(struct flexcop_device *fc)
{
if (fc->init_state & FC_STATE_DVB_INIT) {
dvb_net_release(&fc->dvbnet);
fc->demux.dmx.close(&fc->demux.dmx);
fc->demux.dmx.remove_frontend(&fc->demux.dmx,
&fc->mem_frontend);
fc->demux.dmx.remove_frontend(&fc->demux.dmx,
&fc->hw_frontend);
dvb_dmxdev_release(&fc->dmxdev);
dvb_dmx_release(&fc->demux);
dvb_unregister_adapter(&fc->dvb_adapter);
deb_info("deinitialized dvb stuff\n");
}
fc->init_state &= ~FC_STATE_DVB_INIT;
}
/* these methods are necessary to achieve the long-term goal of hiding
 * struct flexcop_device from the bus parts */
void flexcop_pass_dmx_data(struct flexcop_device *fc, u8 *buf, u32 len)
{
dvb_dmx_swfilter(&fc->demux, buf, len);
}
EXPORT_SYMBOL(flexcop_pass_dmx_data);
void flexcop_pass_dmx_packets(struct flexcop_device *fc, u8 *buf, u32 no)
{
dvb_dmx_swfilter_packets(&fc->demux, buf, no);
}
EXPORT_SYMBOL(flexcop_pass_dmx_packets);
static void flexcop_reset(struct flexcop_device *fc)
{
flexcop_ibi_value v210, v204;
/* reset the flexcop itself */
fc->write_ibi_reg(fc,ctrl_208,ibi_zero);
v210.raw = 0;
v210.sw_reset_210.reset_block_000 = 1;
v210.sw_reset_210.reset_block_100 = 1;
v210.sw_reset_210.reset_block_200 = 1;
v210.sw_reset_210.reset_block_300 = 1;
v210.sw_reset_210.reset_block_400 = 1;
v210.sw_reset_210.reset_block_500 = 1;
v210.sw_reset_210.reset_block_600 = 1;
v210.sw_reset_210.reset_block_700 = 1;
v210.sw_reset_210.Block_reset_enable = 0xb2;
v210.sw_reset_210.Special_controls = 0xc259;
fc->write_ibi_reg(fc,sw_reset_210,v210);
msleep(1);
/* reset the peripheral devices */
v204 = fc->read_ibi_reg(fc,misc_204);
v204.misc_204.Per_reset_sig = 0;
fc->write_ibi_reg(fc,misc_204,v204);
msleep(1);
v204.misc_204.Per_reset_sig = 1;
fc->write_ibi_reg(fc,misc_204,v204);
}
void flexcop_reset_block_300(struct flexcop_device *fc)
{
flexcop_ibi_value v208_save = fc->read_ibi_reg(fc, ctrl_208),
v210 = fc->read_ibi_reg(fc, sw_reset_210);
deb_rdump("208: %08x, 210: %08x\n", v208_save.raw, v210.raw);
fc->write_ibi_reg(fc,ctrl_208,ibi_zero);
v210.sw_reset_210.reset_block_300 = 1;
v210.sw_reset_210.Block_reset_enable = 0xb2;
fc->write_ibi_reg(fc,sw_reset_210,v210);
fc->write_ibi_reg(fc,ctrl_208,v208_save);
}
struct flexcop_device *flexcop_device_kmalloc(size_t bus_specific_len)
{
void *bus;
struct flexcop_device *fc = kzalloc(sizeof(struct flexcop_device),
GFP_KERNEL);
if (!fc) {
err("no memory");
return NULL;
}
bus = kzalloc(bus_specific_len, GFP_KERNEL);
if (!bus) {
err("no memory");
kfree(fc);
return NULL;
}
fc->bus_specific = bus;
return fc;
}
EXPORT_SYMBOL(flexcop_device_kmalloc);
void flexcop_device_kfree(struct flexcop_device *fc)
{
kfree(fc->bus_specific);
kfree(fc);
}
EXPORT_SYMBOL(flexcop_device_kfree);
int flexcop_device_initialize(struct flexcop_device *fc)
{
int ret;
ibi_zero.raw = 0;
flexcop_reset(fc);
flexcop_determine_revision(fc);
flexcop_sram_init(fc);
flexcop_hw_filter_init(fc);
flexcop_smc_ctrl(fc, 0);
ret = flexcop_dvb_init(fc);
if (ret)
goto error;
/* i2c has to be initialized before doing EEPROM stuff,
 * because the EEPROM is accessed via i2c */
ret = flexcop_i2c_init(fc);
if (ret)
goto error;
/* do the MAC address reading after initializing the dvb_adapter */
if (fc->get_mac_addr(fc, 0) == 0) {
u8 *b = fc->dvb_adapter.proposed_mac;
info("MAC address = %pM", b);
flexcop_set_mac_filter(fc,b);
flexcop_mac_filter_ctrl(fc,1);
} else
warn("reading of MAC address failed.\n");
ret = flexcop_frontend_init(fc);
if (ret)
goto error;
flexcop_device_name(fc,"initialization of","complete");
return 0;
error:
flexcop_device_exit(fc);
return ret;
}
EXPORT_SYMBOL(flexcop_device_initialize);
void flexcop_device_exit(struct flexcop_device *fc)
{
flexcop_frontend_exit(fc);
flexcop_i2c_exit(fc);
flexcop_dvb_exit(fc);
}
EXPORT_SYMBOL(flexcop_device_exit);
static int flexcop_module_init(void)
{
info(DRIVER_NAME " loaded successfully");
return 0;
}
static void flexcop_module_cleanup(void)
{
info(DRIVER_NAME " unloaded successfully");
}
module_init(flexcop_module_init);
module_exit(flexcop_module_cleanup);
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_NAME);
MODULE_LICENSE("GPL");
| gpl-2.0 |
aeipDev/deka-kernel-msm7x30-3.4 | net/ipv6/xfrm6_output.c | 7078 | 3636 | /*
* xfrm6_output.c - Common IPsec encapsulation code for IPv6.
* Copyright (C) 2002 USAGI/WIDE Project
* Copyright (c) 2004 Herbert Xu <herbert@gondor.apana.org.au>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include <linux/if_ether.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/icmpv6.h>
#include <linux/netfilter_ipv6.h>
#include <net/dst.h>
#include <net/ipv6.h>
#include <net/ip6_route.h>
#include <net/xfrm.h>
int xfrm6_find_1stfragopt(struct xfrm_state *x, struct sk_buff *skb,
u8 **prevhdr)
{
return ip6_find_1stfragopt(skb, prevhdr);
}
EXPORT_SYMBOL(xfrm6_find_1stfragopt);
static int xfrm6_local_dontfrag(struct sk_buff *skb)
{
int proto;
struct sock *sk = skb->sk;
if (sk) {
proto = sk->sk_protocol;
if (proto == IPPROTO_UDP || proto == IPPROTO_RAW)
return inet6_sk(sk)->dontfrag;
}
return 0;
}
static void xfrm6_local_rxpmtu(struct sk_buff *skb, u32 mtu)
{
struct flowi6 fl6;
struct sock *sk = skb->sk;
fl6.flowi6_oif = sk->sk_bound_dev_if;
fl6.daddr = ipv6_hdr(skb)->daddr;
ipv6_local_rxpmtu(sk, &fl6, mtu);
}
static void xfrm6_local_error(struct sk_buff *skb, u32 mtu)
{
struct flowi6 fl6;
struct sock *sk = skb->sk;
fl6.fl6_dport = inet_sk(sk)->inet_dport;
fl6.daddr = ipv6_hdr(skb)->daddr;
ipv6_local_error(sk, EMSGSIZE, &fl6, mtu);
}
static int xfrm6_tunnel_check_size(struct sk_buff *skb)
{
int mtu, ret = 0;
struct dst_entry *dst = skb_dst(skb);
mtu = dst_mtu(dst);
if (mtu < IPV6_MIN_MTU)
mtu = IPV6_MIN_MTU;
if (!skb->local_df && skb->len > mtu) {
skb->dev = dst->dev;
if (xfrm6_local_dontfrag(skb))
xfrm6_local_rxpmtu(skb, mtu);
else if (skb->sk)
xfrm6_local_error(skb, mtu);
else
icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
ret = -EMSGSIZE;
}
return ret;
}
int xfrm6_extract_output(struct xfrm_state *x, struct sk_buff *skb)
{
int err;
err = xfrm6_tunnel_check_size(skb);
if (err)
return err;
XFRM_MODE_SKB_CB(skb)->protocol = ipv6_hdr(skb)->nexthdr;
return xfrm6_extract_header(skb);
}
int xfrm6_prepare_output(struct xfrm_state *x, struct sk_buff *skb)
{
int err;
err = xfrm_inner_extract_output(x, skb);
if (err)
return err;
memset(IP6CB(skb), 0, sizeof(*IP6CB(skb)));
#ifdef CONFIG_NETFILTER
IP6CB(skb)->flags |= IP6SKB_XFRM_TRANSFORMED;
#endif
skb->protocol = htons(ETH_P_IPV6);
skb->local_df = 1;
return x->outer_mode->output2(x, skb);
}
EXPORT_SYMBOL(xfrm6_prepare_output);
int xfrm6_output_finish(struct sk_buff *skb)
{
#ifdef CONFIG_NETFILTER
IP6CB(skb)->flags |= IP6SKB_XFRM_TRANSFORMED;
#endif
skb->protocol = htons(ETH_P_IPV6);
return xfrm_output(skb);
}
static int __xfrm6_output(struct sk_buff *skb)
{
struct dst_entry *dst = skb_dst(skb);
struct xfrm_state *x = dst->xfrm;
int mtu = ip6_skb_dst_mtu(skb);
if (skb->len > mtu && xfrm6_local_dontfrag(skb)) {
xfrm6_local_rxpmtu(skb, mtu);
return -EMSGSIZE;
} else if (!skb->local_df && skb->len > mtu && skb->sk) {
xfrm6_local_error(skb, mtu);
return -EMSGSIZE;
}
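/* In tunnel mode, oversized non-GSO packets (and dsts requiring
 * fragmentation on all routes) are fragmented before output. */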
if (x->props.mode == XFRM_MODE_TUNNEL &&
((skb->len > mtu && !skb_is_gso(skb)) ||
dst_allfrag(skb_dst(skb)))) {
return ip6_fragment(skb, x->outer_mode->afinfo->output_finish);
}
return x->outer_mode->afinfo->output_finish(skb);
}
int xfrm6_output(struct sk_buff *skb)
{
return NF_HOOK(NFPROTO_IPV6, NF_INET_POST_ROUTING, skb, NULL,
skb_dst(skb)->dev, __xfrm6_output);
}
| gpl-2.0 |
jassycliq/lg_g2d801 | drivers/infiniband/hw/qib/qib_ud.c | 7078 | 16612 | /*
* Copyright (c) 2006, 2007, 2008, 2009 QLogic Corporation. All rights reserved.
* Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include <rdma/ib_smi.h>
#include "qib.h"
#include "qib_mad.h"
/**
* qib_ud_loopback - handle send on loopback QPs
* @sqp: the sending QP
* @swqe: the send work request
*
* This is called from qib_make_ud_req() to forward a WQE addressed
* to the same HCA.
* Note that the receive interrupt handler may be calling qib_ud_rcv()
* while this is being called.
*/
static void qib_ud_loopback(struct qib_qp *sqp, struct qib_swqe *swqe)
{
struct qib_ibport *ibp = to_iport(sqp->ibqp.device, sqp->port_num);
struct qib_pportdata *ppd;
struct qib_qp *qp;
struct ib_ah_attr *ah_attr;
unsigned long flags;
struct qib_sge_state ssge;
struct qib_sge *sge;
struct ib_wc wc;
u32 length;
qp = qib_lookup_qpn(ibp, swqe->wr.wr.ud.remote_qpn);
if (!qp) {
ibp->n_pkt_drops++;
return;
}
if (qp->ibqp.qp_type != sqp->ibqp.qp_type ||
!(ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK)) {
ibp->n_pkt_drops++;
goto drop;
}
ah_attr = &to_iah(swqe->wr.wr.ud.ah)->attr;
ppd = ppd_from_ibp(ibp);
if (qp->ibqp.qp_num > 1) {
u16 pkey1;
u16 pkey2;
u16 lid;
pkey1 = qib_get_pkey(ibp, sqp->s_pkey_index);
pkey2 = qib_get_pkey(ibp, qp->s_pkey_index);
if (unlikely(!qib_pkey_ok(pkey1, pkey2))) {
lid = ppd->lid | (ah_attr->src_path_bits &
((1 << ppd->lmc) - 1));
qib_bad_pqkey(ibp, IB_NOTICE_TRAP_BAD_PKEY, pkey1,
ah_attr->sl,
sqp->ibqp.qp_num, qp->ibqp.qp_num,
cpu_to_be16(lid),
cpu_to_be16(ah_attr->dlid));
goto drop;
}
}
/*
* Check that the qkey matches (except for QP0, see 9.6.1.4.1).
* Qkeys with the high order bit set mean use the
* qkey from the QP context instead of the WR (see 10.2.5).
*/
if (qp->ibqp.qp_num) {
u32 qkey;
qkey = (int)swqe->wr.wr.ud.remote_qkey < 0 ?
sqp->qkey : swqe->wr.wr.ud.remote_qkey;
if (unlikely(qkey != qp->qkey)) {
u16 lid;
lid = ppd->lid | (ah_attr->src_path_bits &
((1 << ppd->lmc) - 1));
qib_bad_pqkey(ibp, IB_NOTICE_TRAP_BAD_QKEY, qkey,
ah_attr->sl,
sqp->ibqp.qp_num, qp->ibqp.qp_num,
cpu_to_be16(lid),
cpu_to_be16(ah_attr->dlid));
goto drop;
}
}
/*
* A GRH is expected to precede the data even if not
* present on the wire.
*/
length = swqe->length;
memset(&wc, 0, sizeof wc);
wc.byte_len = length + sizeof(struct ib_grh);
if (swqe->wr.opcode == IB_WR_SEND_WITH_IMM) {
wc.wc_flags = IB_WC_WITH_IMM;
wc.ex.imm_data = swqe->wr.ex.imm_data;
}
spin_lock_irqsave(&qp->r_lock, flags);
/*
* Get the next work request entry to find where to put the data.
*/
if (qp->r_flags & QIB_R_REUSE_SGE)
qp->r_flags &= ~QIB_R_REUSE_SGE;
else {
int ret;
ret = qib_get_rwqe(qp, 0);
if (ret < 0) {
qib_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
goto bail_unlock;
}
if (!ret) {
if (qp->ibqp.qp_num == 0)
ibp->n_vl15_dropped++;
goto bail_unlock;
}
}
/* Silently drop packets which are too big. */
if (unlikely(wc.byte_len > qp->r_len)) {
qp->r_flags |= QIB_R_REUSE_SGE;
ibp->n_pkt_drops++;
goto bail_unlock;
}
if (ah_attr->ah_flags & IB_AH_GRH) {
qib_copy_sge(&qp->r_sge, &ah_attr->grh,
sizeof(struct ib_grh), 1);
wc.wc_flags |= IB_WC_GRH;
} else
qib_skip_sge(&qp->r_sge, sizeof(struct ib_grh), 1);
ssge.sg_list = swqe->sg_list + 1;
ssge.sge = *swqe->sg_list;
ssge.num_sge = swqe->wr.num_sge;
sge = &ssge.sge;
while (length) {
u32 len = sge->length;
if (len > length)
len = length;
if (len > sge->sge_length)
len = sge->sge_length;
BUG_ON(len == 0);
qib_copy_sge(&qp->r_sge, sge->vaddr, len, 1);
sge->vaddr += len;
sge->length -= len;
sge->sge_length -= len;
if (sge->sge_length == 0) {
if (--ssge.num_sge)
*sge = *ssge.sg_list++;
} else if (sge->length == 0 && sge->mr->lkey) {
if (++sge->n >= QIB_SEGSZ) {
if (++sge->m >= sge->mr->mapsz)
break;
sge->n = 0;
}
sge->vaddr =
sge->mr->map[sge->m]->segs[sge->n].vaddr;
sge->length =
sge->mr->map[sge->m]->segs[sge->n].length;
}
length -= len;
}
while (qp->r_sge.num_sge) {
atomic_dec(&qp->r_sge.sge.mr->refcount);
if (--qp->r_sge.num_sge)
qp->r_sge.sge = *qp->r_sge.sg_list++;
}
if (!test_and_clear_bit(QIB_R_WRID_VALID, &qp->r_aflags))
goto bail_unlock;
wc.wr_id = qp->r_wr_id;
wc.status = IB_WC_SUCCESS;
wc.opcode = IB_WC_RECV;
wc.qp = &qp->ibqp;
wc.src_qp = sqp->ibqp.qp_num;
wc.pkey_index = qp->ibqp.qp_type == IB_QPT_GSI ?
swqe->wr.wr.ud.pkey_index : 0;
wc.slid = ppd->lid | (ah_attr->src_path_bits & ((1 << ppd->lmc) - 1));
wc.sl = ah_attr->sl;
wc.dlid_path_bits = ah_attr->dlid & ((1 << ppd->lmc) - 1);
wc.port_num = qp->port_num;
/* Signal completion event if the solicited bit is set. */
qib_cq_enter(to_icq(qp->ibqp.recv_cq), &wc,
swqe->wr.send_flags & IB_SEND_SOLICITED);
ibp->n_loop_pkts++;
bail_unlock:
spin_unlock_irqrestore(&qp->r_lock, flags);
drop:
if (atomic_dec_and_test(&qp->refcount))
wake_up(&qp->wait);
}
/**
* qib_make_ud_req - construct a UD request packet
* @qp: the QP
*
* Return 1 if constructed; otherwise, return 0.
*/
int qib_make_ud_req(struct qib_qp *qp)
{
struct qib_other_headers *ohdr;
struct ib_ah_attr *ah_attr;
struct qib_pportdata *ppd;
struct qib_ibport *ibp;
struct qib_swqe *wqe;
unsigned long flags;
u32 nwords;
u32 extra_bytes;
u32 bth0;
u16 lrh0;
u16 lid;
int ret = 0;
int next_cur;
spin_lock_irqsave(&qp->s_lock, flags);
if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_NEXT_SEND_OK)) {
if (!(ib_qib_state_ops[qp->state] & QIB_FLUSH_SEND))
goto bail;
/* We are in the error state, flush the work request. */
if (qp->s_last == qp->s_head)
goto bail;
/* If DMAs are in progress, we can't flush immediately. */
if (atomic_read(&qp->s_dma_busy)) {
qp->s_flags |= QIB_S_WAIT_DMA;
goto bail;
}
wqe = get_swqe_ptr(qp, qp->s_last);
qib_send_complete(qp, wqe, IB_WC_WR_FLUSH_ERR);
goto done;
}
if (qp->s_cur == qp->s_head)
goto bail;
wqe = get_swqe_ptr(qp, qp->s_cur);
next_cur = qp->s_cur + 1;
if (next_cur >= qp->s_size)
next_cur = 0;
/* Construct the header. */
ibp = to_iport(qp->ibqp.device, qp->port_num);
ppd = ppd_from_ibp(ibp);
ah_attr = &to_iah(wqe->wr.wr.ud.ah)->attr;
if (ah_attr->dlid >= QIB_MULTICAST_LID_BASE) {
if (ah_attr->dlid != QIB_PERMISSIVE_LID)
ibp->n_multicast_xmit++;
else
ibp->n_unicast_xmit++;
} else {
ibp->n_unicast_xmit++;
lid = ah_attr->dlid & ~((1 << ppd->lmc) - 1);
if (unlikely(lid == ppd->lid)) {
/*
* If DMAs are in progress, we can't generate
* a completion for the loopback packet since
* it would be out of order.
* XXX Instead of waiting, we could queue a
* zero length descriptor so we get a callback.
*/
if (atomic_read(&qp->s_dma_busy)) {
qp->s_flags |= QIB_S_WAIT_DMA;
goto bail;
}
qp->s_cur = next_cur;
spin_unlock_irqrestore(&qp->s_lock, flags);
qib_ud_loopback(qp, wqe);
spin_lock_irqsave(&qp->s_lock, flags);
qib_send_complete(qp, wqe, IB_WC_SUCCESS);
goto done;
}
}
qp->s_cur = next_cur;
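/* -len & 3 is the pad up to the next 4-byte boundary, e.g. len 5 -> pad 3. */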
extra_bytes = -wqe->length & 3;
nwords = (wqe->length + extra_bytes) >> 2;
/* header size in 32-bit words LRH+BTH+DETH = (8+12+8)/4. */
qp->s_hdrwords = 7;
qp->s_cur_size = wqe->length;
qp->s_cur_sge = &qp->s_sge;
qp->s_srate = ah_attr->static_rate;
qp->s_wqe = wqe;
qp->s_sge.sge = wqe->sg_list[0];
qp->s_sge.sg_list = wqe->sg_list + 1;
qp->s_sge.num_sge = wqe->wr.num_sge;
qp->s_sge.total_len = wqe->length;
if (ah_attr->ah_flags & IB_AH_GRH) {
/* Header size in 32-bit words. */
qp->s_hdrwords += qib_make_grh(ibp, &qp->s_hdr.u.l.grh,
&ah_attr->grh,
qp->s_hdrwords, nwords);
lrh0 = QIB_LRH_GRH;
ohdr = &qp->s_hdr.u.l.oth;
/*
* Don't worry about sending to locally attached multicast
* QPs; the IB spec leaves that behaviour unspecified.
*/
} else {
/* Header size in 32-bit words. */
lrh0 = QIB_LRH_BTH;
ohdr = &qp->s_hdr.u.oth;
}
if (wqe->wr.opcode == IB_WR_SEND_WITH_IMM) {
qp->s_hdrwords++;
ohdr->u.ud.imm_data = wqe->wr.ex.imm_data;
bth0 = IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE << 24;
} else
bth0 = IB_OPCODE_UD_SEND_ONLY << 24;
lrh0 |= ah_attr->sl << 4;
if (qp->ibqp.qp_type == IB_QPT_SMI)
lrh0 |= 0xF000; /* Set VL (see ch. 13.5.3.1) */
else
lrh0 |= ibp->sl_to_vl[ah_attr->sl] << 12;
qp->s_hdr.lrh[0] = cpu_to_be16(lrh0);
qp->s_hdr.lrh[1] = cpu_to_be16(ah_attr->dlid); /* DEST LID */
qp->s_hdr.lrh[2] = cpu_to_be16(qp->s_hdrwords + nwords + SIZE_OF_CRC);
lid = ppd->lid;
if (lid) {
lid |= ah_attr->src_path_bits & ((1 << ppd->lmc) - 1);
qp->s_hdr.lrh[3] = cpu_to_be16(lid);
} else
qp->s_hdr.lrh[3] = IB_LID_PERMISSIVE;
if (wqe->wr.send_flags & IB_SEND_SOLICITED)
bth0 |= IB_BTH_SOLICITED;
bth0 |= extra_bytes << 20;
bth0 |= qp->ibqp.qp_type == IB_QPT_SMI ? QIB_DEFAULT_P_KEY :
qib_get_pkey(ibp, qp->ibqp.qp_type == IB_QPT_GSI ?
wqe->wr.wr.ud.pkey_index : qp->s_pkey_index);
ohdr->bth[0] = cpu_to_be32(bth0);
/*
* Use the multicast QP if the destination LID is a multicast LID.
*/
ohdr->bth[1] = ah_attr->dlid >= QIB_MULTICAST_LID_BASE &&
ah_attr->dlid != QIB_PERMISSIVE_LID ?
cpu_to_be32(QIB_MULTICAST_QPN) :
cpu_to_be32(wqe->wr.wr.ud.remote_qpn);
ohdr->bth[2] = cpu_to_be32(qp->s_next_psn++ & QIB_PSN_MASK);
/*
* Qkeys with the high order bit set mean use the
* qkey from the QP context instead of the WR (see 10.2.5).
*/
ohdr->u.ud.deth[0] = cpu_to_be32((int)wqe->wr.wr.ud.remote_qkey < 0 ?
qp->qkey : wqe->wr.wr.ud.remote_qkey);
ohdr->u.ud.deth[1] = cpu_to_be32(qp->ibqp.qp_num);
done:
ret = 1;
goto unlock;
bail:
qp->s_flags &= ~QIB_S_BUSY;
unlock:
spin_unlock_irqrestore(&qp->s_lock, flags);
return ret;
}
static unsigned qib_lookup_pkey(struct qib_ibport *ibp, u16 pkey)
{
struct qib_pportdata *ppd = ppd_from_ibp(ibp);
struct qib_devdata *dd = ppd->dd;
unsigned ctxt = ppd->hw_pidx;
unsigned i;
pkey &= 0x7fff; /* remove limited/full membership bit */
for (i = 0; i < ARRAY_SIZE(dd->rcd[ctxt]->pkeys); ++i)
if ((dd->rcd[ctxt]->pkeys[i] & 0x7fff) == pkey)
return i;
/*
* Should not get here, this means hardware failed to validate pkeys.
* Punt and return index 0.
*/
return 0;
}
/**
* qib_ud_rcv - receive an incoming UD packet
* @ibp: the port the packet came in on
* @hdr: the packet header
* @has_grh: true if the packet has a GRH
* @data: the packet data
* @tlen: the packet length
* @qp: the QP the packet came on
*
* This is called from qib_qp_rcv() to process an incoming UD packet
* for the given QP.
* Called at interrupt level.
*/
void qib_ud_rcv(struct qib_ibport *ibp, struct qib_ib_header *hdr,
int has_grh, void *data, u32 tlen, struct qib_qp *qp)
{
struct qib_other_headers *ohdr;
int opcode;
u32 hdrsize;
u32 pad;
struct ib_wc wc;
u32 qkey;
u32 src_qp;
u16 dlid;
/* Check for GRH */
if (!has_grh) {
ohdr = &hdr->u.oth;
hdrsize = 8 + 12 + 8; /* LRH + BTH + DETH */
} else {
ohdr = &hdr->u.l.oth;
hdrsize = 8 + 40 + 12 + 8; /* LRH + GRH + BTH + DETH */
}
qkey = be32_to_cpu(ohdr->u.ud.deth[0]);
src_qp = be32_to_cpu(ohdr->u.ud.deth[1]) & QIB_QPN_MASK;
/*
* Get the number of bytes the message was padded by
* and drop incomplete packets.
*/
pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3;
if (unlikely(tlen < (hdrsize + pad + 4)))
goto drop;
tlen -= hdrsize + pad + 4;
/*
* Check that the permissive LID is only used on QP0
* and the QKEY matches (see 9.6.1.4.1 and 9.6.1.5.1).
*/
if (qp->ibqp.qp_num) {
if (unlikely(hdr->lrh[1] == IB_LID_PERMISSIVE ||
hdr->lrh[3] == IB_LID_PERMISSIVE))
goto drop;
if (qp->ibqp.qp_num > 1) {
u16 pkey1, pkey2;
pkey1 = be32_to_cpu(ohdr->bth[0]);
pkey2 = qib_get_pkey(ibp, qp->s_pkey_index);
if (unlikely(!qib_pkey_ok(pkey1, pkey2))) {
qib_bad_pqkey(ibp, IB_NOTICE_TRAP_BAD_PKEY,
pkey1,
(be16_to_cpu(hdr->lrh[0]) >> 4) &
0xF,
src_qp, qp->ibqp.qp_num,
hdr->lrh[3], hdr->lrh[1]);
return;
}
}
if (unlikely(qkey != qp->qkey)) {
qib_bad_pqkey(ibp, IB_NOTICE_TRAP_BAD_QKEY, qkey,
(be16_to_cpu(hdr->lrh[0]) >> 4) & 0xF,
src_qp, qp->ibqp.qp_num,
hdr->lrh[3], hdr->lrh[1]);
return;
}
/* Drop invalid MAD packets (see 13.5.3.1). */
if (unlikely(qp->ibqp.qp_num == 1 &&
(tlen != 256 ||
(be16_to_cpu(hdr->lrh[0]) >> 12) == 15)))
goto drop;
} else {
struct ib_smp *smp;
/* Drop invalid MAD packets (see 13.5.3.1). */
if (tlen != 256 || (be16_to_cpu(hdr->lrh[0]) >> 12) != 15)
goto drop;
smp = (struct ib_smp *) data;
if ((hdr->lrh[1] == IB_LID_PERMISSIVE ||
hdr->lrh[3] == IB_LID_PERMISSIVE) &&
smp->mgmt_class != IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
goto drop;
}
/*
* The opcode is in the low byte when it's in network order
* (top byte when in host order).
*/
opcode = be32_to_cpu(ohdr->bth[0]) >> 24;
if (qp->ibqp.qp_num > 1 &&
opcode == IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE) {
wc.ex.imm_data = ohdr->u.ud.imm_data;
wc.wc_flags = IB_WC_WITH_IMM;
tlen -= sizeof(u32);
} else if (opcode == IB_OPCODE_UD_SEND_ONLY) {
wc.ex.imm_data = 0;
wc.wc_flags = 0;
} else
goto drop;
/*
* A GRH is expected to precede the data even if not
* present on the wire.
*/
wc.byte_len = tlen + sizeof(struct ib_grh);
/*
* Get the next work request entry to find where to put the data.
*/
if (qp->r_flags & QIB_R_REUSE_SGE)
qp->r_flags &= ~QIB_R_REUSE_SGE;
else {
int ret;
ret = qib_get_rwqe(qp, 0);
if (ret < 0) {
qib_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
return;
}
if (!ret) {
if (qp->ibqp.qp_num == 0)
ibp->n_vl15_dropped++;
return;
}
}
/* Silently drop packets which are too big. */
if (unlikely(wc.byte_len > qp->r_len)) {
qp->r_flags |= QIB_R_REUSE_SGE;
goto drop;
}
if (has_grh) {
qib_copy_sge(&qp->r_sge, &hdr->u.l.grh,
sizeof(struct ib_grh), 1);
wc.wc_flags |= IB_WC_GRH;
} else
qib_skip_sge(&qp->r_sge, sizeof(struct ib_grh), 1);
qib_copy_sge(&qp->r_sge, data, wc.byte_len - sizeof(struct ib_grh), 1);
while (qp->r_sge.num_sge) {
atomic_dec(&qp->r_sge.sge.mr->refcount);
if (--qp->r_sge.num_sge)
qp->r_sge.sge = *qp->r_sge.sg_list++;
}
if (!test_and_clear_bit(QIB_R_WRID_VALID, &qp->r_aflags))
return;
wc.wr_id = qp->r_wr_id;
wc.status = IB_WC_SUCCESS;
wc.opcode = IB_WC_RECV;
wc.vendor_err = 0;
wc.qp = &qp->ibqp;
wc.src_qp = src_qp;
wc.pkey_index = qp->ibqp.qp_type == IB_QPT_GSI ?
qib_lookup_pkey(ibp, be32_to_cpu(ohdr->bth[0])) : 0;
wc.slid = be16_to_cpu(hdr->lrh[3]);
wc.sl = (be16_to_cpu(hdr->lrh[0]) >> 4) & 0xF;
dlid = be16_to_cpu(hdr->lrh[1]);
/*
* Save the LMC lower bits if the destination LID is a unicast LID.
*/
wc.dlid_path_bits = dlid >= QIB_MULTICAST_LID_BASE ? 0 :
dlid & ((1 << ppd_from_ibp(ibp)->lmc) - 1);
wc.port_num = qp->port_num;
/* Signal completion event if the solicited bit is set. */
qib_cq_enter(to_icq(qp->ibqp.recv_cq), &wc,
(ohdr->bth[0] &
cpu_to_be32(IB_BTH_SOLICITED)) != 0);
return;
drop:
ibp->n_pkt_drops++;
}
| gpl-2.0 |
NooNameR/Dirty | kernel/sched.c | 167 | 229263 | /*
* kernel/sched.c
*
* Kernel scheduler and related syscalls
*
* Copyright (C) 1991-2002 Linus Torvalds
*
* 1996-12-23 Modified by Dave Grothe to fix bugs in semaphores and
* make semaphores SMP safe
* 1998-11-19 Implemented schedule_timeout() and related stuff
* by Andrea Arcangeli
* 2002-01-04 New ultra-scalable O(1) scheduler by Ingo Molnar:
* hybrid priority-list and round-robin design with
* an array-switch method of distributing timeslices
* and per-CPU runqueues. Cleanups and useful suggestions
* by Davide Libenzi, preemptible kernel bits by Robert Love.
* 2003-09-03 Interactivity tuning by Con Kolivas.
* 2004-04-02 Scheduler domains code by Nick Piggin
* 2007-04-15 Work begun on replacing all interactivity tuning with a
* fair scheduling design by Con Kolivas.
* 2007-05-05 Load balancing (smp-nice) and other improvements
* by Peter Williams
* 2007-05-06 Interactivity improvements to CFS by Mike Galbraith
* 2007-07-01 Group scheduling enhancements by Srivatsa Vaddagiri
* 2007-11-29 RT balancing improvements by Steven Rostedt, Gregory Haskins,
* Thomas Gleixner, Mike Kravetz
*/
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/nmi.h>
#include <linux/init.h>
#include <linux/uaccess.h>
#include <linux/highmem.h>
#include <asm/mmu_context.h>
#include <linux/interrupt.h>
#include <linux/capability.h>
#include <linux/completion.h>
#include <linux/kernel_stat.h>
#include <linux/debug_locks.h>
#include <linux/perf_event.h>
#include <linux/security.h>
#include <linux/notifier.h>
#include <linux/profile.h>
#include <linux/freezer.h>
#include <linux/vmalloc.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/pid_namespace.h>
#include <linux/smp.h>
#include <linux/threads.h>
#include <linux/timer.h>
#include <linux/rcupdate.h>
#include <linux/cpu.h>
#include <linux/cpuset.h>
#include <linux/percpu.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/stop_machine.h>
#include <linux/sysctl.h>
#include <linux/syscalls.h>
#include <linux/times.h>
#include <linux/tsacct_kern.h>
#include <linux/kprobes.h>
#include <linux/delayacct.h>
#include <linux/unistd.h>
#include <linux/pagemap.h>
#include <linux/hrtimer.h>
#include <linux/tick.h>
#include <linux/debugfs.h>
#include <linux/ctype.h>
#include <linux/ftrace.h>
#include <linux/slab.h>
#include <linux/cpuacct.h>
#include <asm/tlb.h>
#include <asm/irq_regs.h>
#include <asm/mutex.h>
#include "sched_cpupri.h"
#include "workqueue_sched.h"
#include "sched_autogroup.h"
#define CREATE_TRACE_POINTS
#include <trace/events/sched.h>
/*
* Convert user-nice values [ -20 ... 0 ... 19 ]
* to static priority [ MAX_RT_PRIO..MAX_PRIO-1 ],
* and back.
*/
#define NICE_TO_PRIO(nice) (MAX_RT_PRIO + (nice) + 20)
#define PRIO_TO_NICE(prio) ((prio) - MAX_RT_PRIO - 20)
#define TASK_NICE(p) PRIO_TO_NICE((p)->static_prio)
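/*
 * Worked example (assuming the conventional MAX_RT_PRIO of 100):
 * NICE_TO_PRIO(-20) == 100, NICE_TO_PRIO(0) == 120, NICE_TO_PRIO(19) == 139,
 * and PRIO_TO_NICE() inverts the mapping exactly.
 */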
/*
* 'User priority' is the nice value converted to something we
* can work with better when scaling various scheduler parameters,
* it's a [ 0 ... 39 ] range.
*/
#define USER_PRIO(p) ((p)-MAX_RT_PRIO)
#define TASK_USER_PRIO(p) USER_PRIO((p)->static_prio)
#define MAX_USER_PRIO (USER_PRIO(MAX_PRIO))
/*
* Helpers for converting nanosecond timing to jiffy resolution
*/
#define NS_TO_JIFFIES(TIME) ((unsigned long)(TIME) / (NSEC_PER_SEC / HZ))
#define NICE_0_LOAD SCHED_LOAD_SCALE
#define NICE_0_SHIFT SCHED_LOAD_SHIFT
/*
* These are the 'tuning knobs' of the scheduler:
*
* default timeslice is 100 msecs (used only for SCHED_RR tasks).
* Timeslices get refilled after they expire.
*/
#define DEF_TIMESLICE (100 * HZ / 1000)
/*
* single value that denotes runtime == period, i.e. unlimited time.
*/
#define RUNTIME_INF ((u64)~0ULL)
static inline int rt_policy(int policy)
{
if (unlikely(policy == SCHED_FIFO || policy == SCHED_RR))
return 1;
return 0;
}
static inline int task_has_rt_policy(struct task_struct *p)
{
return rt_policy(p->policy);
}
/*
* This is the priority-queue data structure of the RT scheduling class:
*/
struct rt_prio_array {
DECLARE_BITMAP(bitmap, MAX_RT_PRIO+1); /* include 1 bit for delimiter */
struct list_head queue[MAX_RT_PRIO];
};
struct rt_bandwidth {
/* nests inside the rq lock: */
raw_spinlock_t rt_runtime_lock;
ktime_t rt_period;
u64 rt_runtime;
struct hrtimer rt_period_timer;
};
static struct rt_bandwidth def_rt_bandwidth;
static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun);
static enum hrtimer_restart sched_rt_period_timer(struct hrtimer *timer)
{
struct rt_bandwidth *rt_b =
container_of(timer, struct rt_bandwidth, rt_period_timer);
ktime_t now;
int overrun;
int idle = 0;
for (;;) {
now = hrtimer_cb_get_time(timer);
overrun = hrtimer_forward(timer, now, rt_b->rt_period);
if (!overrun)
break;
idle = do_sched_rt_period_timer(rt_b, overrun);
}
return idle ? HRTIMER_NORESTART : HRTIMER_RESTART;
}
static
void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime)
{
rt_b->rt_period = ns_to_ktime(period);
rt_b->rt_runtime = runtime;
raw_spin_lock_init(&rt_b->rt_runtime_lock);
hrtimer_init(&rt_b->rt_period_timer,
CLOCK_MONOTONIC, HRTIMER_MODE_REL);
rt_b->rt_period_timer.function = sched_rt_period_timer;
}
static inline int rt_bandwidth_enabled(void)
{
return sysctl_sched_rt_runtime >= 0;
}
static void start_rt_bandwidth(struct rt_bandwidth *rt_b)
{
ktime_t now;
if (!rt_bandwidth_enabled() || rt_b->rt_runtime == RUNTIME_INF)
return;
if (hrtimer_active(&rt_b->rt_period_timer))
return;
raw_spin_lock(&rt_b->rt_runtime_lock);
for (;;) {
unsigned long delta;
ktime_t soft, hard;
if (hrtimer_active(&rt_b->rt_period_timer))
break;
now = hrtimer_cb_get_time(&rt_b->rt_period_timer);
hrtimer_forward(&rt_b->rt_period_timer, now, rt_b->rt_period);
soft = hrtimer_get_softexpires(&rt_b->rt_period_timer);
hard = hrtimer_get_expires(&rt_b->rt_period_timer);
delta = ktime_to_ns(ktime_sub(hard, soft));
__hrtimer_start_range_ns(&rt_b->rt_period_timer, soft, delta,
HRTIMER_MODE_ABS_PINNED, 0);
}
raw_spin_unlock(&rt_b->rt_runtime_lock);
}
#ifdef CONFIG_RT_GROUP_SCHED
static void destroy_rt_bandwidth(struct rt_bandwidth *rt_b)
{
hrtimer_cancel(&rt_b->rt_period_timer);
}
#endif
/*
* sched_domains_mutex serializes calls to init_sched_domains,
* detach_destroy_domains and partition_sched_domains.
*/
static DEFINE_MUTEX(sched_domains_mutex);
#ifdef CONFIG_CGROUP_SCHED
#include <linux/cgroup.h>
struct cfs_rq;
static LIST_HEAD(task_groups);
/* task group related information */
struct task_group {
struct cgroup_subsys_state css;
#ifdef CONFIG_FAIR_GROUP_SCHED
/* schedulable entities of this group on each cpu */
struct sched_entity **se;
/* runqueue "owned" by this group on each cpu */
struct cfs_rq **cfs_rq;
unsigned long shares;
atomic_t load_weight;
#endif
#ifdef CONFIG_RT_GROUP_SCHED
struct sched_rt_entity **rt_se;
struct rt_rq **rt_rq;
struct rt_bandwidth rt_bandwidth;
#endif
struct rcu_head rcu;
struct list_head list;
struct task_group *parent;
struct list_head siblings;
struct list_head children;
#ifdef CONFIG_SCHED_AUTOGROUP
struct autogroup *autogroup;
#endif
};
/* task_group_lock serializes the addition/removal of task groups */
static DEFINE_SPINLOCK(task_group_lock);
#ifdef CONFIG_FAIR_GROUP_SCHED
# define ROOT_TASK_GROUP_LOAD NICE_0_LOAD
/*
* A weight of 0 or 1 can cause arithmetic problems.
* The weight of a cfs_rq is the sum of the weights of the entities
* queued on it, so neither an entity's weight nor a task group's
* shares value should be too large.
* (The default weight is 1024 - so there's no practical
* limitation from this.)
*/
#define MIN_SHARES (1UL << 1)
#define MAX_SHARES (1UL << 18)
static int root_task_group_load = ROOT_TASK_GROUP_LOAD;
#endif
/* Default task group.
* Every task in the system belongs to this group at bootup.
*/
struct task_group root_task_group;
#endif /* CONFIG_CGROUP_SCHED */
/* CFS-related fields in a runqueue */
struct cfs_rq {
struct load_weight load;
unsigned long nr_running;
u64 exec_clock;
u64 min_vruntime;
#ifndef CONFIG_64BIT
u64 min_vruntime_copy;
#endif
struct rb_root tasks_timeline;
struct rb_node *rb_leftmost;
struct list_head tasks;
struct list_head *balance_iterator;
/*
* 'curr' points to currently running entity on this cfs_rq.
* It is set to NULL otherwise (i.e when none are currently running).
*/
struct sched_entity *curr, *next, *last, *skip;
#ifdef CONFIG_SCHED_DEBUG
unsigned int nr_spread_over;
#endif
#ifdef CONFIG_FAIR_GROUP_SCHED
struct rq *rq; /* cpu runqueue to which this cfs_rq is attached */
/*
* leaf cfs_rqs are those that hold tasks (lowest schedulable entity in
* a hierarchy). Non-leaf lrqs hold other higher schedulable entities
* (like users, containers etc.)
*
* leaf_cfs_rq_list ties together list of leaf cfs_rq's in a cpu. This
* list is used during load balance.
*/
int on_list;
struct list_head leaf_cfs_rq_list;
struct task_group *tg; /* group that "owns" this runqueue */
#ifdef CONFIG_SMP
/*
* the part of load.weight contributed by tasks
*/
unsigned long task_weight;
/*
* h_load = weight * f(tg)
*
* Where f(tg) is the recursive weight fraction assigned to
* this group.
*/
unsigned long h_load;
/*
* Maintaining per-cpu shares distribution for group scheduling
*
* load_stamp is the last time we updated the load average
* load_last is the last time we updated the load average and saw load
* load_unacc_exec_time is currently unaccounted execution time
*/
u64 load_avg;
u64 load_period;
u64 load_stamp, load_last, load_unacc_exec_time;
unsigned long load_contribution;
#endif
#endif
};
/* Real-Time classes' related field in a runqueue: */
struct rt_rq {
struct rt_prio_array active;
unsigned long rt_nr_running;
#if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
struct {
int curr; /* highest queued rt task prio */
#ifdef CONFIG_SMP
int next; /* next highest */
#endif
} highest_prio;
#endif
#ifdef CONFIG_SMP
unsigned long rt_nr_migratory;
unsigned long rt_nr_total;
int overloaded;
struct plist_head pushable_tasks;
#endif
int rt_throttled;
u64 rt_time;
u64 rt_runtime;
/* Nests inside the rq lock: */
raw_spinlock_t rt_runtime_lock;
#ifdef CONFIG_RT_GROUP_SCHED
unsigned long rt_nr_boosted;
struct rq *rq;
struct list_head leaf_rt_rq_list;
struct task_group *tg;
#endif
};
#ifdef CONFIG_SMP
/*
* We add the notion of a root-domain which will be used to define per-domain
* variables. Each exclusive cpuset essentially defines an island domain by
* fully partitioning the member cpus from any other cpuset. Whenever a new
* exclusive cpuset is created, we also create and attach a new root-domain
* object.
*
*/
struct root_domain {
atomic_t refcount;
struct rcu_head rcu;
cpumask_var_t span;
cpumask_var_t online;
/*
* The "RT overload" flag: it gets set if a CPU has more than
* one runnable RT task.
*/
cpumask_var_t rto_mask;
atomic_t rto_count;
struct cpupri cpupri;
};
/*
* By default the system creates a single root-domain with all cpus as
* members (mimicking the global state we have today).
*/
static struct root_domain def_root_domain;
#endif /* CONFIG_SMP */
/*
* This is the main, per-CPU runqueue data structure.
*
* Locking rule: those places that want to lock multiple runqueues
* (such as the load balancing or the thread migration code), lock
* acquire operations must be ordered by ascending &runqueue.
*/
struct rq {
/* runqueue lock: */
raw_spinlock_t lock;
/*
* nr_running and cpu_load should be in the same cacheline because
* remote CPUs use both these fields when doing load calculation.
*/
unsigned long nr_running;
#define CPU_LOAD_IDX_MAX 5
unsigned long cpu_load[CPU_LOAD_IDX_MAX];
unsigned long last_load_update_tick;
#ifdef CONFIG_NO_HZ
u64 nohz_stamp;
unsigned char nohz_balance_kick;
#endif
int skip_clock_update;
/* capture load from *all* tasks on this cpu: */
struct load_weight load;
unsigned long nr_load_updates;
u64 nr_switches;
struct cfs_rq cfs;
struct rt_rq rt;
#ifdef CONFIG_FAIR_GROUP_SCHED
/* list of leaf cfs_rq on this cpu: */
struct list_head leaf_cfs_rq_list;
#endif
#ifdef CONFIG_RT_GROUP_SCHED
struct list_head leaf_rt_rq_list;
#endif
/*
* This is part of a global counter where only the total sum
* over all CPUs matters. A task can increase this counter on
* one CPU and if it got migrated afterwards it may decrease
* it on another CPU. Always updated under the runqueue lock:
*/
unsigned long nr_uninterruptible;
struct task_struct *curr, *idle, *stop;
unsigned long next_balance;
struct mm_struct *prev_mm;
u64 clock;
u64 clock_task;
atomic_t nr_iowait;
#ifdef CONFIG_SMP
struct root_domain *rd;
struct sched_domain *sd;
unsigned long cpu_power;
unsigned char idle_at_tick;
/* For active balancing */
int post_schedule;
int active_balance;
int push_cpu;
struct cpu_stop_work active_balance_work;
/* cpu of this runqueue: */
int cpu;
int online;
unsigned long avg_load_per_task;
u64 rt_avg;
u64 age_stamp;
u64 idle_stamp;
u64 avg_idle;
#endif
#ifdef CONFIG_IRQ_TIME_ACCOUNTING
u64 prev_irq_time;
#endif
/* calc_load related fields */
unsigned long calc_load_update;
long calc_load_active;
#ifdef CONFIG_SCHED_HRTICK
#ifdef CONFIG_SMP
int hrtick_csd_pending;
struct call_single_data hrtick_csd;
#endif
struct hrtimer hrtick_timer;
#endif
#ifdef CONFIG_SCHEDSTATS
/* latency stats */
struct sched_info rq_sched_info;
unsigned long long rq_cpu_time;
/* could above be rq->cfs_rq.exec_clock + rq->rt_rq.rt_runtime ? */
/* sys_sched_yield() stats */
unsigned int yld_count;
/* schedule() stats */
unsigned int sched_switch;
unsigned int sched_count;
unsigned int sched_goidle;
/* try_to_wake_up() stats */
unsigned int ttwu_count;
unsigned int ttwu_local;
#endif
#ifdef CONFIG_SMP
struct task_struct *wake_list;
#endif
};
static DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
static void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags);
static inline int cpu_of(struct rq *rq)
{
#ifdef CONFIG_SMP
return rq->cpu;
#else
return 0;
#endif
}
#define rcu_dereference_check_sched_domain(p) \
rcu_dereference_check((p), \
rcu_read_lock_held() || \
lockdep_is_held(&sched_domains_mutex))
/*
* The domain tree (rq->sd) is protected by RCU's quiescent state transition.
* See detach_destroy_domains: synchronize_sched for details.
*
* The domain tree of any CPU may only be accessed from within
* preempt-disabled sections.
*/
#define for_each_domain(cpu, __sd) \
for (__sd = rcu_dereference_check_sched_domain(cpu_rq(cpu)->sd); __sd; __sd = __sd->parent)
#define cpu_rq(cpu) (&per_cpu(runqueues, (cpu)))
#define this_rq() (&__get_cpu_var(runqueues))
#define task_rq(p) cpu_rq(task_cpu(p))
#define cpu_curr(cpu) (cpu_rq(cpu)->curr)
#define raw_rq() (&__raw_get_cpu_var(runqueues))
#ifdef CONFIG_CGROUP_SCHED
/*
* Return the group to which this task belongs.
*
* We use task_subsys_state_check() and extend the RCU verification with
* pi->lock and rq->lock because cpu_cgroup_attach() holds those locks for each
* task it moves into the cgroup. Therefore by holding either of those locks,
* we pin the task to the current cgroup.
*/
static inline struct task_group *task_group(struct task_struct *p)
{
struct task_group *tg;
struct cgroup_subsys_state *css;
css = task_subsys_state_check(p, cpu_cgroup_subsys_id,
lockdep_is_held(&p->pi_lock) ||
lockdep_is_held(&task_rq(p)->lock));
tg = container_of(css, struct task_group, css);
return autogroup_task_group(p, tg);
}
/* Change a task's cfs_rq and parent entity if it moves across CPUs/groups */
static inline void set_task_rq(struct task_struct *p, unsigned int cpu)
{
#ifdef CONFIG_FAIR_GROUP_SCHED
p->se.cfs_rq = task_group(p)->cfs_rq[cpu];
p->se.parent = task_group(p)->se[cpu];
#endif
#ifdef CONFIG_RT_GROUP_SCHED
p->rt.rt_rq = task_group(p)->rt_rq[cpu];
p->rt.parent = task_group(p)->rt_se[cpu];
#endif
}
#else /* CONFIG_CGROUP_SCHED */
static inline void set_task_rq(struct task_struct *p, unsigned int cpu) { }
static inline struct task_group *task_group(struct task_struct *p)
{
return NULL;
}
#endif /* CONFIG_CGROUP_SCHED */
static void update_rq_clock_task(struct rq *rq, s64 delta);
static void update_rq_clock(struct rq *rq)
{
s64 delta;
if (rq->skip_clock_update > 0)
return;
delta = sched_clock_cpu(cpu_of(rq)) - rq->clock;
rq->clock += delta;
update_rq_clock_task(rq, delta);
}
/*
* Tunables that become constants when CONFIG_SCHED_DEBUG is off:
*/
#ifdef CONFIG_SCHED_DEBUG
# define const_debug __read_mostly
#else
# define const_debug static const
#endif
/**
* runqueue_is_locked - Returns true if the current cpu runqueue is locked
* @cpu: the processor in question.
*
* This interface allows printk to be called with the runqueue lock
* held and know whether or not it is OK to wake up the klogd.
*/
int runqueue_is_locked(int cpu)
{
return raw_spin_is_locked(&cpu_rq(cpu)->lock);
}
/*
* Debugging: various feature bits
*/
#define SCHED_FEAT(name, enabled) \
__SCHED_FEAT_##name ,
enum {
#include "sched_features.h"
};
#undef SCHED_FEAT
#define SCHED_FEAT(name, enabled) \
(1UL << __SCHED_FEAT_##name) * enabled |
const_debug unsigned int sysctl_sched_features =
#include "sched_features.h"
0;
#undef SCHED_FEAT
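/*
 * The SCHED_FEAT() x-macro is redefined before each #include of
 * sched_features.h. For a hypothetical entry SCHED_FEAT(FOO, 1) the
 * two expansions above generate
 *
 *   __SCHED_FEAT_FOO ,              (enum member)
 *   (1UL << __SCHED_FEAT_FOO) * 1 | (bit OR-ed into the default mask)
 *
 * and the CONFIG_SCHED_DEBUG expansion below adds the "FOO" name string.
 */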
#ifdef CONFIG_SCHED_DEBUG
#define SCHED_FEAT(name, enabled) \
#name ,
static __read_mostly char *sched_feat_names[] = {
#include "sched_features.h"
NULL
};
#undef SCHED_FEAT
static int sched_feat_show(struct seq_file *m, void *v)
{
int i;
for (i = 0; sched_feat_names[i]; i++) {
if (!(sysctl_sched_features & (1UL << i)))
seq_puts(m, "NO_");
seq_printf(m, "%s ", sched_feat_names[i]);
}
seq_puts(m, "\n");
return 0;
}
static ssize_t
sched_feat_write(struct file *filp, const char __user *ubuf,
size_t cnt, loff_t *ppos)
{
char buf[64];
char *cmp;
int neg = 0;
int i;
if (cnt > 63)
cnt = 63;
if (copy_from_user(&buf, ubuf, cnt))
return -EFAULT;
buf[cnt] = 0;
cmp = strstrip(buf);
if (strncmp(cmp, "NO_", 3) == 0) {
neg = 1;
cmp += 3;
}
for (i = 0; sched_feat_names[i]; i++) {
if (strcmp(cmp, sched_feat_names[i]) == 0) {
if (neg)
sysctl_sched_features &= ~(1UL << i);
else
sysctl_sched_features |= (1UL << i);
break;
}
}
if (!sched_feat_names[i])
return -EINVAL;
*ppos += cnt;
return cnt;
}
static int sched_feat_open(struct inode *inode, struct file *filp)
{
return single_open(filp, sched_feat_show, NULL);
}
static const struct file_operations sched_feat_fops = {
.open = sched_feat_open,
.write = sched_feat_write,
.read = seq_read,
.llseek = seq_lseek,
.release = single_release,
};
static __init int sched_init_debug(void)
{
debugfs_create_file("sched_features", 0644, NULL, NULL,
&sched_feat_fops);
return 0;
}
late_initcall(sched_init_debug);
#endif
#define sched_feat(x) (sysctl_sched_features & (1UL << __SCHED_FEAT_##x))
/*
* Number of tasks to iterate in a single balance run.
* Limited because this is done with IRQs disabled.
*/
const_debug unsigned int sysctl_sched_nr_migrate = 32;
/*
* period over which we average the RT time consumption, measured
* in ms.
*
* default: 1s
*/
const_debug unsigned int sysctl_sched_time_avg = MSEC_PER_SEC;
/*
* period over which we measure -rt task cpu usage in us.
* default: 1s
*/
unsigned int sysctl_sched_rt_period = 1000000;
static __read_mostly int scheduler_running;
/*
* part of the period that we allow rt tasks to run in us.
* default: 0.95s
*/
int sysctl_sched_rt_runtime = 950000;
static inline u64 global_rt_period(void)
{
return (u64)sysctl_sched_rt_period * NSEC_PER_USEC;
}
static inline u64 global_rt_runtime(void)
{
if (sysctl_sched_rt_runtime < 0)
return RUNTIME_INF;
return (u64)sysctl_sched_rt_runtime * NSEC_PER_USEC;
}
#ifndef prepare_arch_switch
# define prepare_arch_switch(next) do { } while (0)
#endif
#ifndef finish_arch_switch
# define finish_arch_switch(prev) do { } while (0)
#endif
static inline int task_current(struct rq *rq, struct task_struct *p)
{
return rq->curr == p;
}
static inline int task_running(struct rq *rq, struct task_struct *p)
{
#ifdef CONFIG_SMP
return p->on_cpu;
#else
return task_current(rq, p);
#endif
}
#ifndef __ARCH_WANT_UNLOCKED_CTXSW
static inline void prepare_lock_switch(struct rq *rq, struct task_struct *next)
{
#ifdef CONFIG_SMP
/*
* We can optimise this out completely for !SMP, because the
* SMP rebalancing from interrupt is the only thing that cares
* here.
*/
next->on_cpu = 1;
#endif
}
static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev)
{
#ifdef CONFIG_SMP
/*
* After ->on_cpu is cleared, the task can be moved to a different CPU.
* We must ensure this doesn't happen until the switch is completely
* finished.
*/
smp_wmb();
prev->on_cpu = 0;
#endif
#ifdef CONFIG_DEBUG_SPINLOCK
/* this is a valid case when another task releases the spinlock */
rq->lock.owner = current;
#endif
/*
* If we are tracking spinlock dependencies then we have to
* fix up the runqueue lock - which gets 'carried over' from
* prev into current:
*/
spin_acquire(&rq->lock.dep_map, 0, 0, _THIS_IP_);
raw_spin_unlock_irq(&rq->lock);
}
#else /* __ARCH_WANT_UNLOCKED_CTXSW */
static inline void prepare_lock_switch(struct rq *rq, struct task_struct *next)
{
#ifdef CONFIG_SMP
/*
* We can optimise this out completely for !SMP, because the
* SMP rebalancing from interrupt is the only thing that cares
* here.
*/
next->on_cpu = 1;
#endif
#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
raw_spin_unlock_irq(&rq->lock);
#else
raw_spin_unlock(&rq->lock);
#endif
}
static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev)
{
#ifdef CONFIG_SMP
/*
* After ->on_cpu is cleared, the task can be moved to a different CPU.
* We must ensure this doesn't happen until the switch is completely
* finished.
*/
smp_wmb();
prev->on_cpu = 0;
#endif
#ifndef __ARCH_WANT_INTERRUPTS_ON_CTXSW
local_irq_enable();
#endif
}
#endif /* __ARCH_WANT_UNLOCKED_CTXSW */
/*
* __task_rq_lock - lock the rq @p resides on.
*/
static inline struct rq *__task_rq_lock(struct task_struct *p)
__acquires(rq->lock)
{
struct rq *rq;
lockdep_assert_held(&p->pi_lock);
for (;;) {
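/* p may migrate between reading task_rq(p) and taking its lock;
 * re-check under the lock and retry until the two agree. */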
rq = task_rq(p);
raw_spin_lock(&rq->lock);
if (likely(rq == task_rq(p)))
return rq;
raw_spin_unlock(&rq->lock);
}
}
/*
* task_rq_lock - lock p->pi_lock and lock the rq @p resides on.
*/
static struct rq *task_rq_lock(struct task_struct *p, unsigned long *flags)
__acquires(p->pi_lock)
__acquires(rq->lock)
{
struct rq *rq;
for (;;) {
raw_spin_lock_irqsave(&p->pi_lock, *flags);
rq = task_rq(p);
raw_spin_lock(&rq->lock);
if (likely(rq == task_rq(p)))
return rq;
raw_spin_unlock(&rq->lock);
raw_spin_unlock_irqrestore(&p->pi_lock, *flags);
}
}
static void __task_rq_unlock(struct rq *rq)
__releases(rq->lock)
{
raw_spin_unlock(&rq->lock);
}
static inline void
task_rq_unlock(struct rq *rq, struct task_struct *p, unsigned long *flags)
__releases(rq->lock)
__releases(p->pi_lock)
{
raw_spin_unlock(&rq->lock);
raw_spin_unlock_irqrestore(&p->pi_lock, *flags);
}
/*
* this_rq_lock - lock this runqueue and disable interrupts.
*/
static struct rq *this_rq_lock(void)
__acquires(rq->lock)
{
struct rq *rq;
local_irq_disable();
rq = this_rq();
raw_spin_lock(&rq->lock);
return rq;
}
#ifdef CONFIG_SCHED_HRTICK
/*
* Use HR-timers to deliver accurate preemption points.
*
* It's all a bit involved since we cannot program an hrtimer while holding
* the rq->lock. So what we do is store a state in rq->hrtick_* and ask for a
* reschedule event.
*
* When we get rescheduled we reprogram the hrtick_timer outside of the
* rq->lock.
*/
/*
* Use hrtick when:
* - enabled by features
* - hrtimer is actually high res
*/
static inline int hrtick_enabled(struct rq *rq)
{
if (!sched_feat(HRTICK))
return 0;
if (!cpu_active(cpu_of(rq)))
return 0;
return hrtimer_is_hres_active(&rq->hrtick_timer);
}
static void hrtick_clear(struct rq *rq)
{
if (hrtimer_active(&rq->hrtick_timer))
hrtimer_cancel(&rq->hrtick_timer);
}
/*
* High-resolution timer tick.
* Runs from hardirq context with interrupts disabled.
*/
static enum hrtimer_restart hrtick(struct hrtimer *timer)
{
struct rq *rq = container_of(timer, struct rq, hrtick_timer);
WARN_ON_ONCE(cpu_of(rq) != smp_processor_id());
raw_spin_lock(&rq->lock);
update_rq_clock(rq);
rq->curr->sched_class->task_tick(rq, rq->curr, 1);
raw_spin_unlock(&rq->lock);
return HRTIMER_NORESTART;
}
#ifdef CONFIG_SMP
/*
* called from hardirq (IPI) context
*/
static void __hrtick_start(void *arg)
{
struct rq *rq = arg;
raw_spin_lock(&rq->lock);
hrtimer_restart(&rq->hrtick_timer);
rq->hrtick_csd_pending = 0;
raw_spin_unlock(&rq->lock);
}
/*
* Called to set the hrtick timer state.
*
* called with rq->lock held and irqs disabled
*/
static void hrtick_start(struct rq *rq, u64 delay)
{
struct hrtimer *timer = &rq->hrtick_timer;
ktime_t time = ktime_add_ns(timer->base->get_time(), delay);
hrtimer_set_expires(timer, time);
if (rq == this_rq()) {
hrtimer_restart(timer);
} else if (!rq->hrtick_csd_pending) {
__smp_call_function_single(cpu_of(rq), &rq->hrtick_csd, 0);
rq->hrtick_csd_pending = 1;
}
}
static int
hotplug_hrtick(struct notifier_block *nfb, unsigned long action, void *hcpu)
{
int cpu = (int)(long)hcpu;
switch (action) {
case CPU_UP_CANCELED:
case CPU_UP_CANCELED_FROZEN:
case CPU_DOWN_PREPARE:
case CPU_DOWN_PREPARE_FROZEN:
case CPU_DEAD:
case CPU_DEAD_FROZEN:
hrtick_clear(cpu_rq(cpu));
return NOTIFY_OK;
}
return NOTIFY_DONE;
}
static __init void init_hrtick(void)
{
hotcpu_notifier(hotplug_hrtick, 0);
}
#else
/*
* Called to set the hrtick timer state.
*
* called with rq->lock held and irqs disabled
*/
static void hrtick_start(struct rq *rq, u64 delay)
{
__hrtimer_start_range_ns(&rq->hrtick_timer, ns_to_ktime(delay), 0,
HRTIMER_MODE_REL_PINNED, 0);
}
static inline void init_hrtick(void)
{
}
#endif /* CONFIG_SMP */
static void init_rq_hrtick(struct rq *rq)
{
#ifdef CONFIG_SMP
rq->hrtick_csd_pending = 0;
rq->hrtick_csd.flags = 0;
rq->hrtick_csd.func = __hrtick_start;
rq->hrtick_csd.info = rq;
#endif
hrtimer_init(&rq->hrtick_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
rq->hrtick_timer.function = hrtick;
}
#else /* CONFIG_SCHED_HRTICK */
static inline void hrtick_clear(struct rq *rq)
{
}
static inline void init_rq_hrtick(struct rq *rq)
{
}
static inline void init_hrtick(void)
{
}
#endif /* CONFIG_SCHED_HRTICK */
/*
* resched_task - mark a task 'to be rescheduled now'.
*
* On UP this means the setting of the need_resched flag, on SMP it
* might also involve a cross-CPU call to trigger the scheduler on
* the target CPU.
*/
#ifdef CONFIG_SMP
#ifndef tsk_is_polling
#define tsk_is_polling(t) test_tsk_thread_flag(t, TIF_POLLING_NRFLAG)
#endif
static void resched_task(struct task_struct *p)
{
int cpu;
assert_raw_spin_locked(&task_rq(p)->lock);
if (test_tsk_need_resched(p))
return;
set_tsk_need_resched(p);
cpu = task_cpu(p);
if (cpu == smp_processor_id())
return;
/* NEED_RESCHED must be visible before we test polling */
smp_mb();
if (!tsk_is_polling(p))
smp_send_reschedule(cpu);
}
static void resched_cpu(int cpu)
{
struct rq *rq = cpu_rq(cpu);
unsigned long flags;
if (!raw_spin_trylock_irqsave(&rq->lock, flags))
return;
resched_task(cpu_curr(cpu));
raw_spin_unlock_irqrestore(&rq->lock, flags);
}
#ifdef CONFIG_NO_HZ
/*
* In the semi idle case, use the nearest busy cpu for migrating timers
* from an idle cpu. This is good for power-savings.
*
* We don't do a similar optimization for a completely idle system, as
* selecting an idle cpu would add more delay to the timers than intended
* (that cpu's timer base may not be up to date wrt jiffies etc.).
*/
int get_nohz_timer_target(void)
{
int cpu = smp_processor_id();
int i;
struct sched_domain *sd;
rcu_read_lock();
for_each_domain(cpu, sd) {
for_each_cpu(i, sched_domain_span(sd)) {
if (!idle_cpu(i)) {
cpu = i;
goto unlock;
}
}
}
unlock:
rcu_read_unlock();
return cpu;
}
/*
* When add_timer_on() enqueues a timer into the timer wheel of an
* idle CPU then this timer might expire before the next timer event
* which is scheduled to wake up that CPU. In case of a completely
* idle system the next event might even be infinite time into the
* future. wake_up_idle_cpu() ensures that the CPU is woken up and
* leaves the inner idle loop so the newly added timer is taken into
* account when the CPU goes back to idle and evaluates the timer
* wheel for the next timer event.
*/
void wake_up_idle_cpu(int cpu)
{
struct rq *rq = cpu_rq(cpu);
if (cpu == smp_processor_id())
return;
/*
* This is safe, as this function is called with the timer
* wheel base lock of (cpu) held. When the CPU is on the way
* to idle and has not yet set rq->curr to idle then it will
* be serialized on the timer wheel base lock and take the new
* timer into account automatically.
*/
if (rq->curr != rq->idle)
return;
/*
* We can set TIF_RESCHED on the idle task of the other CPU
* lockless. The worst case is that the other CPU runs the
* idle task through an additional NOOP schedule()
*/
set_tsk_need_resched(rq->idle);
/* NEED_RESCHED must be visible before we test polling */
smp_mb();
if (!tsk_is_polling(rq->idle))
smp_send_reschedule(cpu);
}
#endif /* CONFIG_NO_HZ */
static u64 sched_avg_period(void)
{
return (u64)sysctl_sched_time_avg * NSEC_PER_MSEC / 2;
}
static void sched_avg_update(struct rq *rq)
{
s64 period = sched_avg_period();
while ((s64)(rq->clock - rq->age_stamp) > period) {
/*
* Inline assembly required to prevent the compiler
* optimising this loop into a divmod call.
* See __iter_div_u64_rem() for another example of this.
*/
asm("" : "+rm" (rq->age_stamp));
rq->age_stamp += period;
rq->rt_avg /= 2;
}
}
static void sched_rt_avg_update(struct rq *rq, u64 rt_delta)
{
rq->rt_avg += rt_delta;
sched_avg_update(rq);
}
#else /* !CONFIG_SMP */
static void resched_task(struct task_struct *p)
{
assert_raw_spin_locked(&task_rq(p)->lock);
set_tsk_need_resched(p);
}
static void sched_rt_avg_update(struct rq *rq, u64 rt_delta)
{
}
static void sched_avg_update(struct rq *rq)
{
}
#endif /* CONFIG_SMP */
#if BITS_PER_LONG == 32
# define WMULT_CONST (~0UL)
#else
# define WMULT_CONST (1UL << 32)
#endif
#define WMULT_SHIFT 32
/*
* Shift right and round:
*/
#define SRR(x, y) (((x) + (1UL << ((y) - 1))) >> (y))
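/* SRR(x, y) divides by 2^y rounding to nearest: e.g. SRR(7, 1) == 4. */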
/*
* delta *= weight / lw
*/
static unsigned long
calc_delta_mine(unsigned long delta_exec, unsigned long weight,
struct load_weight *lw)
{
u64 tmp;
/*
* weight can be less than 2^SCHED_LOAD_RESOLUTION for task group sched
* entities since MIN_SHARES = 2. Treat weight as 1 if less than
* 2^SCHED_LOAD_RESOLUTION.
*/
if (likely(weight > (1UL << SCHED_LOAD_RESOLUTION)))
tmp = (u64)delta_exec * scale_load_down(weight);
else
tmp = (u64)delta_exec;
if (!lw->inv_weight) {
unsigned long w = scale_load_down(lw->weight);
if (BITS_PER_LONG > 32 && unlikely(w >= WMULT_CONST))
lw->inv_weight = 1;
else if (unlikely(!w))
lw->inv_weight = WMULT_CONST;
else
lw->inv_weight = WMULT_CONST / w;
}
/*
* Check whether we'd overflow the 64-bit multiplication:
*/
if (unlikely(tmp > WMULT_CONST))
tmp = SRR(SRR(tmp, WMULT_SHIFT/2) * lw->inv_weight,
WMULT_SHIFT/2);
else
tmp = SRR(tmp * lw->inv_weight, WMULT_SHIFT);
return (unsigned long)min(tmp, (u64)(unsigned long)LONG_MAX);
}
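/*
* Worked example (illustrative, assuming SCHED_LOAD_RESOLUTION == 0 so
* scale_load_down() is an identity): a nice-0 task (weight 1024) on a
* runqueue with total weight 3072 should be charged one third of the
* wall time. With delta_exec = 3000000 (3ms):
*
* inv_weight = 2^32 / 3072 = 1398101
* tmp = 3000000 * 1024 = 3072000000 (< WMULT_CONST)
* result = SRR(tmp * 1398101, 32) ~= 1000000
*
* i.e. the task is accounted ~1ms of weighted runtime for 3ms of clock.
*/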
static inline void update_load_add(struct load_weight *lw, unsigned long inc)
{
lw->weight += inc;
lw->inv_weight = 0;
}
static inline void update_load_sub(struct load_weight *lw, unsigned long dec)
{
lw->weight -= dec;
lw->inv_weight = 0;
}
static inline void update_load_set(struct load_weight *lw, unsigned long w)
{
lw->weight = w;
lw->inv_weight = 0;
}
/*
* To avoid subverting "niceness" due to an uneven distribution of tasks
* with abnormal "nice" values across CPUs, the contribution each task
* makes to its run queue's load is weighted according to its scheduling
* class and "nice" value. For SCHED_NORMAL tasks this is just a scaled
* version of the new time slice allocation that they receive on time
* slice expiry etc.
*/
#define WEIGHT_IDLEPRIO 3
#define WMULT_IDLEPRIO 1431655765
/*
* Nice levels are multiplicative, with a gentle 10% change for every
* nice level changed. I.e. when a CPU-bound task goes from nice 0 to
* nice 1, it will get ~10% less CPU time than another CPU-bound task
* that remained on nice 0.
*
* The "10% effect" is relative and cumulative: from _any_ nice level,
* if you go up 1 level, it's -10% CPU usage; if you go down 1 level,
* it's +10% CPU usage. (To achieve that we use a multiplier of 1.25:
* if a task goes up by ~10% and another task goes down by ~10%, then
* the relative distance between them is ~25%.)
*/
static const int prio_to_weight[40] = {
/* -20 */ 88761, 71755, 56483, 46273, 36291,
/* -15 */ 29154, 23254, 18705, 14949, 11916,
/* -10 */ 9548, 7620, 6100, 4904, 3906,
/* -5 */ 3121, 2501, 1991, 1586, 1277,
/* 0 */ 1024, 820, 655, 526, 423,
/* 5 */ 335, 272, 215, 172, 137,
/* 10 */ 110, 87, 70, 56, 45,
/* 15 */ 36, 29, 23, 18, 15,
};
/*
* Inverse (2^32/x) values of the prio_to_weight[] array, precalculated.
*
* In cases where the weight does not change often, we can use the
* precalculated inverse to speed up arithmetics by turning divisions
* into multiplications:
*/
static const u32 prio_to_wmult[40] = {
/* -20 */ 48388, 59856, 76040, 92818, 118348,
/* -15 */ 147320, 184698, 229616, 287308, 360437,
/* -10 */ 449829, 563644, 704093, 875809, 1099582,
/* -5 */ 1376151, 1717300, 2157191, 2708050, 3363326,
/* 0 */ 4194304, 5237765, 6557202, 8165337, 10153587,
/* 5 */ 12820798, 15790321, 19976592, 24970740, 31350126,
/* 10 */ 39045157, 49367440, 61356676, 76695844, 95443717,
/* 15 */ 119304647, 148102320, 186737708, 238609294, 286331153,
};
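/*
* Sanity check on the two tables (illustrative): consecutive entries
* differ by a factor of ~1.25, e.g. nice 0 -> nice 1 is 1024/820 ~= 1.25,
* and each wmult entry is ~2^32 / weight, e.g. 2^32 / 1024 = 4194304 for
* nice 0. With one nice-0 and one nice-1 CPU hog sharing a CPU, the
* shares come out as 1024/1844 ~= 55% vs 820/1844 ~= 45%, the ~10%
* relative gap described above.
*/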
/* Time spent by the tasks of the cpu accounting group executing in ... */
enum cpuacct_stat_index {
CPUACCT_STAT_USER, /* ... user mode */
CPUACCT_STAT_SYSTEM, /* ... kernel mode */
CPUACCT_STAT_NSTATS,
};
#ifdef CONFIG_CGROUP_CPUACCT
static void cpuacct_charge(struct task_struct *tsk, u64 cputime);
static void cpuacct_update_stats(struct task_struct *tsk,
enum cpuacct_stat_index idx, cputime_t val);
#else
static inline void cpuacct_charge(struct task_struct *tsk, u64 cputime) {}
static inline void cpuacct_update_stats(struct task_struct *tsk,
enum cpuacct_stat_index idx, cputime_t val) {}
#endif
static inline void inc_cpu_load(struct rq *rq, unsigned long load)
{
update_load_add(&rq->load, load);
}
static inline void dec_cpu_load(struct rq *rq, unsigned long load)
{
update_load_sub(&rq->load, load);
}
#if (defined(CONFIG_SMP) && defined(CONFIG_FAIR_GROUP_SCHED)) || defined(CONFIG_RT_GROUP_SCHED)
typedef int (*tg_visitor)(struct task_group *, void *);
/*
* Iterate the full tree, calling @down when first entering a node and @up when
* leaving it for the final time.
*/
static int walk_tg_tree(tg_visitor down, tg_visitor up, void *data)
{
struct task_group *parent, *child;
int ret;
rcu_read_lock();
parent = &root_task_group;
down:
ret = (*down)(parent, data);
if (ret)
goto out_unlock;
list_for_each_entry_rcu(child, &parent->children, siblings) {
parent = child;
goto down;
up:
continue;
}
ret = (*up)(parent, data);
if (ret)
goto out_unlock;
child = parent;
parent = parent->parent;
if (parent)
goto up;
out_unlock:
rcu_read_unlock();
return ret;
}
static int tg_nop(struct task_group *tg, void *data)
{
return 0;
}
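/*
* Usage sketch for walk_tg_tree() (illustrative only, not kernel code):
* count every task group in the hierarchy with a hypothetical visitor.
*/
#if 0
static int tg_count(struct task_group *tg, void *data)
{
(*(int *)data)++;
return 0; /* returning non-zero would abort the walk */
}
static int nr_task_groups(void)
{
int n = 0;
walk_tg_tree(tg_count, tg_nop, &n);
return n;
}
#endif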
#endif
#ifdef CONFIG_SMP
/* Used instead of source_load when we know the type == 0 */
static unsigned long weighted_cpuload(const int cpu)
{
return cpu_rq(cpu)->load.weight;
}
/*
* Return a low guess at the load of a migration-source cpu weighted
* according to the scheduling class and "nice" value.
*
* We want to under-estimate the load of migration sources, to
* balance conservatively.
*/
static unsigned long source_load(int cpu, int type)
{
struct rq *rq = cpu_rq(cpu);
unsigned long total = weighted_cpuload(cpu);
if (type == 0 || !sched_feat(LB_BIAS))
return total;
return min(rq->cpu_load[type-1], total);
}
/*
* Return a high guess at the load of a migration-target cpu weighted
* according to the scheduling class and "nice" value.
*/
static unsigned long target_load(int cpu, int type)
{
struct rq *rq = cpu_rq(cpu);
unsigned long total = weighted_cpuload(cpu);
if (type == 0 || !sched_feat(LB_BIAS))
return total;
return max(rq->cpu_load[type-1], total);
}
static unsigned long power_of(int cpu)
{
return cpu_rq(cpu)->cpu_power;
}
static int task_hot(struct task_struct *p, u64 now, struct sched_domain *sd);
static unsigned long cpu_avg_load_per_task(int cpu)
{
struct rq *rq = cpu_rq(cpu);
unsigned long nr_running = ACCESS_ONCE(rq->nr_running);
if (nr_running)
rq->avg_load_per_task = rq->load.weight / nr_running;
else
rq->avg_load_per_task = 0;
return rq->avg_load_per_task;
}
#ifdef CONFIG_FAIR_GROUP_SCHED
/*
* Compute the cpu's hierarchical load factor for each task group.
* This needs to be done in a top-down fashion because the load of a child
* group is a fraction of its parents load.
*/
static int tg_load_down(struct task_group *tg, void *data)
{
unsigned long load;
long cpu = (long)data;
if (!tg->parent) {
load = cpu_rq(cpu)->load.weight;
} else {
load = tg->parent->cfs_rq[cpu]->h_load;
load *= tg->se[cpu]->load.weight;
load /= tg->parent->cfs_rq[cpu]->load.weight + 1;
}
tg->cfs_rq[cpu]->h_load = load;
return 0;
}
static void update_h_load(long cpu)
{
walk_tg_tree(tg_load_down, tg_nop, (void *)cpu);
}
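/*
* Worked example (illustrative): if the parent carries h_load = 2048,
* the group's entity on this cpu weighs 1024 and the parent cfs_rq
* weighs 4096 in total, then
*
* h_load = 2048 * 1024 / (4096 + 1) ~= 511
*
* i.e. about a quarter of the parent's hierarchical load, matching the
* entity's quarter share of the parent runqueue (the +1 merely guards
* against a zero divisor).
*/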
#endif
#ifdef CONFIG_PREEMPT
static void double_rq_lock(struct rq *rq1, struct rq *rq2);
/*
* fair double_lock_balance: Safely acquires both rq->locks in a fair
* way at the expense of forcing extra atomic operations in all
* invocations. This assures that the double_lock is acquired using the
* same underlying policy as the spinlock_t on this architecture, which
* reduces latency compared to the unfair variant below. However, it
* also adds more overhead and therefore may reduce throughput.
*/
static inline int _double_lock_balance(struct rq *this_rq, struct rq *busiest)
__releases(this_rq->lock)
__acquires(busiest->lock)
__acquires(this_rq->lock)
{
raw_spin_unlock(&this_rq->lock);
double_rq_lock(this_rq, busiest);
return 1;
}
#else
/*
* Unfair double_lock_balance: Optimizes throughput at the expense of
* latency by eliminating extra atomic operations when the locks are
* already in proper order on entry. This favors lower cpu-ids and will
* grant the double lock to lower cpus over higher ids under contention,
* regardless of entry order into the function.
*/
static int _double_lock_balance(struct rq *this_rq, struct rq *busiest)
__releases(this_rq->lock)
__acquires(busiest->lock)
__acquires(this_rq->lock)
{
int ret = 0;
if (unlikely(!raw_spin_trylock(&busiest->lock))) {
if (busiest < this_rq) {
raw_spin_unlock(&this_rq->lock);
raw_spin_lock(&busiest->lock);
raw_spin_lock_nested(&this_rq->lock,
SINGLE_DEPTH_NESTING);
ret = 1;
} else
raw_spin_lock_nested(&busiest->lock,
SINGLE_DEPTH_NESTING);
}
return ret;
}
#endif /* CONFIG_PREEMPT */
/*
* double_lock_balance - lock the busiest runqueue, this_rq is locked already.
*/
static int double_lock_balance(struct rq *this_rq, struct rq *busiest)
{
if (unlikely(!irqs_disabled())) {
/* printk() doesn't work well under rq->lock */
raw_spin_unlock(&this_rq->lock);
BUG_ON(1);
}
return _double_lock_balance(this_rq, busiest);
}
static inline void double_unlock_balance(struct rq *this_rq, struct rq *busiest)
__releases(busiest->lock)
{
raw_spin_unlock(&busiest->lock);
lock_set_subclass(&this_rq->lock.dep_map, 0, _RET_IP_);
}
/*
* double_rq_lock - safely lock two runqueues
*
* Note this does not disable interrupts like task_rq_lock,
* you need to do so manually before calling.
*/
static void double_rq_lock(struct rq *rq1, struct rq *rq2)
__acquires(rq1->lock)
__acquires(rq2->lock)
{
BUG_ON(!irqs_disabled());
if (rq1 == rq2) {
raw_spin_lock(&rq1->lock);
__acquire(rq2->lock); /* Fake it out ;) */
} else {
if (rq1 < rq2) {
raw_spin_lock(&rq1->lock);
raw_spin_lock_nested(&rq2->lock, SINGLE_DEPTH_NESTING);
} else {
raw_spin_lock(&rq2->lock);
raw_spin_lock_nested(&rq1->lock, SINGLE_DEPTH_NESTING);
}
}
}
/*
* double_rq_unlock - safely unlock two runqueues
*
* Note this does not restore interrupts like task_rq_unlock,
* you need to do so manually after calling.
*/
static void double_rq_unlock(struct rq *rq1, struct rq *rq2)
__releases(rq1->lock)
__releases(rq2->lock)
{
raw_spin_unlock(&rq1->lock);
if (rq1 != rq2)
raw_spin_unlock(&rq2->lock);
else
__release(rq2->lock);
}
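/*
* Both variants above rely on one deadlock-avoidance rule: whenever two
* rq locks must be held at once, take the lower-addressed lock first.
* A minimal sketch of that generic pattern (illustrative, not kernel
* code):
*/
#if 0
static void lock_pair(raw_spinlock_t *a, raw_spinlock_t *b)
{
if (a == b) {
raw_spin_lock(a);
} else if (a < b) { /* global order: lowest address first */
raw_spin_lock(a);
raw_spin_lock_nested(b, SINGLE_DEPTH_NESTING);
} else {
raw_spin_lock(b);
raw_spin_lock_nested(a, SINGLE_DEPTH_NESTING);
}
}
#endif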
#else /* CONFIG_SMP */
/*
* double_rq_lock - safely lock two runqueues
*
* Note this does not disable interrupts like task_rq_lock,
* you need to do so manually before calling.
*/
static void double_rq_lock(struct rq *rq1, struct rq *rq2)
__acquires(rq1->lock)
__acquires(rq2->lock)
{
BUG_ON(!irqs_disabled());
BUG_ON(rq1 != rq2);
raw_spin_lock(&rq1->lock);
__acquire(rq2->lock); /* Fake it out ;) */
}
/*
* double_rq_unlock - safely unlock two runqueues
*
* Note this does not restore interrupts like task_rq_unlock,
* you need to do so manually after calling.
*/
static void double_rq_unlock(struct rq *rq1, struct rq *rq2)
__releases(rq1->lock)
__releases(rq2->lock)
{
BUG_ON(rq1 != rq2);
raw_spin_unlock(&rq1->lock);
__release(rq2->lock);
}
#endif
static void calc_load_account_idle(struct rq *this_rq);
static void update_sysctl(void);
static int get_update_sysctl_factor(void);
static void update_cpu_load(struct rq *this_rq);
static inline void __set_task_cpu(struct task_struct *p, unsigned int cpu)
{
set_task_rq(p, cpu);
#ifdef CONFIG_SMP
/*
* After ->cpu is set up to a new value, task_rq_lock(p, ...) can be
* successfully executed on another CPU. We must ensure that updates of
* per-task data have been completed by this moment.
*/
smp_wmb();
task_thread_info(p)->cpu = cpu;
#endif
}
static const struct sched_class rt_sched_class;
#define sched_class_highest (&stop_sched_class)
#define for_each_class(class) \
for (class = sched_class_highest; class; class = class->next)
#include "sched_stats.h"
static void inc_nr_running(struct rq *rq)
{
rq->nr_running++;
}
static void dec_nr_running(struct rq *rq)
{
rq->nr_running--;
}
static void set_load_weight(struct task_struct *p)
{
int prio = p->static_prio - MAX_RT_PRIO;
struct load_weight *load = &p->se.load;
/*
* SCHED_IDLE tasks get minimal weight:
*/
if (p->policy == SCHED_IDLE) {
load->weight = scale_load(WEIGHT_IDLEPRIO);
load->inv_weight = WMULT_IDLEPRIO;
return;
}
load->weight = scale_load(prio_to_weight[prio]);
load->inv_weight = prio_to_wmult[prio];
}
static void enqueue_task(struct rq *rq, struct task_struct *p, int flags)
{
update_rq_clock(rq);
sched_info_queued(p);
p->sched_class->enqueue_task(rq, p, flags);
}
static void dequeue_task(struct rq *rq, struct task_struct *p, int flags)
{
update_rq_clock(rq);
sched_info_dequeued(p);
p->sched_class->dequeue_task(rq, p, flags);
}
/*
* activate_task - move a task to the runqueue.
*/
static void activate_task(struct rq *rq, struct task_struct *p, int flags)
{
if (task_contributes_to_load(p))
rq->nr_uninterruptible--;
enqueue_task(rq, p, flags);
inc_nr_running(rq);
}
/*
* deactivate_task - remove a task from the runqueue.
*/
static void deactivate_task(struct rq *rq, struct task_struct *p, int flags)
{
if (task_contributes_to_load(p))
rq->nr_uninterruptible++;
dequeue_task(rq, p, flags);
dec_nr_running(rq);
}
#ifdef CONFIG_IRQ_TIME_ACCOUNTING
/*
* There are no locks covering percpu hardirq/softirq time.
* They are only modified in account_system_vtime, on the corresponding
* CPU with interrupts disabled, so writes are safe.
* They are read and saved off onto struct rq in update_rq_clock().
* This means another CPU may read this CPU's irq time and race with
* irq/account_system_vtime on this CPU. We would then get either the old
* or the new value, with the side effect of accounting a slice of irq
* time to the wrong task when an irq is in progress while we read
* rq->clock. That is a worthy compromise in place of having locks on
* each irq in account_system_time.
*/
static DEFINE_PER_CPU(u64, cpu_hardirq_time);
static DEFINE_PER_CPU(u64, cpu_softirq_time);
static DEFINE_PER_CPU(u64, irq_start_time);
static int sched_clock_irqtime;
void enable_sched_clock_irqtime(void)
{
sched_clock_irqtime = 1;
}
void disable_sched_clock_irqtime(void)
{
sched_clock_irqtime = 0;
}
#ifndef CONFIG_64BIT
static DEFINE_PER_CPU(seqcount_t, irq_time_seq);
static inline void irq_time_write_begin(void)
{
__this_cpu_inc(irq_time_seq.sequence);
smp_wmb();
}
static inline void irq_time_write_end(void)
{
smp_wmb();
__this_cpu_inc(irq_time_seq.sequence);
}
static inline u64 irq_time_read(int cpu)
{
u64 irq_time;
unsigned seq;
do {
seq = read_seqcount_begin(&per_cpu(irq_time_seq, cpu));
irq_time = per_cpu(cpu_softirq_time, cpu) +
per_cpu(cpu_hardirq_time, cpu);
} while (read_seqcount_retry(&per_cpu(irq_time_seq, cpu), seq));
return irq_time;
}
#else /* CONFIG_64BIT */
static inline void irq_time_write_begin(void)
{
}
static inline void irq_time_write_end(void)
{
}
static inline u64 irq_time_read(int cpu)
{
return per_cpu(cpu_softirq_time, cpu) + per_cpu(cpu_hardirq_time, cpu);
}
#endif /* CONFIG_64BIT */
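/*
* The 32-bit seqcount above is needed because a u64 load is not atomic
* there: a reader could combine the low word of an old value with the
* high word of a new one. Worked example (illustrative): if
* cpu_hardirq_time advances from 0x00000000ffffffff to
* 0x0000000100000000 mid-read, a torn read could yield 0 or
* 0x00000001ffffffff; the read_seqcount_retry() loop discards any value
* read while the sequence changed.
*/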
/*
* Called before incrementing preempt_count on {soft,}irq_enter
* and before decrementing preempt_count on {soft,}irq_exit.
*/
void account_system_vtime(struct task_struct *curr)
{
unsigned long flags;
s64 delta;
int cpu;
if (!sched_clock_irqtime)
return;
local_irq_save(flags);
cpu = smp_processor_id();
delta = sched_clock_cpu(cpu) - __this_cpu_read(irq_start_time);
__this_cpu_add(irq_start_time, delta);
irq_time_write_begin();
/*
* We do not account for softirq time from ksoftirqd here.
* We want to continue accounting softirq time to the ksoftirqd thread
* in that case, so as not to confuse the scheduler with a special task
* that does not consume any time but still wants to run.
*/
if (hardirq_count())
__this_cpu_add(cpu_hardirq_time, delta);
else if (in_serving_softirq() && curr != this_cpu_ksoftirqd())
__this_cpu_add(cpu_softirq_time, delta);
irq_time_write_end();
local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(account_system_vtime);
static void update_rq_clock_task(struct rq *rq, s64 delta)
{
s64 irq_delta;
irq_delta = irq_time_read(cpu_of(rq)) - rq->prev_irq_time;
/*
* Since irq_time is only updated on {soft,}irq_exit, we might run into
* this case when a previous update_rq_clock() happened inside a
* {soft,}irq region.
*
* When this happens, we stop ->clock_task and only update the
* prev_irq_time stamp to account for the part that fit, so that a next
* update will consume the rest. This ensures ->clock_task is
* monotonic.
*
* It does, however, cause some slight misattribution of {soft,}irq
* time; a more accurate solution would be to update the irq_time using
* the current rq->clock timestamp, except that would require using
* atomic ops.
*/
if (irq_delta > delta)
irq_delta = delta;
rq->prev_irq_time += irq_delta;
delta -= irq_delta;
rq->clock_task += delta;
if (irq_delta && sched_feat(NONIRQ_POWER))
sched_rt_avg_update(rq, irq_delta);
}
static int irqtime_account_hi_update(void)
{
struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat;
unsigned long flags;
u64 latest_ns;
int ret = 0;
local_irq_save(flags);
latest_ns = this_cpu_read(cpu_hardirq_time);
if (cputime64_gt(nsecs_to_cputime64(latest_ns), cpustat->irq))
ret = 1;
local_irq_restore(flags);
return ret;
}
static int irqtime_account_si_update(void)
{
struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat;
unsigned long flags;
u64 latest_ns;
int ret = 0;
local_irq_save(flags);
latest_ns = this_cpu_read(cpu_softirq_time);
if (cputime64_gt(nsecs_to_cputime64(latest_ns), cpustat->softirq))
ret = 1;
local_irq_restore(flags);
return ret;
}
#else /* CONFIG_IRQ_TIME_ACCOUNTING */
#define sched_clock_irqtime (0)
static void update_rq_clock_task(struct rq *rq, s64 delta)
{
rq->clock_task += delta;
}
#endif /* CONFIG_IRQ_TIME_ACCOUNTING */
#include "sched_idletask.c"
#include "sched_fair.c"
#include "sched_rt.c"
#include "sched_autogroup.c"
#include "sched_stoptask.c"
#ifdef CONFIG_SCHED_DEBUG
# include "sched_debug.c"
#endif
void sched_set_stop_task(int cpu, struct task_struct *stop)
{
struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 };
struct task_struct *old_stop = cpu_rq(cpu)->stop;
if (stop) {
/*
* Make it appear like a SCHED_FIFO task; it's something
* userspace knows about and won't get confused by.
*
* Also, it will make PI more or less work without too
* much confusion -- but then, stop work should not
* rely on PI working anyway.
*/
sched_setscheduler_nocheck(stop, SCHED_FIFO, &param);
stop->sched_class = &stop_sched_class;
}
cpu_rq(cpu)->stop = stop;
if (old_stop) {
/*
* Reset it back to a normal scheduling class so that
* it can die in pieces.
*/
old_stop->sched_class = &rt_sched_class;
}
}
/*
* __normal_prio - return the priority that is based on the static prio
*/
static inline int __normal_prio(struct task_struct *p)
{
return p->static_prio;
}
/*
* Calculate the expected normal priority: i.e. priority
* without taking RT-inheritance into account. Might be
* boosted by interactivity modifiers. Changes upon fork,
* setprio syscalls, and whenever the interactivity
* estimator recalculates.
*/
static inline int normal_prio(struct task_struct *p)
{
int prio;
if (task_has_rt_policy(p))
prio = MAX_RT_PRIO-1 - p->rt_priority;
else
prio = __normal_prio(p);
return prio;
}
/*
* Calculate the current priority, i.e. the priority
* taken into account by the scheduler. This value might
* be boosted by RT tasks, or might be boosted by
* interactivity modifiers. Will be RT if the task got
* RT-boosted. If not then it returns p->normal_prio.
*/
static int effective_prio(struct task_struct *p)
{
p->normal_prio = normal_prio(p);
/*
* If we are RT tasks or we were boosted to RT priority,
* keep the priority unchanged. Otherwise, update priority
* to the normal priority:
*/
if (!rt_prio(p->prio))
return p->normal_prio;
return p->prio;
}
/**
* task_curr - is this task currently executing on a CPU?
* @p: the task in question.
*/
inline int task_curr(const struct task_struct *p)
{
return cpu_curr(task_cpu(p)) == p;
}
static inline void check_class_changed(struct rq *rq, struct task_struct *p,
const struct sched_class *prev_class,
int oldprio)
{
if (prev_class != p->sched_class) {
if (prev_class->switched_from)
prev_class->switched_from(rq, p);
p->sched_class->switched_to(rq, p);
} else if (oldprio != p->prio)
p->sched_class->prio_changed(rq, p, oldprio);
}
static void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags)
{
const struct sched_class *class;
if (p->sched_class == rq->curr->sched_class) {
rq->curr->sched_class->check_preempt_curr(rq, p, flags);
} else {
for_each_class(class) {
if (class == rq->curr->sched_class)
break;
if (class == p->sched_class) {
resched_task(rq->curr);
break;
}
}
}
/*
* A queue event has occurred, and we're going to schedule. In
* this case, we can save a useless back-to-back clock update.
*/
if (rq->curr->on_rq && test_tsk_need_resched(rq->curr))
rq->skip_clock_update = 1;
}
#ifdef CONFIG_SMP
/*
* Is this task likely cache-hot:
*/
static int
task_hot(struct task_struct *p, u64 now, struct sched_domain *sd)
{
s64 delta;
if (p->sched_class != &fair_sched_class)
return 0;
if (unlikely(p->policy == SCHED_IDLE))
return 0;
/*
* Buddy candidates are cache hot:
*/
if (sched_feat(CACHE_HOT_BUDDY) && this_rq()->nr_running &&
(&p->se == cfs_rq_of(&p->se)->next ||
&p->se == cfs_rq_of(&p->se)->last))
return 1;
if (sysctl_sched_migration_cost == -1)
return 1;
if (sysctl_sched_migration_cost == 0)
return 0;
delta = now - p->se.exec_start;
return delta < (s64)sysctl_sched_migration_cost;
}
void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
{
#ifdef CONFIG_SCHED_DEBUG
/*
* We should never call set_task_cpu() on a blocked task,
* ttwu() will sort out the placement.
*/
WARN_ON_ONCE(p->state != TASK_RUNNING && p->state != TASK_WAKING &&
!(task_thread_info(p)->preempt_count & PREEMPT_ACTIVE));
#ifdef CONFIG_LOCKDEP
/*
* The caller should hold either p->pi_lock or rq->lock, when changing
* a task's CPU. ->pi_lock for waking tasks, rq->lock for runnable tasks.
*
* sched_move_task() holds both and thus holding either pins the cgroup,
* see set_task_rq().
*
* Furthermore, all task_rq users should acquire both locks, see
* task_rq_lock().
*/
WARN_ON_ONCE(debug_locks && !(lockdep_is_held(&p->pi_lock) ||
lockdep_is_held(&task_rq(p)->lock)));
#endif
#endif
trace_sched_migrate_task(p, new_cpu);
if (task_cpu(p) != new_cpu) {
p->se.nr_migrations++;
perf_sw_event(PERF_COUNT_SW_CPU_MIGRATIONS, 1, 1, NULL, 0);
}
__set_task_cpu(p, new_cpu);
}
struct migration_arg {
struct task_struct *task;
int dest_cpu;
};
static int migration_cpu_stop(void *data);
/*
* wait_task_inactive - wait for a thread to unschedule.
*
* If @match_state is nonzero, it's the @p->state value just checked and
* not expected to change. If it changes, i.e. @p might have woken up,
* then return zero. When we succeed in waiting for @p to be off its CPU,
* we return a positive number (its total switch count). If a second call
* a short while later returns the same number, the caller can be sure that
* @p has remained unscheduled the whole time.
*
* The caller must ensure that the task *will* unschedule sometime soon,
* else this function might spin for a *long* time. This function can't
* be called with interrupts off, or it may introduce deadlock with
* smp_call_function() if an IPI is sent by the same process we are
* waiting to become inactive.
*/
unsigned long wait_task_inactive(struct task_struct *p, long match_state)
{
unsigned long flags;
int running, on_rq;
unsigned long ncsw;
struct rq *rq;
for (;;) {
/*
* We do the initial early heuristics without holding
* any task-queue locks at all. We'll only try to get
* the runqueue lock when things look like they will
* work out!
*/
rq = task_rq(p);
/*
* If the task is actively running on another CPU
* still, just relax and busy-wait without holding
* any locks.
*
* NOTE! Since we don't hold any locks, it's not
* even sure that "rq" stays as the right runqueue!
* But we don't care, since "task_running()" will
* return false if the runqueue has changed and p
* is actually now running somewhere else!
*/
while (task_running(rq, p)) {
if (match_state && unlikely(p->state != match_state))
return 0;
cpu_relax();
}
/*
* Ok, time to look more closely! We need the rq
* lock now, to be *sure*. If we're wrong, we'll
* just go back and repeat.
*/
rq = task_rq_lock(p, &flags);
trace_sched_wait_task(p);
running = task_running(rq, p);
on_rq = p->on_rq;
ncsw = 0;
if (!match_state || p->state == match_state)
ncsw = p->nvcsw | LONG_MIN; /* sets MSB */
task_rq_unlock(rq, p, &flags);
/*
* If it changed from the expected state, bail out now.
*/
if (unlikely(!ncsw))
break;
/*
* Was it really running after all now that we
* checked with the proper locks actually held?
*
* Oops. Go back and try again..
*/
if (unlikely(running)) {
cpu_relax();
continue;
}
/*
* It's not enough that it's not actively running,
* it must be off the runqueue _entirely_, and not
* preempted!
*
* So if it was still runnable (but just not actively
* running right now), it's preempted, and we should
* yield - it could be a while.
*/
if (unlikely(on_rq)) {
ktime_t to = ktime_set(0, NSEC_PER_SEC/HZ);
set_current_state(TASK_UNINTERRUPTIBLE);
schedule_hrtimeout(&to, HRTIMER_MODE_REL);
continue;
}
/*
* Ahh, all good. It wasn't running, and it wasn't
* runnable, which means that it will never become
* running in the future either. We're all done!
*/
break;
}
return ncsw;
}
/***
* kick_process - kick a running thread to enter/exit the kernel
* @p: the to-be-kicked thread
*
* Cause a process which is running on another CPU to enter
* kernel mode, without any delay (to get signals handled).
*
* NOTE: this function doesn't have to take the runqueue lock,
* because all it wants to ensure is that the remote task enters
* the kernel. If the IPI races and the task has been migrated
* to another CPU then no harm is done and the purpose has been
* achieved as well.
*/
void kick_process(struct task_struct *p)
{
int cpu;
preempt_disable();
cpu = task_cpu(p);
if ((cpu != smp_processor_id()) && task_curr(p))
smp_send_reschedule(cpu);
preempt_enable();
}
EXPORT_SYMBOL_GPL(kick_process);
#endif /* CONFIG_SMP */
#ifdef CONFIG_SMP
/*
* ->cpus_allowed is protected by both rq->lock and p->pi_lock
*/
static int select_fallback_rq(int cpu, struct task_struct *p)
{
int dest_cpu;
const struct cpumask *nodemask = cpumask_of_node(cpu_to_node(cpu));
/* Look for allowed, online CPU in same node. */
for_each_cpu_and(dest_cpu, nodemask, cpu_active_mask)
if (cpumask_test_cpu(dest_cpu, &p->cpus_allowed))
return dest_cpu;
/* Any allowed, online CPU? */
dest_cpu = cpumask_any_and(&p->cpus_allowed, cpu_active_mask);
if (dest_cpu < nr_cpu_ids)
return dest_cpu;
/* No more Mr. Nice Guy. */
dest_cpu = cpuset_cpus_allowed_fallback(p);
/*
* Don't tell them about moving exiting tasks or
* kernel threads (both mm NULL), since they never
* leave the kernel.
*/
if (p->mm && printk_ratelimit()) {
printk(KERN_INFO "process %d (%s) no longer affine to cpu%d\n",
task_pid_nr(p), p->comm, cpu);
}
return dest_cpu;
}
/*
* The caller (fork, wakeup) owns p->pi_lock, ->cpus_allowed is stable.
*/
static inline
int select_task_rq(struct task_struct *p, int sd_flags, int wake_flags)
{
int cpu = p->sched_class->select_task_rq(p, sd_flags, wake_flags);
/*
* In order not to call set_task_cpu() on a blocking task we need
* to rely on ttwu() to place the task on a valid ->cpus_allowed
* cpu.
*
* Since this is common to all placement strategies, this lives here.
*
* [ this allows ->select_task_rq() to simply return task_cpu(p) and
* not worry about this generic constraint ]
*/
if (unlikely(!cpumask_test_cpu(cpu, &p->cpus_allowed) ||
!cpu_online(cpu)))
cpu = select_fallback_rq(task_cpu(p), p);
return cpu;
}
static void update_avg(u64 *avg, u64 sample)
{
s64 diff = sample - *avg;
*avg += diff >> 3;
}
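/*
* update_avg() is an exponentially weighted moving average with a 1/8
* coefficient: avg += (sample - avg) / 8. Worked example (illustrative):
* starting from *avg = 0 and feeding sample = 800 repeatedly yields
* 100, 187, 263, ... converging on 800.
*/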
#endif
static void
ttwu_stat(struct task_struct *p, int cpu, int wake_flags)
{
#ifdef CONFIG_SCHEDSTATS
struct rq *rq = this_rq();
#ifdef CONFIG_SMP
int this_cpu = smp_processor_id();
if (cpu == this_cpu) {
schedstat_inc(rq, ttwu_local);
schedstat_inc(p, se.statistics.nr_wakeups_local);
} else {
struct sched_domain *sd;
schedstat_inc(p, se.statistics.nr_wakeups_remote);
rcu_read_lock();
for_each_domain(this_cpu, sd) {
if (cpumask_test_cpu(cpu, sched_domain_span(sd))) {
schedstat_inc(sd, ttwu_wake_remote);
break;
}
}
rcu_read_unlock();
}
if (wake_flags & WF_MIGRATED)
schedstat_inc(p, se.statistics.nr_wakeups_migrate);
#endif /* CONFIG_SMP */
schedstat_inc(rq, ttwu_count);
schedstat_inc(p, se.statistics.nr_wakeups);
if (wake_flags & WF_SYNC)
schedstat_inc(p, se.statistics.nr_wakeups_sync);
#endif /* CONFIG_SCHEDSTATS */
}
static void ttwu_activate(struct rq *rq, struct task_struct *p, int en_flags)
{
activate_task(rq, p, en_flags);
p->on_rq = 1;
/* if a worker is waking up, notify workqueue */
if (p->flags & PF_WQ_WORKER)
wq_worker_waking_up(p, cpu_of(rq));
}
/*
* Mark the task runnable and perform wakeup-preemption.
*/
static void
ttwu_do_wakeup(struct rq *rq, struct task_struct *p, int wake_flags)
{
trace_sched_wakeup(p, true);
check_preempt_curr(rq, p, wake_flags);
p->state = TASK_RUNNING;
#ifdef CONFIG_SMP
if (p->sched_class->task_woken)
p->sched_class->task_woken(rq, p);
if (unlikely(rq->idle_stamp)) {
u64 delta = rq->clock - rq->idle_stamp;
u64 max = 2*sysctl_sched_migration_cost;
if (delta > max)
rq->avg_idle = max;
else
update_avg(&rq->avg_idle, delta);
rq->idle_stamp = 0;
}
#endif
}
static void
ttwu_do_activate(struct rq *rq, struct task_struct *p, int wake_flags)
{
#ifdef CONFIG_SMP
if (p->sched_contributes_to_load)
rq->nr_uninterruptible--;
#endif
ttwu_activate(rq, p, ENQUEUE_WAKEUP | ENQUEUE_WAKING);
ttwu_do_wakeup(rq, p, wake_flags);
}
/*
* Called in case the task @p isn't fully descheduled from its runqueue;
* in this case we must do a remote wakeup. It's a 'light' wakeup though,
* since all we need to do is flip p->state to TASK_RUNNING, because
* the task is still ->on_rq.
*/
static int ttwu_remote(struct task_struct *p, int wake_flags)
{
struct rq *rq;
int ret = 0;
rq = __task_rq_lock(p);
if (p->on_rq) {
ttwu_do_wakeup(rq, p, wake_flags);
ret = 1;
}
__task_rq_unlock(rq);
return ret;
}
#ifdef CONFIG_SMP
static void sched_ttwu_do_pending(struct task_struct *list)
{
struct rq *rq = this_rq();
raw_spin_lock(&rq->lock);
while (list) {
struct task_struct *p = list;
list = list->wake_entry;
ttwu_do_activate(rq, p, 0);
}
raw_spin_unlock(&rq->lock);
}
#ifdef CONFIG_HOTPLUG_CPU
static void sched_ttwu_pending(void)
{
struct rq *rq = this_rq();
struct task_struct *list = xchg(&rq->wake_list, NULL);
if (!list)
return;
sched_ttwu_do_pending(list);
}
#endif /* CONFIG_HOTPLUG_CPU */
void scheduler_ipi(void)
{
struct rq *rq = this_rq();
struct task_struct *list = xchg(&rq->wake_list, NULL);
if (!list)
return;
/*
* Not all reschedule IPI handlers call irq_enter/irq_exit, since
* traditionally all their work was done from the interrupt return
* path. Now that we actually do some work, we need to make sure
* we do call them.
*
* Some archs already do call them, luckily irq_enter/exit nest
* properly.
*
* Arguably we should visit all archs and update all handlers,
* however, a fair share of IPIs are still resched-only, so this would
* somewhat pessimize the simple resched case.
*/
irq_enter();
sched_ttwu_do_pending(list);
irq_exit();
}
static void ttwu_queue_remote(struct task_struct *p, int cpu)
{
struct rq *rq = cpu_rq(cpu);
struct task_struct *next = rq->wake_list;
for (;;) {
struct task_struct *old = next;
p->wake_entry = next;
next = cmpxchg(&rq->wake_list, old, p);
if (next == old)
break;
}
if (!next)
smp_send_reschedule(cpu);
}
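/*
* The cmpxchg() loop above is the classic lock-free list push; only the
* caller that swings the list from empty sends the IPI, so concurrent
* wakers coalesce into one interrupt. The generic shape (illustrative
* sketch, not kernel code):
*/
#if 0
static void lockless_push(struct node **head, struct node *n)
{
struct node *old;
do {
old = *head;
n->next = old;
} while (cmpxchg(head, old, n) != old);
if (!old)
kick_consumer(); /* hypothetical; the scheduler uses smp_send_reschedule() */
}
#endif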
#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
static int ttwu_activate_remote(struct task_struct *p, int wake_flags)
{
struct rq *rq;
int ret = 0;
rq = __task_rq_lock(p);
if (p->on_cpu) {
ttwu_activate(rq, p, ENQUEUE_WAKEUP);
ttwu_do_wakeup(rq, p, wake_flags);
ret = 1;
}
__task_rq_unlock(rq);
return ret;
}
#endif /* __ARCH_WANT_INTERRUPTS_ON_CTXSW */
#endif /* CONFIG_SMP */
static void ttwu_queue(struct task_struct *p, int cpu)
{
struct rq *rq = cpu_rq(cpu);
#if defined(CONFIG_SMP)
if (sched_feat(TTWU_QUEUE) && cpu != smp_processor_id()) {
sched_clock_cpu(cpu); /* sync clocks x-cpu */
ttwu_queue_remote(p, cpu);
return;
}
#endif
raw_spin_lock(&rq->lock);
ttwu_do_activate(rq, p, 0);
raw_spin_unlock(&rq->lock);
}
/**
* try_to_wake_up - wake up a thread
* @p: the thread to be awakened
* @state: the mask of task states that can be woken
* @wake_flags: wake modifier flags (WF_*)
*
* Put it on the run-queue if it's not already there. The "current"
* thread is always on the run-queue (except when the actual
* re-schedule is in progress), and as such you're allowed to do
* the simpler "current->state = TASK_RUNNING" to mark yourself
* runnable without the overhead of this.
*
* Returns %true if @p was woken up, %false if it was already running
* or @state didn't match @p's state.
*/
static int
try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
{
unsigned long flags;
int cpu, success = 0;
smp_wmb();
raw_spin_lock_irqsave(&p->pi_lock, flags);
if (!(p->state & state))
goto out;
success = 1; /* we're going to change ->state */
cpu = task_cpu(p);
if (p->on_rq && ttwu_remote(p, wake_flags))
goto stat;
#ifdef CONFIG_SMP
/*
* If the owning (remote) cpu is still in the middle of schedule() with
* this task as prev, wait until it's done referencing the task.
*/
while (p->on_cpu) {
#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
/*
* In case the architecture enables interrupts in
* context_switch(), we cannot busy wait, since that
* would lead to deadlocks when an interrupt hits and
* tries to wake up @prev. So bail and do a complete
* remote wakeup.
*/
if (ttwu_activate_remote(p, wake_flags))
goto stat;
#else
cpu_relax();
#endif
}
/*
* Pairs with the smp_wmb() in finish_lock_switch().
*/
smp_rmb();
p->sched_contributes_to_load = !!task_contributes_to_load(p);
p->state = TASK_WAKING;
if (p->sched_class->task_waking)
p->sched_class->task_waking(p);
cpu = select_task_rq(p, SD_BALANCE_WAKE, wake_flags);
if (task_cpu(p) != cpu) {
wake_flags |= WF_MIGRATED;
set_task_cpu(p, cpu);
}
#endif /* CONFIG_SMP */
ttwu_queue(p, cpu);
stat:
ttwu_stat(p, cpu, wake_flags);
out:
raw_spin_unlock_irqrestore(&p->pi_lock, flags);
return success;
}
/**
* try_to_wake_up_local - try to wake up a local task with rq lock held
* @p: the thread to be awakened
*
* Put @p on the run-queue if it's not already there. The caller must
* ensure that this_rq() is locked, @p is bound to this_rq() and not
* the current task.
*/
static void try_to_wake_up_local(struct task_struct *p)
{
struct rq *rq = task_rq(p);
BUG_ON(rq != this_rq());
BUG_ON(p == current);
lockdep_assert_held(&rq->lock);
if (!raw_spin_trylock(&p->pi_lock)) {
raw_spin_unlock(&rq->lock);
raw_spin_lock(&p->pi_lock);
raw_spin_lock(&rq->lock);
}
if (!(p->state & TASK_NORMAL))
goto out;
if (!p->on_rq)
ttwu_activate(rq, p, ENQUEUE_WAKEUP);
ttwu_do_wakeup(rq, p, 0);
ttwu_stat(p, smp_processor_id(), 0);
out:
raw_spin_unlock(&p->pi_lock);
}
/**
* wake_up_process - Wake up a specific process
* @p: The process to be woken up.
*
* Attempt to wake up the nominated process and move it to the set of runnable
* processes. Returns 1 if the process was woken up, 0 if it was already
* running.
*
* It may be assumed that this function implies a write memory barrier before
* changing the task state if and only if any tasks are woken up.
*/
int wake_up_process(struct task_struct *p)
{
return try_to_wake_up(p, TASK_ALL, 0);
}
EXPORT_SYMBOL(wake_up_process);
int wake_up_state(struct task_struct *p, unsigned int state)
{
return try_to_wake_up(p, state, 0);
}
/*
* Perform scheduler related setup for a newly forked process p.
* p is forked by current.
*
* __sched_fork() is basic setup used by init_idle() too:
*/
static void __sched_fork(struct task_struct *p)
{
p->on_rq = 0;
p->se.on_rq = 0;
p->se.exec_start = 0;
p->se.sum_exec_runtime = 0;
p->se.prev_sum_exec_runtime = 0;
p->se.nr_migrations = 0;
p->se.vruntime = 0;
INIT_LIST_HEAD(&p->se.group_node);
#ifdef CONFIG_SCHEDSTATS
memset(&p->se.statistics, 0, sizeof(p->se.statistics));
#endif
INIT_LIST_HEAD(&p->rt.run_list);
#ifdef CONFIG_PREEMPT_NOTIFIERS
INIT_HLIST_HEAD(&p->preempt_notifiers);
#endif
}
/*
* fork()/clone()-time setup:
*/
void sched_fork(struct task_struct *p)
{
unsigned long flags;
int cpu = get_cpu();
__sched_fork(p);
/*
* We mark the process as running here. This guarantees that
* nobody will actually run it, and a signal or other external
* event cannot wake it up and insert it on the runqueue either.
*/
p->state = TASK_RUNNING;
/*
* Revert to default priority/policy on fork if requested.
*/
if (unlikely(p->sched_reset_on_fork)) {
if (p->policy == SCHED_FIFO || p->policy == SCHED_RR) {
p->policy = SCHED_NORMAL;
p->normal_prio = p->static_prio;
}
if (PRIO_TO_NICE(p->static_prio) < 0) {
p->static_prio = NICE_TO_PRIO(0);
p->normal_prio = p->static_prio;
set_load_weight(p);
}
/*
* We don't need the reset flag anymore after the fork. It has
* fulfilled its duty:
*/
p->sched_reset_on_fork = 0;
}
/*
* Make sure we do not leak PI boosting priority to the child.
*/
p->prio = current->normal_prio;
if (!rt_prio(p->prio))
p->sched_class = &fair_sched_class;
if (p->sched_class->task_fork)
p->sched_class->task_fork(p);
/*
* The child is not yet in the pid-hash so no cgroup attach races,
* and the cgroup is pinned to this child because cgroup_fork()
* runs before sched_fork().
*
* Silence PROVE_RCU.
*/
raw_spin_lock_irqsave(&p->pi_lock, flags);
set_task_cpu(p, cpu);
raw_spin_unlock_irqrestore(&p->pi_lock, flags);
#if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
if (likely(sched_info_on()))
memset(&p->sched_info, 0, sizeof(p->sched_info));
#endif
#if defined(CONFIG_SMP)
p->on_cpu = 0;
#endif
#ifdef CONFIG_PREEMPT
/* Want to start with kernel preemption disabled. */
task_thread_info(p)->preempt_count = 1;
#endif
#ifdef CONFIG_SMP
plist_node_init(&p->pushable_tasks, MAX_PRIO);
#endif
put_cpu();
}
/*
* wake_up_new_task - wake up a newly created task for the first time.
*
* This function will do some initial scheduler statistics housekeeping
* that must be done for every newly created context, then puts the task
* on the runqueue and wakes it.
*/
void wake_up_new_task(struct task_struct *p)
{
unsigned long flags;
struct rq *rq;
raw_spin_lock_irqsave(&p->pi_lock, flags);
#ifdef CONFIG_SMP
/*
* Fork balancing, do it here and not earlier because:
* - cpus_allowed can change in the fork path
* - any previously selected cpu might disappear through hotplug
*/
set_task_cpu(p, select_task_rq(p, SD_BALANCE_FORK, 0));
#endif
rq = __task_rq_lock(p);
activate_task(rq, p, 0);
p->on_rq = 1;
trace_sched_wakeup_new(p, true);
check_preempt_curr(rq, p, WF_FORK);
#ifdef CONFIG_SMP
if (p->sched_class->task_woken)
p->sched_class->task_woken(rq, p);
#endif
task_rq_unlock(rq, p, &flags);
}
#ifdef CONFIG_PREEMPT_NOTIFIERS
/**
* preempt_notifier_register - tell me when current is being preempted & rescheduled
* @notifier: notifier struct to register
*/
void preempt_notifier_register(struct preempt_notifier *notifier)
{
hlist_add_head(&notifier->link, &current->preempt_notifiers);
}
EXPORT_SYMBOL_GPL(preempt_notifier_register);
/**
* preempt_notifier_unregister - no longer interested in preemption notifications
* @notifier: notifier struct to unregister
*
* This is safe to call from within a preemption notifier.
*/
void preempt_notifier_unregister(struct preempt_notifier *notifier)
{
hlist_del(&notifier->link);
}
EXPORT_SYMBOL_GPL(preempt_notifier_unregister);
static void fire_sched_in_preempt_notifiers(struct task_struct *curr)
{
struct preempt_notifier *notifier;
struct hlist_node *node;
hlist_for_each_entry(notifier, node, &curr->preempt_notifiers, link)
notifier->ops->sched_in(notifier, raw_smp_processor_id());
}
static void
fire_sched_out_preempt_notifiers(struct task_struct *curr,
struct task_struct *next)
{
struct preempt_notifier *notifier;
struct hlist_node *node;
hlist_for_each_entry(notifier, node, &curr->preempt_notifiers, link)
notifier->ops->sched_out(notifier, next);
}
#else /* !CONFIG_PREEMPT_NOTIFIERS */
static void fire_sched_in_preempt_notifiers(struct task_struct *curr)
{
}
static void
fire_sched_out_preempt_notifiers(struct task_struct *curr,
struct task_struct *next)
{
}
#endif /* CONFIG_PREEMPT_NOTIFIERS */
/**
* prepare_task_switch - prepare to switch tasks
* @rq: the runqueue preparing to switch
* @prev: the current task that is being switched out
* @next: the task we are going to switch to.
*
* This is called with the rq lock held and interrupts off. It must
* be paired with a subsequent finish_task_switch after the context
* switch.
*
* prepare_task_switch sets up locking and calls architecture specific
* hooks.
*/
static inline void
prepare_task_switch(struct rq *rq, struct task_struct *prev,
struct task_struct *next)
{
sched_info_switch(prev, next);
perf_event_task_sched_out(prev, next);
fire_sched_out_preempt_notifiers(prev, next);
prepare_lock_switch(rq, next);
prepare_arch_switch(next);
trace_sched_switch(prev, next);
}
/**
* finish_task_switch - clean up after a task-switch
* @rq: runqueue associated with task-switch
* @prev: the thread we just switched away from.
*
* finish_task_switch must be called after the context switch, paired
* with a prepare_task_switch call before the context switch.
* finish_task_switch will reconcile locking set up by prepare_task_switch,
* and do any other architecture-specific cleanup actions.
*
* Note that we may have delayed dropping an mm in context_switch(). If
* so, we finish that here outside of the runqueue lock. (Doing it
* with the lock held can cause deadlocks; see schedule() for
* details.)
*/
static void finish_task_switch(struct rq *rq, struct task_struct *prev)
__releases(rq->lock)
{
struct mm_struct *mm = rq->prev_mm;
long prev_state;
rq->prev_mm = NULL;
/*
* A task struct has one reference for the use as "current".
* If a task dies, then it sets TASK_DEAD in tsk->state and calls
* schedule one last time. The schedule call will never return, and
* the scheduled task must drop that reference.
* The test for TASK_DEAD must occur while the runqueue locks are
* still held, otherwise prev could be scheduled on another cpu, die
* there before we look at prev->state, and then the reference would
* be dropped twice.
* Manfred Spraul <manfred@colorfullife.com>
*/
prev_state = prev->state;
finish_arch_switch(prev);
#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
local_irq_disable();
#endif /* __ARCH_WANT_INTERRUPTS_ON_CTXSW */
perf_event_task_sched_in(current);
#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
local_irq_enable();
#endif /* __ARCH_WANT_INTERRUPTS_ON_CTXSW */
finish_lock_switch(rq, prev);
fire_sched_in_preempt_notifiers(current);
if (mm)
mmdrop(mm);
if (unlikely(prev_state == TASK_DEAD)) {
/*
* Remove function-return probe instances associated with this
* task and put them back on the free list.
*/
kprobe_flush_task(prev);
put_task_struct(prev);
}
}
#ifdef CONFIG_SMP
/* assumes rq->lock is held */
static inline void pre_schedule(struct rq *rq, struct task_struct *prev)
{
if (prev->sched_class->pre_schedule)
prev->sched_class->pre_schedule(rq, prev);
}
/* rq->lock is NOT held, but preemption is disabled */
static inline void post_schedule(struct rq *rq)
{
if (rq->post_schedule) {
unsigned long flags;
raw_spin_lock_irqsave(&rq->lock, flags);
if (rq->curr->sched_class->post_schedule)
rq->curr->sched_class->post_schedule(rq);
raw_spin_unlock_irqrestore(&rq->lock, flags);
rq->post_schedule = 0;
}
}
#else
static inline void pre_schedule(struct rq *rq, struct task_struct *p)
{
}
static inline void post_schedule(struct rq *rq)
{
}
#endif
/**
* schedule_tail - first thing a freshly forked thread must call.
* @prev: the thread we just switched away from.
*/
asmlinkage void schedule_tail(struct task_struct *prev)
__releases(rq->lock)
{
struct rq *rq = this_rq();
finish_task_switch(rq, prev);
/*
* FIXME: do we need to worry about rq being invalidated by the
* task_switch?
*/
post_schedule(rq);
#ifdef __ARCH_WANT_UNLOCKED_CTXSW
/* In this case, finish_task_switch does not reenable preemption */
preempt_enable();
#endif
if (current->set_child_tid)
put_user(task_pid_vnr(current), current->set_child_tid);
}
/*
* context_switch - switch to the new MM and the new
* thread's register state.
*/
static inline void
context_switch(struct rq *rq, struct task_struct *prev,
struct task_struct *next)
{
struct mm_struct *mm, *oldmm;
prepare_task_switch(rq, prev, next);
mm = next->mm;
oldmm = prev->active_mm;
/*
* For paravirt, this is coupled with an exit in switch_to to
* combine the page table reload and the switch backend into
* one hypercall.
*/
arch_start_context_switch(prev);
if (!mm) {
next->active_mm = oldmm;
atomic_inc(&oldmm->mm_count);
enter_lazy_tlb(oldmm, next);
} else
switch_mm(oldmm, mm, next);
if (!prev->mm) {
prev->active_mm = NULL;
rq->prev_mm = oldmm;
}
/*
* The runqueue lock will be released by the next
* task (which is an invalid locking op, but in the case
* of the scheduler it's an obvious special case), so we
* do an early lockdep release here:
*/
#ifndef __ARCH_WANT_UNLOCKED_CTXSW
spin_release(&rq->lock.dep_map, 1, _THIS_IP_);
#endif
/* Here we just switch the register state and the stack. */
switch_to(prev, next, prev);
barrier();
/*
* this_rq must be evaluated again because prev may have moved
* CPUs since it called schedule(), thus the 'rq' on its stack
* frame will be invalid.
*/
finish_task_switch(this_rq(), prev);
}
/*
* nr_running, nr_uninterruptible and nr_context_switches:
*
* externally visible scheduler statistics: current number of runnable
* threads, current number of uninterruptible-sleeping threads, total
* number of context switches performed since bootup.
*/
unsigned long nr_running(void)
{
unsigned long i, sum = 0;
for_each_online_cpu(i)
sum += cpu_rq(i)->nr_running;
return sum;
}
unsigned long nr_uninterruptible(void)
{
unsigned long i, sum = 0;
for_each_possible_cpu(i)
sum += cpu_rq(i)->nr_uninterruptible;
/*
* Since we read the counters locklessly, the sum might be slightly
* inaccurate. Do not allow it to go below zero though:
*/
if (unlikely((long)sum < 0))
sum = 0;
return sum;
}
unsigned long long nr_context_switches(void)
{
int i;
unsigned long long sum = 0;
for_each_possible_cpu(i)
sum += cpu_rq(i)->nr_switches;
return sum;
}
unsigned long nr_iowait(void)
{
unsigned long i, sum = 0;
for_each_possible_cpu(i)
sum += atomic_read(&cpu_rq(i)->nr_iowait);
return sum;
}
unsigned long nr_iowait_cpu(int cpu)
{
struct rq *this = cpu_rq(cpu);
return atomic_read(&this->nr_iowait);
}
unsigned long this_cpu_load(void)
{
struct rq *this = this_rq();
return this->cpu_load[0];
}
/* Variables and functions for calc_load */
static atomic_long_t calc_load_tasks;
static unsigned long calc_load_update;
unsigned long avenrun[3];
EXPORT_SYMBOL(avenrun);
static long calc_load_fold_active(struct rq *this_rq)
{
long nr_active, delta = 0;
nr_active = this_rq->nr_running;
nr_active += (long) this_rq->nr_uninterruptible;
if (nr_active != this_rq->calc_load_active) {
delta = nr_active - this_rq->calc_load_active;
this_rq->calc_load_active = nr_active;
}
return delta;
}
static unsigned long
calc_load(unsigned long load, unsigned long exp, unsigned long active)
{
load *= exp;
load += active * (FIXED_1 - exp);
load += 1UL << (FSHIFT - 1);
return load >> FSHIFT;
}
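/*
* Worked example (illustrative): FSHIFT = 11, FIXED_1 = 2048 and
* EXP_1 = 1884 (~2048/e^(5s/1min)). Starting from avenrun[0] = 0 with
* one runnable task (active = 1 * FIXED_1 = 2048):
*
* load = (0 * 1884 + 2048 * (2048 - 1884) + 1024) >> 11 = 164
*
* so the 1-minute average climbs to 164/2048 ~= 0.08 after a single
* 5-second LOAD_FREQ interval, matching 1 - e^(-5/60) ~= 0.08.
*/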
#ifdef CONFIG_NO_HZ
/*
* For NO_HZ we delay the active fold to the next LOAD_FREQ update.
*
* When making the ILB scale, we should try to pull this in as well.
*/
static atomic_long_t calc_load_tasks_idle;
static void calc_load_account_idle(struct rq *this_rq)
{
long delta;
delta = calc_load_fold_active(this_rq);
if (delta)
atomic_long_add(delta, &calc_load_tasks_idle);
}
static long calc_load_fold_idle(void)
{
long delta = 0;
/*
* It's got a race; we don't care...
*/
if (atomic_long_read(&calc_load_tasks_idle))
delta = atomic_long_xchg(&calc_load_tasks_idle, 0);
return delta;
}
/**
* fixed_power_int - compute: x^n, in O(log n) time
*
* @x: base of the power
* @frac_bits: fractional bits of @x
* @n: power to raise @x to.
*
* By exploiting the relation between the definition of the natural power
* function: x^n := x*x*...*x (x multiplied by itself for n times), and
* the binary encoding of numbers used by computers: n := \Sum n_i * 2^i,
* (where: n_i \elem {0, 1}, the binary vector representing n),
* we find: x^n := x^(\Sum n_i * 2^i) := \Prod x^(n_i * 2^i), which is
* of course trivially computable in O(log_2 n), the length of our binary
* vector.
*/
static unsigned long
fixed_power_int(unsigned long x, unsigned int frac_bits, unsigned int n)
{
unsigned long result = 1UL << frac_bits;
if (n) for (;;) {
if (n & 1) {
result *= x;
result += 1UL << (frac_bits - 1);
result >>= frac_bits;
}
n >>= 1;
if (!n)
break;
x *= x;
x += 1UL << (frac_bits - 1);
x >>= frac_bits;
}
return result;
}
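/*
* Worked example (illustrative): raise 0.5 to the 3rd power with 8
* fractional bits, i.e. x = 128 where 256 represents 1.0, n = 3 (0b11):
*
* bit 0 set: result = (256 * 128 + 128) >> 8 = 128 (x^1)
* square: x = (128 * 128 + 128) >> 8 = 64 (x^2)
* bit 1 set: result = (128 * 64 + 128) >> 8 = 32 (x^3)
*
* 32/256 = 0.125 = 0.5^3; the +128 terms round each intermediate result
* to the nearest representable value.
*/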
/*
* a1 = a0 * e + a * (1 - e)
*
* a2 = a1 * e + a * (1 - e)
* = (a0 * e + a * (1 - e)) * e + a * (1 - e)
* = a0 * e^2 + a * (1 - e) * (1 + e)
*
* a3 = a2 * e + a * (1 - e)
* = (a0 * e^2 + a * (1 - e) * (1 + e)) * e + a * (1 - e)
* = a0 * e^3 + a * (1 - e) * (1 + e + e^2)
*
* ...
*
* an = a0 * e^n + a * (1 - e) * (1 + e + ... + e^(n-1))   [1]
* = a0 * e^n + a * (1 - e) * (1 - e^n)/(1 - e)
* = a0 * e^n + a * (1 - e^n)
*
* [1] application of the geometric series:
*
* n 1 - x^(n+1)
* S_n := \Sum x^i = -------------
* i=0 1 - x
*/
static unsigned long
calc_load_n(unsigned long load, unsigned long exp,
unsigned long active, unsigned int n)
{
return calc_load(load, fixed_power_int(exp, FSHIFT, n), active);
}
/*
* NO_HZ can leave us missing all per-cpu ticks calling
* calc_load_account_active(), but since an idle CPU folds its delta into
* calc_load_tasks_idle per calc_load_account_idle(), all we need to do is fold
* in the pending idle delta if our idle period crossed a load cycle boundary.
*
* Once we've updated the global active value, we need to apply the exponential
* weights adjusted to the number of cycles missed.
*/
static void calc_global_nohz(unsigned long ticks)
{
long delta, active, n;
if (time_before(jiffies, calc_load_update))
return;
/*
* If we crossed a calc_load_update boundary, make sure to fold
* any pending idle changes, the respective CPUs might have
* missed the tick driven calc_load_account_active() update
* due to NO_HZ.
*/
delta = calc_load_fold_idle();
if (delta)
atomic_long_add(delta, &calc_load_tasks);
/*
* If we were idle for multiple load cycles, apply them.
*/
if (ticks >= LOAD_FREQ) {
n = ticks / LOAD_FREQ;
active = atomic_long_read(&calc_load_tasks);
active = active > 0 ? active * FIXED_1 : 0;
avenrun[0] = calc_load_n(avenrun[0], EXP_1, active, n);
avenrun[1] = calc_load_n(avenrun[1], EXP_5, active, n);
avenrun[2] = calc_load_n(avenrun[2], EXP_15, active, n);
calc_load_update += n * LOAD_FREQ;
}
/*
* It's possible the remainder of the above division also crosses
* a LOAD_FREQ period; the regular check in calc_global_load(),
* which comes after this, will take care of that.
*
* Consider us being 11 ticks before a cycle completion, and us
* sleeping for 4*LOAD_FREQ + 22 ticks, then the above code will
* age us 4 cycles, and the test in calc_global_load() will
* pick up the final one.
*/
}
#else
static void calc_load_account_idle(struct rq *this_rq)
{
}
static inline long calc_load_fold_idle(void)
{
return 0;
}
static void calc_global_nohz(unsigned long ticks)
{
}
#endif
/**
* get_avenrun - get the load average array
* @loads: pointer to dest load array
* @offset: offset to add
* @shift: shift count to shift the result left
*
* These values are estimates at best, so no need for locking.
*/
void get_avenrun(unsigned long *loads, unsigned long offset, int shift)
{
loads[0] = (avenrun[0] + offset) << shift;
loads[1] = (avenrun[1] + offset) << shift;
loads[2] = (avenrun[2] + offset) << shift;
}
/*
* calc_global_load - update the avenrun load estimates 10 ticks after the
* CPUs have updated calc_load_tasks.
*/
void calc_global_load(unsigned long ticks)
{
long active;
calc_global_nohz(ticks);
if (time_before(jiffies, calc_load_update + 10))
return;
active = atomic_long_read(&calc_load_tasks);
active = active > 0 ? active * FIXED_1 : 0;
avenrun[0] = calc_load(avenrun[0], EXP_1, active);
avenrun[1] = calc_load(avenrun[1], EXP_5, active);
avenrun[2] = calc_load(avenrun[2], EXP_15, active);
calc_load_update += LOAD_FREQ;
}
/*
* Called from update_cpu_load() to periodically update this CPU's
* active count.
*/
static void calc_load_account_active(struct rq *this_rq)
{
long delta;
if (time_before(jiffies, this_rq->calc_load_update))
return;
delta = calc_load_fold_active(this_rq);
delta += calc_load_fold_idle();
if (delta)
atomic_long_add(delta, &calc_load_tasks);
this_rq->calc_load_update += LOAD_FREQ;
}
/*
* The exact cpuload at various idx values, calculated at every tick would be
* load = (2^idx - 1) / 2^idx * load + 1 / 2^idx * cur_load
*
* If a cpu misses updates for n-1 ticks (as it was idle) and update gets called
* on nth tick when cpu may be busy, then we have:
* load = ((2^idx - 1) / 2^idx)^(n-1) * load
* load = ((2^idx - 1) / 2^idx) * load + 1 / 2^idx * cur_load
*
* decay_load_missed() below does efficient calculation of
* load = ((2^idx - 1) / 2^idx)^(n-1) * load
* avoiding 0..n-1 loop doing load = ((2^idx - 1) / 2^idx) * load
*
* The calculation is approximated on a 128 point scale.
* degrade_zero_ticks is the number of ticks after which load at any
* particular idx is approximated to be zero.
* degrade_factor is a precomputed table, a row for each load idx.
* Each column corresponds to degradation factor for a power of two ticks,
* based on 128 point scale.
* Example:
* row 2, col 3 (=12) says that the degradation at load idx 2 after
* 8 ticks is 12/128 (which is an approximation of exact factor 3^8/4^8).
*
* With this power of 2 load factors, we can degrade the load n times
* by looking at 1 bits in n and doing as many mult/shift instead of
* n mult/shifts needed by the exact degradation.
*/
#define DEGRADE_SHIFT 7
static const unsigned char
degrade_zero_ticks[CPU_LOAD_IDX_MAX] = {0, 8, 32, 64, 128};
static const unsigned char
degrade_factor[CPU_LOAD_IDX_MAX][DEGRADE_SHIFT + 1] = {
{0, 0, 0, 0, 0, 0, 0, 0},
{64, 32, 8, 0, 0, 0, 0, 0},
{96, 72, 40, 12, 1, 0, 0},
{112, 98, 75, 43, 15, 1, 0},
{120, 112, 98, 76, 45, 16, 2} };
/*
* Update cpu_load for any missed ticks due to tickless idle. The backlog
* only builds up while the CPU is idle, so we just decay the old load
* without adding any new load.
*/
static unsigned long
decay_load_missed(unsigned long load, unsigned long missed_updates, int idx)
{
int j = 0;
if (!missed_updates)
return load;
if (missed_updates >= degrade_zero_ticks[idx])
return 0;
if (idx == 1)
return load >> missed_updates;
while (missed_updates) {
if (missed_updates % 2)
load = (load * degrade_factor[idx][j]) >> DEGRADE_SHIFT;
missed_updates >>= 1;
j++;
}
return load;
}
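/*
* Worked example (illustrative): load = 1280 at idx = 2 with 8 missed
* ticks. 8 = 0b1000, so only the j = 3 bit fires a single multiply:
*
* load = 1280 * degrade_factor[2][3] >> 7 = 1280 * 12 / 128 = 120
*
* versus eight successive (3/4) multiplies for the exact decay,
* 1280 * (3/4)^8 ~= 128, within the documented approximation error.
*/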
/*
* Update rq->cpu_load[] statistics. This function is usually called every
* scheduler tick (TICK_NSEC). With tickless idle this will not be called
* every tick. We fix it up based on jiffies.
*/
static void update_cpu_load(struct rq *this_rq)
{
unsigned long this_load = this_rq->load.weight;
unsigned long curr_jiffies = jiffies;
unsigned long pending_updates;
int i, scale;
this_rq->nr_load_updates++;
/* Avoid repeated calls on same jiffy, when moving in and out of idle */
if (curr_jiffies == this_rq->last_load_update_tick)
return;
pending_updates = curr_jiffies - this_rq->last_load_update_tick;
this_rq->last_load_update_tick = curr_jiffies;
/* Update our load: */
this_rq->cpu_load[0] = this_load; /* Fasttrack for idx 0 */
for (i = 1, scale = 2; i < CPU_LOAD_IDX_MAX; i++, scale += scale) {
unsigned long old_load, new_load;
/* scale is effectively 1 << i now, and >> i divides by scale */
old_load = this_rq->cpu_load[i];
old_load = decay_load_missed(old_load, pending_updates - 1, i);
new_load = this_load;
/*
* Round up the averaging division if load is increasing. This
* prevents us from getting stuck on 9 if the load is 10, for
* example.
*/
if (new_load > old_load)
new_load += scale - 1;
this_rq->cpu_load[i] = (old_load * (scale - 1) + new_load) >> i;
}
sched_avg_update(this_rq);
}
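/*
* Worked example for the loop above (illustrative): with no missed
* ticks, at i = 2 (scale = 4), old_load = 400 and this_load = 800:
*
* cpu_load[2] = (400 * 3 + (800 + 3)) >> 2 = 500
*
* each tick the new sample contributes 1/scale; the "+ scale - 1"
* round-up applies only because the load is increasing.
*/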
static void update_cpu_load_active(struct rq *this_rq)
{
update_cpu_load(this_rq);
calc_load_account_active(this_rq);
}
#ifdef CONFIG_SMP
/*
* sched_exec - execve() is a valuable balancing opportunity, because at
* this point the task has the smallest effective memory and cache footprint.
*/
void sched_exec(void)
{
struct task_struct *p = current;
unsigned long flags;
int dest_cpu;
raw_spin_lock_irqsave(&p->pi_lock, flags);
dest_cpu = p->sched_class->select_task_rq(p, SD_BALANCE_EXEC, 0);
if (dest_cpu == smp_processor_id())
goto unlock;
if (likely(cpu_active(dest_cpu))) {
struct migration_arg arg = { p, dest_cpu };
raw_spin_unlock_irqrestore(&p->pi_lock, flags);
stop_one_cpu(task_cpu(p), migration_cpu_stop, &arg);
return;
}
unlock:
raw_spin_unlock_irqrestore(&p->pi_lock, flags);
}
#endif
DEFINE_PER_CPU(struct kernel_stat, kstat);
EXPORT_PER_CPU_SYMBOL(kstat);
/*
* Return any ns on the sched_clock that have not yet been accounted to
* @p in case that task is currently running.
*
* Called with task_rq_lock() held on @rq.
*/
static u64 do_task_delta_exec(struct task_struct *p, struct rq *rq)
{
u64 ns = 0;
if (task_current(rq, p)) {
update_rq_clock(rq);
ns = rq->clock_task - p->se.exec_start;
if ((s64)ns < 0)
ns = 0;
}
return ns;
}
unsigned long long task_delta_exec(struct task_struct *p)
{
unsigned long flags;
struct rq *rq;
u64 ns = 0;
rq = task_rq_lock(p, &flags);
ns = do_task_delta_exec(p, rq);
task_rq_unlock(rq, p, &flags);
return ns;
}
/*
* Return accounted runtime for the task.
* In case the task is currently running, return the runtime plus current's
* pending runtime that has not been accounted yet.
*/
unsigned long long task_sched_runtime(struct task_struct *p)
{
unsigned long flags;
struct rq *rq;
u64 ns = 0;
rq = task_rq_lock(p, &flags);
ns = p->se.sum_exec_runtime + do_task_delta_exec(p, rq);
task_rq_unlock(rq, p, &flags);
return ns;
}
/*
* Account user cpu time to a process.
* @p: the process that the cpu time gets accounted to
* @cputime: the cpu time spent in user space since the last update
* @cputime_scaled: cputime scaled by cpu frequency
*/
void account_user_time(struct task_struct *p, cputime_t cputime,
cputime_t cputime_scaled)
{
struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat;
cputime64_t tmp;
/* Add user time to process. */
p->utime = cputime_add(p->utime, cputime);
p->utimescaled = cputime_add(p->utimescaled, cputime_scaled);
account_group_user_time(p, cputime);
/* Add user time to cpustat. */
tmp = cputime_to_cputime64(cputime);
if (TASK_NICE(p) > 0)
cpustat->nice = cputime64_add(cpustat->nice, tmp);
else
cpustat->user = cputime64_add(cpustat->user, tmp);
cpuacct_update_stats(p, CPUACCT_STAT_USER, cputime);
/* Account for user time used */
acct_update_integrals(p);
}
/*
* Account guest cpu time to a process.
* @p: the process that the cpu time gets accounted to
* @cputime: the cpu time spent in virtual machine since the last update
* @cputime_scaled: cputime scaled by cpu frequency
*/
static void account_guest_time(struct task_struct *p, cputime_t cputime,
cputime_t cputime_scaled)
{
cputime64_t tmp;
struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat;
tmp = cputime_to_cputime64(cputime);
/* Add guest time to process. */
p->utime = cputime_add(p->utime, cputime);
p->utimescaled = cputime_add(p->utimescaled, cputime_scaled);
account_group_user_time(p, cputime);
p->gtime = cputime_add(p->gtime, cputime);
/* Add guest time to cpustat. */
if (TASK_NICE(p) > 0) {
cpustat->nice = cputime64_add(cpustat->nice, tmp);
cpustat->guest_nice = cputime64_add(cpustat->guest_nice, tmp);
} else {
cpustat->user = cputime64_add(cpustat->user, tmp);
cpustat->guest = cputime64_add(cpustat->guest, tmp);
}
}
/*
* Account system cpu time to a process and desired cpustat field
* @p: the process that the cpu time gets accounted to
* @cputime: the cpu time spent in kernel space since the last update
* @cputime_scaled: cputime scaled by cpu frequency
* @target_cputime64: pointer to cpustat field that has to be updated
*/
static inline
void __account_system_time(struct task_struct *p, cputime_t cputime,
cputime_t cputime_scaled, cputime64_t *target_cputime64)
{
cputime64_t tmp = cputime_to_cputime64(cputime);
/* Add system time to process. */
p->stime = cputime_add(p->stime, cputime);
p->stimescaled = cputime_add(p->stimescaled, cputime_scaled);
account_group_system_time(p, cputime);
/* Add system time to cpustat. */
*target_cputime64 = cputime64_add(*target_cputime64, tmp);
cpuacct_update_stats(p, CPUACCT_STAT_SYSTEM, cputime);
/* Account for system time used */
acct_update_integrals(p);
}
/*
* Account system cpu time to a process.
* @p: the process that the cpu time gets accounted to
* @hardirq_offset: the offset to subtract from hardirq_count()
* @cputime: the cpu time spent in kernel space since the last update
* @cputime_scaled: cputime scaled by cpu frequency
*/
void account_system_time(struct task_struct *p, int hardirq_offset,
cputime_t cputime, cputime_t cputime_scaled)
{
struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat;
cputime64_t *target_cputime64;
if ((p->flags & PF_VCPU) && (irq_count() - hardirq_offset == 0)) {
account_guest_time(p, cputime, cputime_scaled);
return;
}
if (hardirq_count() - hardirq_offset)
target_cputime64 = &cpustat->irq;
else if (in_serving_softirq())
target_cputime64 = &cpustat->softirq;
else
target_cputime64 = &cpustat->system;
__account_system_time(p, cputime, cputime_scaled, target_cputime64);
}
/*
* Account for involuntary wait time.
* @cputime: the cpu time spent in involuntary wait
*/
void account_steal_time(cputime_t cputime)
{
struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat;
cputime64_t cputime64 = cputime_to_cputime64(cputime);
cpustat->steal = cputime64_add(cpustat->steal, cputime64);
}
/*
* Account for idle time.
* @cputime: the cpu time spent in idle wait
*/
void account_idle_time(cputime_t cputime)
{
struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat;
cputime64_t cputime64 = cputime_to_cputime64(cputime);
struct rq *rq = this_rq();
if (atomic_read(&rq->nr_iowait) > 0)
cpustat->iowait = cputime64_add(cpustat->iowait, cputime64);
else
cpustat->idle = cputime64_add(cpustat->idle, cputime64);
}
#ifndef CONFIG_VIRT_CPU_ACCOUNTING
#ifdef CONFIG_IRQ_TIME_ACCOUNTING
/*
* Account a tick to a process and cpustat
* @p: the process that the cpu time gets accounted to
* @user_tick: is the tick from userspace
* @rq: the pointer to rq
*
* Tick demultiplexing follows the order
* - pending hardirq update
* - pending softirq update
* - user_time
* - idle_time
* - system time
* - check for guest_time
* - else account as system_time
*
* The check for hardirq is done for both system and user time, since no
* timer goes off while we are on hardirq and hence we may never get an
* opportunity to account that time as system time otherwise.
* p->stime and friends are updated only on the system-time path, not on
* irq/softirq, as those no longer count toward task exec_runtime.
*/
static void irqtime_account_process_tick(struct task_struct *p, int user_tick,
struct rq *rq)
{
cputime_t one_jiffy_scaled = cputime_to_scaled(cputime_one_jiffy);
cputime64_t tmp = cputime_to_cputime64(cputime_one_jiffy);
struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat;
if (irqtime_account_hi_update()) {
cpustat->irq = cputime64_add(cpustat->irq, tmp);
} else if (irqtime_account_si_update()) {
cpustat->softirq = cputime64_add(cpustat->softirq, tmp);
} else if (this_cpu_ksoftirqd() == p) {
/*
* ksoftirqd time does not get accounted in cpu_softirq_time.
* So, we have to handle it separately here.
* Also, p->stime needs to be updated for ksoftirqd.
*/
__account_system_time(p, cputime_one_jiffy, one_jiffy_scaled,
&cpustat->softirq);
} else if (user_tick) {
account_user_time(p, cputime_one_jiffy, one_jiffy_scaled);
} else if (p == rq->idle) {
account_idle_time(cputime_one_jiffy);
} else if (p->flags & PF_VCPU) { /* System time or guest time */
account_guest_time(p, cputime_one_jiffy, one_jiffy_scaled);
} else {
__account_system_time(p, cputime_one_jiffy, one_jiffy_scaled,
&cpustat->system);
}
}
static void irqtime_account_idle_ticks(int ticks)
{
int i;
struct rq *rq = this_rq();
for (i = 0; i < ticks; i++)
irqtime_account_process_tick(current, 0, rq);
}
#else /* CONFIG_IRQ_TIME_ACCOUNTING */
static void irqtime_account_idle_ticks(int ticks) {}
static void irqtime_account_process_tick(struct task_struct *p, int user_tick,
struct rq *rq) {}
#endif /* CONFIG_IRQ_TIME_ACCOUNTING */
/*
* Account a single tick of cpu time.
* @p: the process that the cpu time gets accounted to
* @user_tick: indicates if the tick is a user or a system tick
*/
void account_process_tick(struct task_struct *p, int user_tick)
{
cputime_t one_jiffy_scaled = cputime_to_scaled(cputime_one_jiffy);
struct rq *rq = this_rq();
if (sched_clock_irqtime) {
irqtime_account_process_tick(p, user_tick, rq);
return;
}
if (user_tick)
account_user_time(p, cputime_one_jiffy, one_jiffy_scaled);
else if ((p != rq->idle) || (irq_count() != HARDIRQ_OFFSET))
account_system_time(p, HARDIRQ_OFFSET, cputime_one_jiffy,
one_jiffy_scaled);
else
account_idle_time(cputime_one_jiffy);
}
/*
* Account multiple ticks of steal time.
* @ticks: number of stolen ticks
*/
void account_steal_ticks(unsigned long ticks)
{
account_steal_time(jiffies_to_cputime(ticks));
}
/*
* Account multiple ticks of idle time.
* @ticks: number of idle ticks
*/
void account_idle_ticks(unsigned long ticks)
{
if (sched_clock_irqtime) {
irqtime_account_idle_ticks(ticks);
return;
}
account_idle_time(jiffies_to_cputime(ticks));
}
#endif
/*
* Use precise platform statistics if available:
*/
#ifdef CONFIG_VIRT_CPU_ACCOUNTING
void task_times(struct task_struct *p, cputime_t *ut, cputime_t *st)
{
*ut = p->utime;
*st = p->stime;
}
void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t *st)
{
struct task_cputime cputime;
thread_group_cputime(p, &cputime);
*ut = cputime.utime;
*st = cputime.stime;
}
#else
#ifndef nsecs_to_cputime
# define nsecs_to_cputime(__nsecs) nsecs_to_jiffies(__nsecs)
#endif
void task_times(struct task_struct *p, cputime_t *ut, cputime_t *st)
{
cputime_t rtime, utime = p->utime, total = cputime_add(utime, p->stime);
/*
* Use CFS's precise accounting:
*/
rtime = nsecs_to_cputime(p->se.sum_exec_runtime);
if (total) {
u64 temp = rtime;
temp *= utime;
do_div(temp, total);
utime = (cputime_t)temp;
} else
utime = rtime;
/*
* Compare with previous values, to keep monotonicity:
*/
p->prev_utime = max(p->prev_utime, utime);
p->prev_stime = max(p->prev_stime, cputime_sub(rtime, p->prev_utime));
*ut = p->prev_utime;
*st = p->prev_stime;
}
/*
* Must be called with siglock held.
*/
void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t *st)
{
struct signal_struct *sig = p->signal;
struct task_cputime cputime;
cputime_t rtime, utime, total;
thread_group_cputime(p, &cputime);
total = cputime_add(cputime.utime, cputime.stime);
rtime = nsecs_to_cputime(cputime.sum_exec_runtime);
if (total) {
u64 temp = rtime;
temp *= cputime.utime;
do_div(temp, total);
utime = (cputime_t)temp;
} else
utime = rtime;
sig->prev_utime = max(sig->prev_utime, utime);
sig->prev_stime = max(sig->prev_stime,
cputime_sub(rtime, sig->prev_utime));
*ut = sig->prev_utime;
*st = sig->prev_stime;
}
#endif
/*
* This function gets called by the timer code, with HZ frequency.
* We call it with interrupts disabled.
*/
void scheduler_tick(void)
{
int cpu = smp_processor_id();
struct rq *rq = cpu_rq(cpu);
struct task_struct *curr = rq->curr;
sched_clock_tick();
raw_spin_lock(&rq->lock);
update_rq_clock(rq);
update_cpu_load_active(rq);
curr->sched_class->task_tick(rq, curr, 0);
raw_spin_unlock(&rq->lock);
perf_event_task_tick();
#ifdef CONFIG_SMP
rq->idle_at_tick = idle_cpu(cpu);
trigger_load_balance(rq, cpu);
#endif
}
notrace unsigned long get_parent_ip(unsigned long addr)
{
if (in_lock_functions(addr)) {
addr = CALLER_ADDR2;
if (in_lock_functions(addr))
addr = CALLER_ADDR3;
}
return addr;
}
#if defined(CONFIG_PREEMPT) && (defined(CONFIG_DEBUG_PREEMPT) || \
defined(CONFIG_PREEMPT_TRACER))
void __kprobes add_preempt_count(int val)
{
#ifdef CONFIG_DEBUG_PREEMPT
/*
* Underflow?
*/
if (DEBUG_LOCKS_WARN_ON((preempt_count() < 0)))
return;
#endif
preempt_count() += val;
#ifdef CONFIG_DEBUG_PREEMPT
/*
* Spinlock count overflowing soon?
*/
DEBUG_LOCKS_WARN_ON((preempt_count() & PREEMPT_MASK) >=
PREEMPT_MASK - 10);
#endif
if (preempt_count() == val)
trace_preempt_off(CALLER_ADDR0, get_parent_ip(CALLER_ADDR1));
}
EXPORT_SYMBOL(add_preempt_count);
void __kprobes sub_preempt_count(int val)
{
#ifdef CONFIG_DEBUG_PREEMPT
/*
* Underflow?
*/
if (DEBUG_LOCKS_WARN_ON(val > preempt_count()))
return;
/*
* Is the spinlock portion underflowing?
*/
if (DEBUG_LOCKS_WARN_ON((val < PREEMPT_MASK) &&
!(preempt_count() & PREEMPT_MASK)))
return;
#endif
if (preempt_count() == val)
trace_preempt_on(CALLER_ADDR0, get_parent_ip(CALLER_ADDR1));
preempt_count() -= val;
}
EXPORT_SYMBOL(sub_preempt_count);
#endif
/*
* Print scheduling while atomic bug:
*/
static noinline void __schedule_bug(struct task_struct *prev)
{
struct pt_regs *regs = get_irq_regs();
printk(KERN_ERR "BUG: scheduling while atomic: %s/%d/0x%08x\n",
prev->comm, prev->pid, preempt_count());
debug_show_held_locks(prev);
print_modules();
if (irqs_disabled())
print_irqtrace_events(prev);
if (regs)
show_regs(regs);
else
dump_stack();
}
/*
* Various schedule()-time debugging checks and statistics:
*/
static inline void schedule_debug(struct task_struct *prev)
{
/*
* Test if we are atomic. Since do_exit() needs to call into
* schedule() atomically, we ignore that path for now.
* Otherwise, whine if we are scheduling when we should not be.
*/
if (unlikely(in_atomic_preempt_off() && !prev->exit_state))
__schedule_bug(prev);
profile_hit(SCHED_PROFILING, __builtin_return_address(0));
schedstat_inc(this_rq(), sched_count);
}
static void put_prev_task(struct rq *rq, struct task_struct *prev)
{
if (prev->on_rq || rq->skip_clock_update < 0)
update_rq_clock(rq);
prev->sched_class->put_prev_task(rq, prev);
}
/*
* Pick up the highest-prio task:
*/
static inline struct task_struct *
pick_next_task(struct rq *rq)
{
const struct sched_class *class;
struct task_struct *p;
/*
* Optimization: we know that if all tasks are in
* the fair class we can call that function directly:
*/
if (likely(rq->nr_running == rq->cfs.nr_running)) {
p = fair_sched_class.pick_next_task(rq);
if (likely(p))
return p;
}
for_each_class(class) {
p = class->pick_next_task(rq);
if (p)
return p;
}
BUG(); /* the idle class will always have a runnable task */
}
/*
* __schedule() is the main scheduler function.
*/
static void __sched __schedule(void)
{
struct task_struct *prev, *next;
unsigned long *switch_count;
struct rq *rq;
int cpu;
need_resched:
preempt_disable();
cpu = smp_processor_id();
rq = cpu_rq(cpu);
rcu_note_context_switch(cpu);
prev = rq->curr;
schedule_debug(prev);
if (sched_feat(HRTICK))
hrtick_clear(rq);
raw_spin_lock_irq(&rq->lock);
switch_count = &prev->nivcsw;
if (prev->state && !(preempt_count() & PREEMPT_ACTIVE)) {
if (unlikely(signal_pending_state(prev->state, prev))) {
prev->state = TASK_RUNNING;
} else {
deactivate_task(rq, prev, DEQUEUE_SLEEP);
prev->on_rq = 0;
/*
* If a worker went to sleep, notify and ask workqueue
* whether it wants to wake up a task to maintain
* concurrency.
*/
if (prev->flags & PF_WQ_WORKER) {
struct task_struct *to_wakeup;
to_wakeup = wq_worker_sleeping(prev, cpu);
if (to_wakeup)
try_to_wake_up_local(to_wakeup);
}
}
switch_count = &prev->nvcsw;
}
pre_schedule(rq, prev);
if (unlikely(!rq->nr_running))
idle_balance(cpu, rq);
put_prev_task(rq, prev);
next = pick_next_task(rq);
clear_tsk_need_resched(prev);
rq->skip_clock_update = 0;
if (likely(prev != next)) {
rq->nr_switches++;
rq->curr = next;
++*switch_count;
context_switch(rq, prev, next); /* unlocks the rq */
/*
* The context switch has flipped the stack from under us
* and restored the local variables which were saved when
* this task called schedule() in the past. prev == current
* is still correct, but it can be moved to another cpu/rq.
*/
cpu = smp_processor_id();
rq = cpu_rq(cpu);
} else
raw_spin_unlock_irq(&rq->lock);
post_schedule(rq);
preempt_enable_no_resched();
if (need_resched())
goto need_resched;
}
static inline void sched_submit_work(struct task_struct *tsk)
{
if (!tsk->state)
return;
/*
* If we are going to sleep and we have plugged IO queued,
* make sure to submit it to avoid deadlocks.
*/
if (blk_needs_flush_plug(tsk))
blk_schedule_flush_plug(tsk);
}
asmlinkage void __sched schedule(void)
{
struct task_struct *tsk = current;
sched_submit_work(tsk);
__schedule();
}
EXPORT_SYMBOL(schedule);
#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
static inline bool owner_running(struct mutex *lock, struct task_struct *owner)
{
bool ret = false;
rcu_read_lock();
if (lock->owner != owner)
goto fail;
/*
* Ensure we emit the owner->on_cpu dereference _after_ checking that
* lock->owner still matches owner. If that check fails, owner might
* point to free()d memory; if it still matches, the rcu_read_lock()
* ensures the memory stays valid.
*/
barrier();
ret = owner->on_cpu;
fail:
rcu_read_unlock();
return ret;
}
/*
* Look out! "owner" is an entirely speculative pointer
* access and not reliable.
*/
int mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner)
{
if (!sched_feat(OWNER_SPIN))
return 0;
while (owner_running(lock, owner)) {
if (need_resched())
return 0;
arch_mutex_cpu_relax();
}
/*
* If the owner changed to another task, there is likely
* heavy contention; stop spinning.
*/
if (lock->owner)
return 0;
return 1;
}
#endif
#ifdef CONFIG_PREEMPT
/*
* This is the entry point to schedule() for in-kernel preemption coming
* off of preempt_enable. Kernel preemptions off the return-from-interrupt
* path occur in preempt_schedule_irq() below, which calls schedule()
* directly.
*/
asmlinkage void __sched notrace preempt_schedule(void)
{
struct thread_info *ti = current_thread_info();
/*
* If there is a non-zero preempt_count or interrupts are disabled,
* we do not want to preempt the current task. Just return..
*/
if (likely(ti->preempt_count || irqs_disabled()))
return;
do {
add_preempt_count_notrace(PREEMPT_ACTIVE);
__schedule();
sub_preempt_count_notrace(PREEMPT_ACTIVE);
/*
* Check again in case we missed a preemption opportunity
* between schedule and now.
*/
barrier();
} while (need_resched());
}
EXPORT_SYMBOL(preempt_schedule);
/*
* This is the entry point to schedule() for kernel preemption off of
* irq context. Note that this is called and returns with irqs
* disabled; this protects us against recursive calls from irq context.
*/
asmlinkage void __sched preempt_schedule_irq(void)
{
struct thread_info *ti = current_thread_info();
/* Catch callers which need to be fixed */
BUG_ON(ti->preempt_count || !irqs_disabled());
do {
add_preempt_count(PREEMPT_ACTIVE);
local_irq_enable();
__schedule();
local_irq_disable();
sub_preempt_count(PREEMPT_ACTIVE);
/*
* Check again in case we missed a preemption opportunity
* between schedule and now.
*/
barrier();
} while (need_resched());
}
#endif /* CONFIG_PREEMPT */
int default_wake_function(wait_queue_t *curr, unsigned mode, int wake_flags,
void *key)
{
return try_to_wake_up(curr->private, mode, wake_flags);
}
EXPORT_SYMBOL(default_wake_function);
/*
* The core wakeup function. Non-exclusive wakeups (nr_exclusive == 0) just
* wake everything up. If it's an exclusive wakeup (nr_exclusive == small +ve
* number) then we wake all the non-exclusive tasks and one exclusive task.
*
* There are circumstances in which we can try to wake a task which has already
* started to run but is not in state TASK_RUNNING. try_to_wake_up() returns
* zero in this (rare) case, and we handle it by continuing to scan the queue.
*/
static void __wake_up_common(wait_queue_head_t *q, unsigned int mode,
int nr_exclusive, int wake_flags, void *key)
{
wait_queue_t *curr, *next;
list_for_each_entry_safe(curr, next, &q->task_list, task_list) {
unsigned flags = curr->flags;
if (curr->func(curr, mode, wake_flags, key) &&
(flags & WQ_FLAG_EXCLUSIVE) && !--nr_exclusive)
break;
}
}
/**
* __wake_up - wake up threads blocked on a waitqueue.
* @q: the waitqueue
* @mode: which threads
* @nr_exclusive: how many wake-one or wake-many threads to wake up
* @key: is directly passed to the wakeup function
*
* It may be assumed that this function implies a write memory barrier before
* changing the task state if and only if any tasks are woken up.
*/
void __wake_up(wait_queue_head_t *q, unsigned int mode,
int nr_exclusive, void *key)
{
unsigned long flags;
spin_lock_irqsave(&q->lock, flags);
__wake_up_common(q, mode, nr_exclusive, 0, key);
spin_unlock_irqrestore(&q->lock, flags);
}
EXPORT_SYMBOL(__wake_up);
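/*
 * Illustration (not part of the kernel build): a minimal waitqueue
 * producer/consumer sketch. wake_up() expands to __wake_up(q,
 * TASK_NORMAL, 1, NULL) and so wakes every non-exclusive waiter plus
 * at most one exclusive waiter; producer() and consumer() are
 * hypothetical names.
 */
#if 0
#include <linux/wait.h>
#include <linux/sched.h>

static DECLARE_WAIT_QUEUE_HEAD(data_wq);
static int data_ready;

static void consumer(void)
{
    /* sleeps until the condition is true, re-checking after each wakeup */
    wait_event(data_wq, data_ready != 0);
    /* ... consume the data ... */
}

static void producer(void)
{
    data_ready = 1;
    wake_up(&data_wq);
}
#endif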
/*
* Same as __wake_up but called with the spinlock in wait_queue_head_t held.
*/
void __wake_up_locked(wait_queue_head_t *q, unsigned int mode)
{
__wake_up_common(q, mode, 1, 0, NULL);
}
EXPORT_SYMBOL_GPL(__wake_up_locked);
void __wake_up_locked_key(wait_queue_head_t *q, unsigned int mode, void *key)
{
__wake_up_common(q, mode, 1, 0, key);
}
EXPORT_SYMBOL_GPL(__wake_up_locked_key);
/**
* __wake_up_sync_key - wake up threads blocked on a waitqueue.
* @q: the waitqueue
* @mode: which threads
* @nr_exclusive: how many wake-one or wake-many threads to wake up
* @key: opaque value to be passed to wakeup targets
*
* The sync wakeup differs in that the waker knows that it will schedule
* away soon, so while the target thread will be woken up, it will not
* be migrated to another CPU - ie. the two threads are 'synchronized'
* with each other. This can prevent needless bouncing between CPUs.
*
* On UP it can prevent extra preemption.
*
* It may be assumed that this function implies a write memory barrier before
* changing the task state if and only if any tasks are woken up.
*/
void __wake_up_sync_key(wait_queue_head_t *q, unsigned int mode,
int nr_exclusive, void *key)
{
unsigned long flags;
int wake_flags = WF_SYNC;
if (unlikely(!q))
return;
if (unlikely(!nr_exclusive))
wake_flags = 0;
spin_lock_irqsave(&q->lock, flags);
__wake_up_common(q, mode, nr_exclusive, wake_flags, key);
spin_unlock_irqrestore(&q->lock, flags);
}
EXPORT_SYMBOL_GPL(__wake_up_sync_key);
/*
* __wake_up_sync - see __wake_up_sync_key()
*/
void __wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr_exclusive)
{
__wake_up_sync_key(q, mode, nr_exclusive, NULL);
}
EXPORT_SYMBOL_GPL(__wake_up_sync); /* For internal use only */
/**
* complete: - signals a single thread waiting on this completion
* @x: holds the state of this particular completion
*
* This will wake up a single thread waiting on this completion. Threads will be
* awakened in the same order in which they were queued.
*
* See also complete_all(), wait_for_completion() and related routines.
*
* It may be assumed that this function implies a write memory barrier before
* changing the task state if and only if any tasks are woken up.
*/
void complete(struct completion *x)
{
unsigned long flags;
spin_lock_irqsave(&x->wait.lock, flags);
x->done++;
__wake_up_common(&x->wait, TASK_NORMAL, 1, 0, NULL);
spin_unlock_irqrestore(&x->wait.lock, flags);
}
EXPORT_SYMBOL(complete);
/**
* complete_all: - signals all threads waiting on this completion
* @x: holds the state of this particular completion
*
* This will wake up all threads waiting on this particular completion event.
*
* It may be assumed that this function implies a write memory barrier before
* changing the task state if and only if any tasks are woken up.
*/
void complete_all(struct completion *x)
{
unsigned long flags;
spin_lock_irqsave(&x->wait.lock, flags);
x->done += UINT_MAX/2;
__wake_up_common(&x->wait, TASK_NORMAL, 0, 0, NULL);
spin_unlock_irqrestore(&x->wait.lock, flags);
}
EXPORT_SYMBOL(complete_all);
static inline long __sched
do_wait_for_common(struct completion *x, long timeout, int state)
{
if (!x->done) {
DECLARE_WAITQUEUE(wait, current);
__add_wait_queue_tail_exclusive(&x->wait, &wait);
do {
if (signal_pending_state(state, current)) {
timeout = -ERESTARTSYS;
break;
}
__set_current_state(state);
spin_unlock_irq(&x->wait.lock);
timeout = schedule_timeout(timeout);
spin_lock_irq(&x->wait.lock);
} while (!x->done && timeout);
__remove_wait_queue(&x->wait, &wait);
if (!x->done)
return timeout;
}
x->done--;
return timeout ?: 1;
}
static long __sched
wait_for_common(struct completion *x, long timeout, int state)
{
might_sleep();
spin_lock_irq(&x->wait.lock);
timeout = do_wait_for_common(x, timeout, state);
spin_unlock_irq(&x->wait.lock);
return timeout;
}
/**
* wait_for_completion: - waits for completion of a task
* @x: holds the state of this particular completion
*
* This waits to be signaled for completion of a specific task. It is NOT
* interruptible and there is no timeout.
*
* See also similar routines (i.e. wait_for_completion_timeout()) with timeout
* and interrupt capability. Also see complete().
*/
void __sched wait_for_completion(struct completion *x)
{
wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_UNINTERRUPTIBLE);
}
EXPORT_SYMBOL(wait_for_completion);
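/*
 * Illustration (not part of the kernel build): the typical completion
 * pattern - one thread blocks in wait_for_completion() until another
 * thread calls complete(). worker_fn() and start_and_wait() are
 * hypothetical names.
 */
#if 0
#include <linux/completion.h>
#include <linux/kthread.h>
#include <linux/err.h>

static DECLARE_COMPLETION(setup_done);

static int worker_fn(void *data)
{
    /* ... perform the setup work ... */
    complete(&setup_done); /* wake exactly one waiter */
    return 0;
}

static int start_and_wait(void)
{
    struct task_struct *t = kthread_run(worker_fn, NULL, "worker");

    if (IS_ERR(t))
        return PTR_ERR(t);
    /* uninterruptible and without timeout - see the variants below */
    wait_for_completion(&setup_done);
    return 0;
}
#endif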
/**
* wait_for_completion_timeout: - waits for completion of a task (w/timeout)
* @x: holds the state of this particular completion
* @timeout: timeout value in jiffies
*
* This waits for either a completion of a specific task to be signaled or for a
* specified timeout to expire. The timeout is in jiffies. It is not
* interruptible.
*/
unsigned long __sched
wait_for_completion_timeout(struct completion *x, unsigned long timeout)
{
return wait_for_common(x, timeout, TASK_UNINTERRUPTIBLE);
}
EXPORT_SYMBOL(wait_for_completion_timeout);
/**
* wait_for_completion_interruptible: - waits for completion of a task (w/intr)
* @x: holds the state of this particular completion
*
* This waits for completion of a specific task to be signaled. It is
* interruptible.
*/
int __sched wait_for_completion_interruptible(struct completion *x)
{
long t = wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_INTERRUPTIBLE);
if (t == -ERESTARTSYS)
return t;
return 0;
}
EXPORT_SYMBOL(wait_for_completion_interruptible);
/**
* wait_for_completion_interruptible_timeout: - waits for completion (w/(to,intr))
* @x: holds the state of this particular completion
* @timeout: timeout value in jiffies
*
* This waits for either a completion of a specific task to be signaled or for a
* specified timeout to expire. It is interruptible. The timeout is in jiffies.
*/
long __sched
wait_for_completion_interruptible_timeout(struct completion *x,
unsigned long timeout)
{
return wait_for_common(x, timeout, TASK_INTERRUPTIBLE);
}
EXPORT_SYMBOL(wait_for_completion_interruptible_timeout);
/**
* wait_for_completion_killable: - waits for completion of a task (killable)
* @x: holds the state of this particular completion
*
* This waits to be signaled for completion of a specific task. It can be
* interrupted by a kill signal.
*/
int __sched wait_for_completion_killable(struct completion *x)
{
long t = wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_KILLABLE);
if (t == -ERESTARTSYS)
return t;
return 0;
}
EXPORT_SYMBOL(wait_for_completion_killable);
/**
* wait_for_completion_killable_timeout: - waits for completion of a task (w/(to,killable))
* @x: holds the state of this particular completion
* @timeout: timeout value in jiffies
*
* This waits for either a completion of a specific task to be
* signaled or for a specified timeout to expire. It can be
* interrupted by a kill signal. The timeout is in jiffies.
*/
long __sched
wait_for_completion_killable_timeout(struct completion *x,
unsigned long timeout)
{
return wait_for_common(x, timeout, TASK_KILLABLE);
}
EXPORT_SYMBOL(wait_for_completion_killable_timeout);
/**
* try_wait_for_completion - try to decrement a completion without blocking
* @x: completion structure
*
* Returns: 0 if a decrement cannot be done without blocking
* 1 if a decrement succeeded.
*
* If a completion is being used as a counting completion,
* attempt to decrement the counter without blocking. This
* enables us to avoid waiting if the resource the completion
* is protecting is not available.
*/
bool try_wait_for_completion(struct completion *x)
{
unsigned long flags;
int ret = 1;
spin_lock_irqsave(&x->wait.lock, flags);
if (!x->done)
ret = 0;
else
x->done--;
spin_unlock_irqrestore(&x->wait.lock, flags);
return ret;
}
EXPORT_SYMBOL(try_wait_for_completion);
/**
* completion_done - Test to see if a completion has any waiters
* @x: completion structure
*
* Returns: 0 if there are waiters (wait_for_completion() in progress)
* 1 if there are no waiters.
*
*/
bool completion_done(struct completion *x)
{
unsigned long flags;
int ret = 1;
spin_lock_irqsave(&x->wait.lock, flags);
if (!x->done)
ret = 0;
spin_unlock_irqrestore(&x->wait.lock, flags);
return ret;
}
EXPORT_SYMBOL(completion_done);
static long __sched
sleep_on_common(wait_queue_head_t *q, int state, long timeout)
{
unsigned long flags;
wait_queue_t wait;
init_waitqueue_entry(&wait, current);
__set_current_state(state);
spin_lock_irqsave(&q->lock, flags);
__add_wait_queue(q, &wait);
spin_unlock(&q->lock);
timeout = schedule_timeout(timeout);
spin_lock_irq(&q->lock);
__remove_wait_queue(q, &wait);
spin_unlock_irqrestore(&q->lock, flags);
return timeout;
}
void __sched interruptible_sleep_on(wait_queue_head_t *q)
{
sleep_on_common(q, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
}
EXPORT_SYMBOL(interruptible_sleep_on);
long __sched
interruptible_sleep_on_timeout(wait_queue_head_t *q, long timeout)
{
return sleep_on_common(q, TASK_INTERRUPTIBLE, timeout);
}
EXPORT_SYMBOL(interruptible_sleep_on_timeout);
void __sched sleep_on(wait_queue_head_t *q)
{
sleep_on_common(q, TASK_UNINTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
}
EXPORT_SYMBOL(sleep_on);
long __sched sleep_on_timeout(wait_queue_head_t *q, long timeout)
{
return sleep_on_common(q, TASK_UNINTERRUPTIBLE, timeout);
}
EXPORT_SYMBOL(sleep_on_timeout);
#ifdef CONFIG_RT_MUTEXES
/*
* rt_mutex_setprio - set the current priority of a task
* @p: task
* @prio: prio value (kernel-internal form)
*
* This function changes the 'effective' priority of a task. It does
* not touch ->normal_prio like __setscheduler().
*
* Used by the rt_mutex code to implement priority inheritance logic.
*/
void rt_mutex_setprio(struct task_struct *p, int prio)
{
int oldprio, on_rq, running;
struct rq *rq;
const struct sched_class *prev_class;
BUG_ON(prio < 0 || prio > MAX_PRIO);
rq = __task_rq_lock(p);
trace_sched_pi_setprio(p, prio);
oldprio = p->prio;
prev_class = p->sched_class;
on_rq = p->on_rq;
running = task_current(rq, p);
if (on_rq)
dequeue_task(rq, p, 0);
if (running)
p->sched_class->put_prev_task(rq, p);
if (rt_prio(prio))
p->sched_class = &rt_sched_class;
else
p->sched_class = &fair_sched_class;
p->prio = prio;
if (running)
p->sched_class->set_curr_task(rq);
if (on_rq)
enqueue_task(rq, p, oldprio < prio ? ENQUEUE_HEAD : 0);
check_class_changed(rq, p, prev_class, oldprio);
__task_rq_unlock(rq);
}
#endif
void set_user_nice(struct task_struct *p, long nice)
{
int old_prio, delta, on_rq;
unsigned long flags;
struct rq *rq;
if (TASK_NICE(p) == nice || nice < -20 || nice > 19)
return;
/*
* We have to be careful, if called from sys_setpriority(),
* the task might be in the middle of scheduling on another CPU.
*/
rq = task_rq_lock(p, &flags);
/*
* The RT priorities are set via sched_setscheduler(), but we still
* allow the 'normal' nice value to be set - but as expected it won't
* have any effect on scheduling while the task keeps its
* SCHED_FIFO/SCHED_RR policy:
*/
if (task_has_rt_policy(p)) {
p->static_prio = NICE_TO_PRIO(nice);
goto out_unlock;
}
on_rq = p->on_rq;
if (on_rq)
dequeue_task(rq, p, 0);
p->static_prio = NICE_TO_PRIO(nice);
set_load_weight(p);
old_prio = p->prio;
p->prio = effective_prio(p);
delta = p->prio - old_prio;
if (on_rq) {
enqueue_task(rq, p, 0);
/*
* If the task increased its priority or is running and
* lowered its priority, then reschedule its CPU:
*/
if (delta < 0 || (delta > 0 && task_running(rq, p)))
resched_task(rq->curr);
}
out_unlock:
task_rq_unlock(rq, p, &flags);
}
EXPORT_SYMBOL(set_user_nice);
/*
* can_nice - check if a task can reduce its nice value
* @p: task
* @nice: nice value
*/
int can_nice(const struct task_struct *p, const int nice)
{
/* convert nice value [19,-20] to rlimit style value [1,40] */
int nice_rlim = 20 - nice;
return (nice_rlim <= task_rlimit(p, RLIMIT_NICE) ||
capable(CAP_SYS_NICE));
}
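/*
 * Worked example: the conversion above maps nice 19 to rlimit value 1
 * and nice -20 to 40, so RLIMIT_NICE = 25 permits lowering nice down
 * to 20 - 25 = -5 without CAP_SYS_NICE.
 */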
#ifdef __ARCH_WANT_SYS_NICE
/*
* sys_nice - change the priority of the current process.
* @increment: priority increment
*
* sys_setpriority is a more generic, but much slower function that
* does similar things.
*/
SYSCALL_DEFINE1(nice, int, increment)
{
long nice, retval;
/*
* Setpriority might change our priority at the same moment.
* We don't have to worry. Conceptually one call occurs first
* and we have a single winner.
*/
if (increment < -40)
increment = -40;
if (increment > 40)
increment = 40;
nice = TASK_NICE(current) + increment;
if (nice < -20)
nice = -20;
if (nice > 19)
nice = 19;
if (increment < 0 && !can_nice(current, nice))
return -EPERM;
retval = security_task_setnice(current, nice);
if (retval)
return retval;
set_user_nice(current, nice);
return 0;
}
#endif
/**
* task_prio - return the priority value of a given task.
* @p: the task in question.
*
* This is the priority value as seen by users in /proc.
* RT tasks map to [-100 ... -2]; normal tasks map to [0 ... 39],
* i.e. nice -20 shows as 0 and nice 19 shows as 39.
*/
int task_prio(const struct task_struct *p)
{
return p->prio - MAX_RT_PRIO;
}
/**
* task_nice - return the nice value of a given task.
* @p: the task in question.
*/
int task_nice(const struct task_struct *p)
{
return TASK_NICE(p);
}
EXPORT_SYMBOL(task_nice);
/**
* idle_cpu - is a given cpu idle currently?
* @cpu: the processor in question.
*/
int idle_cpu(int cpu)
{
return cpu_curr(cpu) == cpu_rq(cpu)->idle;
}
/**
* idle_task - return the idle task for a given cpu.
* @cpu: the processor in question.
*/
struct task_struct *idle_task(int cpu)
{
return cpu_rq(cpu)->idle;
}
/**
* find_process_by_pid - find a process with a matching PID value.
* @pid: the pid in question.
*/
static struct task_struct *find_process_by_pid(pid_t pid)
{
return pid ? find_task_by_vpid(pid) : current;
}
/* Actually do priority change: must hold rq lock. */
static void
__setscheduler(struct rq *rq, struct task_struct *p, int policy, int prio)
{
p->policy = policy;
p->rt_priority = prio;
p->normal_prio = normal_prio(p);
/* we are holding p->pi_lock already */
p->prio = rt_mutex_getprio(p);
if (rt_prio(p->prio))
p->sched_class = &rt_sched_class;
else
p->sched_class = &fair_sched_class;
set_load_weight(p);
}
/*
* check the target process has a UID that matches the current process's
*/
static bool check_same_owner(struct task_struct *p)
{
const struct cred *cred = current_cred(), *pcred;
bool match;
rcu_read_lock();
pcred = __task_cred(p);
if (cred->user->user_ns == pcred->user->user_ns)
match = (cred->euid == pcred->euid ||
cred->euid == pcred->uid);
else
match = false;
rcu_read_unlock();
return match;
}
static int __sched_setscheduler(struct task_struct *p, int policy,
const struct sched_param *param, bool user)
{
int retval, oldprio, oldpolicy = -1, on_rq, running;
unsigned long flags;
const struct sched_class *prev_class;
struct rq *rq;
int reset_on_fork;
/* may grab non-irq protected spin_locks */
BUG_ON(in_interrupt());
recheck:
/* double check policy once rq lock held */
if (policy < 0) {
reset_on_fork = p->sched_reset_on_fork;
policy = oldpolicy = p->policy;
} else {
reset_on_fork = !!(policy & SCHED_RESET_ON_FORK);
policy &= ~SCHED_RESET_ON_FORK;
if (policy != SCHED_FIFO && policy != SCHED_RR &&
policy != SCHED_NORMAL && policy != SCHED_BATCH &&
policy != SCHED_IDLE)
return -EINVAL;
}
/*
* Valid priorities for SCHED_FIFO and SCHED_RR are
* 1..MAX_USER_RT_PRIO-1, valid priority for SCHED_NORMAL,
* SCHED_BATCH and SCHED_IDLE is 0.
*/
if (param->sched_priority < 0 ||
(p->mm && param->sched_priority > MAX_USER_RT_PRIO-1) ||
(!p->mm && param->sched_priority > MAX_RT_PRIO-1))
return -EINVAL;
if (rt_policy(policy) != (param->sched_priority != 0))
return -EINVAL;
/*
* Allow unprivileged RT tasks to decrease priority:
*/
if (user && !capable(CAP_SYS_NICE)) {
if (rt_policy(policy)) {
unsigned long rlim_rtprio =
task_rlimit(p, RLIMIT_RTPRIO);
/* can't set/change the rt policy */
if (policy != p->policy && !rlim_rtprio)
return -EPERM;
/* can't increase priority */
if (param->sched_priority > p->rt_priority &&
param->sched_priority > rlim_rtprio)
return -EPERM;
}
/*
* Treat SCHED_IDLE as nice 20. Only allow a switch to
* SCHED_NORMAL if the RLIMIT_NICE would normally permit it.
*/
if (p->policy == SCHED_IDLE && policy != SCHED_IDLE) {
if (!can_nice(p, TASK_NICE(p)))
return -EPERM;
}
/* can't change other user's priorities */
if (!check_same_owner(p))
return -EPERM;
/* Normal users shall not reset the sched_reset_on_fork flag */
if (p->sched_reset_on_fork && !reset_on_fork)
return -EPERM;
}
if (user) {
retval = security_task_setscheduler(p);
if (retval)
return retval;
}
/*
* make sure no PI-waiters arrive (or leave) while we are
* changing the priority of the task:
*
* To be able to change p->policy safely, the appropriate
* runqueue lock must be held.
*/
rq = task_rq_lock(p, &flags);
/*
* Changing the policy of the stop threads is a very bad idea
*/
if (p == rq->stop) {
task_rq_unlock(rq, p, &flags);
return -EINVAL;
}
/*
* If not changing anything there's no need to proceed further:
*/
if (unlikely(policy == p->policy && (!rt_policy(policy) ||
param->sched_priority == p->rt_priority))) {
__task_rq_unlock(rq);
raw_spin_unlock_irqrestore(&p->pi_lock, flags);
return 0;
}
#ifdef CONFIG_RT_GROUP_SCHED
if (user) {
/*
* Do not allow realtime tasks into groups that have no runtime
* assigned.
*/
if (rt_bandwidth_enabled() && rt_policy(policy) &&
task_group(p)->rt_bandwidth.rt_runtime == 0 &&
!task_group_is_autogroup(task_group(p))) {
task_rq_unlock(rq, p, &flags);
return -EPERM;
}
}
#endif
/* recheck policy now with rq lock held */
if (unlikely(oldpolicy != -1 && oldpolicy != p->policy)) {
policy = oldpolicy = -1;
task_rq_unlock(rq, p, &flags);
goto recheck;
}
on_rq = p->on_rq;
running = task_current(rq, p);
if (on_rq)
deactivate_task(rq, p, 0);
if (running)
p->sched_class->put_prev_task(rq, p);
p->sched_reset_on_fork = reset_on_fork;
oldprio = p->prio;
prev_class = p->sched_class;
__setscheduler(rq, p, policy, param->sched_priority);
if (running)
p->sched_class->set_curr_task(rq);
if (on_rq)
activate_task(rq, p, 0);
check_class_changed(rq, p, prev_class, oldprio);
task_rq_unlock(rq, p, &flags);
rt_mutex_adjust_pi(p);
return 0;
}
/**
* sched_setscheduler - change the scheduling policy and/or RT priority of a thread.
* @p: the task in question.
* @policy: new policy.
* @param: structure containing the new RT priority.
*
* NOTE that the task may be already dead.
*/
int sched_setscheduler(struct task_struct *p, int policy,
const struct sched_param *param)
{
return __sched_setscheduler(p, policy, param, true);
}
EXPORT_SYMBOL_GPL(sched_setscheduler);
/**
* sched_setscheduler_nocheck - change the scheduling policy and/or RT priority of a thread from kernelspace.
* @p: the task in question.
* @policy: new policy.
* @param: structure containing the new RT priority.
*
* Just like sched_setscheduler, only don't bother checking if the
* current context has permission. For example, this is needed in
* stop_machine(): we create temporary high priority worker threads,
* but our caller might not have that capability.
*/
int sched_setscheduler_nocheck(struct task_struct *p, int policy,
const struct sched_param *param)
{
return __sched_setscheduler(p, policy, param, false);
}
static int
do_sched_setscheduler(pid_t pid, int policy, struct sched_param __user *param)
{
struct sched_param lparam;
struct task_struct *p;
int retval;
if (!param || pid < 0)
return -EINVAL;
if (copy_from_user(&lparam, param, sizeof(struct sched_param)))
return -EFAULT;
rcu_read_lock();
retval = -ESRCH;
p = find_process_by_pid(pid);
if (p != NULL)
retval = sched_setscheduler(p, policy, &lparam);
rcu_read_unlock();
return retval;
}
/**
* sys_sched_setscheduler - set/change the scheduler policy and RT priority
* @pid: the pid in question.
* @policy: new policy.
* @param: structure containing the new RT priority.
*/
SYSCALL_DEFINE3(sched_setscheduler, pid_t, pid, int, policy,
struct sched_param __user *, param)
{
/* negative values for policy are not valid */
if (policy < 0)
return -EINVAL;
return do_sched_setscheduler(pid, policy, param);
}
/**
* sys_sched_setparam - set/change the RT priority of a thread
* @pid: the pid in question.
* @param: structure containing the new RT priority.
*/
SYSCALL_DEFINE2(sched_setparam, pid_t, pid, struct sched_param __user *, param)
{
return do_sched_setscheduler(pid, -1, param);
}
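/*
 * Illustration (not part of the kernel build): a userspace caller of
 * the syscall above via the glibc wrapper, making the calling thread
 * SCHED_FIFO priority 10. This needs CAP_SYS_NICE or a suitable
 * RLIMIT_RTPRIO, per the permission checks in __sched_setscheduler().
 */
#if 0
#include <sched.h>
#include <stdio.h>

int main(void)
{
    struct sched_param sp = { .sched_priority = 10 };

    /* pid 0 means "the calling thread" */
    if (sched_setscheduler(0, SCHED_FIFO, &sp) == -1) {
        perror("sched_setscheduler");
        return 1;
    }
    printf("policy now %d\n", sched_getscheduler(0));
    return 0;
}
#endif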
/**
* sys_sched_getscheduler - get the policy (scheduling class) of a thread
* @pid: the pid in question.
*/
SYSCALL_DEFINE1(sched_getscheduler, pid_t, pid)
{
struct task_struct *p;
int retval;
if (pid < 0)
return -EINVAL;
retval = -ESRCH;
rcu_read_lock();
p = find_process_by_pid(pid);
if (p) {
retval = security_task_getscheduler(p);
if (!retval)
retval = p->policy
| (p->sched_reset_on_fork ? SCHED_RESET_ON_FORK : 0);
}
rcu_read_unlock();
return retval;
}
/**
* sys_sched_getparam - get the RT priority of a thread
* @pid: the pid in question.
* @param: structure containing the RT priority.
*/
SYSCALL_DEFINE2(sched_getparam, pid_t, pid, struct sched_param __user *, param)
{
struct sched_param lp;
struct task_struct *p;
int retval;
if (!param || pid < 0)
return -EINVAL;
rcu_read_lock();
p = find_process_by_pid(pid);
retval = -ESRCH;
if (!p)
goto out_unlock;
retval = security_task_getscheduler(p);
if (retval)
goto out_unlock;
lp.sched_priority = p->rt_priority;
rcu_read_unlock();
/*
* This one might sleep; we cannot do it with a spinlock held ...
*/
retval = copy_to_user(param, &lp, sizeof(*param)) ? -EFAULT : 0;
return retval;
out_unlock:
rcu_read_unlock();
return retval;
}
long sched_setaffinity(pid_t pid, const struct cpumask *in_mask)
{
cpumask_var_t cpus_allowed, new_mask;
struct task_struct *p;
int retval;
get_online_cpus();
rcu_read_lock();
p = find_process_by_pid(pid);
if (!p) {
rcu_read_unlock();
put_online_cpus();
return -ESRCH;
}
/* Prevent p going away */
get_task_struct(p);
rcu_read_unlock();
if (!alloc_cpumask_var(&cpus_allowed, GFP_KERNEL)) {
retval = -ENOMEM;
goto out_put_task;
}
if (!alloc_cpumask_var(&new_mask, GFP_KERNEL)) {
retval = -ENOMEM;
goto out_free_cpus_allowed;
}
retval = -EPERM;
if (!check_same_owner(p) && !task_ns_capable(p, CAP_SYS_NICE))
goto out_unlock;
retval = security_task_setscheduler(p);
if (retval)
goto out_unlock;
cpuset_cpus_allowed(p, cpus_allowed);
cpumask_and(new_mask, in_mask, cpus_allowed);
again:
retval = set_cpus_allowed_ptr(p, new_mask);
if (!retval) {
cpuset_cpus_allowed(p, cpus_allowed);
if (!cpumask_subset(new_mask, cpus_allowed)) {
/*
* We must have raced with a concurrent cpuset
* update. Just reset the cpus_allowed to the
* cpuset's cpus_allowed
*/
cpumask_copy(new_mask, cpus_allowed);
goto again;
}
}
out_unlock:
free_cpumask_var(new_mask);
out_free_cpus_allowed:
free_cpumask_var(cpus_allowed);
out_put_task:
put_task_struct(p);
put_online_cpus();
return retval;
}
static int get_user_cpu_mask(unsigned long __user *user_mask_ptr, unsigned len,
struct cpumask *new_mask)
{
if (len < cpumask_size())
cpumask_clear(new_mask);
else if (len > cpumask_size())
len = cpumask_size();
return copy_from_user(new_mask, user_mask_ptr, len) ? -EFAULT : 0;
}
/**
* sys_sched_setaffinity - set the cpu affinity of a process
* @pid: pid of the process
* @len: length in bytes of the bitmask pointed to by user_mask_ptr
* @user_mask_ptr: user-space pointer to the new cpu mask
*/
SYSCALL_DEFINE3(sched_setaffinity, pid_t, pid, unsigned int, len,
unsigned long __user *, user_mask_ptr)
{
cpumask_var_t new_mask;
int retval;
if (!alloc_cpumask_var(&new_mask, GFP_KERNEL))
return -ENOMEM;
retval = get_user_cpu_mask(user_mask_ptr, len, new_mask);
if (retval == 0)
retval = sched_setaffinity(pid, new_mask);
free_cpumask_var(new_mask);
return retval;
}
long sched_getaffinity(pid_t pid, struct cpumask *mask)
{
struct task_struct *p;
unsigned long flags;
int retval;
get_online_cpus();
rcu_read_lock();
retval = -ESRCH;
p = find_process_by_pid(pid);
if (!p)
goto out_unlock;
retval = security_task_getscheduler(p);
if (retval)
goto out_unlock;
raw_spin_lock_irqsave(&p->pi_lock, flags);
cpumask_and(mask, &p->cpus_allowed, cpu_online_mask);
raw_spin_unlock_irqrestore(&p->pi_lock, flags);
out_unlock:
rcu_read_unlock();
put_online_cpus();
return retval;
}
/**
* sys_sched_getaffinity - get the cpu affinity of a process
* @pid: pid of the process
* @len: length in bytes of the bitmask pointed to by user_mask_ptr
* @user_mask_ptr: user-space pointer to hold the current cpu mask
*/
SYSCALL_DEFINE3(sched_getaffinity, pid_t, pid, unsigned int, len,
unsigned long __user *, user_mask_ptr)
{
int ret;
cpumask_var_t mask;
if ((len * BITS_PER_BYTE) < nr_cpu_ids)
return -EINVAL;
if (len & (sizeof(unsigned long)-1))
return -EINVAL;
if (!alloc_cpumask_var(&mask, GFP_KERNEL))
return -ENOMEM;
ret = sched_getaffinity(pid, mask);
if (ret == 0) {
size_t retlen = min_t(size_t, len, cpumask_size());
if (copy_to_user(user_mask_ptr, mask, retlen))
ret = -EFAULT;
else
ret = retlen;
}
free_cpumask_var(mask);
return ret;
}
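/*
 * Illustration (not part of the kernel build): the userspace view of
 * the two affinity syscalls above - pin the caller to CPU 0, then read
 * the mask back.
 */
#if 0
#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>

int main(void)
{
    cpu_set_t set;

    CPU_ZERO(&set);
    CPU_SET(0, &set);
    if (sched_setaffinity(0, sizeof(set), &set) == -1) {
        perror("sched_setaffinity");
        return 1;
    }
    CPU_ZERO(&set);
    sched_getaffinity(0, sizeof(set), &set);
    printf("pinned to CPU 0: %d\n", CPU_ISSET(0, &set));
    return 0;
}
#endif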
/**
* sys_sched_yield - yield the current processor to other threads.
*
* This function yields the current CPU to other tasks. If there are no
* other threads running on this CPU then this function will return.
*/
SYSCALL_DEFINE0(sched_yield)
{
struct rq *rq = this_rq_lock();
schedstat_inc(rq, yld_count);
current->sched_class->yield_task(rq);
/*
* Since we are going to call schedule() anyway, there's
* no need to preempt or enable interrupts:
*/
__release(rq->lock);
spin_release(&rq->lock.dep_map, 1, _THIS_IP_);
do_raw_spin_unlock(&rq->lock);
preempt_enable_no_resched();
schedule();
return 0;
}
static inline int should_resched(void)
{
return need_resched() && !(preempt_count() & PREEMPT_ACTIVE);
}
static void __cond_resched(void)
{
add_preempt_count(PREEMPT_ACTIVE);
__schedule();
sub_preempt_count(PREEMPT_ACTIVE);
}
int __sched _cond_resched(void)
{
if (should_resched()) {
__cond_resched();
return 1;
}
return 0;
}
EXPORT_SYMBOL(_cond_resched);
/*
* __cond_resched_lock() - if a reschedule is pending, drop the given lock,
* call schedule, and on return reacquire the lock.
*
* This works OK both with and without CONFIG_PREEMPT. We do strange low-level
* operations here to prevent schedule() from being called twice (once via
* spin_unlock(), once by hand).
*/
int __cond_resched_lock(spinlock_t *lock)
{
int resched = should_resched();
int ret = 0;
lockdep_assert_held(lock);
if (spin_needbreak(lock) || resched) {
spin_unlock(lock);
if (resched)
__cond_resched();
else
cpu_relax();
ret = 1;
spin_lock(lock);
}
return ret;
}
EXPORT_SYMBOL(__cond_resched_lock);
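/*
 * Illustration (not part of the kernel build): a long lock-protected
 * walk that yields via cond_resched_lock(), the wrapper around
 * __cond_resched_lock() above. walk_long_list() is a hypothetical
 * caller; a real one would revalidate its position whenever the lock
 * was dropped.
 */
#if 0
#include <linux/spinlock.h>
#include <linux/list.h>
#include <linux/sched.h>

static void walk_long_list(spinlock_t *lock, struct list_head *head)
{
    struct list_head *pos;

    spin_lock(lock);
    list_for_each(pos, head) {
        /* ... examine one entry ... */
        if (cond_resched_lock(lock)) {
            /* the lock was dropped and reacquired here */
        }
    }
    spin_unlock(lock);
}
#endif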
int __sched __cond_resched_softirq(void)
{
BUG_ON(!in_softirq());
if (should_resched()) {
local_bh_enable();
__cond_resched();
local_bh_disable();
return 1;
}
return 0;
}
EXPORT_SYMBOL(__cond_resched_softirq);
/**
* yield - yield the current processor to other threads.
*
* This is a shortcut for kernel-space yielding - it marks the
* thread runnable and calls sys_sched_yield().
*/
void __sched yield(void)
{
set_current_state(TASK_RUNNING);
sys_sched_yield();
}
EXPORT_SYMBOL(yield);
/**
* yield_to - yield the current processor to another thread in
* your thread group, or accelerate that thread toward the
* processor it's on.
* @p: target task
* @preempt: whether task preemption is allowed or not
*
* It's the caller's job to ensure that the target task struct
* can't go away on us before we can do any checks.
*
* Returns true if we indeed boosted the target task.
*/
bool __sched yield_to(struct task_struct *p, bool preempt)
{
struct task_struct *curr = current;
struct rq *rq, *p_rq;
unsigned long flags;
bool yielded = 0;
local_irq_save(flags);
rq = this_rq();
again:
p_rq = task_rq(p);
double_rq_lock(rq, p_rq);
while (task_rq(p) != p_rq) {
double_rq_unlock(rq, p_rq);
goto again;
}
if (!curr->sched_class->yield_to_task)
goto out;
if (curr->sched_class != p->sched_class)
goto out;
if (task_running(p_rq, p) || p->state)
goto out;
yielded = curr->sched_class->yield_to_task(rq, p, preempt);
if (yielded) {
schedstat_inc(rq, yld_count);
/*
* Make p's CPU reschedule; pick_next_entity takes care of
* fairness.
*/
if (preempt && rq != p_rq)
resched_task(p_rq->curr);
}
out:
double_rq_unlock(rq, p_rq);
local_irq_restore(flags);
if (yielded)
schedule();
return yielded;
}
EXPORT_SYMBOL_GPL(yield_to);
/*
* This task is about to go to sleep on IO. Increment rq->nr_iowait so
* that process accounting knows that this is a task in IO wait state.
*/
void __sched io_schedule(void)
{
struct rq *rq = raw_rq();
delayacct_blkio_start();
atomic_inc(&rq->nr_iowait);
blk_flush_plug(current);
current->in_iowait = 1;
schedule();
current->in_iowait = 0;
atomic_dec(&rq->nr_iowait);
delayacct_blkio_end();
}
EXPORT_SYMBOL(io_schedule);
long __sched io_schedule_timeout(long timeout)
{
struct rq *rq = raw_rq();
long ret;
delayacct_blkio_start();
atomic_inc(&rq->nr_iowait);
blk_flush_plug(current);
current->in_iowait = 1;
ret = schedule_timeout(timeout);
current->in_iowait = 0;
atomic_dec(&rq->nr_iowait);
delayacct_blkio_end();
return ret;
}
/**
* sys_sched_get_priority_max - return maximum RT priority.
* @policy: scheduling class.
*
* this syscall returns the maximum rt_priority that can be used
* by a given scheduling class.
*/
SYSCALL_DEFINE1(sched_get_priority_max, int, policy)
{
int ret = -EINVAL;
switch (policy) {
case SCHED_FIFO:
case SCHED_RR:
ret = MAX_USER_RT_PRIO-1;
break;
case SCHED_NORMAL:
case SCHED_BATCH:
case SCHED_IDLE:
ret = 0;
break;
}
return ret;
}
/**
* sys_sched_get_priority_min - return minimum RT priority.
* @policy: scheduling class.
*
* this syscall returns the minimum rt_priority that can be used
* by a given scheduling class.
*/
SYSCALL_DEFINE1(sched_get_priority_min, int, policy)
{
int ret = -EINVAL;
switch (policy) {
case SCHED_FIFO:
case SCHED_RR:
ret = 1;
break;
case SCHED_NORMAL:
case SCHED_BATCH:
case SCHED_IDLE:
ret = 0;
}
return ret;
}
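/*
 * Illustration (not part of the kernel build): querying the range from
 * userspace. With the values above this prints "SCHED_FIFO: 1..99" on
 * a default MAX_USER_RT_PRIO = 100 build.
 */
#if 0
#include <sched.h>
#include <stdio.h>

int main(void)
{
    printf("SCHED_FIFO: %d..%d\n",
           sched_get_priority_min(SCHED_FIFO),
           sched_get_priority_max(SCHED_FIFO));
    return 0;
}
#endif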
/**
* sys_sched_rr_get_interval - return the default timeslice of a process.
* @pid: pid of the process.
* @interval: userspace pointer to the timeslice value.
*
* this syscall writes the default timeslice value of a given process
* into the user-space timespec buffer. A value of '0' means infinity.
*/
SYSCALL_DEFINE2(sched_rr_get_interval, pid_t, pid,
struct timespec __user *, interval)
{
struct task_struct *p;
unsigned int time_slice;
unsigned long flags;
struct rq *rq;
int retval;
struct timespec t;
if (pid < 0)
return -EINVAL;
retval = -ESRCH;
rcu_read_lock();
p = find_process_by_pid(pid);
if (!p)
goto out_unlock;
retval = security_task_getscheduler(p);
if (retval)
goto out_unlock;
rq = task_rq_lock(p, &flags);
time_slice = p->sched_class->get_rr_interval(rq, p);
task_rq_unlock(rq, p, &flags);
rcu_read_unlock();
jiffies_to_timespec(time_slice, &t);
retval = copy_to_user(interval, &t, sizeof(t)) ? -EFAULT : 0;
return retval;
out_unlock:
rcu_read_unlock();
return retval;
}
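/*
 * Illustration (not part of the kernel build): reading the timeslice
 * from userspace via the glibc wrapper. A result of 0 means an
 * infinite timeslice, as described above.
 */
#if 0
#include <sched.h>
#include <stdio.h>
#include <time.h>

int main(void)
{
    struct timespec ts;

    if (sched_rr_get_interval(0, &ts) == -1) {
        perror("sched_rr_get_interval");
        return 1;
    }
    printf("timeslice: %ld.%09ld s\n", (long)ts.tv_sec, ts.tv_nsec);
    return 0;
}
#endif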
static const char stat_nam[] = TASK_STATE_TO_CHAR_STR;
void sched_show_task(struct task_struct *p)
{
unsigned long free = 0;
unsigned state;
state = p->state ? __ffs(p->state) + 1 : 0;
printk(KERN_INFO "%-15.15s %c", p->comm,
state < sizeof(stat_nam) - 1 ? stat_nam[state] : '?');
#if BITS_PER_LONG == 32
if (state == TASK_RUNNING)
printk(KERN_CONT " running ");
else
printk(KERN_CONT " %08lx ", thread_saved_pc(p));
#else
if (state == TASK_RUNNING)
printk(KERN_CONT " running task ");
else
printk(KERN_CONT " %016lx ", thread_saved_pc(p));
#endif
#ifdef CONFIG_DEBUG_STACK_USAGE
free = stack_not_used(p);
#endif
printk(KERN_CONT "%5lu %5d %6d 0x%08lx\n", free,
task_pid_nr(p), task_pid_nr(p->real_parent),
(unsigned long)task_thread_info(p)->flags);
show_stack(p, NULL);
}
void show_state_filter(unsigned long state_filter)
{
struct task_struct *g, *p;
#if BITS_PER_LONG == 32
printk(KERN_INFO
"  task                PC stack   pid father\n");
#else
printk(KERN_INFO
"  task                        PC stack   pid father\n");
#endif
read_lock(&tasklist_lock);
do_each_thread(g, p) {
/*
* reset the NMI-timeout, listing all tasks on a slow
* console might take a lot of time:
*/
touch_nmi_watchdog();
if (!state_filter || (p->state & state_filter))
sched_show_task(p);
} while_each_thread(g, p);
touch_all_softlockup_watchdogs();
#ifdef CONFIG_SCHED_DEBUG
sysrq_sched_debug_show();
#endif
read_unlock(&tasklist_lock);
/*
* Only show locks if all tasks are dumped:
*/
if (!state_filter)
debug_show_all_locks();
}
void __cpuinit init_idle_bootup_task(struct task_struct *idle)
{
idle->sched_class = &idle_sched_class;
}
/**
* init_idle - set up an idle thread for a given CPU
* @idle: task in question
* @cpu: cpu the idle task belongs to
*
* NOTE: this function does not set the idle thread's NEED_RESCHED
* flag, to make booting more robust.
*/
void __cpuinit init_idle(struct task_struct *idle, int cpu)
{
struct rq *rq = cpu_rq(cpu);
unsigned long flags;
raw_spin_lock_irqsave(&rq->lock, flags);
__sched_fork(idle);
idle->state = TASK_RUNNING;
idle->se.exec_start = sched_clock();
do_set_cpus_allowed(idle, cpumask_of(cpu));
/*
* We're having a chicken and egg problem, even though we are
* holding rq->lock, the cpu isn't yet set to this cpu so the
* lockdep check in task_group() will fail.
*
* Similar case to sched_fork(). / Alternatively we could
* use task_rq_lock() here and obtain the other rq->lock.
*
* Silence PROVE_RCU
*/
rcu_read_lock();
__set_task_cpu(idle, cpu);
rcu_read_unlock();
rq->curr = rq->idle = idle;
#if defined(CONFIG_SMP)
idle->on_cpu = 1;
#endif
raw_spin_unlock_irqrestore(&rq->lock, flags);
/* Set the preempt count _outside_ the spinlocks! */
task_thread_info(idle)->preempt_count = 0;
/*
* The idle tasks have their own, simple scheduling class:
*/
idle->sched_class = &idle_sched_class;
ftrace_graph_init_idle_task(idle, cpu);
}
/*
* In a system that switches off the HZ timer nohz_cpu_mask
* indicates which cpus entered this state. This is used
* in the rcu update to wait only for active cpus. For systems
* which do not switch off the HZ timer nohz_cpu_mask should
* always be CPU_BITS_NONE.
*/
cpumask_var_t nohz_cpu_mask;
/*
* Increase the granularity value when there are more CPUs,
* because with more CPUs the 'effective latency' as visible
* to users decreases. But the relationship is not linear,
* so pick a second-best guess by going with the log2 of the
* number of CPUs.
*
* This idea comes from the SD scheduler of Con Kolivas:
*/
static int get_update_sysctl_factor(void)
{
unsigned int cpus = min_t(int, num_online_cpus(), 8);
unsigned int factor;
switch (sysctl_sched_tunable_scaling) {
case SCHED_TUNABLESCALING_NONE:
factor = 1;
break;
case SCHED_TUNABLESCALING_LINEAR:
factor = cpus;
break;
case SCHED_TUNABLESCALING_LOG:
default:
factor = 1 + ilog2(cpus);
break;
}
return factor;
}
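/*
 * Illustration (not part of the kernel build): with the default
 * logarithmic scaling the factor is 1 + ilog2(min(ncpus, 8)), so
 * 1 CPU -> 1, 2 -> 2, 4 -> 3, and 8 or more -> 4.
 */
#if 0
#include <stdio.h>

/* position of the highest set bit, mirroring the kernel's ilog2() */
static int ilog2_sketch(unsigned int n)
{
    int r = -1;

    while (n) {
        n >>= 1;
        r++;
    }
    return r;
}

int main(void)
{
    unsigned int cpus;

    for (cpus = 1; cpus <= 8; cpus *= 2)
        printf("%u cpus -> factor %d\n", cpus, 1 + ilog2_sketch(cpus));
    return 0;
}
#endif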
static void update_sysctl(void)
{
unsigned int factor = get_update_sysctl_factor();
#define SET_SYSCTL(name) \
(sysctl_##name = (factor) * normalized_sysctl_##name)
SET_SYSCTL(sched_min_granularity);
SET_SYSCTL(sched_latency);
SET_SYSCTL(sched_wakeup_granularity);
#undef SET_SYSCTL
}
static inline void sched_init_granularity(void)
{
update_sysctl();
}
#ifdef CONFIG_SMP
void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
{
if (p->sched_class && p->sched_class->set_cpus_allowed)
p->sched_class->set_cpus_allowed(p, new_mask);
else {
cpumask_copy(&p->cpus_allowed, new_mask);
p->rt.nr_cpus_allowed = cpumask_weight(new_mask);
}
}
/*
* This is how migration works:
*
* 1) we invoke migration_cpu_stop() on the target CPU using
* stop_one_cpu().
* 2) stopper starts to run (implicitly forcing the migrated thread
* off the CPU)
* 3) it checks whether the migrated task is still in the wrong runqueue.
* 4) if it's in the wrong runqueue then the migration thread removes
* it and puts it into the right queue.
* 5) stopper completes and stop_one_cpu() returns and the migration
* is done.
*/
/*
* Change a given task's CPU affinity. Migrate the thread to a
* proper CPU and schedule it away if the CPU it's executing on
* is removed from the allowed bitmask.
*
* NOTE: the caller must have a valid reference to the task, the
* task must not exit() & deallocate itself prematurely. The
* call is not atomic; no spinlocks may be held.
*/
int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask)
{
unsigned long flags;
struct rq *rq;
unsigned int dest_cpu;
int ret = 0;
rq = task_rq_lock(p, &flags);
if (cpumask_equal(&p->cpus_allowed, new_mask))
goto out;
if (!cpumask_intersects(new_mask, cpu_active_mask)) {
ret = -EINVAL;
goto out;
}
if (unlikely((p->flags & PF_THREAD_BOUND) && p != current)) {
ret = -EINVAL;
goto out;
}
do_set_cpus_allowed(p, new_mask);
/* Can the task run on the task's current CPU? If so, we're done */
if (cpumask_test_cpu(task_cpu(p), new_mask))
goto out;
dest_cpu = cpumask_any_and(cpu_active_mask, new_mask);
if (p->on_rq) {
struct migration_arg arg = { p, dest_cpu };
/* Need help from migration thread: drop lock and wait. */
task_rq_unlock(rq, p, &flags);
stop_one_cpu(cpu_of(rq), migration_cpu_stop, &arg);
tlb_migrate_finish(p->mm);
return 0;
}
out:
task_rq_unlock(rq, p, &flags);
return ret;
}
EXPORT_SYMBOL_GPL(set_cpus_allowed_ptr);
/*
* Move (not current) task off this cpu, onto dest cpu. We're doing
* this because either it can't run here any more (set_cpus_allowed()
* away from this CPU, or CPU going down), or because we're
* attempting to rebalance this task on exec (sched_exec).
*
* So we race with normal scheduler movements, but that's OK, as long
* as the task is no longer on this CPU.
*
* Returns non-zero if task was successfully migrated.
*/
static int __migrate_task(struct task_struct *p, int src_cpu, int dest_cpu)
{
struct rq *rq_dest, *rq_src;
int ret = 0;
if (unlikely(!cpu_active(dest_cpu)))
return ret;
rq_src = cpu_rq(src_cpu);
rq_dest = cpu_rq(dest_cpu);
raw_spin_lock(&p->pi_lock);
double_rq_lock(rq_src, rq_dest);
/* Already moved. */
if (task_cpu(p) != src_cpu)
goto done;
/* Affinity changed (again). */
if (!cpumask_test_cpu(dest_cpu, &p->cpus_allowed))
goto fail;
/*
* If we're not on a rq, the next wake-up will ensure we're
* placed properly.
*/
if (p->on_rq) {
deactivate_task(rq_src, p, 0);
set_task_cpu(p, dest_cpu);
activate_task(rq_dest, p, 0);
check_preempt_curr(rq_dest, p, 0);
}
done:
ret = 1;
fail:
double_rq_unlock(rq_src, rq_dest);
raw_spin_unlock(&p->pi_lock);
return ret;
}
/*
* migration_cpu_stop - this will be executed by a high-priority stopper
* thread; it performs thread migration by bumping the thread off its CPU
* and then 'pushing' it onto another runqueue.
*/
static int migration_cpu_stop(void *data)
{
struct migration_arg *arg = data;
/*
* The original target cpu might have gone down and we might
* be on another cpu but it doesn't matter.
*/
local_irq_disable();
__migrate_task(arg->task, raw_smp_processor_id(), arg->dest_cpu);
local_irq_enable();
return 0;
}
#ifdef CONFIG_HOTPLUG_CPU
/*
* Ensures that the idle task is using init_mm right before its cpu goes
* offline.
*/
void idle_task_exit(void)
{
struct mm_struct *mm = current->active_mm;
BUG_ON(cpu_online(smp_processor_id()));
if (mm != &init_mm)
switch_mm(mm, &init_mm, current);
mmdrop(mm);
}
/*
* While a dead CPU has no uninterruptible tasks queued at this point,
* it might still have a nonzero ->nr_uninterruptible counter, because
* for performance reasons the counter is not strictly tied to tasks'
* home CPUs. So we just add the counter to another CPU's counter,
* to keep the global sum constant after CPU-down:
*/
static void migrate_nr_uninterruptible(struct rq *rq_src)
{
struct rq *rq_dest = cpu_rq(cpumask_any(cpu_active_mask));
rq_dest->nr_uninterruptible += rq_src->nr_uninterruptible;
rq_src->nr_uninterruptible = 0;
}
/*
* Remove the tasks which were accounted by this rq from calc_load_tasks.
*/
static void calc_global_load_remove(struct rq *rq)
{
atomic_long_sub(rq->calc_load_active, &calc_load_tasks);
rq->calc_load_active = 0;
}
/*
* Migrate all tasks from the rq, sleeping tasks will be migrated by
* try_to_wake_up()->select_task_rq().
*
* Called with rq->lock held. Even though we're in stop_machine() and
* there's no concurrency possible, we hold the required locks anyway
* to satisfy lock validation (lockdep).
*/
static void migrate_tasks(unsigned int dead_cpu)
{
struct rq *rq = cpu_rq(dead_cpu);
struct task_struct *next, *stop = rq->stop;
int dest_cpu;
/*
* Fudge the rq selection such that the below task selection loop
* doesn't get stuck on the currently eligible stop task.
*
* We're currently inside stop_machine() and the rq is either stuck
* in the stop_machine_cpu_stop() loop, or we're executing this code,
* either way we should never end up calling schedule() until we're
* done here.
*/
rq->stop = NULL;
for ( ; ; ) {
/*
* There's this thread running, bail when that's the only
* remaining thread.
*/
if (rq->nr_running == 1)
break;
next = pick_next_task(rq);
BUG_ON(!next);
next->sched_class->put_prev_task(rq, next);
/* Find suitable destination for @next, with force if needed. */
dest_cpu = select_fallback_rq(dead_cpu, next);
raw_spin_unlock(&rq->lock);
__migrate_task(next, dead_cpu, dest_cpu);
raw_spin_lock(&rq->lock);
}
rq->stop = stop;
}
#endif /* CONFIG_HOTPLUG_CPU */
#if defined(CONFIG_SCHED_DEBUG) && defined(CONFIG_SYSCTL)
static struct ctl_table sd_ctl_dir[] = {
{
.procname = "sched_domain",
.mode = 0555,
},
{}
};
static struct ctl_table sd_ctl_root[] = {
{
.procname = "kernel",
.mode = 0555,
.child = sd_ctl_dir,
},
{}
};
static struct ctl_table *sd_alloc_ctl_entry(int n)
{
struct ctl_table *entry =
kcalloc(n, sizeof(struct ctl_table), GFP_KERNEL);
return entry;
}
static void sd_free_ctl_entry(struct ctl_table **tablep)
{
struct ctl_table *entry;
/*
* In the intermediate directories, both the child directory and
* procname are dynamically allocated and could fail but the mode
* will always be set. In the lowest directory the names are
* static strings and all have proc handlers.
*/
for (entry = *tablep; entry->mode; entry++) {
if (entry->child)
sd_free_ctl_entry(&entry->child);
if (entry->proc_handler == NULL)
kfree(entry->procname);
}
kfree(*tablep);
*tablep = NULL;
}
static void
set_table_entry(struct ctl_table *entry,
const char *procname, void *data, int maxlen,
mode_t mode, proc_handler *proc_handler)
{
entry->procname = procname;
entry->data = data;
entry->maxlen = maxlen;
entry->mode = mode;
entry->proc_handler = proc_handler;
}
static struct ctl_table *
sd_alloc_ctl_domain_table(struct sched_domain *sd)
{
struct ctl_table *table = sd_alloc_ctl_entry(13);
if (table == NULL)
return NULL;
set_table_entry(&table[0], "min_interval", &sd->min_interval,
sizeof(long), 0644, proc_doulongvec_minmax);
set_table_entry(&table[1], "max_interval", &sd->max_interval,
sizeof(long), 0644, proc_doulongvec_minmax);
set_table_entry(&table[2], "busy_idx", &sd->busy_idx,
sizeof(int), 0644, proc_dointvec_minmax);
set_table_entry(&table[3], "idle_idx", &sd->idle_idx,
sizeof(int), 0644, proc_dointvec_minmax);
set_table_entry(&table[4], "newidle_idx", &sd->newidle_idx,
sizeof(int), 0644, proc_dointvec_minmax);
set_table_entry(&table[5], "wake_idx", &sd->wake_idx,
sizeof(int), 0644, proc_dointvec_minmax);
set_table_entry(&table[6], "forkexec_idx", &sd->forkexec_idx,
sizeof(int), 0644, proc_dointvec_minmax);
set_table_entry(&table[7], "busy_factor", &sd->busy_factor,
sizeof(int), 0644, proc_dointvec_minmax);
set_table_entry(&table[8], "imbalance_pct", &sd->imbalance_pct,
sizeof(int), 0644, proc_dointvec_minmax);
set_table_entry(&table[9], "cache_nice_tries",
&sd->cache_nice_tries,
sizeof(int), 0644, proc_dointvec_minmax);
set_table_entry(&table[10], "flags", &sd->flags,
sizeof(int), 0644, proc_dointvec_minmax);
set_table_entry(&table[11], "name", sd->name,
CORENAME_MAX_SIZE, 0444, proc_dostring);
/* &table[12] is terminator */
return table;
}
static struct ctl_table *sd_alloc_ctl_cpu_table(int cpu)
{
struct ctl_table *entry, *table;
struct sched_domain *sd;
int domain_num = 0, i;
char buf[32];
for_each_domain(cpu, sd)
domain_num++;
entry = table = sd_alloc_ctl_entry(domain_num + 1);
if (table == NULL)
return NULL;
i = 0;
for_each_domain(cpu, sd) {
snprintf(buf, 32, "domain%d", i);
entry->procname = kstrdup(buf, GFP_KERNEL);
entry->mode = 0555;
entry->child = sd_alloc_ctl_domain_table(sd);
entry++;
i++;
}
return table;
}
static struct ctl_table_header *sd_sysctl_header;
static void register_sched_domain_sysctl(void)
{
int i, cpu_num = num_possible_cpus();
struct ctl_table *entry = sd_alloc_ctl_entry(cpu_num + 1);
char buf[32];
WARN_ON(sd_ctl_dir[0].child);
sd_ctl_dir[0].child = entry;
if (entry == NULL)
return;
for_each_possible_cpu(i) {
snprintf(buf, 32, "cpu%d", i);
entry->procname = kstrdup(buf, GFP_KERNEL);
entry->mode = 0555;
entry->child = sd_alloc_ctl_cpu_table(i);
entry++;
}
WARN_ON(sd_sysctl_header);
sd_sysctl_header = register_sysctl_table(sd_ctl_root);
}
/* may be called multiple times per register */
static void unregister_sched_domain_sysctl(void)
{
if (sd_sysctl_header)
unregister_sysctl_table(sd_sysctl_header);
sd_sysctl_header = NULL;
if (sd_ctl_dir[0].child)
sd_free_ctl_entry(&sd_ctl_dir[0].child);
}
#else
static void register_sched_domain_sysctl(void)
{
}
static void unregister_sched_domain_sysctl(void)
{
}
#endif
static void set_rq_online(struct rq *rq)
{
if (!rq->online) {
const struct sched_class *class;
cpumask_set_cpu(rq->cpu, rq->rd->online);
rq->online = 1;
for_each_class(class) {
if (class->rq_online)
class->rq_online(rq);
}
}
}
static void set_rq_offline(struct rq *rq)
{
if (rq->online) {
const struct sched_class *class;
for_each_class(class) {
if (class->rq_offline)
class->rq_offline(rq);
}
cpumask_clear_cpu(rq->cpu, rq->rd->online);
rq->online = 0;
}
}
/*
* migration_call - callback that gets triggered when a CPU is added.
* Here we can start up the necessary migration thread for the new CPU.
*/
static int __cpuinit
migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
{
int cpu = (long)hcpu;
unsigned long flags;
struct rq *rq = cpu_rq(cpu);
switch (action & ~CPU_TASKS_FROZEN) {
case CPU_UP_PREPARE:
rq->calc_load_update = calc_load_update;
break;
case CPU_ONLINE:
/* Update our root-domain */
raw_spin_lock_irqsave(&rq->lock, flags);
if (rq->rd) {
BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
set_rq_online(rq);
}
raw_spin_unlock_irqrestore(&rq->lock, flags);
break;
#ifdef CONFIG_HOTPLUG_CPU
case CPU_DYING:
sched_ttwu_pending();
/* Update our root-domain */
raw_spin_lock_irqsave(&rq->lock, flags);
if (rq->rd) {
BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
set_rq_offline(rq);
}
migrate_tasks(cpu);
BUG_ON(rq->nr_running != 1); /* the migration thread */
raw_spin_unlock_irqrestore(&rq->lock, flags);
migrate_nr_uninterruptible(rq);
calc_global_load_remove(rq);
break;
#endif
}
update_max_interval();
return NOTIFY_OK;
}
/*
* Register at high priority so that task migration (migrate_all_tasks)
* happens before everything else. This has to be lower priority than
* the notifier in the perf_event subsystem, though.
*/
static struct notifier_block __cpuinitdata migration_notifier = {
.notifier_call = migration_call,
.priority = CPU_PRI_MIGRATION,
};
static int __cpuinit sched_cpu_active(struct notifier_block *nfb,
unsigned long action, void *hcpu)
{
switch (action & ~CPU_TASKS_FROZEN) {
case CPU_ONLINE:
case CPU_DOWN_FAILED:
set_cpu_active((long)hcpu, true);
return NOTIFY_OK;
default:
return NOTIFY_DONE;
}
}
static int __cpuinit sched_cpu_inactive(struct notifier_block *nfb,
unsigned long action, void *hcpu)
{
switch (action & ~CPU_TASKS_FROZEN) {
case CPU_DOWN_PREPARE:
set_cpu_active((long)hcpu, false);
return NOTIFY_OK;
default:
return NOTIFY_DONE;
}
}
static int __init migration_init(void)
{
void *cpu = (void *)(long)smp_processor_id();
int err;
/* Initialize migration for the boot CPU */
err = migration_call(&migration_notifier, CPU_UP_PREPARE, cpu);
BUG_ON(err == NOTIFY_BAD);
migration_call(&migration_notifier, CPU_ONLINE, cpu);
register_cpu_notifier(&migration_notifier);
/* Register cpu active notifiers */
cpu_notifier(sched_cpu_active, CPU_PRI_SCHED_ACTIVE);
cpu_notifier(sched_cpu_inactive, CPU_PRI_SCHED_INACTIVE);
return 0;
}
early_initcall(migration_init);
#endif
#ifdef CONFIG_SMP
static cpumask_var_t sched_domains_tmpmask; /* sched_domains_mutex */
#ifdef CONFIG_SCHED_DEBUG
static __read_mostly int sched_domain_debug_enabled;
static int __init sched_domain_debug_setup(char *str)
{
sched_domain_debug_enabled = 1;
return 0;
}
early_param("sched_debug", sched_domain_debug_setup);
static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level,
struct cpumask *groupmask)
{
struct sched_group *group = sd->groups;
char str[256];
cpulist_scnprintf(str, sizeof(str), sched_domain_span(sd));
cpumask_clear(groupmask);
printk(KERN_DEBUG "%*s domain %d: ", level, "", level);
if (!(sd->flags & SD_LOAD_BALANCE)) {
printk("does not load-balance\n");
if (sd->parent)
printk(KERN_ERR "ERROR: !SD_LOAD_BALANCE domain"
" has parent");
return -1;
}
printk(KERN_CONT "span %s level %s\n", str, sd->name);
if (!cpumask_test_cpu(cpu, sched_domain_span(sd))) {
printk(KERN_ERR "ERROR: domain->span does not contain "
"CPU%d\n", cpu);
}
if (!cpumask_test_cpu(cpu, sched_group_cpus(group))) {
printk(KERN_ERR "ERROR: domain->groups does not contain"
" CPU%d\n", cpu);
}
printk(KERN_DEBUG "%*s groups:", level + 1, "");
do {
if (!group) {
printk("\n");
printk(KERN_ERR "ERROR: group is NULL\n");
break;
}
if (!group->sgp->power) {
printk(KERN_CONT "\n");
printk(KERN_ERR "ERROR: domain->cpu_power not "
"set\n");
break;
}
if (!cpumask_weight(sched_group_cpus(group))) {
printk(KERN_CONT "\n");
printk(KERN_ERR "ERROR: empty group\n");
break;
}
if (cpumask_intersects(groupmask, sched_group_cpus(group))) {
printk(KERN_CONT "\n");
printk(KERN_ERR "ERROR: repeated CPUs\n");
break;
}
cpumask_or(groupmask, groupmask, sched_group_cpus(group));
cpulist_scnprintf(str, sizeof(str), sched_group_cpus(group));
printk(KERN_CONT " %s", str);
if (group->sgp->power != SCHED_POWER_SCALE) {
printk(KERN_CONT " (cpu_power = %d)",
group->sgp->power);
}
group = group->next;
} while (group != sd->groups);
printk(KERN_CONT "\n");
if (!cpumask_equal(sched_domain_span(sd), groupmask))
printk(KERN_ERR "ERROR: groups don't span domain->span\n");
if (sd->parent &&
!cpumask_subset(groupmask, sched_domain_span(sd->parent)))
printk(KERN_ERR "ERROR: parent span is not a superset "
"of domain->span\n");
return 0;
}
static void sched_domain_debug(struct sched_domain *sd, int cpu)
{
int level = 0;
if (!sched_domain_debug_enabled)
return;
if (!sd) {
printk(KERN_DEBUG "CPU%d attaching NULL sched-domain.\n", cpu);
return;
}
printk(KERN_DEBUG "CPU%d attaching sched-domain:\n", cpu);
for (;;) {
if (sched_domain_debug_one(sd, cpu, level, sched_domains_tmpmask))
break;
level++;
sd = sd->parent;
if (!sd)
break;
}
}
#else /* !CONFIG_SCHED_DEBUG */
# define sched_domain_debug(sd, cpu) do { } while (0)
#endif /* CONFIG_SCHED_DEBUG */
static int sd_degenerate(struct sched_domain *sd)
{
if (cpumask_weight(sched_domain_span(sd)) == 1)
return 1;
/* Following flags need at least 2 groups */
if (sd->flags & (SD_LOAD_BALANCE |
SD_BALANCE_NEWIDLE |
SD_BALANCE_FORK |
SD_BALANCE_EXEC |
SD_SHARE_CPUPOWER |
SD_SHARE_PKG_RESOURCES)) {
if (sd->groups != sd->groups->next)
return 0;
}
/* Following flags don't use groups */
if (sd->flags & (SD_WAKE_AFFINE))
return 0;
return 1;
}
static int
sd_parent_degenerate(struct sched_domain *sd, struct sched_domain *parent)
{
unsigned long cflags = sd->flags, pflags = parent->flags;
if (sd_degenerate(parent))
return 1;
if (!cpumask_equal(sched_domain_span(sd), sched_domain_span(parent)))
return 0;
/* Flags needing groups don't count if only 1 group in parent */
if (parent->groups == parent->groups->next) {
pflags &= ~(SD_LOAD_BALANCE |
SD_BALANCE_NEWIDLE |
SD_BALANCE_FORK |
SD_BALANCE_EXEC |
SD_SHARE_CPUPOWER |
SD_SHARE_PKG_RESOURCES);
if (nr_node_ids == 1)
pflags &= ~SD_SERIALIZE;
}
if (~cflags & pflags)
return 0;
return 1;
}
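/*
* The '~cflags & pflags' test above keeps the parent whenever it carries
* at least one flag the child lacks. For example, with cflags = 0b011
* and pflags = 0b101, ~cflags & pflags = 0b100 != 0, so the parent still
* adds behaviour and is not reported as degenerate.
*/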
static void free_rootdomain(struct rcu_head *rcu)
{
struct root_domain *rd = container_of(rcu, struct root_domain, rcu);
cpupri_cleanup(&rd->cpupri);
free_cpumask_var(rd->rto_mask);
free_cpumask_var(rd->online);
free_cpumask_var(rd->span);
kfree(rd);
}
static void rq_attach_root(struct rq *rq, struct root_domain *rd)
{
struct root_domain *old_rd = NULL;
unsigned long flags;
raw_spin_lock_irqsave(&rq->lock, flags);
if (rq->rd) {
old_rd = rq->rd;
if (cpumask_test_cpu(rq->cpu, old_rd->online))
set_rq_offline(rq);
cpumask_clear_cpu(rq->cpu, old_rd->span);
/*
* If we don't want to free the old_rd yet then
* set old_rd to NULL to skip the freeing later
* in this function:
*/
if (!atomic_dec_and_test(&old_rd->refcount))
old_rd = NULL;
}
atomic_inc(&rd->refcount);
rq->rd = rd;
cpumask_set_cpu(rq->cpu, rd->span);
if (cpumask_test_cpu(rq->cpu, cpu_active_mask))
set_rq_online(rq);
raw_spin_unlock_irqrestore(&rq->lock, flags);
if (old_rd)
call_rcu_sched(&old_rd->rcu, free_rootdomain);
}
static int init_rootdomain(struct root_domain *rd)
{
memset(rd, 0, sizeof(*rd));
if (!alloc_cpumask_var(&rd->span, GFP_KERNEL))
goto out;
if (!alloc_cpumask_var(&rd->online, GFP_KERNEL))
goto free_span;
if (!alloc_cpumask_var(&rd->rto_mask, GFP_KERNEL))
goto free_online;
if (cpupri_init(&rd->cpupri) != 0)
goto free_rto_mask;
return 0;
free_rto_mask:
free_cpumask_var(rd->rto_mask);
free_online:
free_cpumask_var(rd->online);
free_span:
free_cpumask_var(rd->span);
out:
return -ENOMEM;
}
static void init_defrootdomain(void)
{
init_rootdomain(&def_root_domain);
atomic_set(&def_root_domain.refcount, 1);
}
static struct root_domain *alloc_rootdomain(void)
{
struct root_domain *rd;
rd = kmalloc(sizeof(*rd), GFP_KERNEL);
if (!rd)
return NULL;
if (init_rootdomain(rd) != 0) {
kfree(rd);
return NULL;
}
return rd;
}
static void free_sched_groups(struct sched_group *sg, int free_sgp)
{
struct sched_group *tmp, *first;
if (!sg)
return;
first = sg;
do {
tmp = sg->next;
if (free_sgp && atomic_dec_and_test(&sg->sgp->ref))
kfree(sg->sgp);
kfree(sg);
sg = tmp;
} while (sg != first);
}
static void free_sched_domain(struct rcu_head *rcu)
{
struct sched_domain *sd = container_of(rcu, struct sched_domain, rcu);
/*
* If it's an overlapping domain it has private groups; iterate and
* nuke them all.
*/
if (sd->flags & SD_OVERLAP) {
free_sched_groups(sd->groups, 1);
} else if (atomic_dec_and_test(&sd->groups->ref)) {
kfree(sd->groups->sgp);
kfree(sd->groups);
}
kfree(sd);
}
static void destroy_sched_domain(struct sched_domain *sd, int cpu)
{
call_rcu(&sd->rcu, free_sched_domain);
}
static void destroy_sched_domains(struct sched_domain *sd, int cpu)
{
for (; sd; sd = sd->parent)
destroy_sched_domain(sd, cpu);
}
/*
* Attach the domain 'sd' to 'cpu' as its base domain. Callers must
* hold the hotplug lock.
*/
static void
cpu_attach_domain(struct sched_domain *sd, struct root_domain *rd, int cpu)
{
struct rq *rq = cpu_rq(cpu);
struct sched_domain *tmp;
/* Remove the sched domains which do not contribute to scheduling. */
for (tmp = sd; tmp; ) {
struct sched_domain *parent = tmp->parent;
if (!parent)
break;
if (sd_parent_degenerate(tmp, parent)) {
tmp->parent = parent->parent;
if (parent->parent)
parent->parent->child = tmp;
destroy_sched_domain(parent, cpu);
} else
tmp = tmp->parent;
}
if (sd && sd_degenerate(sd)) {
tmp = sd;
sd = sd->parent;
destroy_sched_domain(tmp, cpu);
if (sd)
sd->child = NULL;
}
sched_domain_debug(sd, cpu);
rq_attach_root(rq, rd);
tmp = rq->sd;
rcu_assign_pointer(rq->sd, sd);
destroy_sched_domains(tmp, cpu);
}
/* cpus with isolated domains */
static cpumask_var_t cpu_isolated_map;
/* Setup the mask of cpus configured for isolated domains */
static int __init isolated_cpu_setup(char *str)
{
alloc_bootmem_cpumask_var(&cpu_isolated_map);
cpulist_parse(str, cpu_isolated_map);
return 1;
}
__setup("isolcpus=", isolated_cpu_setup);
#define SD_NODES_PER_DOMAIN 16
#ifdef CONFIG_NUMA
/**
* find_next_best_node - find the next node to include in a sched_domain
* @node: node whose sched_domain we're building
* @used_nodes: nodes already in the sched_domain
*
* Find the next node to include in a given scheduling domain. Simply
* finds the closest node not already in the @used_nodes map.
*
* Should use nodemask_t.
*/
static int find_next_best_node(int node, nodemask_t *used_nodes)
{
int i, n, val, min_val, best_node = -1;
min_val = INT_MAX;
for (i = 0; i < nr_node_ids; i++) {
/* Start at @node */
n = (node + i) % nr_node_ids;
if (!nr_cpus_node(n))
continue;
/* Skip already used nodes */
if (node_isset(n, *used_nodes))
continue;
/* Simple min distance search */
val = node_distance(node, n);
if (val < min_val) {
min_val = val;
best_node = n;
}
}
if (best_node != -1)
node_set(best_node, *used_nodes);
return best_node;
}
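/*
* Worked example, assuming hypothetical distances node_distance(0, n) of
* 20, 30 and 10 for nodes 1, 2 and 3: with used_nodes = {0}, the first
* call returns node 3 (distance 10) and marks it used; subsequent calls
* return node 1, then node 2.
*/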
/**
* sched_domain_node_span - get a cpumask for a node's sched_domain
* @node: node whose cpumask we're constructing
* @span: resulting cpumask
*
* Given a node, construct a good cpumask for its sched_domain to span. It
* should be one that prevents unnecessary balancing, but also spreads tasks
* out optimally.
*/
static void sched_domain_node_span(int node, struct cpumask *span)
{
nodemask_t used_nodes;
int i;
cpumask_clear(span);
nodes_clear(used_nodes);
cpumask_or(span, span, cpumask_of_node(node));
node_set(node, used_nodes);
for (i = 1; i < SD_NODES_PER_DOMAIN; i++) {
int next_node = find_next_best_node(node, &used_nodes);
if (next_node < 0)
break;
cpumask_or(span, span, cpumask_of_node(next_node));
}
}
static const struct cpumask *cpu_node_mask(int cpu)
{
lockdep_assert_held(&sched_domains_mutex);
sched_domain_node_span(cpu_to_node(cpu), sched_domains_tmpmask);
return sched_domains_tmpmask;
}
static const struct cpumask *cpu_allnodes_mask(int cpu)
{
return cpu_possible_mask;
}
#endif /* CONFIG_NUMA */
static const struct cpumask *cpu_cpu_mask(int cpu)
{
return cpumask_of_node(cpu_to_node(cpu));
}
int sched_smt_power_savings = 0, sched_mc_power_savings = 0;
struct sd_data {
struct sched_domain **__percpu sd;
struct sched_group **__percpu sg;
struct sched_group_power **__percpu sgp;
};
struct s_data {
struct sched_domain ** __percpu sd;
struct root_domain *rd;
};
enum s_alloc {
sa_rootdomain,
sa_sd,
sa_sd_storage,
sa_none,
};
struct sched_domain_topology_level;
typedef struct sched_domain *(*sched_domain_init_f)(struct sched_domain_topology_level *tl, int cpu);
typedef const struct cpumask *(*sched_domain_mask_f)(int cpu);
#define SDTL_OVERLAP 0x01
struct sched_domain_topology_level {
sched_domain_init_f init;
sched_domain_mask_f mask;
int flags;
struct sd_data data;
};
static int
build_overlap_sched_groups(struct sched_domain *sd, int cpu)
{
struct sched_group *first = NULL, *last = NULL, *groups = NULL, *sg;
const struct cpumask *span = sched_domain_span(sd);
struct cpumask *covered = sched_domains_tmpmask;
struct sd_data *sdd = sd->private;
struct sched_domain *child;
int i;
cpumask_clear(covered);
for_each_cpu(i, span) {
struct cpumask *sg_span;
if (cpumask_test_cpu(i, covered))
continue;
sg = kzalloc_node(sizeof(struct sched_group) + cpumask_size(),
GFP_KERNEL, cpu_to_node(i));
if (!sg)
goto fail;
sg_span = sched_group_cpus(sg);
child = *per_cpu_ptr(sdd->sd, i);
if (child->child) {
child = child->child;
cpumask_copy(sg_span, sched_domain_span(child));
} else
cpumask_set_cpu(i, sg_span);
cpumask_or(covered, covered, sg_span);
sg->sgp = *per_cpu_ptr(sdd->sgp, cpumask_first(sg_span));
atomic_inc(&sg->sgp->ref);
if (cpumask_test_cpu(cpu, sg_span))
groups = sg;
if (!first)
first = sg;
if (last)
last->next = sg;
last = sg;
last->next = first;
}
sd->groups = groups;
return 0;
fail:
free_sched_groups(first, 0);
return -ENOMEM;
}
static int get_group(int cpu, struct sd_data *sdd, struct sched_group **sg)
{
struct sched_domain *sd = *per_cpu_ptr(sdd->sd, cpu);
struct sched_domain *child = sd->child;
if (child)
cpu = cpumask_first(sched_domain_span(child));
if (sg) {
*sg = *per_cpu_ptr(sdd->sg, cpu);
(*sg)->sgp = *per_cpu_ptr(sdd->sgp, cpu);
atomic_set(&(*sg)->sgp->ref, 1); /* for claim_allocations */
}
return cpu;
}
/*
* build_sched_groups will build a circular linked list of the groups
* covered by the given span, and will set each group's ->cpumask correctly,
* and ->cpu_power to 0.
*
* Assumes the sched_domain tree is fully constructed
*/
static int
build_sched_groups(struct sched_domain *sd, int cpu)
{
struct sched_group *first = NULL, *last = NULL;
struct sd_data *sdd = sd->private;
const struct cpumask *span = sched_domain_span(sd);
struct cpumask *covered;
int i;
get_group(cpu, sdd, &sd->groups);
atomic_inc(&sd->groups->ref);
if (cpu != cpumask_first(sched_domain_span(sd)))
return 0;
lockdep_assert_held(&sched_domains_mutex);
covered = sched_domains_tmpmask;
cpumask_clear(covered);
for_each_cpu(i, span) {
struct sched_group *sg;
int group = get_group(i, sdd, &sg);
int j;
if (cpumask_test_cpu(i, covered))
continue;
cpumask_clear(sched_group_cpus(sg));
sg->sgp->power = 0;
for_each_cpu(j, span) {
if (get_group(j, sdd, NULL) != group)
continue;
cpumask_set_cpu(j, covered);
cpumask_set_cpu(j, sched_group_cpus(sg));
}
if (!first)
first = sg;
if (last)
last->next = sg;
last = sg;
}
last->next = first;
return 0;
}
/*
* Initialize sched groups cpu_power.
*
* cpu_power indicates the capacity of a sched group, which is used while
* distributing the load between different sched groups in a sched domain.
* Typically cpu_power will be the same for all groups in a sched domain
* unless there are asymmetries in the topology. If there are asymmetries,
* a group with more cpu_power will pick up more load than a group with
* less cpu_power.
*/
static void init_sched_groups_power(int cpu, struct sched_domain *sd)
{
struct sched_group *sg = sd->groups;
WARN_ON(!sd || !sg);
do {
sg->group_weight = cpumask_weight(sched_group_cpus(sg));
sg = sg->next;
} while (sg != sd->groups);
if (cpu != group_first_cpu(sg))
return;
update_group_power(sd, cpu);
}
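/*
* Worked example of the asymmetry noted above: if group A has cpu_power
* 2048 and group B has 1024 (SCHED_POWER_SCALE), the load balancer aims
* to place roughly two thirds of the domain's load on A and one third
* on B.
*/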
/*
* Initializers for scheduler domains
* Non-inlined to reduce accumulated stack pressure in build_sched_domains()
*/
#ifdef CONFIG_SCHED_DEBUG
# define SD_INIT_NAME(sd, type) sd->name = #type
#else
# define SD_INIT_NAME(sd, type) do { } while (0)
#endif
#define SD_INIT_FUNC(type) \
static noinline struct sched_domain * \
sd_init_##type(struct sched_domain_topology_level *tl, int cpu) \
{ \
struct sched_domain *sd = *per_cpu_ptr(tl->data.sd, cpu); \
*sd = SD_##type##_INIT; \
SD_INIT_NAME(sd, type); \
sd->private = &tl->data; \
return sd; \
}
SD_INIT_FUNC(CPU)
#ifdef CONFIG_NUMA
SD_INIT_FUNC(ALLNODES)
SD_INIT_FUNC(NODE)
#endif
#ifdef CONFIG_SCHED_SMT
SD_INIT_FUNC(SIBLING)
#endif
#ifdef CONFIG_SCHED_MC
SD_INIT_FUNC(MC)
#endif
#ifdef CONFIG_SCHED_BOOK
SD_INIT_FUNC(BOOK)
#endif
static int default_relax_domain_level = -1;
int sched_domain_level_max;
static int __init setup_relax_domain_level(char *str)
{
unsigned long val;
val = simple_strtoul(str, NULL, 0);
if (val < sched_domain_level_max)
default_relax_domain_level = val;
return 1;
}
__setup("relax_domain_level=", setup_relax_domain_level);
static void set_domain_attribute(struct sched_domain *sd,
struct sched_domain_attr *attr)
{
int request;
if (!attr || attr->relax_domain_level < 0) {
if (default_relax_domain_level < 0)
return;
else
request = default_relax_domain_level;
} else
request = attr->relax_domain_level;
if (request < sd->level) {
/* turn off idle balance on this domain */
sd->flags &= ~(SD_BALANCE_WAKE|SD_BALANCE_NEWIDLE);
} else {
/* turn on idle balance on this domain */
sd->flags |= (SD_BALANCE_WAKE|SD_BALANCE_NEWIDLE);
}
}
static void __sdt_free(const struct cpumask *cpu_map);
static int __sdt_alloc(const struct cpumask *cpu_map);
static void __free_domain_allocs(struct s_data *d, enum s_alloc what,
const struct cpumask *cpu_map)
{
switch (what) {
case sa_rootdomain:
if (!atomic_read(&d->rd->refcount))
free_rootdomain(&d->rd->rcu); /* fall through */
case sa_sd:
free_percpu(d->sd); /* fall through */
case sa_sd_storage:
__sdt_free(cpu_map); /* fall through */
case sa_none:
break;
}
}
static enum s_alloc __visit_domain_allocation_hell(struct s_data *d,
const struct cpumask *cpu_map)
{
memset(d, 0, sizeof(*d));
if (__sdt_alloc(cpu_map))
return sa_sd_storage;
d->sd = alloc_percpu(struct sched_domain *);
if (!d->sd)
return sa_sd_storage;
d->rd = alloc_rootdomain();
if (!d->rd)
return sa_sd;
return sa_rootdomain;
}
/*
* NULL the sd_data elements we've used to build the sched_domain and
* sched_group structure so that the subsequent __free_domain_allocs()
* will not free the data we're using.
*/
static void claim_allocations(int cpu, struct sched_domain *sd)
{
struct sd_data *sdd = sd->private;
WARN_ON_ONCE(*per_cpu_ptr(sdd->sd, cpu) != sd);
*per_cpu_ptr(sdd->sd, cpu) = NULL;
if (atomic_read(&(*per_cpu_ptr(sdd->sg, cpu))->ref))
*per_cpu_ptr(sdd->sg, cpu) = NULL;
if (atomic_read(&(*per_cpu_ptr(sdd->sgp, cpu))->ref))
*per_cpu_ptr(sdd->sgp, cpu) = NULL;
}
#ifdef CONFIG_SCHED_SMT
static const struct cpumask *cpu_smt_mask(int cpu)
{
return topology_thread_cpumask(cpu);
}
#endif
/*
* Topology list, bottom-up.
*/
static struct sched_domain_topology_level default_topology[] = {
#ifdef CONFIG_SCHED_SMT
{ sd_init_SIBLING, cpu_smt_mask, },
#endif
#ifdef CONFIG_SCHED_MC
{ sd_init_MC, cpu_coregroup_mask, },
#endif
#ifdef CONFIG_SCHED_BOOK
{ sd_init_BOOK, cpu_book_mask, },
#endif
{ sd_init_CPU, cpu_cpu_mask, },
#ifdef CONFIG_NUMA
{ sd_init_NODE, cpu_node_mask, SDTL_OVERLAP, },
{ sd_init_ALLNODES, cpu_allnodes_mask, },
#endif
{ NULL, },
};
static struct sched_domain_topology_level *sched_domain_topology = default_topology;
static int __sdt_alloc(const struct cpumask *cpu_map)
{
struct sched_domain_topology_level *tl;
int j;
for (tl = sched_domain_topology; tl->init; tl++) {
struct sd_data *sdd = &tl->data;
sdd->sd = alloc_percpu(struct sched_domain *);
if (!sdd->sd)
return -ENOMEM;
sdd->sg = alloc_percpu(struct sched_group *);
if (!sdd->sg)
return -ENOMEM;
sdd->sgp = alloc_percpu(struct sched_group_power *);
if (!sdd->sgp)
return -ENOMEM;
for_each_cpu(j, cpu_map) {
struct sched_domain *sd;
struct sched_group *sg;
struct sched_group_power *sgp;
sd = kzalloc_node(sizeof(struct sched_domain) + cpumask_size(),
GFP_KERNEL, cpu_to_node(j));
if (!sd)
return -ENOMEM;
*per_cpu_ptr(sdd->sd, j) = sd;
sg = kzalloc_node(sizeof(struct sched_group) + cpumask_size(),
GFP_KERNEL, cpu_to_node(j));
if (!sg)
return -ENOMEM;
*per_cpu_ptr(sdd->sg, j) = sg;
sgp = kzalloc_node(sizeof(struct sched_group_power),
GFP_KERNEL, cpu_to_node(j));
if (!sgp)
return -ENOMEM;
*per_cpu_ptr(sdd->sgp, j) = sgp;
}
}
return 0;
}
static void __sdt_free(const struct cpumask *cpu_map)
{
struct sched_domain_topology_level *tl;
int j;
for (tl = sched_domain_topology; tl->init; tl++) {
struct sd_data *sdd = &tl->data;
for_each_cpu(j, cpu_map) {
struct sched_domain *sd = *per_cpu_ptr(sdd->sd, j);
if (sd && (sd->flags & SD_OVERLAP))
free_sched_groups(sd->groups, 0);
kfree(*per_cpu_ptr(sdd->sd, j));
kfree(*per_cpu_ptr(sdd->sg, j));
kfree(*per_cpu_ptr(sdd->sgp, j));
}
free_percpu(sdd->sd);
free_percpu(sdd->sg);
free_percpu(sdd->sgp);
}
}
struct sched_domain *build_sched_domain(struct sched_domain_topology_level *tl,
struct s_data *d, const struct cpumask *cpu_map,
struct sched_domain_attr *attr, struct sched_domain *child,
int cpu)
{
struct sched_domain *sd = tl->init(tl, cpu);
if (!sd)
return child;
set_domain_attribute(sd, attr);
cpumask_and(sched_domain_span(sd), cpu_map, tl->mask(cpu));
if (child) {
sd->level = child->level + 1;
sched_domain_level_max = max(sched_domain_level_max, sd->level);
child->parent = sd;
}
sd->child = child;
return sd;
}
/*
* Build sched domains for a given set of cpus and attach the sched domains
* to the individual cpus
*/
static int build_sched_domains(const struct cpumask *cpu_map,
struct sched_domain_attr *attr)
{
enum s_alloc alloc_state = sa_none;
struct sched_domain *sd;
struct s_data d;
int i, ret = -ENOMEM;
alloc_state = __visit_domain_allocation_hell(&d, cpu_map);
if (alloc_state != sa_rootdomain)
goto error;
/* Set up domains for cpus specified by the cpu_map. */
for_each_cpu(i, cpu_map) {
struct sched_domain_topology_level *tl;
sd = NULL;
for (tl = sched_domain_topology; tl->init; tl++) {
sd = build_sched_domain(tl, &d, cpu_map, attr, sd, i);
if (tl->flags & SDTL_OVERLAP || sched_feat(FORCE_SD_OVERLAP))
sd->flags |= SD_OVERLAP;
if (cpumask_equal(cpu_map, sched_domain_span(sd)))
break;
}
while (sd->child)
sd = sd->child;
*per_cpu_ptr(d.sd, i) = sd;
}
/* Build the groups for the domains */
for_each_cpu(i, cpu_map) {
for (sd = *per_cpu_ptr(d.sd, i); sd; sd = sd->parent) {
sd->span_weight = cpumask_weight(sched_domain_span(sd));
if (sd->flags & SD_OVERLAP) {
if (build_overlap_sched_groups(sd, i))
goto error;
} else {
if (build_sched_groups(sd, i))
goto error;
}
}
}
/* Calculate CPU power for physical packages and nodes */
for (i = nr_cpumask_bits-1; i >= 0; i--) {
if (!cpumask_test_cpu(i, cpu_map))
continue;
for (sd = *per_cpu_ptr(d.sd, i); sd; sd = sd->parent) {
claim_allocations(i, sd);
init_sched_groups_power(i, sd);
}
}
/* Attach the domains */
rcu_read_lock();
for_each_cpu(i, cpu_map) {
sd = *per_cpu_ptr(d.sd, i);
cpu_attach_domain(sd, d.rd, i);
}
rcu_read_unlock();
ret = 0;
error:
__free_domain_allocs(&d, alloc_state, cpu_map);
return ret;
}
static cpumask_var_t *doms_cur; /* current sched domains */
static int ndoms_cur; /* number of sched domains in 'doms_cur' */
static struct sched_domain_attr *dattr_cur;
/* attributes of custom domains in 'doms_cur' */
/*
* Special case: If a kmalloc of a doms_cur partition (array of
* cpumask) fails, then fallback to a single sched domain,
* as determined by the single cpumask fallback_doms.
*/
static cpumask_var_t fallback_doms;
/*
* arch_update_cpu_topology lets virtualized architectures update the
* cpu core maps. It is supposed to return 1 if the topology changed
* or 0 if it stayed the same.
*/
int __attribute__((weak)) arch_update_cpu_topology(void)
{
return 0;
}
cpumask_var_t *alloc_sched_domains(unsigned int ndoms)
{
int i;
cpumask_var_t *doms;
doms = kmalloc(sizeof(*doms) * ndoms, GFP_KERNEL);
if (!doms)
return NULL;
for (i = 0; i < ndoms; i++) {
if (!alloc_cpumask_var(&doms[i], GFP_KERNEL)) {
free_sched_domains(doms, i);
return NULL;
}
}
return doms;
}
void free_sched_domains(cpumask_var_t doms[], unsigned int ndoms)
{
unsigned int i;
for (i = 0; i < ndoms; i++)
free_cpumask_var(doms[i]);
kfree(doms);
}
/*
* Set up scheduler domains and groups. Callers must hold the hotplug lock.
* For now this just excludes isolated cpus, but could be used to
* exclude other special cases in the future.
*/
static int init_sched_domains(const struct cpumask *cpu_map)
{
int err;
arch_update_cpu_topology();
ndoms_cur = 1;
doms_cur = alloc_sched_domains(ndoms_cur);
if (!doms_cur)
doms_cur = &fallback_doms;
cpumask_andnot(doms_cur[0], cpu_map, cpu_isolated_map);
dattr_cur = NULL;
err = build_sched_domains(doms_cur[0], NULL);
register_sched_domain_sysctl();
return err;
}
/*
* Detach sched domains from a group of cpus specified in cpu_map
* These cpus will now be attached to the NULL domain
*/
static void detach_destroy_domains(const struct cpumask *cpu_map)
{
int i;
rcu_read_lock();
for_each_cpu(i, cpu_map)
cpu_attach_domain(NULL, &def_root_domain, i);
rcu_read_unlock();
}
/* handle null as "default" */
static int dattrs_equal(struct sched_domain_attr *cur, int idx_cur,
struct sched_domain_attr *new, int idx_new)
{
struct sched_domain_attr tmp;
/* fast path */
if (!new && !cur)
return 1;
tmp = SD_ATTR_INIT;
return !memcmp(cur ? (cur + idx_cur) : &tmp,
new ? (new + idx_new) : &tmp,
sizeof(struct sched_domain_attr));
}
/*
* Partition sched domains as specified by the 'ndoms_new'
* cpumasks in the array doms_new[] of cpumasks. This compares
* doms_new[] to the current sched domain partitioning, doms_cur[].
* It destroys each deleted domain and builds each new domain.
*
* 'doms_new' is an array of cpumask_var_t's of length 'ndoms_new'.
* The masks don't intersect (don't overlap). We set up one
* sched domain for each mask. CPUs not in any of the cpumasks will
* not be load balanced. If the same cpumask appears both in the
* current 'doms_cur' domains and in the new 'doms_new', we can leave
* it as it is.
*
* The passed-in 'doms_new' should be allocated using
* alloc_sched_domains. This routine takes ownership of it and will
* free_sched_domains it when done with it. If the caller failed the
* alloc call, then it can pass in doms_new == NULL && ndoms_new == 1,
* and partition_sched_domains() will fall back to the single partition
* 'fallback_doms'; this also forces the domains to be rebuilt.
*
* If doms_new == NULL it will be replaced with cpu_online_mask.
* ndoms_new == 0 is a special case for destroying existing domains,
* and it will not create the default domain.
*
* Call with hotplug lock held
*/
void partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
struct sched_domain_attr *dattr_new)
{
int i, j, n;
int new_topology;
mutex_lock(&sched_domains_mutex);
/* always unregister in case we don't destroy any domains */
unregister_sched_domain_sysctl();
/* Let architecture update cpu core mappings. */
new_topology = arch_update_cpu_topology();
n = doms_new ? ndoms_new : 0;
/* Destroy deleted domains */
for (i = 0; i < ndoms_cur; i++) {
for (j = 0; j < n && !new_topology; j++) {
if (cpumask_equal(doms_cur[i], doms_new[j])
&& dattrs_equal(dattr_cur, i, dattr_new, j))
goto match1;
}
/* no match - a current sched domain not in new doms_new[] */
detach_destroy_domains(doms_cur[i]);
match1:
;
}
if (doms_new == NULL) {
ndoms_cur = 0;
doms_new = &fallback_doms;
cpumask_andnot(doms_new[0], cpu_active_mask, cpu_isolated_map);
WARN_ON_ONCE(dattr_new);
}
/* Build new domains */
for (i = 0; i < ndoms_new; i++) {
for (j = 0; j < ndoms_cur && !new_topology; j++) {
if (cpumask_equal(doms_new[i], doms_cur[j])
&& dattrs_equal(dattr_new, i, dattr_cur, j))
goto match2;
}
/* no match - add a new doms_new */
build_sched_domains(doms_new[i], dattr_new ? dattr_new + i : NULL);
match2:
;
}
/* Remember the new sched domains */
if (doms_cur != &fallback_doms)
free_sched_domains(doms_cur, ndoms_cur);
kfree(dattr_cur); /* kfree(NULL) is safe */
doms_cur = doms_new;
dattr_cur = dattr_new;
ndoms_cur = ndoms_new;
register_sched_domain_sysctl();
mutex_unlock(&sched_domains_mutex);
}
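/*
* Illustrative caller sketch (hypothetical, not taken from this file):
* build two disjoint partitions covering CPUs 0-1 and 2-3, assuming both
* ranges are subsets of cpu_active_mask:
*
*	cpumask_var_t *doms = alloc_sched_domains(2);
*	if (doms) {
*		cpulist_parse("0-1", doms[0]);
*		cpulist_parse("2-3", doms[1]);
*		get_online_cpus();
*		partition_sched_domains(2, doms, NULL);
*		put_online_cpus();
*	}
*
* partition_sched_domains() takes ownership of 'doms' and will
* free_sched_domains() it on the next repartition.
*/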
#if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT)
static void reinit_sched_domains(void)
{
get_online_cpus();
/* Destroy domains first to force the rebuild */
partition_sched_domains(0, NULL, NULL);
rebuild_sched_domains();
put_online_cpus();
}
static ssize_t sched_power_savings_store(const char *buf, size_t count, int smt)
{
unsigned int level = 0;
if (sscanf(buf, "%u", &level) != 1)
return -EINVAL;
/*
* level is unsigned, so there is no need to check for
* level < POWERSAVINGS_BALANCE_NONE, which is 0.
* It is unclear what happens on a 0- or 1-byte write;
* do we need to check count as well?
*/
if (level >= MAX_POWERSAVINGS_BALANCE_LEVELS)
return -EINVAL;
if (smt)
sched_smt_power_savings = level;
else
sched_mc_power_savings = level;
reinit_sched_domains();
return count;
}
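/*
* The store hooks below expose this via sysdev class attributes; e.g.
* writing "1" to /sys/devices/system/cpu/sched_mc_power_savings sets
* sched_mc_power_savings to 1 and rebuilds the sched domains.
*/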
#ifdef CONFIG_SCHED_MC
static ssize_t sched_mc_power_savings_show(struct sysdev_class *class,
struct sysdev_class_attribute *attr,
char *page)
{
return sprintf(page, "%u\n", sched_mc_power_savings);
}
static ssize_t sched_mc_power_savings_store(struct sysdev_class *class,
struct sysdev_class_attribute *attr,
const char *buf, size_t count)
{
return sched_power_savings_store(buf, count, 0);
}
static SYSDEV_CLASS_ATTR(sched_mc_power_savings, 0644,
sched_mc_power_savings_show,
sched_mc_power_savings_store);
#endif
#ifdef CONFIG_SCHED_SMT
static ssize_t sched_smt_power_savings_show(struct sysdev_class *dev,
struct sysdev_class_attribute *attr,
char *page)
{
return sprintf(page, "%u\n", sched_smt_power_savings);
}
static ssize_t sched_smt_power_savings_store(struct sysdev_class *dev,
struct sysdev_class_attribute *attr,
const char *buf, size_t count)
{
return sched_power_savings_store(buf, count, 1);
}
static SYSDEV_CLASS_ATTR(sched_smt_power_savings, 0644,
sched_smt_power_savings_show,
sched_smt_power_savings_store);
#endif
int __init sched_create_sysfs_power_savings_entries(struct sysdev_class *cls)
{
int err = 0;
#ifdef CONFIG_SCHED_SMT
if (smt_capable())
err = sysfs_create_file(&cls->kset.kobj,
&attr_sched_smt_power_savings.attr);
#endif
#ifdef CONFIG_SCHED_MC
if (!err && mc_capable())
err = sysfs_create_file(&cls->kset.kobj,
&attr_sched_mc_power_savings.attr);
#endif
return err;
}
#endif /* CONFIG_SCHED_MC || CONFIG_SCHED_SMT */
/*
* Update cpusets according to cpu_active mask. If cpusets are
* disabled, cpuset_update_active_cpus() becomes a simple wrapper
* around partition_sched_domains().
*/
static int cpuset_cpu_active(struct notifier_block *nfb, unsigned long action,
void *hcpu)
{
switch (action & ~CPU_TASKS_FROZEN) {
case CPU_ONLINE:
case CPU_DOWN_FAILED:
cpuset_update_active_cpus();
return NOTIFY_OK;
default:
return NOTIFY_DONE;
}
}
static int cpuset_cpu_inactive(struct notifier_block *nfb, unsigned long action,
void *hcpu)
{
switch (action & ~CPU_TASKS_FROZEN) {
case CPU_DOWN_PREPARE:
cpuset_update_active_cpus();
return NOTIFY_OK;
default:
return NOTIFY_DONE;
}
}
static int update_runtime(struct notifier_block *nfb,
unsigned long action, void *hcpu)
{
int cpu = (int)(long)hcpu;
switch (action) {
case CPU_DOWN_PREPARE:
case CPU_DOWN_PREPARE_FROZEN:
disable_runtime(cpu_rq(cpu));
return NOTIFY_OK;
case CPU_DOWN_FAILED:
case CPU_DOWN_FAILED_FROZEN:
case CPU_ONLINE:
case CPU_ONLINE_FROZEN:
enable_runtime(cpu_rq(cpu));
return NOTIFY_OK;
default:
return NOTIFY_DONE;
}
}
void __init sched_init_smp(void)
{
cpumask_var_t non_isolated_cpus;
alloc_cpumask_var(&non_isolated_cpus, GFP_KERNEL);
alloc_cpumask_var(&fallback_doms, GFP_KERNEL);
get_online_cpus();
mutex_lock(&sched_domains_mutex);
init_sched_domains(cpu_active_mask);
cpumask_andnot(non_isolated_cpus, cpu_possible_mask, cpu_isolated_map);
if (cpumask_empty(non_isolated_cpus))
cpumask_set_cpu(smp_processor_id(), non_isolated_cpus);
mutex_unlock(&sched_domains_mutex);
put_online_cpus();
hotcpu_notifier(cpuset_cpu_active, CPU_PRI_CPUSET_ACTIVE);
hotcpu_notifier(cpuset_cpu_inactive, CPU_PRI_CPUSET_INACTIVE);
/* RT runtime code needs to handle some hotplug events */
hotcpu_notifier(update_runtime, 0);
init_hrtick();
/* Move init over to a non-isolated CPU */
if (set_cpus_allowed_ptr(current, non_isolated_cpus) < 0)
BUG();
sched_init_granularity();
free_cpumask_var(non_isolated_cpus);
init_sched_rt_class();
}
#else
void __init sched_init_smp(void)
{
sched_init_granularity();
}
#endif /* CONFIG_SMP */
const_debug unsigned int sysctl_timer_migration = 1;
int in_sched_functions(unsigned long addr)
{
return in_lock_functions(addr) ||
(addr >= (unsigned long)__sched_text_start
&& addr < (unsigned long)__sched_text_end);
}
static void init_cfs_rq(struct cfs_rq *cfs_rq, struct rq *rq)
{
cfs_rq->tasks_timeline = RB_ROOT;
INIT_LIST_HEAD(&cfs_rq->tasks);
#ifdef CONFIG_FAIR_GROUP_SCHED
cfs_rq->rq = rq;
/* allow initial update_cfs_load() to truncate */
#ifdef CONFIG_SMP
cfs_rq->load_stamp = 1;
#endif
#endif
cfs_rq->min_vruntime = (u64)(-(1LL << 20));
#ifndef CONFIG_64BIT
cfs_rq->min_vruntime_copy = cfs_rq->min_vruntime;
#endif
}
static void init_rt_rq(struct rt_rq *rt_rq, struct rq *rq)
{
struct rt_prio_array *array;
int i;
array = &rt_rq->active;
for (i = 0; i < MAX_RT_PRIO; i++) {
INIT_LIST_HEAD(array->queue + i);
__clear_bit(i, array->bitmap);
}
/* delimiter for bitsearch: */
__set_bit(MAX_RT_PRIO, array->bitmap);
#if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
rt_rq->highest_prio.curr = MAX_RT_PRIO;
#ifdef CONFIG_SMP
rt_rq->highest_prio.next = MAX_RT_PRIO;
#endif
#endif
#ifdef CONFIG_SMP
rt_rq->rt_nr_migratory = 0;
rt_rq->overloaded = 0;
plist_head_init(&rt_rq->pushable_tasks);
#endif
rt_rq->rt_time = 0;
rt_rq->rt_throttled = 0;
rt_rq->rt_runtime = 0;
raw_spin_lock_init(&rt_rq->rt_runtime_lock);
#ifdef CONFIG_RT_GROUP_SCHED
rt_rq->rt_nr_boosted = 0;
rt_rq->rq = rq;
#endif
}
#ifdef CONFIG_FAIR_GROUP_SCHED
static void init_tg_cfs_entry(struct task_group *tg, struct cfs_rq *cfs_rq,
struct sched_entity *se, int cpu,
struct sched_entity *parent)
{
struct rq *rq = cpu_rq(cpu);
tg->cfs_rq[cpu] = cfs_rq;
init_cfs_rq(cfs_rq, rq);
cfs_rq->tg = tg;
tg->se[cpu] = se;
/* se could be NULL for root_task_group */
if (!se)
return;
if (!parent)
se->cfs_rq = &rq->cfs;
else
se->cfs_rq = parent->my_q;
se->my_q = cfs_rq;
update_load_set(&se->load, 0);
se->parent = parent;
}
#endif
#ifdef CONFIG_RT_GROUP_SCHED
static void init_tg_rt_entry(struct task_group *tg, struct rt_rq *rt_rq,
struct sched_rt_entity *rt_se, int cpu,
struct sched_rt_entity *parent)
{
struct rq *rq = cpu_rq(cpu);
tg->rt_rq[cpu] = rt_rq;
init_rt_rq(rt_rq, rq);
rt_rq->tg = tg;
rt_rq->rt_runtime = tg->rt_bandwidth.rt_runtime;
tg->rt_se[cpu] = rt_se;
if (!rt_se)
return;
if (!parent)
rt_se->rt_rq = &rq->rt;
else
rt_se->rt_rq = parent->my_q;
rt_se->my_q = rt_rq;
rt_se->parent = parent;
INIT_LIST_HEAD(&rt_se->run_list);
}
#endif
void __init sched_init(void)
{
int i, j;
unsigned long alloc_size = 0, ptr;
#ifdef CONFIG_FAIR_GROUP_SCHED
alloc_size += 2 * nr_cpu_ids * sizeof(void **);
#endif
#ifdef CONFIG_RT_GROUP_SCHED
alloc_size += 2 * nr_cpu_ids * sizeof(void **);
#endif
#ifdef CONFIG_CPUMASK_OFFSTACK
alloc_size += num_possible_cpus() * cpumask_size();
#endif
if (alloc_size) {
ptr = (unsigned long)kzalloc(alloc_size, GFP_NOWAIT);
#ifdef CONFIG_FAIR_GROUP_SCHED
root_task_group.se = (struct sched_entity **)ptr;
ptr += nr_cpu_ids * sizeof(void **);
root_task_group.cfs_rq = (struct cfs_rq **)ptr;
ptr += nr_cpu_ids * sizeof(void **);
#endif /* CONFIG_FAIR_GROUP_SCHED */
#ifdef CONFIG_RT_GROUP_SCHED
root_task_group.rt_se = (struct sched_rt_entity **)ptr;
ptr += nr_cpu_ids * sizeof(void **);
root_task_group.rt_rq = (struct rt_rq **)ptr;
ptr += nr_cpu_ids * sizeof(void **);
#endif /* CONFIG_RT_GROUP_SCHED */
#ifdef CONFIG_CPUMASK_OFFSTACK
for_each_possible_cpu(i) {
per_cpu(load_balance_tmpmask, i) = (void *)ptr;
ptr += cpumask_size();
}
#endif /* CONFIG_CPUMASK_OFFSTACK */
}
#ifdef CONFIG_SMP
init_defrootdomain();
#endif
init_rt_bandwidth(&def_rt_bandwidth,
global_rt_period(), global_rt_runtime());
#ifdef CONFIG_RT_GROUP_SCHED
init_rt_bandwidth(&root_task_group.rt_bandwidth,
global_rt_period(), global_rt_runtime());
#endif /* CONFIG_RT_GROUP_SCHED */
#ifdef CONFIG_CGROUP_SCHED
list_add(&root_task_group.list, &task_groups);
INIT_LIST_HEAD(&root_task_group.children);
autogroup_init(&init_task);
#endif /* CONFIG_CGROUP_SCHED */
for_each_possible_cpu(i) {
struct rq *rq;
rq = cpu_rq(i);
raw_spin_lock_init(&rq->lock);
rq->nr_running = 0;
rq->calc_load_active = 0;
rq->calc_load_update = jiffies + LOAD_FREQ;
init_cfs_rq(&rq->cfs, rq);
init_rt_rq(&rq->rt, rq);
#ifdef CONFIG_FAIR_GROUP_SCHED
root_task_group.shares = root_task_group_load;
INIT_LIST_HEAD(&rq->leaf_cfs_rq_list);
/*
* How much cpu bandwidth does root_task_group get?
*
* In the case of task-groups formed through the cgroup filesystem, it
* gets 100% of the cpu resources in the system. This overall
* system cpu resource is divided among the tasks of
* root_task_group and its child task-groups in a fair manner,
* based on each entity's (task or task-group's) weight
* (se->load.weight).
*
* In other words, if root_task_group has 10 tasks (each of weight
* 1024) and two child groups A0 and A1 (of weight 1024 each),
* then A0's share of the cpu resource is:
*
* A0's bandwidth = 1024 / (10*1024 + 1024 + 1024) = 8.33%
*
* We achieve this by letting root_task_group's tasks sit
* directly in rq->cfs (i.e root_task_group->se[] = NULL).
*/
init_tg_cfs_entry(&root_task_group, &rq->cfs, NULL, i, NULL);
#endif /* CONFIG_FAIR_GROUP_SCHED */
rq->rt.rt_runtime = def_rt_bandwidth.rt_runtime;
#ifdef CONFIG_RT_GROUP_SCHED
INIT_LIST_HEAD(&rq->leaf_rt_rq_list);
init_tg_rt_entry(&root_task_group, &rq->rt, NULL, i, NULL);
#endif
for (j = 0; j < CPU_LOAD_IDX_MAX; j++)
rq->cpu_load[j] = 0;
rq->last_load_update_tick = jiffies;
#ifdef CONFIG_SMP
rq->sd = NULL;
rq->rd = NULL;
rq->cpu_power = SCHED_POWER_SCALE;
rq->post_schedule = 0;
rq->active_balance = 0;
rq->next_balance = jiffies;
rq->push_cpu = 0;
rq->cpu = i;
rq->online = 0;
rq->idle_stamp = 0;
rq->avg_idle = 2*sysctl_sched_migration_cost;
rq_attach_root(rq, &def_root_domain);
#ifdef CONFIG_NO_HZ
rq->nohz_balance_kick = 0;
init_sched_softirq_csd(&per_cpu(remote_sched_softirq_cb, i));
#endif
#endif
init_rq_hrtick(rq);
atomic_set(&rq->nr_iowait, 0);
}
set_load_weight(&init_task);
#ifdef CONFIG_PREEMPT_NOTIFIERS
INIT_HLIST_HEAD(&init_task.preempt_notifiers);
#endif
#ifdef CONFIG_SMP
open_softirq(SCHED_SOFTIRQ, run_rebalance_domains);
#endif
#ifdef CONFIG_RT_MUTEXES
plist_head_init(&init_task.pi_waiters);
#endif
/*
* The boot idle thread does lazy MMU switching as well:
*/
atomic_inc(&init_mm.mm_count);
enter_lazy_tlb(&init_mm, current);
/*
* Make us the idle thread. Technically, schedule() should not be
* called from this thread; however, somewhere below it might be.
* Because we are the idle thread, we just pick up running again
* when this runqueue becomes "idle".
*/
init_idle(current, smp_processor_id());
calc_load_update = jiffies + LOAD_FREQ;
/*
* During early bootup we pretend to be a normal task:
*/
current->sched_class = &fair_sched_class;
/* Allocate the nohz_cpu_mask if CONFIG_CPUMASK_OFFSTACK */
zalloc_cpumask_var(&nohz_cpu_mask, GFP_NOWAIT);
#ifdef CONFIG_SMP
zalloc_cpumask_var(&sched_domains_tmpmask, GFP_NOWAIT);
#ifdef CONFIG_NO_HZ
zalloc_cpumask_var(&nohz.idle_cpus_mask, GFP_NOWAIT);
alloc_cpumask_var(&nohz.grp_idle_mask, GFP_NOWAIT);
atomic_set(&nohz.load_balancer, nr_cpu_ids);
atomic_set(&nohz.first_pick_cpu, nr_cpu_ids);
atomic_set(&nohz.second_pick_cpu, nr_cpu_ids);
#endif
/* May be allocated at isolcpus cmdline parse time */
if (cpu_isolated_map == NULL)
zalloc_cpumask_var(&cpu_isolated_map, GFP_NOWAIT);
#endif /* SMP */
scheduler_running = 1;
}
#ifdef CONFIG_DEBUG_SPINLOCK_SLEEP
static inline int preempt_count_equals(int preempt_offset)
{
int nested = (preempt_count() & ~PREEMPT_ACTIVE) + rcu_preempt_depth();
return (nested == preempt_offset);
}
static int __might_sleep_init_called;
int __init __might_sleep_init(void)
{
__might_sleep_init_called = 1;
return 0;
}
early_initcall(__might_sleep_init);
void __might_sleep(const char *file, int line, int preempt_offset)
{
#ifdef in_atomic
static unsigned long prev_jiffy; /* ratelimiting */
if ((preempt_count_equals(preempt_offset) && !irqs_disabled()) ||
oops_in_progress)
return;
if (system_state != SYSTEM_RUNNING &&
(!__might_sleep_init_called || system_state != SYSTEM_BOOTING))
return;
if (time_before(jiffies, prev_jiffy + HZ) && prev_jiffy)
return;
prev_jiffy = jiffies;
printk(KERN_ERR
"BUG: sleeping function called from invalid context at %s:%d\n",
file, line);
printk(KERN_ERR
"in_atomic(): %d, irqs_disabled(): %d, pid: %d, name: %s\n",
in_atomic(), irqs_disabled(),
current->pid, current->comm);
debug_show_held_locks(current);
if (irqs_disabled())
print_irqtrace_events(current);
dump_stack();
#endif
}
EXPORT_SYMBOL(__might_sleep);
#endif
#ifdef CONFIG_MAGIC_SYSRQ
static void normalize_task(struct rq *rq, struct task_struct *p)
{
const struct sched_class *prev_class = p->sched_class;
int old_prio = p->prio;
int on_rq;
on_rq = p->on_rq;
if (on_rq)
deactivate_task(rq, p, 0);
__setscheduler(rq, p, SCHED_NORMAL, 0);
if (on_rq) {
activate_task(rq, p, 0);
resched_task(rq->curr);
}
check_class_changed(rq, p, prev_class, old_prio);
}
void normalize_rt_tasks(void)
{
struct task_struct *g, *p;
unsigned long flags;
struct rq *rq;
read_lock_irqsave(&tasklist_lock, flags);
do_each_thread(g, p) {
/*
* Only normalize user tasks:
*/
if (!p->mm)
continue;
p->se.exec_start = 0;
#ifdef CONFIG_SCHEDSTATS
p->se.statistics.wait_start = 0;
p->se.statistics.sleep_start = 0;
p->se.statistics.block_start = 0;
#endif
if (!rt_task(p)) {
/*
* Renice negative nice level userspace
* tasks back to 0:
*/
if (TASK_NICE(p) < 0 && p->mm)
set_user_nice(p, 0);
continue;
}
raw_spin_lock(&p->pi_lock);
rq = __task_rq_lock(p);
normalize_task(rq, p);
__task_rq_unlock(rq);
raw_spin_unlock(&p->pi_lock);
} while_each_thread(g, p);
read_unlock_irqrestore(&tasklist_lock, flags);
}
#endif /* CONFIG_MAGIC_SYSRQ */
#if defined(CONFIG_IA64) || defined(CONFIG_KGDB_KDB)
/*
* These functions are only useful for the IA64 MCA handling, or kdb.
*
* They can only be called when the whole system has been
* stopped - every CPU needs to be quiescent, and no scheduling
* activity can take place. Using them for anything else would
* be a serious bug, and as a result, they aren't even visible
* under any other configuration.
*/
/**
* curr_task - return the current task for a given cpu.
* @cpu: the processor in question.
*
* ONLY VALID WHEN THE WHOLE SYSTEM IS STOPPED!
*/
struct task_struct *curr_task(int cpu)
{
return cpu_curr(cpu);
}
#endif /* defined(CONFIG_IA64) || defined(CONFIG_KGDB_KDB) */
#ifdef CONFIG_IA64
/**
* set_curr_task - set the current task for a given cpu.
* @cpu: the processor in question.
* @p: the task pointer to set.
*
* Description: This function must only be used when non-maskable interrupts
* are serviced on a separate stack. It allows the architecture to switch the
* notion of the current task on a cpu in a non-blocking manner. This function
* must be called with all CPUs synchronized and interrupts disabled, and
* the caller must save the original value of the current task (see
* curr_task() above) and restore that value before reenabling interrupts and
* re-starting the system.
*
* ONLY VALID WHEN THE WHOLE SYSTEM IS STOPPED!
*/
void set_curr_task(int cpu, struct task_struct *p)
{
cpu_curr(cpu) = p;
}
#endif
#ifdef CONFIG_FAIR_GROUP_SCHED
static void free_fair_sched_group(struct task_group *tg)
{
int i;
for_each_possible_cpu(i) {
if (tg->cfs_rq)
kfree(tg->cfs_rq[i]);
if (tg->se)
kfree(tg->se[i]);
}
kfree(tg->cfs_rq);
kfree(tg->se);
}
static
int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent)
{
struct cfs_rq *cfs_rq;
struct sched_entity *se;
int i;
tg->cfs_rq = kzalloc(sizeof(cfs_rq) * nr_cpu_ids, GFP_KERNEL);
if (!tg->cfs_rq)
goto err;
tg->se = kzalloc(sizeof(se) * nr_cpu_ids, GFP_KERNEL);
if (!tg->se)
goto err;
tg->shares = NICE_0_LOAD;
for_each_possible_cpu(i) {
cfs_rq = kzalloc_node(sizeof(struct cfs_rq),
GFP_KERNEL, cpu_to_node(i));
if (!cfs_rq)
goto err;
se = kzalloc_node(sizeof(struct sched_entity),
GFP_KERNEL, cpu_to_node(i));
if (!se)
goto err_free_rq;
init_tg_cfs_entry(tg, cfs_rq, se, i, parent->se[i]);
}
return 1;
err_free_rq:
kfree(cfs_rq);
err:
return 0;
}
static inline void unregister_fair_sched_group(struct task_group *tg, int cpu)
{
struct rq *rq = cpu_rq(cpu);
unsigned long flags;
/*
* Only empty task groups can be destroyed, so we can speculatively
* check on_list without danger of it being re-added.
*/
if (!tg->cfs_rq[cpu]->on_list)
return;
raw_spin_lock_irqsave(&rq->lock, flags);
list_del_leaf_cfs_rq(tg->cfs_rq[cpu]);
raw_spin_unlock_irqrestore(&rq->lock, flags);
}
#else /* !CONFIG_FAIR_GROUP_SCHED */
static inline void free_fair_sched_group(struct task_group *tg)
{
}
static inline
int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent)
{
return 1;
}
static inline void unregister_fair_sched_group(struct task_group *tg, int cpu)
{
}
#endif /* CONFIG_FAIR_GROUP_SCHED */
#ifdef CONFIG_RT_GROUP_SCHED
static void free_rt_sched_group(struct task_group *tg)
{
int i;
destroy_rt_bandwidth(&tg->rt_bandwidth);
for_each_possible_cpu(i) {
if (tg->rt_rq)
kfree(tg->rt_rq[i]);
if (tg->rt_se)
kfree(tg->rt_se[i]);
}
kfree(tg->rt_rq);
kfree(tg->rt_se);
}
static
int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent)
{
struct rt_rq *rt_rq;
struct sched_rt_entity *rt_se;
int i;
tg->rt_rq = kzalloc(sizeof(rt_rq) * nr_cpu_ids, GFP_KERNEL);
if (!tg->rt_rq)
goto err;
tg->rt_se = kzalloc(sizeof(rt_se) * nr_cpu_ids, GFP_KERNEL);
if (!tg->rt_se)
goto err;
init_rt_bandwidth(&tg->rt_bandwidth,
ktime_to_ns(def_rt_bandwidth.rt_period), 0);
for_each_possible_cpu(i) {
rt_rq = kzalloc_node(sizeof(struct rt_rq),
GFP_KERNEL, cpu_to_node(i));
if (!rt_rq)
goto err;
rt_se = kzalloc_node(sizeof(struct sched_rt_entity),
GFP_KERNEL, cpu_to_node(i));
if (!rt_se)
goto err_free_rq;
init_tg_rt_entry(tg, rt_rq, rt_se, i, parent->rt_se[i]);
}
return 1;
err_free_rq:
kfree(rt_rq);
err:
return 0;
}
#else /* !CONFIG_RT_GROUP_SCHED */
static inline void free_rt_sched_group(struct task_group *tg)
{
}
static inline
int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent)
{
return 1;
}
#endif /* CONFIG_RT_GROUP_SCHED */
#ifdef CONFIG_CGROUP_SCHED
static void free_sched_group(struct task_group *tg)
{
free_fair_sched_group(tg);
free_rt_sched_group(tg);
autogroup_free(tg);
kfree(tg);
}
/* allocate runqueue etc for a new task group */
struct task_group *sched_create_group(struct task_group *parent)
{
struct task_group *tg;
unsigned long flags;
tg = kzalloc(sizeof(*tg), GFP_KERNEL);
if (!tg)
return ERR_PTR(-ENOMEM);
if (!alloc_fair_sched_group(tg, parent))
goto err;
if (!alloc_rt_sched_group(tg, parent))
goto err;
spin_lock_irqsave(&task_group_lock, flags);
list_add_rcu(&tg->list, &task_groups);
WARN_ON(!parent); /* root should already exist */
tg->parent = parent;
INIT_LIST_HEAD(&tg->children);
list_add_rcu(&tg->siblings, &parent->children);
spin_unlock_irqrestore(&task_group_lock, flags);
return tg;
err:
free_sched_group(tg);
return ERR_PTR(-ENOMEM);
}
/* rcu callback to free various structures associated with a task group */
static void free_sched_group_rcu(struct rcu_head *rhp)
{
/* now it should be safe to free those cfs_rqs */
free_sched_group(container_of(rhp, struct task_group, rcu));
}
/* Destroy runqueue etc associated with a task group */
void sched_destroy_group(struct task_group *tg)
{
unsigned long flags;
int i;
/* end participation in shares distribution */
for_each_possible_cpu(i)
unregister_fair_sched_group(tg, i);
spin_lock_irqsave(&task_group_lock, flags);
list_del_rcu(&tg->list);
list_del_rcu(&tg->siblings);
spin_unlock_irqrestore(&task_group_lock, flags);
/* wait for possible concurrent references to cfs_rqs to complete */
call_rcu(&tg->rcu, free_sched_group_rcu);
}
/* change task's runqueue when it moves between groups.
* The caller of this function should have put the task in its new group
* by now. This function just updates tsk->se.cfs_rq and tsk->se.parent to
* reflect its new group.
*/
void sched_move_task(struct task_struct *tsk)
{
int on_rq, running;
unsigned long flags;
struct rq *rq;
rq = task_rq_lock(tsk, &flags);
running = task_current(rq, tsk);
on_rq = tsk->on_rq;
if (on_rq)
dequeue_task(rq, tsk, 0);
if (unlikely(running))
tsk->sched_class->put_prev_task(rq, tsk);
#ifdef CONFIG_FAIR_GROUP_SCHED
if (tsk->sched_class->task_move_group)
tsk->sched_class->task_move_group(tsk, on_rq);
else
#endif
set_task_rq(tsk, task_cpu(tsk));
if (unlikely(running))
tsk->sched_class->set_curr_task(rq);
if (on_rq)
enqueue_task(rq, tsk, 0);
task_rq_unlock(rq, tsk, &flags);
}
#endif /* CONFIG_CGROUP_SCHED */
#ifdef CONFIG_FAIR_GROUP_SCHED
static DEFINE_MUTEX(shares_mutex);
int sched_group_set_shares(struct task_group *tg, unsigned long shares)
{
int i;
unsigned long flags;
/*
* We can't change the weight of the root cgroup.
*/
if (!tg->se[0])
return -EINVAL;
shares = clamp(shares, scale_load(MIN_SHARES), scale_load(MAX_SHARES));
mutex_lock(&shares_mutex);
if (tg->shares == shares)
goto done;
tg->shares = shares;
for_each_possible_cpu(i) {
struct rq *rq = cpu_rq(i);
struct sched_entity *se;
se = tg->se[i];
/* Propagate contribution to hierarchy */
raw_spin_lock_irqsave(&rq->lock, flags);
for_each_sched_entity(se)
update_cfs_shares(group_cfs_rq(se));
raw_spin_unlock_irqrestore(&rq->lock, flags);
}
done:
mutex_unlock(&shares_mutex);
return 0;
}
unsigned long sched_group_shares(struct task_group *tg)
{
return tg->shares;
}
#endif
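/*
* Usage sketch (illustrative, not part of the original file): cpu.shares
* is the userspace knob backed by sched_group_set_shares() and
* sched_group_shares() above. The cgroup path below is an assumption
* (a v1 "cpu" hierarchy mounted at /sys/fs/cgroup/cpu with a group
* called "mygroup"); adjust for the system at hand.
*/
#if 0 /* standalone userspace sketch; compile separately, not with the kernel */
#include <stdio.h>

int main(void)
{
	/* Give the group twice the default weight (the default is 1024). */
	FILE *f = fopen("/sys/fs/cgroup/cpu/mygroup/cpu.shares", "w");

	if (!f) {
		perror("fopen");
		return 1;
	}
	fprintf(f, "%d\n", 2048);
	fclose(f);
	return 0;
}
#endif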
#ifdef CONFIG_RT_GROUP_SCHED
/*
* Ensure that the real time constraints are schedulable.
*/
static DEFINE_MUTEX(rt_constraints_mutex);
static unsigned long to_ratio(u64 period, u64 runtime)
{
if (runtime == RUNTIME_INF)
return 1ULL << 20;
return div64_u64(runtime << 20, period);
}
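/*
* Worked example (illustrative): to_ratio() returns bandwidth as a
* 20-bit fixed-point fraction, so RUNTIME_INF maps to exactly 1.0
* (1 << 20). For instance, 950000 units of runtime in a 1000000-unit
* period gives (950000 << 20) / 1000000 = 996147, i.e. about
* 0.95 * (1 << 20). A userspace sketch of the same arithmetic:
*/
#if 0 /* standalone userspace sketch; compile separately, not with the kernel */
#include <stdio.h>
#include <stdint.h>

static unsigned long to_ratio_demo(uint64_t period, uint64_t runtime)
{
	return (unsigned long)((runtime << 20) / period);
}

int main(void)
{
	printf("%lu / %lu\n", to_ratio_demo(1000000, 950000), 1UL << 20);
	return 0;
}
#endif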
/* Must be called with tasklist_lock held */
static inline int tg_has_rt_tasks(struct task_group *tg)
{
struct task_struct *g, *p;
do_each_thread(g, p) {
if (rt_task(p) && rt_rq_of_se(&p->rt)->tg == tg)
return 1;
} while_each_thread(g, p);
return 0;
}
struct rt_schedulable_data {
struct task_group *tg;
u64 rt_period;
u64 rt_runtime;
};
static int tg_schedulable(struct task_group *tg, void *data)
{
struct rt_schedulable_data *d = data;
struct task_group *child;
unsigned long total, sum = 0;
u64 period, runtime;
period = ktime_to_ns(tg->rt_bandwidth.rt_period);
runtime = tg->rt_bandwidth.rt_runtime;
if (tg == d->tg) {
period = d->rt_period;
runtime = d->rt_runtime;
}
/*
* Cannot have more runtime than the period.
*/
if (runtime > period && runtime != RUNTIME_INF)
return -EINVAL;
/*
* Ensure we don't starve existing RT tasks.
*/
if (rt_bandwidth_enabled() && !runtime && tg_has_rt_tasks(tg))
return -EBUSY;
total = to_ratio(period, runtime);
/*
* Nobody can have more than the global setting allows.
*/
if (total > to_ratio(global_rt_period(), global_rt_runtime()))
return -EINVAL;
/*
* The sum of our children's runtime should not exceed our own.
*/
list_for_each_entry_rcu(child, &tg->children, siblings) {
period = ktime_to_ns(child->rt_bandwidth.rt_period);
runtime = child->rt_bandwidth.rt_runtime;
if (child == d->tg) {
period = d->rt_period;
runtime = d->rt_runtime;
}
sum += to_ratio(period, runtime);
}
if (sum > total)
return -EINVAL;
return 0;
}
static int __rt_schedulable(struct task_group *tg, u64 period, u64 runtime)
{
struct rt_schedulable_data data = {
.tg = tg,
.rt_period = period,
.rt_runtime = runtime,
};
return walk_tg_tree(tg_schedulable, tg_nop, &data);
}
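/*
* Numeric sketch (illustrative): walk_tg_tree() applies tg_schedulable()
* to every group, so the check "sum of the children's ratios must not
* exceed the parent's" holds at each level. For example, a parent
* allowed 0.5 of the CPU cannot have children allowed 0.3 and 0.25,
* since 0.3 + 0.25 > 0.5. In the fixed-point units of to_ratio():
*/
#if 0 /* standalone userspace sketch; compile separately, not with the kernel */
#include <stdio.h>

int main(void)
{
	unsigned long parent = (1UL << 20) / 2;		/* 0.5  */
	unsigned long child_sum = (3UL << 20) / 10	/* 0.3  */
				+ (1UL << 20) / 4;	/* 0.25 */

	printf("schedulable: %s\n", child_sum > parent ? "no" : "yes");
	return 0;
}
#endif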
static int tg_set_bandwidth(struct task_group *tg,
u64 rt_period, u64 rt_runtime)
{
int i, err = 0;
mutex_lock(&rt_constraints_mutex);
read_lock(&tasklist_lock);
err = __rt_schedulable(tg, rt_period, rt_runtime);
if (err)
goto unlock;
raw_spin_lock_irq(&tg->rt_bandwidth.rt_runtime_lock);
tg->rt_bandwidth.rt_period = ns_to_ktime(rt_period);
tg->rt_bandwidth.rt_runtime = rt_runtime;
for_each_possible_cpu(i) {
struct rt_rq *rt_rq = tg->rt_rq[i];
raw_spin_lock(&rt_rq->rt_runtime_lock);
rt_rq->rt_runtime = rt_runtime;
raw_spin_unlock(&rt_rq->rt_runtime_lock);
}
raw_spin_unlock_irq(&tg->rt_bandwidth.rt_runtime_lock);
unlock:
read_unlock(&tasklist_lock);
mutex_unlock(&rt_constraints_mutex);
return err;
}
int sched_group_set_rt_runtime(struct task_group *tg, long rt_runtime_us)
{
u64 rt_runtime, rt_period;
rt_period = ktime_to_ns(tg->rt_bandwidth.rt_period);
rt_runtime = (u64)rt_runtime_us * NSEC_PER_USEC;
if (rt_runtime_us < 0)
rt_runtime = RUNTIME_INF;
return tg_set_bandwidth(tg, rt_period, rt_runtime);
}
long sched_group_rt_runtime(struct task_group *tg)
{
u64 rt_runtime_us;
if (tg->rt_bandwidth.rt_runtime == RUNTIME_INF)
return -1;
rt_runtime_us = tg->rt_bandwidth.rt_runtime;
do_div(rt_runtime_us, NSEC_PER_USEC);
return rt_runtime_us;
}
int sched_group_set_rt_period(struct task_group *tg, long rt_period_us)
{
u64 rt_runtime, rt_period;
rt_period = (u64)rt_period_us * NSEC_PER_USEC;
rt_runtime = tg->rt_bandwidth.rt_runtime;
if (rt_period == 0)
return -EINVAL;
return tg_set_bandwidth(tg, rt_period, rt_runtime);
}
long sched_group_rt_period(struct task_group *tg)
{
u64 rt_period_us;
rt_period_us = ktime_to_ns(tg->rt_bandwidth.rt_period);
do_div(rt_period_us, NSEC_PER_USEC);
return rt_period_us;
}
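/*
* Usage sketch (illustrative): the four accessors above back the
* cpu.rt_runtime_us and cpu.rt_period_us cgroup files. Writing -1 to
* cpu.rt_runtime_us maps to RUNTIME_INF in sched_group_set_rt_runtime().
* The paths are assumptions (v1 "cpu" hierarchy at /sys/fs/cgroup/cpu,
* group "mygroup").
*/
#if 0 /* standalone userspace sketch; compile separately, not with the kernel */
#include <stdio.h>

static int write_val(const char *path, long val)
{
	FILE *f = fopen(path, "w");

	if (!f)
		return -1;
	fprintf(f, "%ld\n", val);
	return fclose(f);
}

int main(void)
{
	/* Allow this group 100ms of RT runtime per 1s period. */
	write_val("/sys/fs/cgroup/cpu/mygroup/cpu.rt_period_us", 1000000);
	write_val("/sys/fs/cgroup/cpu/mygroup/cpu.rt_runtime_us", 100000);
	return 0;
}
#endif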
static int sched_rt_global_constraints(void)
{
u64 runtime, period;
int ret = 0;
if (sysctl_sched_rt_period <= 0)
return -EINVAL;
runtime = global_rt_runtime();
period = global_rt_period();
/*
* Sanity check on the sysctl variables.
*/
if (runtime > period && runtime != RUNTIME_INF)
return -EINVAL;
mutex_lock(&rt_constraints_mutex);
read_lock(&tasklist_lock);
ret = __rt_schedulable(NULL, 0, 0);
read_unlock(&tasklist_lock);
mutex_unlock(&rt_constraints_mutex);
return ret;
}
int sched_rt_can_attach(struct task_group *tg, struct task_struct *tsk)
{
/* Don't accept realtime tasks when there is no way for them to run */
if (rt_task(tsk) && tg->rt_bandwidth.rt_runtime == 0)
return 0;
return 1;
}
#else /* !CONFIG_RT_GROUP_SCHED */
static int sched_rt_global_constraints(void)
{
unsigned long flags;
int i;
if (sysctl_sched_rt_period <= 0)
return -EINVAL;
/*
* There are always some RT tasks in the root group
* -- migration, kstopmachine, etc.
*/
if (sysctl_sched_rt_runtime == 0)
return -EBUSY;
raw_spin_lock_irqsave(&def_rt_bandwidth.rt_runtime_lock, flags);
for_each_possible_cpu(i) {
struct rt_rq *rt_rq = &cpu_rq(i)->rt;
raw_spin_lock(&rt_rq->rt_runtime_lock);
rt_rq->rt_runtime = global_rt_runtime();
raw_spin_unlock(&rt_rq->rt_runtime_lock);
}
raw_spin_unlock_irqrestore(&def_rt_bandwidth.rt_runtime_lock, flags);
return 0;
}
#endif /* CONFIG_RT_GROUP_SCHED */
int sched_rt_handler(struct ctl_table *table, int write,
void __user *buffer, size_t *lenp,
loff_t *ppos)
{
int ret;
int old_period, old_runtime;
static DEFINE_MUTEX(mutex);
mutex_lock(&mutex);
old_period = sysctl_sched_rt_period;
old_runtime = sysctl_sched_rt_runtime;
ret = proc_dointvec(table, write, buffer, lenp, ppos);
if (!ret && write) {
ret = sched_rt_global_constraints();
if (ret) {
sysctl_sched_rt_period = old_period;
sysctl_sched_rt_runtime = old_runtime;
} else {
def_rt_bandwidth.rt_runtime = global_rt_runtime();
def_rt_bandwidth.rt_period =
ns_to_ktime(global_rt_period());
}
}
mutex_unlock(&mutex);
return ret;
}
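/*
* Usage sketch (illustrative): sched_rt_handler() runs when the
* sched_rt_period_us/sched_rt_runtime_us sysctls are written; on a
* failed constraint check the old values are restored. Reading the
* runtime from userspace:
*/
#if 0 /* standalone userspace sketch; compile separately, not with the kernel */
#include <stdio.h>

int main(void)
{
	long runtime;
	FILE *f = fopen("/proc/sys/kernel/sched_rt_runtime_us", "r");

	if (!f) {
		perror("fopen");
		return 1;
	}
	if (fscanf(f, "%ld", &runtime) == 1)
		printf("global RT runtime: %ld us per period\n", runtime);
	fclose(f);
	return 0;
}
#endif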
#ifdef CONFIG_CGROUP_SCHED
/* return corresponding task_group object of a cgroup */
static inline struct task_group *cgroup_tg(struct cgroup *cgrp)
{
return container_of(cgroup_subsys_state(cgrp, cpu_cgroup_subsys_id),
struct task_group, css);
}
static struct cgroup_subsys_state *
cpu_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cgrp)
{
struct task_group *tg, *parent;
if (!cgrp->parent) {
/* This is early initialization for the top cgroup */
return &root_task_group.css;
}
parent = cgroup_tg(cgrp->parent);
tg = sched_create_group(parent);
if (IS_ERR(tg))
return ERR_PTR(-ENOMEM);
return &tg->css;
}
static void
cpu_cgroup_destroy(struct cgroup_subsys *ss, struct cgroup *cgrp)
{
struct task_group *tg = cgroup_tg(cgrp);
sched_destroy_group(tg);
}
static int
cpu_cgroup_allow_attach(struct cgroup *cgrp, struct task_struct *tsk)
{
const struct cred *cred = current_cred(), *tcred;
tcred = __task_cred(tsk);
if ((current != tsk) && !capable(CAP_SYS_NICE) &&
cred->euid != tcred->uid && cred->euid != tcred->suid)
return -EACCES;
return 0;
}
static int
cpu_cgroup_can_attach_task(struct cgroup *cgrp, struct task_struct *tsk)
{
#ifdef CONFIG_RT_GROUP_SCHED
if (!sched_rt_can_attach(cgroup_tg(cgrp), tsk))
return -EINVAL;
#else
/* We don't support RT-tasks being in separate groups */
if (tsk->sched_class != &fair_sched_class)
return -EINVAL;
#endif
return 0;
}
static void
cpu_cgroup_attach_task(struct cgroup *cgrp, struct task_struct *tsk)
{
sched_move_task(tsk);
}
static void
cpu_cgroup_exit(struct cgroup_subsys *ss, struct cgroup *cgrp,
struct cgroup *old_cgrp, struct task_struct *task)
{
/*
* cgroup_exit() is called in the copy_process() failure path.
* Ignore this case since the task hasn't run yet; this avoids
* trying to poke a half-freed task state from generic code.
*/
if (!(task->flags & PF_EXITING))
return;
sched_move_task(task);
}
#ifdef CONFIG_FAIR_GROUP_SCHED
static int cpu_shares_write_u64(struct cgroup *cgrp, struct cftype *cftype,
u64 shareval)
{
return sched_group_set_shares(cgroup_tg(cgrp), scale_load(shareval));
}
static u64 cpu_shares_read_u64(struct cgroup *cgrp, struct cftype *cft)
{
struct task_group *tg = cgroup_tg(cgrp);
return (u64) scale_load_down(tg->shares);
}
#endif /* CONFIG_FAIR_GROUP_SCHED */
#ifdef CONFIG_RT_GROUP_SCHED
static int cpu_rt_runtime_write(struct cgroup *cgrp, struct cftype *cft,
s64 val)
{
return sched_group_set_rt_runtime(cgroup_tg(cgrp), val);
}
static s64 cpu_rt_runtime_read(struct cgroup *cgrp, struct cftype *cft)
{
return sched_group_rt_runtime(cgroup_tg(cgrp));
}
static int cpu_rt_period_write_uint(struct cgroup *cgrp, struct cftype *cftype,
u64 rt_period_us)
{
return sched_group_set_rt_period(cgroup_tg(cgrp), rt_period_us);
}
static u64 cpu_rt_period_read_uint(struct cgroup *cgrp, struct cftype *cft)
{
return sched_group_rt_period(cgroup_tg(cgrp));
}
#endif /* CONFIG_RT_GROUP_SCHED */
static struct cftype cpu_files[] = {
#ifdef CONFIG_FAIR_GROUP_SCHED
{
.name = "shares",
.read_u64 = cpu_shares_read_u64,
.write_u64 = cpu_shares_write_u64,
},
#endif
#ifdef CONFIG_RT_GROUP_SCHED
{
.name = "rt_runtime_us",
.read_s64 = cpu_rt_runtime_read,
.write_s64 = cpu_rt_runtime_write,
},
{
.name = "rt_period_us",
.read_u64 = cpu_rt_period_read_uint,
.write_u64 = cpu_rt_period_write_uint,
},
#endif
};
static int cpu_cgroup_populate(struct cgroup_subsys *ss, struct cgroup *cont)
{
return cgroup_add_files(cont, ss, cpu_files, ARRAY_SIZE(cpu_files));
}
struct cgroup_subsys cpu_cgroup_subsys = {
.name = "cpu",
.create = cpu_cgroup_create,
.destroy = cpu_cgroup_destroy,
.allow_attach = cpu_cgroup_allow_attach,
.can_attach_task = cpu_cgroup_can_attach_task,
.attach_task = cpu_cgroup_attach_task,
.exit = cpu_cgroup_exit,
.populate = cpu_cgroup_populate,
.subsys_id = cpu_cgroup_subsys_id,
.early_init = 1,
};
#endif /* CONFIG_CGROUP_SCHED */
#ifdef CONFIG_CGROUP_CPUACCT
/*
* CPU accounting code for task groups.
*
* Based on the work by Paul Menage (menage@google.com) and Balbir Singh
* (balbir@in.ibm.com).
*/
/* track cpu usage of a group of tasks and its child groups */
struct cpuacct {
struct cgroup_subsys_state css;
/* cpuusage holds a pointer to a u64-type object on every cpu */
u64 __percpu *cpuusage;
struct percpu_counter cpustat[CPUACCT_STAT_NSTATS];
struct cpuacct *parent;
struct cpuacct_charge_calls *cpufreq_fn;
void *cpuacct_data;
};
static struct cpuacct *cpuacct_root;
/* Default calls for cpufreq accounting */
static struct cpuacct_charge_calls *cpuacct_cpufreq;
int cpuacct_register_cpufreq(struct cpuacct_charge_calls *fn)
{
cpuacct_cpufreq = fn;
/*
* The root node is created before the platform can register
* callbacks; initialize it here.
*/
if (cpuacct_root && fn) {
cpuacct_root->cpufreq_fn = fn;
if (fn->init)
fn->init(&cpuacct_root->cpuacct_data);
}
return 0;
}
struct cgroup_subsys cpuacct_subsys;
/* return cpu accounting group corresponding to this container */
static inline struct cpuacct *cgroup_ca(struct cgroup *cgrp)
{
return container_of(cgroup_subsys_state(cgrp, cpuacct_subsys_id),
struct cpuacct, css);
}
/* return cpu accounting group to which this task belongs */
static inline struct cpuacct *task_ca(struct task_struct *tsk)
{
return container_of(task_subsys_state(tsk, cpuacct_subsys_id),
struct cpuacct, css);
}
/* create a new cpu accounting group */
static struct cgroup_subsys_state *cpuacct_create(
struct cgroup_subsys *ss, struct cgroup *cgrp)
{
struct cpuacct *ca = kzalloc(sizeof(*ca), GFP_KERNEL);
int i;
if (!ca)
goto out;
ca->cpuusage = alloc_percpu(u64);
if (!ca->cpuusage)
goto out_free_ca;
for (i = 0; i < CPUACCT_STAT_NSTATS; i++)
if (percpu_counter_init(&ca->cpustat[i], 0))
goto out_free_counters;
ca->cpufreq_fn = cpuacct_cpufreq;
/* If available, have platform code initialize the cpu frequency table */
if (ca->cpufreq_fn && ca->cpufreq_fn->init)
ca->cpufreq_fn->init(&ca->cpuacct_data);
if (cgrp->parent)
ca->parent = cgroup_ca(cgrp->parent);
else
cpuacct_root = ca;
return &ca->css;
out_free_counters:
while (--i >= 0)
percpu_counter_destroy(&ca->cpustat[i]);
free_percpu(ca->cpuusage);
out_free_ca:
kfree(ca);
out:
return ERR_PTR(-ENOMEM);
}
/* destroy an existing cpu accounting group */
static void
cpuacct_destroy(struct cgroup_subsys *ss, struct cgroup *cgrp)
{
struct cpuacct *ca = cgroup_ca(cgrp);
int i;
for (i = 0; i < CPUACCT_STAT_NSTATS; i++)
percpu_counter_destroy(&ca->cpustat[i]);
free_percpu(ca->cpuusage);
kfree(ca);
}
static u64 cpuacct_cpuusage_read(struct cpuacct *ca, int cpu)
{
u64 *cpuusage = per_cpu_ptr(ca->cpuusage, cpu);
u64 data;
#ifndef CONFIG_64BIT
/*
* Take rq->lock to make 64-bit read safe on 32-bit platforms.
*/
raw_spin_lock_irq(&cpu_rq(cpu)->lock);
data = *cpuusage;
raw_spin_unlock_irq(&cpu_rq(cpu)->lock);
#else
data = *cpuusage;
#endif
return data;
}
static void cpuacct_cpuusage_write(struct cpuacct *ca, int cpu, u64 val)
{
u64 *cpuusage = per_cpu_ptr(ca->cpuusage, cpu);
#ifndef CONFIG_64BIT
/*
* Take rq->lock to make 64-bit write safe on 32-bit platforms.
*/
raw_spin_lock_irq(&cpu_rq(cpu)->lock);
*cpuusage = val;
raw_spin_unlock_irq(&cpu_rq(cpu)->lock);
#else
*cpuusage = val;
#endif
}
/* return total cpu usage (in nanoseconds) of a group */
static u64 cpuusage_read(struct cgroup *cgrp, struct cftype *cft)
{
struct cpuacct *ca = cgroup_ca(cgrp);
u64 totalcpuusage = 0;
int i;
for_each_present_cpu(i)
totalcpuusage += cpuacct_cpuusage_read(ca, i);
return totalcpuusage;
}
static int cpuusage_write(struct cgroup *cgrp, struct cftype *cftype,
u64 reset)
{
struct cpuacct *ca = cgroup_ca(cgrp);
int err = 0;
int i;
if (reset) {
err = -EINVAL;
goto out;
}
for_each_present_cpu(i)
cpuacct_cpuusage_write(ca, i, 0);
out:
return err;
}
static int cpuacct_percpu_seq_read(struct cgroup *cgroup, struct cftype *cft,
struct seq_file *m)
{
struct cpuacct *ca = cgroup_ca(cgroup);
u64 percpu;
int i;
for_each_present_cpu(i) {
percpu = cpuacct_cpuusage_read(ca, i);
seq_printf(m, "%llu ", (unsigned long long) percpu);
}
seq_printf(m, "\n");
return 0;
}
static const char *cpuacct_stat_desc[] = {
[CPUACCT_STAT_USER] = "user",
[CPUACCT_STAT_SYSTEM] = "system",
};
static int cpuacct_stats_show(struct cgroup *cgrp, struct cftype *cft,
struct cgroup_map_cb *cb)
{
struct cpuacct *ca = cgroup_ca(cgrp);
int i;
for (i = 0; i < CPUACCT_STAT_NSTATS; i++) {
s64 val = percpu_counter_read(&ca->cpustat[i]);
val = cputime64_to_clock_t(val);
cb->fill(cb, cpuacct_stat_desc[i], val);
}
return 0;
}
static int cpuacct_cpufreq_show(struct cgroup *cgrp, struct cftype *cft,
struct cgroup_map_cb *cb)
{
struct cpuacct *ca = cgroup_ca(cgrp);
if (ca->cpufreq_fn && ca->cpufreq_fn->cpufreq_show)
ca->cpufreq_fn->cpufreq_show(ca->cpuacct_data, cb);
return 0;
}
/* return total cpu power usage (in milliwatt-seconds) of a group */
static u64 cpuacct_powerusage_read(struct cgroup *cgrp, struct cftype *cft)
{
int i;
struct cpuacct *ca = cgroup_ca(cgrp);
u64 totalpower = 0;
if (ca->cpufreq_fn && ca->cpufreq_fn->power_usage)
for_each_present_cpu(i) {
totalpower += ca->cpufreq_fn->power_usage(
ca->cpuacct_data);
}
return totalpower;
}
static struct cftype files[] = {
{
.name = "usage",
.read_u64 = cpuusage_read,
.write_u64 = cpuusage_write,
},
{
.name = "usage_percpu",
.read_seq_string = cpuacct_percpu_seq_read,
},
{
.name = "stat",
.read_map = cpuacct_stats_show,
},
{
.name = "cpufreq",
.read_map = cpuacct_cpufreq_show,
},
{
.name = "power",
.read_u64 = cpuacct_powerusage_read
},
};
static int cpuacct_populate(struct cgroup_subsys *ss, struct cgroup *cgrp)
{
return cgroup_add_files(cgrp, ss, files, ARRAY_SIZE(files));
}
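/*
* Usage sketch (illustrative): the files above surface the per-cpu
* counters maintained by cpuacct_charge() below. The path is an
* assumption (v1 "cpuacct" hierarchy at /sys/fs/cgroup/cpuacct, group
* "mygroup"). Writing 0 to cpuacct.usage resets the counters (see
* cpuusage_write() above).
*/
#if 0 /* standalone userspace sketch; compile separately, not with the kernel */
#include <stdio.h>

int main(void)
{
	unsigned long long usage;
	FILE *f = fopen("/sys/fs/cgroup/cpuacct/mygroup/cpuacct.usage", "r");

	if (!f) {
		perror("fopen");
		return 1;
	}
	if (fscanf(f, "%llu", &usage) == 1)
		printf("group cpu usage: %llu ns\n", usage);
	fclose(f);
	return 0;
}
#endif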
/*
* charge this task's execution time to its accounting group.
*
* called with rq->lock held.
*/
static void cpuacct_charge(struct task_struct *tsk, u64 cputime)
{
struct cpuacct *ca;
int cpu;
if (unlikely(!cpuacct_subsys.active))
return;
cpu = task_cpu(tsk);
rcu_read_lock();
ca = task_ca(tsk);
for (; ca; ca = ca->parent) {
u64 *cpuusage = per_cpu_ptr(ca->cpuusage, cpu);
*cpuusage += cputime;
/* Call back into platform code to account for CPU speeds */
if (ca->cpufreq_fn && ca->cpufreq_fn->charge)
ca->cpufreq_fn->charge(ca->cpuacct_data, cputime, cpu);
}
rcu_read_unlock();
}
/*
* When CONFIG_VIRT_CPU_ACCOUNTING is enabled one jiffy can be very large
* in cputime_t units. As a result, cpuacct_update_stats calls
* percpu_counter_add with values large enough to always overflow the
* per-cpu batch limit, causing bad SMP scalability.
*
* To fix this we scale percpu_counter_batch by cputime_one_jiffy so we
* batch the same amount of time with CONFIG_VIRT_CPU_ACCOUNTING disabled
* and enabled. We cap it at INT_MAX, which is the largest allowed batch value.
*/
#ifdef CONFIG_SMP
#define CPUACCT_BATCH \
min_t(long, percpu_counter_batch * cputime_one_jiffy, INT_MAX)
#else
#define CPUACCT_BATCH 0
#endif
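/*
* Numeric sketch (illustrative, with assumed values): with a
* percpu_counter_batch of 32 and a cputime_one_jiffy of 10000000
* (nanosecond-granularity cputime at HZ=100 under
* CONFIG_VIRT_CPU_ACCOUNTING), the batch becomes 320000000, still below
* the INT_MAX cap; with CONFIG_VIRT_CPU_ACCOUNTING disabled,
* cputime_one_jiffy is 1 and the batch stays at 32.
*/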
/*
* Charge the system/user time to the task's accounting group.
*/
static void cpuacct_update_stats(struct task_struct *tsk,
enum cpuacct_stat_index idx, cputime_t val)
{
struct cpuacct *ca;
int batch = CPUACCT_BATCH;
if (unlikely(!cpuacct_subsys.active))
return;
rcu_read_lock();
ca = task_ca(tsk);
do {
__percpu_counter_add(&ca->cpustat[idx], val, batch);
ca = ca->parent;
} while (ca);
rcu_read_unlock();
}
struct cgroup_subsys cpuacct_subsys = {
.name = "cpuacct",
.create = cpuacct_create,
.destroy = cpuacct_destroy,
.populate = cpuacct_populate,
.subsys_id = cpuacct_subsys_id,
};
#endif /* CONFIG_CGROUP_CPUACCT */
| gpl-2.0 |
marcOcram/Acer-Liquid-MT-Kernel | fs/locks.c | 167 | 59009 | /*
* linux/fs/locks.c
*
* Provide support for fcntl()'s F_GETLK, F_SETLK, and F_SETLKW calls.
* Doug Evans (dje@spiff.uucp), August 07, 1992
*
* Deadlock detection added.
* FIXME: one thing isn't handled yet:
* - mandatory locks (requires lots of changes elsewhere)
* Kelly Carmichael (kelly@[142.24.8.65]), September 17, 1994.
*
* Miscellaneous edits, and a total rewrite of posix_lock_file() code.
* Kai Petzke (wpp@marie.physik.tu-berlin.de), 1994
*
* Converted file_lock_table to a linked list from an array, which eliminates
* the limits on how many active file locks are open.
* Chad Page (pageone@netcom.com), November 27, 1994
*
* Removed dependency on file descriptors. dup()'ed file descriptors now
* get the same locks as the original file descriptors, and a close() on
* any file descriptor removes ALL the locks on the file for the current
* process. Since locks still depend on the process id, locks are inherited
* after an exec() but not after a fork(). This agrees with POSIX, and both
* BSD and SVR4 practice.
* Andy Walker (andy@lysaker.kvaerner.no), February 14, 1995
*
* Scrapped free list which is redundant now that we allocate locks
* dynamically with kmalloc()/kfree().
* Andy Walker (andy@lysaker.kvaerner.no), February 21, 1995
*
* Implemented two lock personalities - FL_FLOCK and FL_POSIX.
*
* FL_POSIX locks are created with calls to fcntl() and lockf() through the
* fcntl() system call. They have the semantics described above.
*
* FL_FLOCK locks are created with calls to flock(), through the flock()
* system call, which is new. Old C libraries implement flock() via fcntl()
* and will continue to use the old, broken implementation.
*
* FL_FLOCK locks follow the 4.4 BSD flock() semantics. They are associated
* with a file pointer (filp). As a result they can be shared by a parent
* process and its children after a fork(). They are removed when the last
* file descriptor referring to the file pointer is closed (unless explicitly
* unlocked).
*
* FL_FLOCK locks never deadlock; an existing lock is always removed before
* upgrading from shared to exclusive (or vice versa). When this happens
* any processes blocked by the current lock are woken up and allowed to
* run before the new lock is applied.
* Andy Walker (andy@lysaker.kvaerner.no), June 09, 1995
*
* Removed some race conditions in flock_lock_file(), marked other possible
* races. Just grep for FIXME to see them.
* Dmitry Gorodchanin (pgmdsg@ibi.com), February 09, 1996.
*
* Addressed Dmitry's concerns. Deadlock checking no longer recursive.
* Lock allocation changed to GFP_ATOMIC as we can't afford to sleep
* once we've checked for blocking and deadlocking.
* Andy Walker (andy@lysaker.kvaerner.no), April 03, 1996.
*
* Initial implementation of mandatory locks. SunOS turned out to be
* a rotten model, so I implemented the "obvious" semantics.
* See 'Documentation/mandatory.txt' for details.
* Andy Walker (andy@lysaker.kvaerner.no), April 06, 1996.
*
* Don't allow mandatory locks on mmap()'ed files. Added simple functions to
* check if a file has mandatory locks, used by mmap(), open() and creat() to
* see if system call should be rejected. Ref. HP-UX/SunOS/Solaris Reference
* Manual, Section 2.
* Andy Walker (andy@lysaker.kvaerner.no), April 09, 1996.
*
* Tidied up block list handling. Added '/proc/locks' interface.
* Andy Walker (andy@lysaker.kvaerner.no), April 24, 1996.
*
* Fixed deadlock condition for pathological code that mixes calls to
* flock() and fcntl().
* Andy Walker (andy@lysaker.kvaerner.no), April 29, 1996.
*
* Allow only one type of locking scheme (FL_POSIX or FL_FLOCK) to be in use
* for a given file at a time. Changed the CONFIG_LOCK_MANDATORY scheme to
* guarantee sensible behaviour in the case where file system modules might
* be compiled with different options than the kernel itself.
* Andy Walker (andy@lysaker.kvaerner.no), May 15, 1996.
*
* Added a couple of missing wake_up() calls. Thanks to Thomas Meckel
* (Thomas.Meckel@mni.fh-giessen.de) for spotting this.
* Andy Walker (andy@lysaker.kvaerner.no), May 15, 1996.
*
* Changed FL_POSIX locks to use the block list in the same way as FL_FLOCK
* locks. Changed process synchronisation to avoid dereferencing locks that
* have already been freed.
* Andy Walker (andy@lysaker.kvaerner.no), Sep 21, 1996.
*
* Made the block list a circular list to minimise searching in the list.
* Andy Walker (andy@lysaker.kvaerner.no), Sep 25, 1996.
*
* Made mandatory locking a mount option. Default is not to allow mandatory
* locking.
* Andy Walker (andy@lysaker.kvaerner.no), Oct 04, 1996.
*
* Some adaptations for NFS support.
* Olaf Kirch (okir@monad.swb.de), Dec 1996,
*
* Fixed /proc/locks interface so that we can't overrun the buffer we are handed.
* Andy Walker (andy@lysaker.kvaerner.no), May 12, 1997.
*
* Use slab allocator instead of kmalloc/kfree.
* Use generic list implementation from <linux/list.h>.
* Sped up posix_locks_deadlock by only considering blocked locks.
* Matthew Wilcox <willy@debian.org>, March, 2000.
*
* Leases and LOCK_MAND
* Matthew Wilcox <willy@debian.org>, June, 2000.
* Stephen Rothwell <sfr@canb.auug.org.au>, June, 2000.
*/
#include <linux/capability.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/security.h>
#include <linux/slab.h>
#include <linux/smp_lock.h>
#include <linux/syscalls.h>
#include <linux/time.h>
#include <linux/rcupdate.h>
#include <linux/pid_namespace.h>
#include <asm/uaccess.h>
#define IS_POSIX(fl) (fl->fl_flags & FL_POSIX)
#define IS_FLOCK(fl) (fl->fl_flags & FL_FLOCK)
#define IS_LEASE(fl) (fl->fl_flags & FL_LEASE)
int leases_enable = 1;
int lease_break_time = 45;
#define for_each_lock(inode, lockp) \
for (lockp = &inode->i_flock; *lockp != NULL; lockp = &(*lockp)->fl_next)
static LIST_HEAD(file_lock_list);
static LIST_HEAD(blocked_list);
static struct kmem_cache *filelock_cache __read_mostly;
/* Allocate an empty lock structure. */
static struct file_lock *locks_alloc_lock(void)
{
return kmem_cache_alloc(filelock_cache, GFP_KERNEL);
}
static void locks_release_private(struct file_lock *fl)
{
if (fl->fl_ops) {
if (fl->fl_ops->fl_release_private)
fl->fl_ops->fl_release_private(fl);
fl->fl_ops = NULL;
}
if (fl->fl_lmops) {
if (fl->fl_lmops->fl_release_private)
fl->fl_lmops->fl_release_private(fl);
fl->fl_lmops = NULL;
}
}
/* Free a lock which is not in use. */
static void locks_free_lock(struct file_lock *fl)
{
BUG_ON(waitqueue_active(&fl->fl_wait));
BUG_ON(!list_empty(&fl->fl_block));
BUG_ON(!list_empty(&fl->fl_link));
locks_release_private(fl);
kmem_cache_free(filelock_cache, fl);
}
void locks_init_lock(struct file_lock *fl)
{
INIT_LIST_HEAD(&fl->fl_link);
INIT_LIST_HEAD(&fl->fl_block);
init_waitqueue_head(&fl->fl_wait);
fl->fl_next = NULL;
fl->fl_fasync = NULL;
fl->fl_owner = NULL;
fl->fl_pid = 0;
fl->fl_nspid = NULL;
fl->fl_file = NULL;
fl->fl_flags = 0;
fl->fl_type = 0;
fl->fl_start = fl->fl_end = 0;
fl->fl_ops = NULL;
fl->fl_lmops = NULL;
}
EXPORT_SYMBOL(locks_init_lock);
/*
* Initialises the fields of the file lock which are invariant for
* free file_locks.
*/
static void init_once(void *foo)
{
struct file_lock *lock = (struct file_lock *) foo;
locks_init_lock(lock);
}
static void locks_copy_private(struct file_lock *new, struct file_lock *fl)
{
if (fl->fl_ops) {
if (fl->fl_ops->fl_copy_lock)
fl->fl_ops->fl_copy_lock(new, fl);
new->fl_ops = fl->fl_ops;
}
if (fl->fl_lmops) {
if (fl->fl_lmops->fl_copy_lock)
fl->fl_lmops->fl_copy_lock(new, fl);
new->fl_lmops = fl->fl_lmops;
}
}
/*
* Initialize a new lock from an existing file_lock structure.
*/
void __locks_copy_lock(struct file_lock *new, const struct file_lock *fl)
{
new->fl_owner = fl->fl_owner;
new->fl_pid = fl->fl_pid;
new->fl_file = NULL;
new->fl_flags = fl->fl_flags;
new->fl_type = fl->fl_type;
new->fl_start = fl->fl_start;
new->fl_end = fl->fl_end;
new->fl_ops = NULL;
new->fl_lmops = NULL;
}
EXPORT_SYMBOL(__locks_copy_lock);
void locks_copy_lock(struct file_lock *new, struct file_lock *fl)
{
locks_release_private(new);
__locks_copy_lock(new, fl);
new->fl_file = fl->fl_file;
new->fl_ops = fl->fl_ops;
new->fl_lmops = fl->fl_lmops;
locks_copy_private(new, fl);
}
EXPORT_SYMBOL(locks_copy_lock);
static inline int flock_translate_cmd(int cmd) {
if (cmd & LOCK_MAND)
return cmd & (LOCK_MAND | LOCK_RW);
switch (cmd) {
case LOCK_SH:
return F_RDLCK;
case LOCK_EX:
return F_WRLCK;
case LOCK_UN:
return F_UNLCK;
}
return -EINVAL;
}
/* Fill in a file_lock structure with an appropriate FLOCK lock. */
static int flock_make_lock(struct file *filp, struct file_lock **lock,
unsigned int cmd)
{
struct file_lock *fl;
int type = flock_translate_cmd(cmd);
if (type < 0)
return type;
fl = locks_alloc_lock();
if (fl == NULL)
return -ENOMEM;
fl->fl_file = filp;
fl->fl_pid = current->tgid;
fl->fl_flags = FL_FLOCK;
fl->fl_type = type;
fl->fl_end = OFFSET_MAX;
*lock = fl;
return 0;
}
static int assign_type(struct file_lock *fl, int type)
{
switch (type) {
case F_RDLCK:
case F_WRLCK:
case F_UNLCK:
fl->fl_type = type;
break;
default:
return -EINVAL;
}
return 0;
}
/* Verify a "struct flock" and copy it to a "struct file_lock" as a POSIX
* style lock.
*/
static int flock_to_posix_lock(struct file *filp, struct file_lock *fl,
struct flock *l)
{
off_t start, end;
switch (l->l_whence) {
case SEEK_SET:
start = 0;
break;
case SEEK_CUR:
start = filp->f_pos;
break;
case SEEK_END:
start = i_size_read(filp->f_path.dentry->d_inode);
break;
default:
return -EINVAL;
}
/* POSIX-1996 leaves the case l->l_len < 0 undefined;
POSIX-2001 defines it. */
start += l->l_start;
if (start < 0)
return -EINVAL;
fl->fl_end = OFFSET_MAX;
if (l->l_len > 0) {
end = start + l->l_len - 1;
fl->fl_end = end;
} else if (l->l_len < 0) {
end = start - 1;
fl->fl_end = end;
start += l->l_len;
if (start < 0)
return -EINVAL;
}
fl->fl_start = start; /* we record the absolute position */
if (fl->fl_end < fl->fl_start)
return -EOVERFLOW;
fl->fl_owner = current->files;
fl->fl_pid = current->tgid;
fl->fl_file = filp;
fl->fl_flags = FL_POSIX;
fl->fl_ops = NULL;
fl->fl_lmops = NULL;
return assign_type(fl, l->l_type);
}
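/*
* Usage sketch (illustrative): how a userspace struct flock maps through
* flock_to_posix_lock(). A positive l_len locks l_len bytes starting at
* l_start; a negative l_len locks the bytes *before* l_start, per the
* POSIX-2001 branch above. The file name is an arbitrary example.
*/
#if 0 /* standalone userspace sketch; compile separately, not with the kernel */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	struct flock fl = {
		.l_type   = F_WRLCK,
		.l_whence = SEEK_SET,
		.l_start  = 100,	/* absolute byte 100 ...    */
		.l_len    = 10,		/* ... through byte 109     */
	};
	int fd = open("/tmp/lockdemo", O_RDWR | O_CREAT, 0600);

	if (fd < 0 || fcntl(fd, F_SETLK, &fl) < 0) {
		perror("lock");
		return 1;
	}
	puts("byte range [100,109] write-locked");
	close(fd);
	return 0;
}
#endif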
#if BITS_PER_LONG == 32
static int flock64_to_posix_lock(struct file *filp, struct file_lock *fl,
struct flock64 *l)
{
loff_t start;
switch (l->l_whence) {
case SEEK_SET:
start = 0;
break;
case SEEK_CUR:
start = filp->f_pos;
break;
case SEEK_END:
start = i_size_read(filp->f_path.dentry->d_inode);
break;
default:
return -EINVAL;
}
start += l->l_start;
if (start < 0)
return -EINVAL;
fl->fl_end = OFFSET_MAX;
if (l->l_len > 0) {
fl->fl_end = start + l->l_len - 1;
} else if (l->l_len < 0) {
fl->fl_end = start - 1;
start += l->l_len;
if (start < 0)
return -EINVAL;
}
fl->fl_start = start; /* we record the absolute position */
if (fl->fl_end < fl->fl_start)
return -EOVERFLOW;
fl->fl_owner = current->files;
fl->fl_pid = current->tgid;
fl->fl_file = filp;
fl->fl_flags = FL_POSIX;
fl->fl_ops = NULL;
fl->fl_lmops = NULL;
switch (l->l_type) {
case F_RDLCK:
case F_WRLCK:
case F_UNLCK:
fl->fl_type = l->l_type;
break;
default:
return -EINVAL;
}
return (0);
}
#endif
/* default lease lock manager operations */
static void lease_break_callback(struct file_lock *fl)
{
kill_fasync(&fl->fl_fasync, SIGIO, POLL_MSG);
}
static void lease_release_private_callback(struct file_lock *fl)
{
if (!fl->fl_file)
return;
f_delown(fl->fl_file);
fl->fl_file->f_owner.signum = 0;
}
static int lease_mylease_callback(struct file_lock *fl, struct file_lock *try)
{
return fl->fl_file == try->fl_file;
}
static struct lock_manager_operations lease_manager_ops = {
.fl_break = lease_break_callback,
.fl_release_private = lease_release_private_callback,
.fl_mylease = lease_mylease_callback,
.fl_change = lease_modify,
};
/*
* Initialize a lease, use the default lock manager operations
*/
static int lease_init(struct file *filp, int type, struct file_lock *fl)
{
if (assign_type(fl, type) != 0)
return -EINVAL;
fl->fl_owner = current->files;
fl->fl_pid = current->tgid;
fl->fl_file = filp;
fl->fl_flags = FL_LEASE;
fl->fl_start = 0;
fl->fl_end = OFFSET_MAX;
fl->fl_ops = NULL;
fl->fl_lmops = &lease_manager_ops;
return 0;
}
/* Allocate a file_lock initialised to this type of lease */
static struct file_lock *lease_alloc(struct file *filp, int type)
{
struct file_lock *fl = locks_alloc_lock();
int error = -ENOMEM;
if (fl == NULL)
return ERR_PTR(error);
error = lease_init(filp, type, fl);
if (error) {
locks_free_lock(fl);
return ERR_PTR(error);
}
return fl;
}
/* Check if two locks overlap each other.
*/
static inline int locks_overlap(struct file_lock *fl1, struct file_lock *fl2)
{
return ((fl1->fl_end >= fl2->fl_start) &&
(fl2->fl_end >= fl1->fl_start));
}
/*
* Check whether two locks have the same owner.
*/
static int posix_same_owner(struct file_lock *fl1, struct file_lock *fl2)
{
if (fl1->fl_lmops && fl1->fl_lmops->fl_compare_owner)
return fl2->fl_lmops == fl1->fl_lmops &&
fl1->fl_lmops->fl_compare_owner(fl1, fl2);
return fl1->fl_owner == fl2->fl_owner;
}
/* Remove waiter from blocker's block list.
* When blocker ends up pointing to itself then the list is empty.
*/
static void __locks_delete_block(struct file_lock *waiter)
{
list_del_init(&waiter->fl_block);
list_del_init(&waiter->fl_link);
waiter->fl_next = NULL;
}
/*
*/
static void locks_delete_block(struct file_lock *waiter)
{
lock_kernel();
__locks_delete_block(waiter);
unlock_kernel();
}
/* Insert waiter into blocker's block list.
* We use a circular list so that processes can be easily woken up in
* the order they blocked. The documentation doesn't require this but
* it seems like the reasonable thing to do.
*/
static void locks_insert_block(struct file_lock *blocker,
struct file_lock *waiter)
{
BUG_ON(!list_empty(&waiter->fl_block));
list_add_tail(&waiter->fl_block, &blocker->fl_block);
waiter->fl_next = blocker;
if (IS_POSIX(blocker))
list_add(&waiter->fl_link, &blocked_list);
}
/* Wake up processes blocked waiting for blocker.
* If told to wait then schedule the processes until the block list
* is empty, otherwise empty the block list ourselves.
*/
static void locks_wake_up_blocks(struct file_lock *blocker)
{
while (!list_empty(&blocker->fl_block)) {
struct file_lock *waiter;
waiter = list_first_entry(&blocker->fl_block,
struct file_lock, fl_block);
__locks_delete_block(waiter);
if (waiter->fl_lmops && waiter->fl_lmops->fl_notify)
waiter->fl_lmops->fl_notify(waiter);
else
wake_up(&waiter->fl_wait);
}
}
/* Insert file lock fl into an inode's lock list at the position indicated
* by pos. At the same time add the lock to the global file lock list.
*/
static void locks_insert_lock(struct file_lock **pos, struct file_lock *fl)
{
list_add(&fl->fl_link, &file_lock_list);
fl->fl_nspid = get_pid(task_tgid(current));
/* insert into file's list */
fl->fl_next = *pos;
*pos = fl;
}
/*
* Delete a lock and then free it.
* Wake up processes that are blocked waiting for this lock,
* notify the FS that the lock has been cleared and
* finally free the lock.
*/
static void locks_delete_lock(struct file_lock **thisfl_p)
{
struct file_lock *fl = *thisfl_p;
*thisfl_p = fl->fl_next;
fl->fl_next = NULL;
list_del_init(&fl->fl_link);
fasync_helper(0, fl->fl_file, 0, &fl->fl_fasync);
if (fl->fl_fasync != NULL) {
printk(KERN_ERR "locks_delete_lock: fasync == %p\n", fl->fl_fasync);
fl->fl_fasync = NULL;
}
if (fl->fl_nspid) {
put_pid(fl->fl_nspid);
fl->fl_nspid = NULL;
}
locks_wake_up_blocks(fl);
locks_free_lock(fl);
}
/* Determine if lock sys_fl blocks lock caller_fl. Common functionality
* checks for shared/exclusive status of overlapping locks.
*/
static int locks_conflict(struct file_lock *caller_fl, struct file_lock *sys_fl)
{
if (sys_fl->fl_type == F_WRLCK)
return 1;
if (caller_fl->fl_type == F_WRLCK)
return 1;
return 0;
}
/* Determine if lock sys_fl blocks lock caller_fl. POSIX-specific
* checking before calling locks_conflict().
*/
static int posix_locks_conflict(struct file_lock *caller_fl, struct file_lock *sys_fl)
{
/* POSIX locks owned by the same process do not conflict with
* each other.
*/
if (!IS_POSIX(sys_fl) || posix_same_owner(caller_fl, sys_fl))
return (0);
/* Check whether they overlap */
if (!locks_overlap(caller_fl, sys_fl))
return 0;
return (locks_conflict(caller_fl, sys_fl));
}
/* Determine if lock sys_fl blocks lock caller_fl. FLOCK-specific
* checking before calling locks_conflict().
*/
static int flock_locks_conflict(struct file_lock *caller_fl, struct file_lock *sys_fl)
{
/* FLOCK locks referring to the same filp do not conflict with
* each other.
*/
if (!IS_FLOCK(sys_fl) || (caller_fl->fl_file == sys_fl->fl_file))
return (0);
if ((caller_fl->fl_type & LOCK_MAND) || (sys_fl->fl_type & LOCK_MAND))
return 0;
return (locks_conflict(caller_fl, sys_fl));
}
void
posix_test_lock(struct file *filp, struct file_lock *fl)
{
struct file_lock *cfl;
lock_kernel();
for (cfl = filp->f_path.dentry->d_inode->i_flock; cfl; cfl = cfl->fl_next) {
if (!IS_POSIX(cfl))
continue;
if (posix_locks_conflict(fl, cfl))
break;
}
if (cfl) {
__locks_copy_lock(fl, cfl);
if (cfl->fl_nspid)
fl->fl_pid = pid_vnr(cfl->fl_nspid);
} else
fl->fl_type = F_UNLCK;
unlock_kernel();
return;
}
EXPORT_SYMBOL(posix_test_lock);
/*
* Deadlock detection:
*
* We attempt to detect deadlocks that are due purely to posix file
* locks.
*
* We assume that a task can be waiting for at most one lock at a time.
* So for any acquired lock, the process holding that lock may be
* waiting on at most one other lock. That lock in turns may be held by
* someone waiting for at most one other lock. Given a requested lock
* caller_fl which is about to wait for a conflicting lock block_fl, we
* follow this chain of waiters to ensure we are not about to create a
* cycle.
*
* Since we do this before we ever put a process to sleep on a lock, we
* are ensured that there is never a cycle; that is what guarantees that
* the while() loop in posix_locks_deadlock() eventually completes.
*
* Note: the above assumption may not be true when handling lock
* requests from a broken NFS client. It may also fail in the presence
* of tasks (such as posix threads) sharing the same open file table.
*
* To handle those cases, we just bail out after a few iterations.
*/
#define MAX_DEADLK_ITERATIONS 10
/* Find a lock that the owner of the given block_fl is blocking on. */
static struct file_lock *what_owner_is_waiting_for(struct file_lock *block_fl)
{
struct file_lock *fl;
list_for_each_entry(fl, &blocked_list, fl_link) {
if (posix_same_owner(fl, block_fl))
return fl->fl_next;
}
return NULL;
}
static int posix_locks_deadlock(struct file_lock *caller_fl,
struct file_lock *block_fl)
{
int i = 0;
while ((block_fl = what_owner_is_waiting_for(block_fl))) {
if (i++ > MAX_DEADLK_ITERATIONS)
return 0;
if (posix_same_owner(caller_fl, block_fl))
return 1;
}
return 0;
}
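/*
* Usage sketch (illustrative): two processes each take one byte lock and
* then wait for the other's byte; the chain walk above hands EDEADLK to
* the waiter whose sleep would close the cycle. Ordering is approximated
* with sleep(); a real test would synchronise explicitly.
*/
#if 0 /* standalone userspace sketch; compile separately, not with the kernel */
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <sys/wait.h>
#include <unistd.h>

static int lock_byte(int fd, off_t off, int cmd)
{
	struct flock fl = {
		.l_type = F_WRLCK, .l_whence = SEEK_SET,
		.l_start = off, .l_len = 1,
	};
	return fcntl(fd, cmd, &fl);
}

int main(void)
{
	int fd = open("/tmp/deadlockdemo", O_RDWR | O_CREAT, 0600);

	if (fd < 0)
		return 1;
	lock_byte(fd, 0, F_SETLK);		/* parent owns byte 0 */
	if (fork() == 0) {			/* child */
		lock_byte(fd, 1, F_SETLK);	/* child owns byte 1 */
		lock_byte(fd, 0, F_SETLKW);	/* blocks on the parent */
		_exit(0);
	}
	sleep(1);				/* let the child block */
	if (lock_byte(fd, 1, F_SETLKW) < 0 && errno == EDEADLK)
		puts("EDEADLK detected, as expected");
	close(fd);		/* drops our locks; the child unblocks */
	wait(NULL);
	return 0;
}
#endif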
/* Try to create a FLOCK lock on filp. We always insert new FLOCK locks
* after any leases, but before any posix locks.
*
* Note that if called with an FL_EXISTS argument, the caller may determine
* whether or not a lock was successfully freed by testing the return
* value for -ENOENT.
*/
static int flock_lock_file(struct file *filp, struct file_lock *request)
{
struct file_lock *new_fl = NULL;
struct file_lock **before;
struct inode * inode = filp->f_path.dentry->d_inode;
int error = 0;
int found = 0;
lock_kernel();
if (request->fl_flags & FL_ACCESS)
goto find_conflict;
if (request->fl_type != F_UNLCK) {
error = -ENOMEM;
new_fl = locks_alloc_lock();
if (new_fl == NULL)
goto out;
error = 0;
}
for_each_lock(inode, before) {
struct file_lock *fl = *before;
if (IS_POSIX(fl))
break;
if (IS_LEASE(fl))
continue;
if (filp != fl->fl_file)
continue;
if (request->fl_type == fl->fl_type)
goto out;
found = 1;
locks_delete_lock(before);
break;
}
if (request->fl_type == F_UNLCK) {
if ((request->fl_flags & FL_EXISTS) && !found)
error = -ENOENT;
goto out;
}
/*
* If a higher-priority process was blocked on the old file lock,
* give it the opportunity to lock the file.
*/
if (found)
cond_resched_bkl();
find_conflict:
for_each_lock(inode, before) {
struct file_lock *fl = *before;
if (IS_POSIX(fl))
break;
if (IS_LEASE(fl))
continue;
if (!flock_locks_conflict(request, fl))
continue;
error = -EAGAIN;
if (!(request->fl_flags & FL_SLEEP))
goto out;
error = FILE_LOCK_DEFERRED;
locks_insert_block(fl, request);
goto out;
}
if (request->fl_flags & FL_ACCESS)
goto out;
locks_copy_lock(new_fl, request);
locks_insert_lock(before, new_fl);
new_fl = NULL;
error = 0;
out:
unlock_kernel();
if (new_fl)
locks_free_lock(new_fl);
return error;
}
static int __posix_lock_file(struct inode *inode, struct file_lock *request, struct file_lock *conflock)
{
struct file_lock *fl;
struct file_lock *new_fl = NULL;
struct file_lock *new_fl2 = NULL;
struct file_lock *left = NULL;
struct file_lock *right = NULL;
struct file_lock **before;
int error, added = 0;
/*
* We may need two file_lock structures for this operation,
* so we get them in advance to avoid races.
*
* In some cases we can be sure that no new locks will be needed.
*/
if (!(request->fl_flags & FL_ACCESS) &&
(request->fl_type != F_UNLCK ||
request->fl_start != 0 || request->fl_end != OFFSET_MAX)) {
new_fl = locks_alloc_lock();
new_fl2 = locks_alloc_lock();
}
lock_kernel();
if (request->fl_type != F_UNLCK) {
for_each_lock(inode, before) {
fl = *before;
if (!IS_POSIX(fl))
continue;
if (!posix_locks_conflict(request, fl))
continue;
if (conflock)
__locks_copy_lock(conflock, fl);
error = -EAGAIN;
if (!(request->fl_flags & FL_SLEEP))
goto out;
error = -EDEADLK;
if (posix_locks_deadlock(request, fl))
goto out;
error = FILE_LOCK_DEFERRED;
locks_insert_block(fl, request);
goto out;
}
}
/* If we're just looking for a conflict, we're done. */
error = 0;
if (request->fl_flags & FL_ACCESS)
goto out;
/*
* Find the first old lock with the same owner as the new lock.
*/
before = &inode->i_flock;
/* First skip locks owned by other processes. */
while ((fl = *before) && (!IS_POSIX(fl) ||
!posix_same_owner(request, fl))) {
before = &fl->fl_next;
}
/* Process locks with this owner. */
while ((fl = *before) && posix_same_owner(request, fl)) {
/* Detect adjacent or overlapping regions (if same lock type)
*/
if (request->fl_type == fl->fl_type) {
/* In all comparisons of start vs end, use
* "start - 1" rather than "end + 1". If end
* is OFFSET_MAX, end + 1 will become negative.
*/
if (fl->fl_end < request->fl_start - 1)
goto next_lock;
/* If the next lock in the list has entirely bigger
* addresses than the new one, insert the lock here.
*/
if (fl->fl_start - 1 > request->fl_end)
break;
/* If we come here, the new and old lock are of the
* same type and adjacent or overlapping. Make one
* lock yielding from the lower start address of both
* locks to the higher end address.
*/
if (fl->fl_start > request->fl_start)
fl->fl_start = request->fl_start;
else
request->fl_start = fl->fl_start;
if (fl->fl_end < request->fl_end)
fl->fl_end = request->fl_end;
else
request->fl_end = fl->fl_end;
if (added) {
locks_delete_lock(before);
continue;
}
request = fl;
added = 1;
}
else {
/* Processing for different lock types is a bit
* more complex.
*/
if (fl->fl_end < request->fl_start)
goto next_lock;
if (fl->fl_start > request->fl_end)
break;
if (request->fl_type == F_UNLCK)
added = 1;
if (fl->fl_start < request->fl_start)
left = fl;
/* If the next lock in the list has a higher end
* address than the new one, insert the new one here.
*/
if (fl->fl_end > request->fl_end) {
right = fl;
break;
}
if (fl->fl_start >= request->fl_start) {
/* The new lock completely replaces an old
* one (This may happen several times).
*/
if (added) {
locks_delete_lock(before);
continue;
}
/* Replace the old lock with the new one.
* Wake up anybody waiting for the old one,
* as the change in lock type might satisfy
* their needs.
*/
locks_wake_up_blocks(fl);
fl->fl_start = request->fl_start;
fl->fl_end = request->fl_end;
fl->fl_type = request->fl_type;
locks_release_private(fl);
locks_copy_private(fl, request);
request = fl;
added = 1;
}
}
/* Go on to next lock.
*/
next_lock:
before = &fl->fl_next;
}
/*
* The above code only modifies existing locks in case of
* merging or replacing. If new lock(s) need to be inserted,
* all modifications are done below this point, so it's still
* safe to bail out.
*/
error = -ENOLCK; /* "no luck" */
if (right && left == right && !new_fl2)
goto out;
error = 0;
if (!added) {
if (request->fl_type == F_UNLCK) {
if (request->fl_flags & FL_EXISTS)
error = -ENOENT;
goto out;
}
if (!new_fl) {
error = -ENOLCK;
goto out;
}
locks_copy_lock(new_fl, request);
locks_insert_lock(before, new_fl);
new_fl = NULL;
}
if (right) {
if (left == right) {
/* The new lock breaks the old one in two pieces,
* so we have to use the second new lock.
*/
left = new_fl2;
new_fl2 = NULL;
locks_copy_lock(left, right);
locks_insert_lock(before, left);
}
right->fl_start = request->fl_end + 1;
locks_wake_up_blocks(right);
}
if (left) {
left->fl_end = request->fl_start - 1;
locks_wake_up_blocks(left);
}
out:
unlock_kernel();
/*
* Free any unused locks.
*/
if (new_fl)
locks_free_lock(new_fl);
if (new_fl2)
locks_free_lock(new_fl2);
return error;
}
/**
* posix_lock_file - Apply a POSIX-style lock to a file
* @filp: The file to apply the lock to
* @fl: The lock to be applied
* @conflock: Place to return a copy of the conflicting lock, if found.
*
* Add a POSIX style lock to a file.
* We merge adjacent & overlapping locks whenever possible.
* POSIX locks are sorted by owner task, then by starting address
*
* Note that if called with an FL_EXISTS argument, the caller may determine
* whether or not a lock was successfully freed by testing the return
* value for -ENOENT.
*/
int posix_lock_file(struct file *filp, struct file_lock *fl,
struct file_lock *conflock)
{
return __posix_lock_file(filp->f_path.dentry->d_inode, fl, conflock);
}
EXPORT_SYMBOL(posix_lock_file);
/**
* posix_lock_file_wait - Apply a POSIX-style lock to a file
* @filp: The file to apply the lock to
* @fl: The lock to be applied
*
* Add a POSIX style lock to a file.
* We merge adjacent & overlapping locks whenever possible.
* POSIX locks are sorted by owner task, then by starting address
*/
int posix_lock_file_wait(struct file *filp, struct file_lock *fl)
{
int error;
might_sleep ();
for (;;) {
error = posix_lock_file(filp, fl, NULL);
if (error != FILE_LOCK_DEFERRED)
break;
error = wait_event_interruptible(fl->fl_wait, !fl->fl_next);
if (!error)
continue;
locks_delete_block(fl);
break;
}
return error;
}
EXPORT_SYMBOL(posix_lock_file_wait);
/**
* locks_mandatory_locked - Check for an active lock
* @inode: the file to check
*
* Searches the inode's list of locks to find any POSIX locks which conflict.
* This function is called from locks_verify_locked() only.
*/
int locks_mandatory_locked(struct inode *inode)
{
fl_owner_t owner = current->files;
struct file_lock *fl;
/*
* Search the lock list for this inode for any POSIX locks.
*/
lock_kernel();
for (fl = inode->i_flock; fl != NULL; fl = fl->fl_next) {
if (!IS_POSIX(fl))
continue;
if (fl->fl_owner != owner)
break;
}
unlock_kernel();
return fl ? -EAGAIN : 0;
}
/**
* locks_mandatory_area - Check for a conflicting lock
* @read_write: %FLOCK_VERIFY_WRITE for exclusive access, %FLOCK_VERIFY_READ
* for shared
* @inode: the file to check
* @filp: how the file was opened (if it was)
* @offset: start of area to check
* @count: length of area to check
*
* Searches the inode's list of locks to find any POSIX locks which conflict.
* This function is called from rw_verify_area() and
* locks_verify_truncate().
*/
int locks_mandatory_area(int read_write, struct inode *inode,
struct file *filp, loff_t offset,
size_t count)
{
struct file_lock fl;
int error;
locks_init_lock(&fl);
fl.fl_owner = current->files;
fl.fl_pid = current->tgid;
fl.fl_file = filp;
fl.fl_flags = FL_POSIX | FL_ACCESS;
if (filp && !(filp->f_flags & O_NONBLOCK))
fl.fl_flags |= FL_SLEEP;
fl.fl_type = (read_write == FLOCK_VERIFY_WRITE) ? F_WRLCK : F_RDLCK;
fl.fl_start = offset;
fl.fl_end = offset + count - 1;
for (;;) {
error = __posix_lock_file(inode, &fl, NULL);
if (error != FILE_LOCK_DEFERRED)
break;
error = wait_event_interruptible(fl.fl_wait, !fl.fl_next);
if (!error) {
/*
* If we've been sleeping, someone might have
* changed the permissions behind our back.
*/
if (__mandatory_lock(inode))
continue;
}
locks_delete_block(&fl);
break;
}
return error;
}
EXPORT_SYMBOL(locks_mandatory_area);
/* We already had a lease on this file; just change its type */
int lease_modify(struct file_lock **before, int arg)
{
struct file_lock *fl = *before;
int error = assign_type(fl, arg);
if (error)
return error;
locks_wake_up_blocks(fl);
if (arg == F_UNLCK)
locks_delete_lock(before);
return 0;
}
EXPORT_SYMBOL(lease_modify);
static void time_out_leases(struct inode *inode)
{
struct file_lock **before;
struct file_lock *fl;
before = &inode->i_flock;
while ((fl = *before) && IS_LEASE(fl) && (fl->fl_type & F_INPROGRESS)) {
if ((fl->fl_break_time == 0)
|| time_before(jiffies, fl->fl_break_time)) {
before = &fl->fl_next;
continue;
}
lease_modify(before, fl->fl_type & ~F_INPROGRESS);
if (fl == *before) /* lease_modify may have freed fl */
before = &fl->fl_next;
}
}
/**
* __break_lease - revoke all outstanding leases on file
* @inode: the inode of the file to return
* @mode: the open mode (read or write)
*
* break_lease (inlined for speed) has checked there already is at least
* some kind of lock (maybe a lease) on this file. Leases are broken on
* a call to open() or truncate(). This function can sleep unless you
* specified %O_NONBLOCK in your open().
*/
int __break_lease(struct inode *inode, unsigned int mode)
{
int error = 0, future;
struct file_lock *new_fl, *flock;
struct file_lock *fl;
unsigned long break_time;
int i_have_this_lease = 0;
new_fl = lease_alloc(NULL, mode & FMODE_WRITE ? F_WRLCK : F_RDLCK);
lock_kernel();
time_out_leases(inode);
flock = inode->i_flock;
if ((flock == NULL) || !IS_LEASE(flock))
goto out;
for (fl = flock; fl && IS_LEASE(fl); fl = fl->fl_next)
if (fl->fl_owner == current->files)
i_have_this_lease = 1;
if (mode & FMODE_WRITE) {
/* If we want write access, we have to revoke any lease. */
future = F_UNLCK | F_INPROGRESS;
} else if (flock->fl_type & F_INPROGRESS) {
/* If the lease is already being broken, we just leave it */
future = flock->fl_type;
} else if (flock->fl_type & F_WRLCK) {
/* Downgrade the exclusive lease to a read-only lease. */
future = F_RDLCK | F_INPROGRESS;
} else {
/* the existing lease was read-only, so we can read too. */
goto out;
}
if (IS_ERR(new_fl) && !i_have_this_lease
&& ((mode & O_NONBLOCK) == 0)) {
error = PTR_ERR(new_fl);
goto out;
}
break_time = 0;
if (lease_break_time > 0) {
break_time = jiffies + lease_break_time * HZ;
if (break_time == 0)
break_time++; /* so that 0 means no break time */
}
for (fl = flock; fl && IS_LEASE(fl); fl = fl->fl_next) {
if (fl->fl_type != future) {
fl->fl_type = future;
fl->fl_break_time = break_time;
/* lease must have lmops break callback */
fl->fl_lmops->fl_break(fl);
}
}
if (i_have_this_lease || (mode & O_NONBLOCK)) {
error = -EWOULDBLOCK;
goto out;
}
restart:
break_time = flock->fl_break_time;
if (break_time != 0) {
break_time -= jiffies;
if (break_time == 0)
break_time++;
}
locks_insert_block(flock, new_fl);
error = wait_event_interruptible_timeout(new_fl->fl_wait,
!new_fl->fl_next, break_time);
__locks_delete_block(new_fl);
if (error >= 0) {
if (error == 0)
time_out_leases(inode);
/* Wait for the next lease that has not been broken yet */
for (flock = inode->i_flock; flock && IS_LEASE(flock);
flock = flock->fl_next) {
if (flock->fl_type & F_INPROGRESS)
goto restart;
}
error = 0;
}
out:
unlock_kernel();
if (!IS_ERR(new_fl))
locks_free_lock(new_fl);
return error;
}
EXPORT_SYMBOL(__break_lease);
/**
* lease_get_mtime - get the last modified time of an inode
* @inode: the inode
* @time: pointer to a timespec which will contain the last modified time
*
* This is to force NFS clients to flush their caches for files with
* exclusive leases. The justification is that if someone has an
* exclusive lease, then they could be modifying it.
*/
void lease_get_mtime(struct inode *inode, struct timespec *time)
{
struct file_lock *flock = inode->i_flock;
if (flock && IS_LEASE(flock) && (flock->fl_type & F_WRLCK))
*time = current_fs_time(inode->i_sb);
else
*time = inode->i_mtime;
}
EXPORT_SYMBOL(lease_get_mtime);
/**
* fcntl_getlease - Enquire what lease is currently active
* @filp: the file
*
* The value returned by this function will be one of
* (if no lease break is pending):
*
* %F_RDLCK to indicate a shared lease is held.
*
* %F_WRLCK to indicate an exclusive lease is held.
*
* %F_UNLCK to indicate no lease is held.
*
* (if a lease break is pending):
*
* %F_RDLCK to indicate an exclusive lease needs to be
* changed to a shared lease (or removed).
*
* %F_UNLCK to indicate the lease needs to be removed.
*
* XXX: sfr & willy disagree over whether F_INPROGRESS
* should be returned to userspace.
*/
int fcntl_getlease(struct file *filp)
{
struct file_lock *fl;
int type = F_UNLCK;
lock_kernel();
time_out_leases(filp->f_path.dentry->d_inode);
for (fl = filp->f_path.dentry->d_inode->i_flock; fl && IS_LEASE(fl);
fl = fl->fl_next) {
if (fl->fl_file == filp) {
type = fl->fl_type & ~F_INPROGRESS;
break;
}
}
unlock_kernel();
return type;
}
/**
* generic_setlease - sets a lease on an open file
* @filp: file pointer
* @arg: type of lease to obtain
* @flp: input - file_lock to use, output - file_lock inserted
*
* The (input) flp->fl_lmops->fl_break function is required
* by break_lease().
*
* Called with kernel lock held.
*/
int generic_setlease(struct file *filp, long arg, struct file_lock **flp)
{
struct file_lock *fl, **before, **my_before = NULL, *lease;
struct file_lock *new_fl = NULL;
struct dentry *dentry = filp->f_path.dentry;
struct inode *inode = dentry->d_inode;
int error, rdlease_count = 0, wrlease_count = 0;
if ((current_fsuid() != inode->i_uid) && !capable(CAP_LEASE))
return -EACCES;
if (!S_ISREG(inode->i_mode))
return -EINVAL;
error = security_file_lock(filp, arg);
if (error)
return error;
time_out_leases(inode);
BUG_ON(!(*flp)->fl_lmops->fl_break);
lease = *flp;
if (arg != F_UNLCK) {
error = -ENOMEM;
new_fl = locks_alloc_lock();
if (new_fl == NULL)
goto out;
error = -EAGAIN;
if ((arg == F_RDLCK) && (atomic_read(&inode->i_writecount) > 0))
goto out;
if ((arg == F_WRLCK)
&& ((atomic_read(&dentry->d_count) > 1)
|| (atomic_read(&inode->i_count) > 1)))
goto out;
}
/*
* At this point, we know that if there is an exclusive
* lease on this file, then we hold it on this filp
* (otherwise our open of this file would have blocked).
* And if we are trying to acquire an exclusive lease,
* then the file is not open by anyone (including us)
* except for this filp.
*/
for (before = &inode->i_flock;
((fl = *before) != NULL) && IS_LEASE(fl);
before = &fl->fl_next) {
if (lease->fl_lmops->fl_mylease(fl, lease))
my_before = before;
else if (fl->fl_type == (F_INPROGRESS | F_UNLCK))
/*
* Someone is in the process of opening this
* file for writing so we may not take an
* exclusive lease on it.
*/
wrlease_count++;
else
rdlease_count++;
}
error = -EAGAIN;
if ((arg == F_RDLCK && (wrlease_count > 0)) ||
(arg == F_WRLCK && ((rdlease_count + wrlease_count) > 0)))
goto out;
if (my_before != NULL) {
*flp = *my_before;
error = lease->fl_lmops->fl_change(my_before, arg);
goto out;
}
error = 0;
if (arg == F_UNLCK)
goto out;
error = -EINVAL;
if (!leases_enable)
goto out;
locks_copy_lock(new_fl, lease);
locks_insert_lock(before, new_fl);
*flp = new_fl;
return 0;
out:
if (new_fl != NULL)
locks_free_lock(new_fl);
return error;
}
EXPORT_SYMBOL(generic_setlease);
/**
* vfs_setlease - sets a lease on an open file
* @filp: file pointer
* @arg: type of lease to obtain
* @lease: file_lock to use
*
* Call this to establish a lease on the file.
* The (*lease)->fl_lmops->fl_break operation must be set; if not,
* break_lease will oops!
*
* This will call the filesystem's setlease file method, if
* defined. Note that there is no getlease method; instead, the
* filesystem setlease method should call back to setlease() to
* add a lease to the inode's lease list, where fcntl_getlease() can
* find it. Since fcntl_getlease() only reports whether the current
* task holds a lease, a cluster filesystem need only do this for
* leases held by processes on this node.
*
* There is also no break_lease method; filesystems that
* handle their own leases should break leases themselves from the
* filesystem's open, create, and (on truncate) setattr methods.
*
* Warning: the only current setlease methods exist only to disable
* leases in certain cases. More vfs changes may be required to
* allow a full filesystem lease implementation.
*/
int vfs_setlease(struct file *filp, long arg, struct file_lock **lease)
{
int error;
lock_kernel();
if (filp->f_op && filp->f_op->setlease)
error = filp->f_op->setlease(filp, arg, lease);
else
error = generic_setlease(filp, arg, lease);
unlock_kernel();
return error;
}
EXPORT_SYMBOL_GPL(vfs_setlease);
/**
* fcntl_setlease - sets a lease on an open file
* @fd: open file descriptor
* @filp: file pointer
* @arg: type of lease to obtain
*
* Call this fcntl to establish a lease on the file.
* Note that you also need to call %F_SETSIG to
* receive a signal when the lease is broken.
*/
int fcntl_setlease(unsigned int fd, struct file *filp, long arg)
{
struct file_lock fl, *flp = &fl;
struct inode *inode = filp->f_path.dentry->d_inode;
int error;
locks_init_lock(&fl);
error = lease_init(filp, arg, &fl);
if (error)
return error;
lock_kernel();
error = vfs_setlease(filp, arg, &flp);
if (error || arg == F_UNLCK)
goto out_unlock;
error = fasync_helper(fd, filp, 1, &flp->fl_fasync);
if (error < 0) {
/* remove lease just inserted by setlease */
flp->fl_type = F_UNLCK | F_INPROGRESS;
flp->fl_break_time = jiffies - 10;
time_out_leases(inode);
goto out_unlock;
}
error = __f_setown(filp, task_pid(current), PIDTYPE_PID, 0);
out_unlock:
unlock_kernel();
return error;
}
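/*
* Usage sketch (illustrative): taking a lease from userspace via the
* fcntl(F_SETLEASE) path above. Note that fcntl_setlease() already sets
* the owner via __f_setown(), so SIGIO is delivered to this task when
* __break_lease() runs. F_SETLEASE is Linux-specific (_GNU_SOURCE).
*/
#if 0 /* standalone userspace sketch; compile separately, not with the kernel */
#define _GNU_SOURCE
#include <fcntl.h>
#include <signal.h>
#include <stdio.h>
#include <unistd.h>

static volatile sig_atomic_t lease_broken;

static void on_sigio(int sig)
{
	lease_broken = 1;
}

int main(void)
{
	int fd = open("/tmp/leasedemo", O_RDWR | O_CREAT, 0600);

	if (fd < 0)
		return 1;
	signal(SIGIO, on_sigio);
	if (fcntl(fd, F_SETLEASE, F_WRLCK) < 0) {
		perror("F_SETLEASE");
		return 1;
	}
	while (!lease_broken)
		pause();			/* wait for a lease break */
	fcntl(fd, F_SETLEASE, F_UNLCK);		/* surrender the lease */
	close(fd);
	return 0;
}
#endif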
/**
* flock_lock_file_wait - Apply a FLOCK-style lock to a file
* @filp: The file to apply the lock to
* @fl: The lock to be applied
*
* Add a FLOCK style lock to a file.
*/
int flock_lock_file_wait(struct file *filp, struct file_lock *fl)
{
int error;
might_sleep();
for (;;) {
error = flock_lock_file(filp, fl);
if (error != FILE_LOCK_DEFERRED)
break;
error = wait_event_interruptible(fl->fl_wait, !fl->fl_next);
if (!error)
continue;
locks_delete_block(fl);
break;
}
return error;
}
EXPORT_SYMBOL(flock_lock_file_wait);
/**
* sys_flock - flock() system call.
* @fd: the file descriptor to lock.
* @cmd: the type of lock to apply.
*
* Apply a %FL_FLOCK style lock to an open file descriptor.
* The @cmd can be one of
*
* %LOCK_SH -- a shared lock.
*
* %LOCK_EX -- an exclusive lock.
*
* %LOCK_UN -- remove an existing lock.
*
* %LOCK_MAND -- a `mandatory' flock. This exists to emulate Windows Share Modes.
*
* %LOCK_MAND can be combined with %LOCK_READ or %LOCK_WRITE to allow other
* processes read and write access respectively.
*/
SYSCALL_DEFINE2(flock, unsigned int, fd, unsigned int, cmd)
{
struct file *filp;
struct file_lock *lock;
int can_sleep, unlock;
int error;
error = -EBADF;
filp = fget(fd);
if (!filp)
goto out;
can_sleep = !(cmd & LOCK_NB);
cmd &= ~LOCK_NB;
unlock = (cmd == LOCK_UN);
if (!unlock && !(cmd & LOCK_MAND) &&
!(filp->f_mode & (FMODE_READ|FMODE_WRITE)))
goto out_putf;
error = flock_make_lock(filp, &lock, cmd);
if (error)
goto out_putf;
if (can_sleep)
lock->fl_flags |= FL_SLEEP;
error = security_file_lock(filp, cmd);
if (error)
goto out_free;
if (filp->f_op && filp->f_op->flock)
error = filp->f_op->flock(filp,
(can_sleep) ? F_SETLKW : F_SETLK,
lock);
else
error = flock_lock_file_wait(filp, lock);
out_free:
locks_free_lock(lock);
out_putf:
fput(filp);
out:
return error;
}
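/*
 * Illustrative userspace sketch (not part of this file): the usual
 * shared/exclusive usage of the system call above.
 *
 *	int fd = open("/some/file", O_RDWR);
 *	if (flock(fd, LOCK_EX | LOCK_NB) == -1)
 *		-- someone else holds the lock; back off
 *	-- ... exclusive work ...
 *	flock(fd, LOCK_UN);
 */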
/**
* vfs_test_lock - test file byte range lock
* @filp: The file to test lock for
* @fl: The lock to test; also used to hold result
*
* Returns -ERRNO on failure. Indicates presence of conflicting lock by
* setting conf->fl_type to something other than F_UNLCK.
*/
int vfs_test_lock(struct file *filp, struct file_lock *fl)
{
if (filp->f_op && filp->f_op->lock)
return filp->f_op->lock(filp, F_GETLK, fl);
posix_test_lock(filp, fl);
return 0;
}
EXPORT_SYMBOL_GPL(vfs_test_lock);
static int posix_lock_to_flock(struct flock *flock, struct file_lock *fl)
{
flock->l_pid = fl->fl_pid;
#if BITS_PER_LONG == 32
/*
* Make sure we can represent the posix lock via
* legacy 32bit flock.
*/
if (fl->fl_start > OFFT_OFFSET_MAX)
return -EOVERFLOW;
if (fl->fl_end != OFFSET_MAX && fl->fl_end > OFFT_OFFSET_MAX)
return -EOVERFLOW;
#endif
flock->l_start = fl->fl_start;
flock->l_len = fl->fl_end == OFFSET_MAX ? 0 :
fl->fl_end - fl->fl_start + 1;
flock->l_whence = 0;
flock->l_type = fl->fl_type;
return 0;
}
#if BITS_PER_LONG == 32
static void posix_lock_to_flock64(struct flock64 *flock, struct file_lock *fl)
{
flock->l_pid = fl->fl_pid;
flock->l_start = fl->fl_start;
flock->l_len = fl->fl_end == OFFSET_MAX ? 0 :
fl->fl_end - fl->fl_start + 1;
flock->l_whence = 0;
flock->l_type = fl->fl_type;
}
#endif
/* Report the first existing lock that would conflict with l.
* This implements the F_GETLK command of fcntl().
*/
int fcntl_getlk(struct file *filp, struct flock __user *l)
{
struct file_lock file_lock;
struct flock flock;
int error;
error = -EFAULT;
if (copy_from_user(&flock, l, sizeof(flock)))
goto out;
error = -EINVAL;
if ((flock.l_type != F_RDLCK) && (flock.l_type != F_WRLCK))
goto out;
error = flock_to_posix_lock(filp, &file_lock, &flock);
if (error)
goto out;
error = vfs_test_lock(filp, &file_lock);
if (error)
goto out;
flock.l_type = file_lock.fl_type;
if (file_lock.fl_type != F_UNLCK) {
error = posix_lock_to_flock(&flock, &file_lock);
if (error)
goto out;
}
error = -EFAULT;
if (!copy_to_user(l, &flock, sizeof(flock)))
error = 0;
out:
return error;
}
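/*
 * Illustrative userspace sketch (not part of this file): probing for a
 * conflict with F_GETLK.  On return l_type is F_UNLCK if the range is
 * free; otherwise the struct describes the first conflicting lock.
 *
 *	struct flock fl = {
 *		.l_type   = F_WRLCK,
 *		.l_whence = SEEK_SET,
 *		.l_start  = 0,
 *		.l_len    = 0,		-- 0 means "to EOF"
 *	};
 *	fcntl(fd, F_GETLK, &fl);
 *	if (fl.l_type != F_UNLCK)
 *		-- fl.l_pid holds a conflicting lock
 */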
/**
* vfs_lock_file - file byte range lock
* @filp: The file to apply the lock to
* @cmd: type of locking operation (F_SETLK, F_GETLK, etc.)
* @fl: The lock to be applied
* @conf: Place to return a copy of the conflicting lock, if found.
*
* A caller that doesn't care about the conflicting lock may pass NULL
* as the final argument.
*
* If the filesystem defines a private ->lock() method, then @conf will
* be left unchanged; so a caller that cares should initialize it to
* some acceptable default.
*
* To avoid blocking kernel daemons, such as lockd, that need to acquire POSIX
* locks, the ->lock() interface may return asynchronously, before the lock has
* been granted or denied by the underlying filesystem, if (and only if)
* fl_grant is set. Callers expecting ->lock() to return asynchronously
* will only use F_SETLK, not F_SETLKW; they will set FL_SLEEP if (and only if)
* the request is for a blocking lock. When ->lock() does return asynchronously,
* it must return FILE_LOCK_DEFERRED, and call ->fl_grant() when the lock
* request completes.
* If the request is for a non-blocking lock the file system should return
* FILE_LOCK_DEFERRED then try to get the lock and call the callback routine
* with the result. If the request timed out the callback routine will return a
* nonzero return code and the file system should release the lock. The file
* system is also responsible for keeping a corresponding posix lock when it
* grants a lock so the VFS can find out which locks are locally held and do
* the correct lock cleanup when required.
* The underlying filesystem must not drop the kernel lock or call
* ->fl_grant() before returning to the caller with a FILE_LOCK_DEFERRED
* return code.
*/
int vfs_lock_file(struct file *filp, unsigned int cmd, struct file_lock *fl, struct file_lock *conf)
{
if (filp->f_op && filp->f_op->lock)
return filp->f_op->lock(filp, cmd, fl);
else
return posix_lock_file(filp, fl, conf);
}
EXPORT_SYMBOL_GPL(vfs_lock_file);
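/*
 * Illustrative sketch (not part of this file) of the asynchronous
 * ->lock() contract described above, seen from the filesystem side.
 * The helper names are hypothetical; the point is the pairing of a
 * FILE_LOCK_DEFERRED return with a later ->fl_grant() callback.
 *
 *	static int examplefs_lock(struct file *filp, int cmd,
 *				  struct file_lock *fl)
 *	{
 *		if (fl->fl_lmops && fl->fl_lmops->fl_grant) {
 *			examplefs_queue_lock_request(filp, fl);
 *			return FILE_LOCK_DEFERRED;
 *		}
 *		return examplefs_lock_blocking(filp, cmd, fl);
 *	}
 *
 * When the server replies, the filesystem invokes the ->fl_grant()
 * callback with the result and, per the rules above, releases the lock
 * again if the callback reports that the waiter has timed out.
 */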
static int do_lock_file_wait(struct file *filp, unsigned int cmd,
struct file_lock *fl)
{
int error;
error = security_file_lock(filp, fl->fl_type);
if (error)
return error;
for (;;) {
error = vfs_lock_file(filp, cmd, fl, NULL);
if (error != FILE_LOCK_DEFERRED)
break;
error = wait_event_interruptible(fl->fl_wait, !fl->fl_next);
if (!error)
continue;
locks_delete_block(fl);
break;
}
return error;
}
/* Apply the lock described by l to an open file descriptor.
* This implements both the F_SETLK and F_SETLKW commands of fcntl().
*/
int fcntl_setlk(unsigned int fd, struct file *filp, unsigned int cmd,
struct flock __user *l)
{
struct file_lock *file_lock = locks_alloc_lock();
struct flock flock;
struct inode *inode;
struct file *f;
int error;
if (file_lock == NULL)
return -ENOLCK;
/*
* This might block, so we do it before checking the inode.
*/
error = -EFAULT;
if (copy_from_user(&flock, l, sizeof(flock)))
goto out;
inode = filp->f_path.dentry->d_inode;
/* Don't allow mandatory locks on files that may be memory mapped
* and shared.
*/
if (mandatory_lock(inode) && mapping_writably_mapped(filp->f_mapping)) {
error = -EAGAIN;
goto out;
}
again:
error = flock_to_posix_lock(filp, file_lock, &flock);
if (error)
goto out;
if (cmd == F_SETLKW) {
file_lock->fl_flags |= FL_SLEEP;
}
error = -EBADF;
switch (flock.l_type) {
case F_RDLCK:
if (!(filp->f_mode & FMODE_READ))
goto out;
break;
case F_WRLCK:
if (!(filp->f_mode & FMODE_WRITE))
goto out;
break;
case F_UNLCK:
break;
default:
error = -EINVAL;
goto out;
}
error = do_lock_file_wait(filp, cmd, file_lock);
/*
* Attempt to detect a close/fcntl race and recover by
* releasing the lock that was just acquired.
*/
/*
* we need that spin_lock here - it prevents reordering between
* update of inode->i_flock and check for it done in close().
* rcu_read_lock() wouldn't do.
*/
spin_lock(&current->files->file_lock);
f = fcheck(fd);
spin_unlock(&current->files->file_lock);
if (!error && f != filp && flock.l_type != F_UNLCK) {
flock.l_type = F_UNLCK;
goto again;
}
out:
locks_free_lock(file_lock);
return error;
}
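/*
 * Illustrative userspace sketch (not part of this file): the blocking
 * F_SETLKW path implemented above, taking and then dropping a
 * whole-file write lock.
 *
 *	struct flock fl = {
 *		.l_type   = F_WRLCK,
 *		.l_whence = SEEK_SET,
 *	};
 *	fcntl(fd, F_SETLKW, &fl);	-- blocks until granted
 *	-- ... do work ...
 *	fl.l_type = F_UNLCK;
 *	fcntl(fd, F_SETLK, &fl);
 */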
#if BITS_PER_LONG == 32
/* Report the first existing lock that would conflict with l.
* This implements the F_GETLK command of fcntl().
*/
int fcntl_getlk64(struct file *filp, struct flock64 __user *l)
{
struct file_lock file_lock;
struct flock64 flock;
int error;
error = -EFAULT;
if (copy_from_user(&flock, l, sizeof(flock)))
goto out;
error = -EINVAL;
if ((flock.l_type != F_RDLCK) && (flock.l_type != F_WRLCK))
goto out;
error = flock64_to_posix_lock(filp, &file_lock, &flock);
if (error)
goto out;
error = vfs_test_lock(filp, &file_lock);
if (error)
goto out;
flock.l_type = file_lock.fl_type;
if (file_lock.fl_type != F_UNLCK)
posix_lock_to_flock64(&flock, &file_lock);
error = -EFAULT;
if (!copy_to_user(l, &flock, sizeof(flock)))
error = 0;
out:
return error;
}
/* Apply the lock described by l to an open file descriptor.
* This implements both the F_SETLK and F_SETLKW commands of fcntl().
*/
int fcntl_setlk64(unsigned int fd, struct file *filp, unsigned int cmd,
struct flock64 __user *l)
{
struct file_lock *file_lock = locks_alloc_lock();
struct flock64 flock;
struct inode *inode;
struct file *f;
int error;
if (file_lock == NULL)
return -ENOLCK;
/*
* This might block, so we do it before checking the inode.
*/
error = -EFAULT;
if (copy_from_user(&flock, l, sizeof(flock)))
goto out;
inode = filp->f_path.dentry->d_inode;
/* Don't allow mandatory locks on files that may be memory mapped
* and shared.
*/
if (mandatory_lock(inode) && mapping_writably_mapped(filp->f_mapping)) {
error = -EAGAIN;
goto out;
}
again:
error = flock64_to_posix_lock(filp, file_lock, &flock);
if (error)
goto out;
if (cmd == F_SETLKW64) {
file_lock->fl_flags |= FL_SLEEP;
}
error = -EBADF;
switch (flock.l_type) {
case F_RDLCK:
if (!(filp->f_mode & FMODE_READ))
goto out;
break;
case F_WRLCK:
if (!(filp->f_mode & FMODE_WRITE))
goto out;
break;
case F_UNLCK:
break;
default:
error = -EINVAL;
goto out;
}
error = do_lock_file_wait(filp, cmd, file_lock);
/*
* Attempt to detect a close/fcntl race and recover by
* releasing the lock that was just acquired.
*/
spin_lock(&current->files->file_lock);
f = fcheck(fd);
spin_unlock(&current->files->file_lock);
if (!error && f != filp && flock.l_type != F_UNLCK) {
flock.l_type = F_UNLCK;
goto again;
}
out:
locks_free_lock(file_lock);
return error;
}
#endif /* BITS_PER_LONG == 32 */
/*
* This function is called when the file is being removed
* from the task's fd array. POSIX locks belonging to this task
* are deleted at this time.
*/
void locks_remove_posix(struct file *filp, fl_owner_t owner)
{
struct file_lock lock;
/*
* If there are no locks held on this file, we don't need to call
* posix_lock_file(). Another process could be setting a lock on this
* file at the same time, but we wouldn't remove that lock anyway.
*/
if (!filp->f_path.dentry->d_inode->i_flock)
return;
lock.fl_type = F_UNLCK;
lock.fl_flags = FL_POSIX | FL_CLOSE;
lock.fl_start = 0;
lock.fl_end = OFFSET_MAX;
lock.fl_owner = owner;
lock.fl_pid = current->tgid;
lock.fl_file = filp;
lock.fl_ops = NULL;
lock.fl_lmops = NULL;
vfs_lock_file(filp, F_SETLK, &lock, NULL);
if (lock.fl_ops && lock.fl_ops->fl_release_private)
lock.fl_ops->fl_release_private(&lock);
}
EXPORT_SYMBOL(locks_remove_posix);
/*
* This function is called on the last close of an open file.
*/
void locks_remove_flock(struct file *filp)
{
struct inode * inode = filp->f_path.dentry->d_inode;
struct file_lock *fl;
struct file_lock **before;
if (!inode->i_flock)
return;
if (filp->f_op && filp->f_op->flock) {
struct file_lock fl = {
.fl_pid = current->tgid,
.fl_file = filp,
.fl_flags = FL_FLOCK,
.fl_type = F_UNLCK,
.fl_end = OFFSET_MAX,
};
filp->f_op->flock(filp, F_SETLKW, &fl);
if (fl.fl_ops && fl.fl_ops->fl_release_private)
fl.fl_ops->fl_release_private(&fl);
}
lock_kernel();
before = &inode->i_flock;
while ((fl = *before) != NULL) {
if (fl->fl_file == filp) {
if (IS_FLOCK(fl)) {
locks_delete_lock(before);
continue;
}
if (IS_LEASE(fl)) {
lease_modify(before, F_UNLCK);
continue;
}
/* What? */
BUG();
}
before = &fl->fl_next;
}
unlock_kernel();
}
/**
* posix_unblock_lock - stop waiting for a file lock
* @filp: how the file was opened
* @waiter: the lock which was waiting
*
* lockd needs to block waiting for locks.
*/
int
posix_unblock_lock(struct file *filp, struct file_lock *waiter)
{
int status = 0;
lock_kernel();
if (waiter->fl_next)
__locks_delete_block(waiter);
else
status = -ENOENT;
unlock_kernel();
return status;
}
EXPORT_SYMBOL(posix_unblock_lock);
/**
* vfs_cancel_lock - file byte range unblock lock
* @filp: The file to apply the unblock to
* @fl: The lock to be unblocked
*
* Used by lock managers to cancel blocked requests
*/
int vfs_cancel_lock(struct file *filp, struct file_lock *fl)
{
if (filp->f_op && filp->f_op->lock)
return filp->f_op->lock(filp, F_CANCELLK, fl);
return 0;
}
EXPORT_SYMBOL_GPL(vfs_cancel_lock);
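/*
 * Illustrative sketch (not part of this file): a lock manager such as
 * lockd that gives up on a blocked request can pair the two helpers
 * above, first asking the filesystem to cancel, then removing the
 * waiter from the local block list.
 *
 *	vfs_cancel_lock(filp, fl);
 *	posix_unblock_lock(filp, fl);
 */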
#ifdef CONFIG_PROC_FS
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
static void lock_get_status(struct seq_file *f, struct file_lock *fl,
int id, char *pfx)
{
struct inode *inode = NULL;
unsigned int fl_pid;
if (fl->fl_nspid)
fl_pid = pid_vnr(fl->fl_nspid);
else
fl_pid = fl->fl_pid;
if (fl->fl_file != NULL)
inode = fl->fl_file->f_path.dentry->d_inode;
seq_printf(f, "%d:%s ", id, pfx);
if (IS_POSIX(fl)) {
seq_printf(f, "%6s %s ",
(fl->fl_flags & FL_ACCESS) ? "ACCESS" : "POSIX ",
(inode == NULL) ? "*NOINODE*" :
mandatory_lock(inode) ? "MANDATORY" : "ADVISORY ");
} else if (IS_FLOCK(fl)) {
if (fl->fl_type & LOCK_MAND) {
seq_printf(f, "FLOCK MSNFS ");
} else {
seq_printf(f, "FLOCK ADVISORY ");
}
} else if (IS_LEASE(fl)) {
seq_printf(f, "LEASE ");
if (fl->fl_type & F_INPROGRESS)
seq_printf(f, "BREAKING ");
else if (fl->fl_file)
seq_printf(f, "ACTIVE ");
else
seq_printf(f, "BREAKER ");
} else {
seq_printf(f, "UNKNOWN UNKNOWN ");
}
if (fl->fl_type & LOCK_MAND) {
seq_printf(f, "%s ",
(fl->fl_type & LOCK_READ)
? (fl->fl_type & LOCK_WRITE) ? "RW " : "READ "
: (fl->fl_type & LOCK_WRITE) ? "WRITE" : "NONE ");
} else {
seq_printf(f, "%s ",
(fl->fl_type & F_INPROGRESS)
? (fl->fl_type & F_UNLCK) ? "UNLCK" : "READ "
: (fl->fl_type & F_WRLCK) ? "WRITE" : "READ ");
}
if (inode) {
#ifdef WE_CAN_BREAK_LSLK_NOW
seq_printf(f, "%d %s:%ld ", fl_pid,
inode->i_sb->s_id, inode->i_ino);
#else
/* userspace relies on this representation of dev_t ;-( */
seq_printf(f, "%d %02x:%02x:%ld ", fl_pid,
MAJOR(inode->i_sb->s_dev),
MINOR(inode->i_sb->s_dev), inode->i_ino);
#endif
} else {
seq_printf(f, "%d <none>:0 ", fl_pid);
}
if (IS_POSIX(fl)) {
if (fl->fl_end == OFFSET_MAX)
seq_printf(f, "%Ld EOF\n", fl->fl_start);
else
seq_printf(f, "%Ld %Ld\n", fl->fl_start, fl->fl_end);
} else {
seq_printf(f, "0 EOF\n");
}
}
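/*
 * For reference, the function above emits /proc/locks lines roughly
 * like the following (values are illustrative):
 *
 *	1: POSIX  ADVISORY  WRITE 1234 08:01:5678 0 EOF
 *	1: -> POSIX  ADVISORY  WRITE 1235 08:01:5678 0 EOF
 *	2: FLOCK ADVISORY  WRITE 5678 08:01:9012 0 EOF
 *
 * where the "->" entry, printed by locks_show() below, is a waiter
 * blocked behind the first lock.
 */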
static int locks_show(struct seq_file *f, void *v)
{
struct file_lock *fl, *bfl;
fl = list_entry(v, struct file_lock, fl_link);
lock_get_status(f, fl, (long)f->private, "");
list_for_each_entry(bfl, &fl->fl_block, fl_block)
lock_get_status(f, bfl, (long)f->private, " ->");
f->private++;
return 0;
}
static void *locks_start(struct seq_file *f, loff_t *pos)
{
lock_kernel();
f->private = (void *)1;
return seq_list_start(&file_lock_list, *pos);
}
static void *locks_next(struct seq_file *f, void *v, loff_t *pos)
{
return seq_list_next(v, &file_lock_list, pos);
}
static void locks_stop(struct seq_file *f, void *v)
{
unlock_kernel();
}
static const struct seq_operations locks_seq_operations = {
.start = locks_start,
.next = locks_next,
.stop = locks_stop,
.show = locks_show,
};
static int locks_open(struct inode *inode, struct file *filp)
{
return seq_open(filp, &locks_seq_operations);
}
static const struct file_operations proc_locks_operations = {
.open = locks_open,
.read = seq_read,
.llseek = seq_lseek,
.release = seq_release,
};
static int __init proc_locks_init(void)
{
proc_create("locks", 0, NULL, &proc_locks_operations);
return 0;
}
module_init(proc_locks_init);
#endif
/**
* lock_may_read - checks that the region is free of locks
* @inode: the inode that is being read
* @start: the first byte to read
* @len: the number of bytes to read
*
* Emulates Windows locking requirements. Whole-file
* mandatory locks (share modes) can prohibit a read and
* byte-range POSIX locks can prohibit a read if they overlap.
*
* N.B. this function is only ever called
* from knfsd and ownership of locks is never checked.
*/
int lock_may_read(struct inode *inode, loff_t start, unsigned long len)
{
struct file_lock *fl;
int result = 1;
lock_kernel();
for (fl = inode->i_flock; fl != NULL; fl = fl->fl_next) {
if (IS_POSIX(fl)) {
if (fl->fl_type == F_RDLCK)
continue;
if ((fl->fl_end < start) || (fl->fl_start > (start + len)))
continue;
} else if (IS_FLOCK(fl)) {
if (!(fl->fl_type & LOCK_MAND))
continue;
if (fl->fl_type & LOCK_READ)
continue;
} else
continue;
result = 0;
break;
}
unlock_kernel();
return result;
}
EXPORT_SYMBOL(lock_may_read);
/**
* lock_may_write - checks that the region is free of locks
* @inode: the inode that is being written
* @start: the first byte to write
* @len: the number of bytes to write
*
* Emulates Windows locking requirements. Whole-file
* mandatory locks (share modes) can prohibit a write and
* byte-range POSIX locks can prohibit a write if they overlap.
*
* N.B. this function is only ever called
* from knfsd and ownership of locks is never checked.
*/
int lock_may_write(struct inode *inode, loff_t start, unsigned long len)
{
struct file_lock *fl;
int result = 1;
lock_kernel();
for (fl = inode->i_flock; fl != NULL; fl = fl->fl_next) {
if (IS_POSIX(fl)) {
if ((fl->fl_end < start) || (fl->fl_start > (start + len)))
continue;
} else if (IS_FLOCK(fl)) {
if (!(fl->fl_type & LOCK_MAND))
continue;
if (fl->fl_type & LOCK_WRITE)
continue;
} else
continue;
result = 0;
break;
}
unlock_kernel();
return result;
}
EXPORT_SYMBOL(lock_may_write);
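/*
 * Illustrative sketch (not part of this file): how a caller such as
 * knfsd might consult these helpers before servicing I/O.  The function
 * name is hypothetical.
 *
 *	static int example_nfsd_read_ok(struct inode *inode, loff_t off,
 *					unsigned long count)
 *	{
 *		return lock_may_read(inode, off, count) ? 0 : -EACCES;
 *	}
 */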
static int __init filelock_init(void)
{
filelock_cache = kmem_cache_create("file_lock_cache",
sizeof(struct file_lock), 0, SLAB_PANIC,
init_once);
return 0;
}
core_initcall(filelock_init);
| gpl-2.0 |
Jeongseob/page-coloring | arch/um/kernel/ptrace.c | 423 | 4316 | /*
* Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
* Licensed under the GPL
*/
#include <linux/audit.h>
#include <linux/ptrace.h>
#include <linux/sched.h>
#include <linux/tracehook.h>
#include <asm/uaccess.h>
#include <skas_ptrace.h>
void user_enable_single_step(struct task_struct *child)
{
child->ptrace |= PT_DTRACE;
child->thread.singlestep_syscall = 0;
#ifdef SUBARCH_SET_SINGLESTEPPING
SUBARCH_SET_SINGLESTEPPING(child, 1);
#endif
}
void user_disable_single_step(struct task_struct *child)
{
child->ptrace &= ~PT_DTRACE;
child->thread.singlestep_syscall = 0;
#ifdef SUBARCH_SET_SINGLESTEPPING
SUBARCH_SET_SINGLESTEPPING(child, 0);
#endif
}
/*
* Called by kernel/ptrace.c when detaching..
*/
void ptrace_disable(struct task_struct *child)
{
user_disable_single_step(child);
}
extern int peek_user(struct task_struct * child, long addr, long data);
extern int poke_user(struct task_struct * child, long addr, long data);
long arch_ptrace(struct task_struct *child, long request,
unsigned long addr, unsigned long data)
{
int i, ret;
unsigned long __user *p = (void __user *)data;
void __user *vp = p;
switch (request) {
/* read the word at location addr in the USER area. */
case PTRACE_PEEKUSR:
ret = peek_user(child, addr, data);
break;
/* write the word at location addr in the USER area */
case PTRACE_POKEUSR:
ret = poke_user(child, addr, data);
break;
case PTRACE_SYSEMU:
case PTRACE_SYSEMU_SINGLESTEP:
ret = -EIO;
break;
#ifdef PTRACE_GETREGS
case PTRACE_GETREGS: { /* Get all gp regs from the child. */
if (!access_ok(VERIFY_WRITE, p, MAX_REG_OFFSET)) {
ret = -EIO;
break;
}
for ( i = 0; i < MAX_REG_OFFSET; i += sizeof(long) ) {
__put_user(getreg(child, i), p);
p++;
}
ret = 0;
break;
}
#endif
#ifdef PTRACE_SETREGS
case PTRACE_SETREGS: { /* Set all gp regs in the child. */
unsigned long tmp = 0;
if (!access_ok(VERIFY_READ, p, MAX_REG_OFFSET)) {
ret = -EIO;
break;
}
for ( i = 0; i < MAX_REG_OFFSET; i += sizeof(long) ) {
__get_user(tmp, p);
putreg(child, i, tmp);
p++;
}
ret = 0;
break;
}
#endif
case PTRACE_GET_THREAD_AREA:
ret = ptrace_get_thread_area(child, addr, vp);
break;
case PTRACE_SET_THREAD_AREA:
ret = ptrace_set_thread_area(child, addr, vp);
break;
case PTRACE_FAULTINFO: {
/*
* Take the info from thread->arch->faultinfo,
* but transfer max. sizeof(struct ptrace_faultinfo).
* On i386, ptrace_faultinfo is smaller!
*/
ret = copy_to_user(p, &child->thread.arch.faultinfo,
sizeof(struct ptrace_faultinfo)) ?
-EIO : 0;
break;
}
#ifdef PTRACE_LDT
case PTRACE_LDT: {
struct ptrace_ldt ldt;
if (copy_from_user(&ldt, p, sizeof(ldt))) {
ret = -EIO;
break;
}
/*
* This one is confusing, so just punt and return -EIO for
* now
*/
ret = -EIO;
break;
}
#endif
default:
ret = ptrace_request(child, request, addr, data);
if (ret == -EIO)
ret = subarch_ptrace(child, request, addr, data);
break;
}
return ret;
}
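/*
 * Illustrative userspace sketch (not part of this file): a tracer
 * exercising the PTRACE_PEEKUSR path handled above.  REG_OFFSET is a
 * hypothetical offset into the USER area; error handling is omitted.
 *
 *	ptrace(PTRACE_ATTACH, pid, 0, 0);
 *	waitpid(pid, &status, 0);
 *	long val = ptrace(PTRACE_PEEKUSR, pid, REG_OFFSET, 0);
 *	ptrace(PTRACE_DETACH, pid, 0, 0);
 */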
static void send_sigtrap(struct task_struct *tsk, struct uml_pt_regs *regs,
int error_code)
{
struct siginfo info;
memset(&info, 0, sizeof(info));
info.si_signo = SIGTRAP;
info.si_code = TRAP_BRKPT;
/* User-mode eip? */
info.si_addr = UPT_IS_USER(regs) ? (void __user *) UPT_IP(regs) : NULL;
/* Send us the fake SIGTRAP */
force_sig_info(SIGTRAP, &info, tsk);
}
/*
* XXX Check PT_DTRACE vs TIF_SINGLESTEP for singlestepping check and
* PT_PTRACED vs TIF_SYSCALL_TRACE for syscall tracing check
*/
void syscall_trace_enter(struct pt_regs *regs)
{
audit_syscall_entry(UPT_SYSCALL_NR(&regs->regs),
UPT_SYSCALL_ARG1(&regs->regs),
UPT_SYSCALL_ARG2(&regs->regs),
UPT_SYSCALL_ARG3(&regs->regs),
UPT_SYSCALL_ARG4(&regs->regs));
if (!test_thread_flag(TIF_SYSCALL_TRACE))
return;
tracehook_report_syscall_entry(regs);
}
void syscall_trace_leave(struct pt_regs *regs)
{
int ptraced = current->ptrace;
audit_syscall_exit(regs);
/* Fake a debug trap */
if (ptraced & PT_DTRACE)
send_sigtrap(current, &regs->regs, 0);
if (!test_thread_flag(TIF_SYSCALL_TRACE))
return;
tracehook_report_syscall_exit(regs, 0);
/* force do_signal() --> is_syscall() */
if (ptraced & PT_PTRACED)
set_thread_flag(TIF_SIGPENDING);
}
| gpl-2.0 |
ashleyjune/SM-G360T1_kernel | fs/nfsd/nfs4xdr.c | 679 | 91570 | /*
* Server-side XDR for NFSv4
*
* Copyright (c) 2002 The Regents of the University of Michigan.
* All rights reserved.
*
* Kendrick Smith <kmsmith@umich.edu>
* Andy Adamson <andros@umich.edu>
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. Neither the name of the University nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* TODO: Neil Brown made the following observation: We currently
* initially reserve NFSD_BUFSIZE space on the transmit queue and
* never release any of that until the request is complete.
* It would be good to calculate a new maximum response size while
* decoding the COMPOUND, and call svc_reserve with this number
* at the end of nfs4svc_decode_compoundargs.
*/
#include <linux/slab.h>
#include <linux/namei.h>
#include <linux/statfs.h>
#include <linux/utsname.h>
#include <linux/pagemap.h>
#include <linux/sunrpc/svcauth_gss.h>
#include "idmap.h"
#include "acl.h"
#include "xdr4.h"
#include "vfs.h"
#include "state.h"
#include "cache.h"
#include "netns.h"
#define NFSDDBG_FACILITY NFSDDBG_XDR
/*
* As per referral draft, the fsid for a referral MUST be different from the fsid of the containing
* directory, in order to indicate to the client that a filesystem boundary is present.
* We use a fixed fsid for a referral.
*/
#define NFS4_REFERRAL_FSID_MAJOR 0x8000000ULL
#define NFS4_REFERRAL_FSID_MINOR 0x8000000ULL
static __be32
check_filename(char *str, int len)
{
int i;
if (len == 0)
return nfserr_inval;
if (isdotent(str, len))
return nfserr_badname;
for (i = 0; i < len; i++)
if (str[i] == '/')
return nfserr_badname;
return 0;
}
#define DECODE_HEAD \
__be32 *p; \
__be32 status
#define DECODE_TAIL \
status = 0; \
out: \
return status; \
xdr_error: \
dprintk("NFSD: xdr error (%s:%d)\n", \
__FILE__, __LINE__); \
status = nfserr_bad_xdr; \
goto out
#define READ32(x) (x) = ntohl(*p++)
#define READ64(x) do { \
(x) = (u64)ntohl(*p++) << 32; \
(x) |= ntohl(*p++); \
} while (0)
#define READTIME(x) do { \
p++; \
(x) = ntohl(*p++); \
p++; \
} while (0)
#define READMEM(x,nbytes) do { \
x = (char *)p; \
p += XDR_QUADLEN(nbytes); \
} while (0)
#define SAVEMEM(x,nbytes) do { \
if (!(x = (p==argp->tmp || p == argp->tmpp) ? \
savemem(argp, p, nbytes) : \
(char *)p)) { \
dprintk("NFSD: xdr error (%s:%d)\n", \
__FILE__, __LINE__); \
goto xdr_error; \
} \
p += XDR_QUADLEN(nbytes); \
} while (0)
#define COPYMEM(x,nbytes) do { \
memcpy((x), p, nbytes); \
p += XDR_QUADLEN(nbytes); \
} while (0)
/* READ_BUF, read_buf(): nbytes must be <= PAGE_SIZE */
#define READ_BUF(nbytes) do { \
if (nbytes <= (u32)((char *)argp->end - (char *)argp->p)) { \
p = argp->p; \
argp->p += XDR_QUADLEN(nbytes); \
} else if (!(p = read_buf(argp, nbytes))) { \
dprintk("NFSD: xdr error (%s:%d)\n", \
__FILE__, __LINE__); \
goto xdr_error; \
} \
} while (0)
static __be32 *read_buf(struct nfsd4_compoundargs *argp, u32 nbytes)
{
/* We want more bytes than seem to be available.
* Maybe we need a new page, maybe we have just run out.
*/
unsigned int avail = (char *)argp->end - (char *)argp->p;
__be32 *p;
if (avail + argp->pagelen < nbytes)
return NULL;
if (avail + PAGE_SIZE < nbytes) /* need more than a page !! */
return NULL;
/* ok, we can do it with the current plus the next page */
if (nbytes <= sizeof(argp->tmp))
p = argp->tmp;
else {
kfree(argp->tmpp);
p = argp->tmpp = kmalloc(nbytes, GFP_KERNEL);
if (!p)
return NULL;
}
/*
* The following memcpy is safe because read_buf is always
* called with nbytes > avail, and the two cases above both
* guarantee p points to at least nbytes bytes.
*/
memcpy(p, argp->p, avail);
/* step to next page */
argp->pagelist++;
argp->p = page_address(argp->pagelist[0]);
if (argp->pagelen < PAGE_SIZE) {
argp->end = argp->p + (argp->pagelen>>2);
argp->pagelen = 0;
} else {
argp->end = argp->p + (PAGE_SIZE>>2);
argp->pagelen -= PAGE_SIZE;
}
memcpy(((char*)p)+avail, argp->p, (nbytes - avail));
argp->p += XDR_QUADLEN(nbytes - avail);
return p;
}
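/*
 * Worked example (illustrative): suppose 4 bytes remain in the current
 * page (avail == 4) and a decoder issues READ_BUF(12).  read_buf()
 * copies the 4 remaining bytes into argp->tmp, steps argp->p to the
 * start of the next page, appends the other 8 bytes, and advances
 * argp->p past them; the decoder then reads all 12 bytes contiguously
 * from the returned pointer.
 */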
static int zero_clientid(clientid_t *clid)
{
return (clid->cl_boot == 0) && (clid->cl_id == 0);
}
static int
defer_free(struct nfsd4_compoundargs *argp,
void (*release)(const void *), void *p)
{
struct tmpbuf *tb;
tb = kmalloc(sizeof(*tb), GFP_KERNEL);
if (!tb)
return -ENOMEM;
tb->buf = p;
tb->release = release;
tb->next = argp->to_free;
argp->to_free = tb;
return 0;
}
static char *savemem(struct nfsd4_compoundargs *argp, __be32 *p, int nbytes)
{
if (p == argp->tmp) {
p = kmemdup(argp->tmp, nbytes, GFP_KERNEL);
if (!p)
return NULL;
} else {
BUG_ON(p != argp->tmpp);
argp->tmpp = NULL;
}
if (defer_free(argp, kfree, p)) {
kfree(p);
return NULL;
} else
return (char *)p;
}
static __be32
nfsd4_decode_bitmap(struct nfsd4_compoundargs *argp, u32 *bmval)
{
u32 bmlen;
DECODE_HEAD;
bmval[0] = 0;
bmval[1] = 0;
bmval[2] = 0;
READ_BUF(4);
READ32(bmlen);
if (bmlen > 1000)
goto xdr_error;
READ_BUF(bmlen << 2);
if (bmlen > 0)
READ32(bmval[0]);
if (bmlen > 1)
READ32(bmval[1]);
if (bmlen > 2)
READ32(bmval[2]);
DECODE_TAIL;
}
static __be32
nfsd4_decode_fattr(struct nfsd4_compoundargs *argp, u32 *bmval,
struct iattr *iattr, struct nfs4_acl **acl)
{
int expected_len, len = 0;
u32 dummy32;
char *buf;
int host_err;
DECODE_HEAD;
iattr->ia_valid = 0;
if ((status = nfsd4_decode_bitmap(argp, bmval)))
return status;
READ_BUF(4);
READ32(expected_len);
if (bmval[0] & FATTR4_WORD0_SIZE) {
READ_BUF(8);
len += 8;
READ64(iattr->ia_size);
iattr->ia_valid |= ATTR_SIZE;
}
if (bmval[0] & FATTR4_WORD0_ACL) {
u32 nace;
struct nfs4_ace *ace;
READ_BUF(4); len += 4;
READ32(nace);
if (nace > NFS4_ACL_MAX)
return nfserr_resource;
*acl = nfs4_acl_new(nace);
if (*acl == NULL) {
host_err = -ENOMEM;
goto out_nfserr;
}
defer_free(argp, kfree, *acl);
(*acl)->naces = nace;
for (ace = (*acl)->aces; ace < (*acl)->aces + nace; ace++) {
READ_BUF(16); len += 16;
READ32(ace->type);
READ32(ace->flag);
READ32(ace->access_mask);
READ32(dummy32);
READ_BUF(dummy32);
len += XDR_QUADLEN(dummy32) << 2;
READMEM(buf, dummy32);
ace->whotype = nfs4_acl_get_whotype(buf, dummy32);
status = nfs_ok;
if (ace->whotype != NFS4_ACL_WHO_NAMED)
;
else if (ace->flag & NFS4_ACE_IDENTIFIER_GROUP)
status = nfsd_map_name_to_gid(argp->rqstp,
buf, dummy32, &ace->who_gid);
else
status = nfsd_map_name_to_uid(argp->rqstp,
buf, dummy32, &ace->who_uid);
if (status)
return status;
}
} else
*acl = NULL;
if (bmval[1] & FATTR4_WORD1_MODE) {
READ_BUF(4);
len += 4;
READ32(iattr->ia_mode);
iattr->ia_mode &= (S_IFMT | S_IALLUGO);
iattr->ia_valid |= ATTR_MODE;
}
if (bmval[1] & FATTR4_WORD1_OWNER) {
READ_BUF(4);
len += 4;
READ32(dummy32);
READ_BUF(dummy32);
len += (XDR_QUADLEN(dummy32) << 2);
READMEM(buf, dummy32);
if ((status = nfsd_map_name_to_uid(argp->rqstp, buf, dummy32, &iattr->ia_uid)))
return status;
iattr->ia_valid |= ATTR_UID;
}
if (bmval[1] & FATTR4_WORD1_OWNER_GROUP) {
READ_BUF(4);
len += 4;
READ32(dummy32);
READ_BUF(dummy32);
len += (XDR_QUADLEN(dummy32) << 2);
READMEM(buf, dummy32);
if ((status = nfsd_map_name_to_gid(argp->rqstp, buf, dummy32, &iattr->ia_gid)))
return status;
iattr->ia_valid |= ATTR_GID;
}
if (bmval[1] & FATTR4_WORD1_TIME_ACCESS_SET) {
READ_BUF(4);
len += 4;
READ32(dummy32);
switch (dummy32) {
case NFS4_SET_TO_CLIENT_TIME:
/* We require the high 32 bits of 'seconds' to be 0;
'nseconds' must be a valid nanosecond count. */
READ_BUF(12);
len += 12;
READ64(iattr->ia_atime.tv_sec);
READ32(iattr->ia_atime.tv_nsec);
if (iattr->ia_atime.tv_nsec >= (u32)1000000000)
return nfserr_inval;
iattr->ia_valid |= (ATTR_ATIME | ATTR_ATIME_SET);
break;
case NFS4_SET_TO_SERVER_TIME:
iattr->ia_valid |= ATTR_ATIME;
break;
default:
goto xdr_error;
}
}
if (bmval[1] & FATTR4_WORD1_TIME_MODIFY_SET) {
READ_BUF(4);
len += 4;
READ32(dummy32);
switch (dummy32) {
case NFS4_SET_TO_CLIENT_TIME:
/* We require the high 32 bits of 'seconds' to be 0;
'nseconds' must be a valid nanosecond count. */
READ_BUF(12);
len += 12;
READ64(iattr->ia_mtime.tv_sec);
READ32(iattr->ia_mtime.tv_nsec);
if (iattr->ia_mtime.tv_nsec >= (u32)1000000000)
return nfserr_inval;
iattr->ia_valid |= (ATTR_MTIME | ATTR_MTIME_SET);
break;
case NFS4_SET_TO_SERVER_TIME:
iattr->ia_valid |= ATTR_MTIME;
break;
default:
goto xdr_error;
}
}
if (bmval[0] & ~NFSD_WRITEABLE_ATTRS_WORD0
|| bmval[1] & ~NFSD_WRITEABLE_ATTRS_WORD1
|| bmval[2] & ~NFSD_WRITEABLE_ATTRS_WORD2)
READ_BUF(expected_len - len);
else if (len != expected_len)
goto xdr_error;
DECODE_TAIL;
out_nfserr:
status = nfserrno(host_err);
goto out;
}
static __be32
nfsd4_decode_stateid(struct nfsd4_compoundargs *argp, stateid_t *sid)
{
DECODE_HEAD;
READ_BUF(sizeof(stateid_t));
READ32(sid->si_generation);
COPYMEM(&sid->si_opaque, sizeof(stateid_opaque_t));
DECODE_TAIL;
}
static __be32
nfsd4_decode_access(struct nfsd4_compoundargs *argp, struct nfsd4_access *access)
{
DECODE_HEAD;
READ_BUF(4);
READ32(access->ac_req_access);
DECODE_TAIL;
}
static __be32 nfsd4_decode_cb_sec(struct nfsd4_compoundargs *argp, struct nfsd4_cb_sec *cbs)
{
DECODE_HEAD;
u32 dummy, uid, gid;
char *machine_name;
int i;
int nr_secflavs;
/* callback_sec_params4 */
READ_BUF(4);
READ32(nr_secflavs);
cbs->flavor = (u32)(-1);
for (i = 0; i < nr_secflavs; ++i) {
READ_BUF(4);
READ32(dummy);
switch (dummy) {
case RPC_AUTH_NULL:
/* Nothing to read */
if (cbs->flavor == (u32)(-1))
cbs->flavor = RPC_AUTH_NULL;
break;
case RPC_AUTH_UNIX:
READ_BUF(8);
/* stamp */
READ32(dummy);
/* machine name */
READ32(dummy);
READ_BUF(dummy);
SAVEMEM(machine_name, dummy);
/* uid, gid */
READ_BUF(8);
READ32(uid);
READ32(gid);
/* more gids */
READ_BUF(4);
READ32(dummy);
READ_BUF(dummy * 4);
if (cbs->flavor == (u32)(-1)) {
kuid_t kuid = make_kuid(&init_user_ns, uid);
kgid_t kgid = make_kgid(&init_user_ns, gid);
if (uid_valid(kuid) && gid_valid(kgid)) {
cbs->uid = kuid;
cbs->gid = kgid;
cbs->flavor = RPC_AUTH_UNIX;
} else {
dprintk("RPC_AUTH_UNIX with invalid"
"uid or gid ignoring!\n");
}
}
break;
case RPC_AUTH_GSS:
dprintk("RPC_AUTH_GSS callback secflavor "
"not supported!\n");
READ_BUF(8);
/* gcbp_service */
READ32(dummy);
/* gcbp_handle_from_server */
READ32(dummy);
READ_BUF(dummy);
p += XDR_QUADLEN(dummy);
/* gcbp_handle_from_client */
READ_BUF(4);
READ32(dummy);
READ_BUF(dummy);
break;
default:
dprintk("Illegal callback secflavor\n");
return nfserr_inval;
}
}
DECODE_TAIL;
}
static __be32 nfsd4_decode_backchannel_ctl(struct nfsd4_compoundargs *argp, struct nfsd4_backchannel_ctl *bc)
{
DECODE_HEAD;
READ_BUF(4);
READ32(bc->bc_cb_program);
nfsd4_decode_cb_sec(argp, &bc->bc_cb_sec);
DECODE_TAIL;
}
static __be32 nfsd4_decode_bind_conn_to_session(struct nfsd4_compoundargs *argp, struct nfsd4_bind_conn_to_session *bcts)
{
DECODE_HEAD;
READ_BUF(NFS4_MAX_SESSIONID_LEN + 8);
COPYMEM(bcts->sessionid.data, NFS4_MAX_SESSIONID_LEN);
READ32(bcts->dir);
/* XXX: skipping ctsa_use_conn_in_rdma_mode. Perhaps Tom Tucker
* could help us figure out how we should be using it. */
DECODE_TAIL;
}
static __be32
nfsd4_decode_close(struct nfsd4_compoundargs *argp, struct nfsd4_close *close)
{
DECODE_HEAD;
READ_BUF(4);
READ32(close->cl_seqid);
return nfsd4_decode_stateid(argp, &close->cl_stateid);
DECODE_TAIL;
}
static __be32
nfsd4_decode_commit(struct nfsd4_compoundargs *argp, struct nfsd4_commit *commit)
{
DECODE_HEAD;
READ_BUF(12);
READ64(commit->co_offset);
READ32(commit->co_count);
DECODE_TAIL;
}
static __be32
nfsd4_decode_create(struct nfsd4_compoundargs *argp, struct nfsd4_create *create)
{
DECODE_HEAD;
READ_BUF(4);
READ32(create->cr_type);
switch (create->cr_type) {
case NF4LNK:
READ_BUF(4);
READ32(create->cr_linklen);
READ_BUF(create->cr_linklen);
/*
* The VFS will want a null-terminated string, and
* null-terminating in place isn't safe since this might
* end on a page boundary:
*/
create->cr_linkname =
kmalloc(create->cr_linklen + 1, GFP_KERNEL);
if (!create->cr_linkname)
return nfserr_jukebox;
memcpy(create->cr_linkname, p, create->cr_linklen);
create->cr_linkname[create->cr_linklen] = '\0';
defer_free(argp, kfree, create->cr_linkname);
break;
case NF4BLK:
case NF4CHR:
READ_BUF(8);
READ32(create->cr_specdata1);
READ32(create->cr_specdata2);
break;
case NF4SOCK:
case NF4FIFO:
case NF4DIR:
default:
break;
}
READ_BUF(4);
READ32(create->cr_namelen);
READ_BUF(create->cr_namelen);
SAVEMEM(create->cr_name, create->cr_namelen);
if ((status = check_filename(create->cr_name, create->cr_namelen)))
return status;
status = nfsd4_decode_fattr(argp, create->cr_bmval, &create->cr_iattr,
&create->cr_acl);
if (status)
goto out;
DECODE_TAIL;
}
static inline __be32
nfsd4_decode_delegreturn(struct nfsd4_compoundargs *argp, struct nfsd4_delegreturn *dr)
{
return nfsd4_decode_stateid(argp, &dr->dr_stateid);
}
static inline __be32
nfsd4_decode_getattr(struct nfsd4_compoundargs *argp, struct nfsd4_getattr *getattr)
{
return nfsd4_decode_bitmap(argp, getattr->ga_bmval);
}
static __be32
nfsd4_decode_link(struct nfsd4_compoundargs *argp, struct nfsd4_link *link)
{
DECODE_HEAD;
READ_BUF(4);
READ32(link->li_namelen);
READ_BUF(link->li_namelen);
SAVEMEM(link->li_name, link->li_namelen);
if ((status = check_filename(link->li_name, link->li_namelen)))
return status;
DECODE_TAIL;
}
static __be32
nfsd4_decode_lock(struct nfsd4_compoundargs *argp, struct nfsd4_lock *lock)
{
DECODE_HEAD;
/*
* type, reclaim(boolean), offset, length, new_lock_owner(boolean)
*/
READ_BUF(28);
READ32(lock->lk_type);
if ((lock->lk_type < NFS4_READ_LT) || (lock->lk_type > NFS4_WRITEW_LT))
goto xdr_error;
READ32(lock->lk_reclaim);
READ64(lock->lk_offset);
READ64(lock->lk_length);
READ32(lock->lk_is_new);
if (lock->lk_is_new) {
READ_BUF(4);
READ32(lock->lk_new_open_seqid);
status = nfsd4_decode_stateid(argp, &lock->lk_new_open_stateid);
if (status)
return status;
READ_BUF(8 + sizeof(clientid_t));
READ32(lock->lk_new_lock_seqid);
COPYMEM(&lock->lk_new_clientid, sizeof(clientid_t));
READ32(lock->lk_new_owner.len);
READ_BUF(lock->lk_new_owner.len);
READMEM(lock->lk_new_owner.data, lock->lk_new_owner.len);
} else {
status = nfsd4_decode_stateid(argp, &lock->lk_old_lock_stateid);
if (status)
return status;
READ_BUF(4);
READ32(lock->lk_old_lock_seqid);
}
DECODE_TAIL;
}
static __be32
nfsd4_decode_lockt(struct nfsd4_compoundargs *argp, struct nfsd4_lockt *lockt)
{
DECODE_HEAD;
READ_BUF(32);
READ32(lockt->lt_type);
if((lockt->lt_type < NFS4_READ_LT) || (lockt->lt_type > NFS4_WRITEW_LT))
goto xdr_error;
READ64(lockt->lt_offset);
READ64(lockt->lt_length);
COPYMEM(&lockt->lt_clientid, 8);
READ32(lockt->lt_owner.len);
READ_BUF(lockt->lt_owner.len);
READMEM(lockt->lt_owner.data, lockt->lt_owner.len);
DECODE_TAIL;
}
static __be32
nfsd4_decode_locku(struct nfsd4_compoundargs *argp, struct nfsd4_locku *locku)
{
DECODE_HEAD;
READ_BUF(8);
READ32(locku->lu_type);
if ((locku->lu_type < NFS4_READ_LT) || (locku->lu_type > NFS4_WRITEW_LT))
goto xdr_error;
READ32(locku->lu_seqid);
status = nfsd4_decode_stateid(argp, &locku->lu_stateid);
if (status)
return status;
READ_BUF(16);
READ64(locku->lu_offset);
READ64(locku->lu_length);
DECODE_TAIL;
}
static __be32
nfsd4_decode_lookup(struct nfsd4_compoundargs *argp, struct nfsd4_lookup *lookup)
{
DECODE_HEAD;
READ_BUF(4);
READ32(lookup->lo_len);
READ_BUF(lookup->lo_len);
SAVEMEM(lookup->lo_name, lookup->lo_len);
if ((status = check_filename(lookup->lo_name, lookup->lo_len)))
return status;
DECODE_TAIL;
}
static __be32 nfsd4_decode_share_access(struct nfsd4_compoundargs *argp, u32 *share_access, u32 *deleg_want, u32 *deleg_when)
{
__be32 *p;
u32 w;
READ_BUF(4);
READ32(w);
*share_access = w & NFS4_SHARE_ACCESS_MASK;
*deleg_want = w & NFS4_SHARE_WANT_MASK;
if (deleg_when)
*deleg_when = w & NFS4_SHARE_WHEN_MASK;
switch (w & NFS4_SHARE_ACCESS_MASK) {
case NFS4_SHARE_ACCESS_READ:
case NFS4_SHARE_ACCESS_WRITE:
case NFS4_SHARE_ACCESS_BOTH:
break;
default:
return nfserr_bad_xdr;
}
w &= ~NFS4_SHARE_ACCESS_MASK;
if (!w)
return nfs_ok;
if (!argp->minorversion)
return nfserr_bad_xdr;
switch (w & NFS4_SHARE_WANT_MASK) {
case NFS4_SHARE_WANT_NO_PREFERENCE:
case NFS4_SHARE_WANT_READ_DELEG:
case NFS4_SHARE_WANT_WRITE_DELEG:
case NFS4_SHARE_WANT_ANY_DELEG:
case NFS4_SHARE_WANT_NO_DELEG:
case NFS4_SHARE_WANT_CANCEL:
break;
default:
return nfserr_bad_xdr;
}
w &= ~NFS4_SHARE_WANT_MASK;
if (!w)
return nfs_ok;
if (!deleg_when) /* open_downgrade */
return nfserr_inval;
switch (w) {
case NFS4_SHARE_SIGNAL_DELEG_WHEN_RESRC_AVAIL:
case NFS4_SHARE_PUSH_DELEG_WHEN_UNCONTENDED:
case (NFS4_SHARE_SIGNAL_DELEG_WHEN_RESRC_AVAIL |
NFS4_SHARE_PUSH_DELEG_WHEN_UNCONTENDED):
return nfs_ok;
}
xdr_error:
return nfserr_bad_xdr;
}
static __be32 nfsd4_decode_share_deny(struct nfsd4_compoundargs *argp, u32 *x)
{
__be32 *p;
READ_BUF(4);
READ32(*x);
/* Note: unlike access bits, deny bits may be zero. */
if (*x & ~NFS4_SHARE_DENY_BOTH)
return nfserr_bad_xdr;
return nfs_ok;
xdr_error:
return nfserr_bad_xdr;
}
static __be32 nfsd4_decode_opaque(struct nfsd4_compoundargs *argp, struct xdr_netobj *o)
{
__be32 *p;
READ_BUF(4);
READ32(o->len);
if (o->len == 0 || o->len > NFS4_OPAQUE_LIMIT)
return nfserr_bad_xdr;
READ_BUF(o->len);
SAVEMEM(o->data, o->len);
return nfs_ok;
xdr_error:
return nfserr_bad_xdr;
}
static __be32
nfsd4_decode_open(struct nfsd4_compoundargs *argp, struct nfsd4_open *open)
{
DECODE_HEAD;
u32 dummy;
memset(open->op_bmval, 0, sizeof(open->op_bmval));
open->op_iattr.ia_valid = 0;
open->op_openowner = NULL;
open->op_xdr_error = 0;
/* seqid, share_access, share_deny, clientid, ownerlen */
READ_BUF(4);
READ32(open->op_seqid);
/* decode, yet ignore deleg_when until supported */
status = nfsd4_decode_share_access(argp, &open->op_share_access,
&open->op_deleg_want, &dummy);
if (status)
goto xdr_error;
status = nfsd4_decode_share_deny(argp, &open->op_share_deny);
if (status)
goto xdr_error;
READ_BUF(sizeof(clientid_t));
COPYMEM(&open->op_clientid, sizeof(clientid_t));
status = nfsd4_decode_opaque(argp, &open->op_owner);
if (status)
goto xdr_error;
READ_BUF(4);
READ32(open->op_create);
switch (open->op_create) {
case NFS4_OPEN_NOCREATE:
break;
case NFS4_OPEN_CREATE:
READ_BUF(4);
READ32(open->op_createmode);
switch (open->op_createmode) {
case NFS4_CREATE_UNCHECKED:
case NFS4_CREATE_GUARDED:
status = nfsd4_decode_fattr(argp, open->op_bmval,
&open->op_iattr, &open->op_acl);
if (status)
goto out;
break;
case NFS4_CREATE_EXCLUSIVE:
READ_BUF(NFS4_VERIFIER_SIZE);
COPYMEM(open->op_verf.data, NFS4_VERIFIER_SIZE);
break;
case NFS4_CREATE_EXCLUSIVE4_1:
if (argp->minorversion < 1)
goto xdr_error;
READ_BUF(NFS4_VERIFIER_SIZE);
COPYMEM(open->op_verf.data, NFS4_VERIFIER_SIZE);
status = nfsd4_decode_fattr(argp, open->op_bmval,
&open->op_iattr, &open->op_acl);
if (status)
goto out;
break;
default:
goto xdr_error;
}
break;
default:
goto xdr_error;
}
/* open_claim */
READ_BUF(4);
READ32(open->op_claim_type);
switch (open->op_claim_type) {
case NFS4_OPEN_CLAIM_NULL:
case NFS4_OPEN_CLAIM_DELEGATE_PREV:
READ_BUF(4);
READ32(open->op_fname.len);
READ_BUF(open->op_fname.len);
SAVEMEM(open->op_fname.data, open->op_fname.len);
if ((status = check_filename(open->op_fname.data, open->op_fname.len)))
return status;
break;
case NFS4_OPEN_CLAIM_PREVIOUS:
READ_BUF(4);
READ32(open->op_delegate_type);
break;
case NFS4_OPEN_CLAIM_DELEGATE_CUR:
status = nfsd4_decode_stateid(argp, &open->op_delegate_stateid);
if (status)
return status;
READ_BUF(4);
READ32(open->op_fname.len);
READ_BUF(open->op_fname.len);
SAVEMEM(open->op_fname.data, open->op_fname.len);
if ((status = check_filename(open->op_fname.data, open->op_fname.len)))
return status;
break;
case NFS4_OPEN_CLAIM_FH:
case NFS4_OPEN_CLAIM_DELEG_PREV_FH:
if (argp->minorversion < 1)
goto xdr_error;
/* void */
break;
case NFS4_OPEN_CLAIM_DELEG_CUR_FH:
if (argp->minorversion < 1)
goto xdr_error;
status = nfsd4_decode_stateid(argp, &open->op_delegate_stateid);
if (status)
return status;
break;
default:
goto xdr_error;
}
DECODE_TAIL;
}
static __be32
nfsd4_decode_open_confirm(struct nfsd4_compoundargs *argp, struct nfsd4_open_confirm *open_conf)
{
DECODE_HEAD;
status = nfsd4_decode_stateid(argp, &open_conf->oc_req_stateid);
if (status)
return status;
READ_BUF(4);
READ32(open_conf->oc_seqid);
DECODE_TAIL;
}
static __be32
nfsd4_decode_open_downgrade(struct nfsd4_compoundargs *argp, struct nfsd4_open_downgrade *open_down)
{
DECODE_HEAD;
status = nfsd4_decode_stateid(argp, &open_down->od_stateid);
if (status)
return status;
READ_BUF(4);
READ32(open_down->od_seqid);
status = nfsd4_decode_share_access(argp, &open_down->od_share_access,
&open_down->od_deleg_want, NULL);
if (status)
return status;
status = nfsd4_decode_share_deny(argp, &open_down->od_share_deny);
if (status)
return status;
DECODE_TAIL;
}
static __be32
nfsd4_decode_putfh(struct nfsd4_compoundargs *argp, struct nfsd4_putfh *putfh)
{
DECODE_HEAD;
READ_BUF(4);
READ32(putfh->pf_fhlen);
if (putfh->pf_fhlen > NFS4_FHSIZE)
goto xdr_error;
READ_BUF(putfh->pf_fhlen);
SAVEMEM(putfh->pf_fhval, putfh->pf_fhlen);
DECODE_TAIL;
}
static __be32
nfsd4_decode_read(struct nfsd4_compoundargs *argp, struct nfsd4_read *read)
{
DECODE_HEAD;
status = nfsd4_decode_stateid(argp, &read->rd_stateid);
if (status)
return status;
READ_BUF(12);
READ64(read->rd_offset);
READ32(read->rd_length);
DECODE_TAIL;
}
static __be32
nfsd4_decode_readdir(struct nfsd4_compoundargs *argp, struct nfsd4_readdir *readdir)
{
DECODE_HEAD;
READ_BUF(24);
READ64(readdir->rd_cookie);
COPYMEM(readdir->rd_verf.data, sizeof(readdir->rd_verf.data));
READ32(readdir->rd_dircount); /* just in case you needed a useless field... */
READ32(readdir->rd_maxcount);
if ((status = nfsd4_decode_bitmap(argp, readdir->rd_bmval)))
goto out;
DECODE_TAIL;
}
static __be32
nfsd4_decode_remove(struct nfsd4_compoundargs *argp, struct nfsd4_remove *remove)
{
DECODE_HEAD;
READ_BUF(4);
READ32(remove->rm_namelen);
READ_BUF(remove->rm_namelen);
SAVEMEM(remove->rm_name, remove->rm_namelen);
if ((status = check_filename(remove->rm_name, remove->rm_namelen)))
return status;
DECODE_TAIL;
}
static __be32
nfsd4_decode_rename(struct nfsd4_compoundargs *argp, struct nfsd4_rename *rename)
{
DECODE_HEAD;
READ_BUF(4);
READ32(rename->rn_snamelen);
READ_BUF(rename->rn_snamelen + 4);
SAVEMEM(rename->rn_sname, rename->rn_snamelen);
READ32(rename->rn_tnamelen);
READ_BUF(rename->rn_tnamelen);
SAVEMEM(rename->rn_tname, rename->rn_tnamelen);
if ((status = check_filename(rename->rn_sname, rename->rn_snamelen)))
return status;
if ((status = check_filename(rename->rn_tname, rename->rn_tnamelen)))
return status;
DECODE_TAIL;
}
static __be32
nfsd4_decode_renew(struct nfsd4_compoundargs *argp, clientid_t *clientid)
{
DECODE_HEAD;
READ_BUF(sizeof(clientid_t));
COPYMEM(clientid, sizeof(clientid_t));
DECODE_TAIL;
}
static __be32
nfsd4_decode_secinfo(struct nfsd4_compoundargs *argp,
struct nfsd4_secinfo *secinfo)
{
DECODE_HEAD;
READ_BUF(4);
READ32(secinfo->si_namelen);
READ_BUF(secinfo->si_namelen);
SAVEMEM(secinfo->si_name, secinfo->si_namelen);
status = check_filename(secinfo->si_name, secinfo->si_namelen);
if (status)
return status;
DECODE_TAIL;
}
static __be32
nfsd4_decode_secinfo_no_name(struct nfsd4_compoundargs *argp,
struct nfsd4_secinfo_no_name *sin)
{
DECODE_HEAD;
READ_BUF(4);
READ32(sin->sin_style);
DECODE_TAIL;
}
static __be32
nfsd4_decode_setattr(struct nfsd4_compoundargs *argp, struct nfsd4_setattr *setattr)
{
__be32 status;
status = nfsd4_decode_stateid(argp, &setattr->sa_stateid);
if (status)
return status;
return nfsd4_decode_fattr(argp, setattr->sa_bmval, &setattr->sa_iattr,
&setattr->sa_acl);
}
static __be32
nfsd4_decode_setclientid(struct nfsd4_compoundargs *argp, struct nfsd4_setclientid *setclientid)
{
DECODE_HEAD;
READ_BUF(NFS4_VERIFIER_SIZE);
COPYMEM(setclientid->se_verf.data, NFS4_VERIFIER_SIZE);
status = nfsd4_decode_opaque(argp, &setclientid->se_name);
if (status)
return nfserr_bad_xdr;
READ_BUF(8);
READ32(setclientid->se_callback_prog);
READ32(setclientid->se_callback_netid_len);
READ_BUF(setclientid->se_callback_netid_len + 4);
SAVEMEM(setclientid->se_callback_netid_val, setclientid->se_callback_netid_len);
READ32(setclientid->se_callback_addr_len);
READ_BUF(setclientid->se_callback_addr_len + 4);
SAVEMEM(setclientid->se_callback_addr_val, setclientid->se_callback_addr_len);
READ32(setclientid->se_callback_ident);
DECODE_TAIL;
}
static __be32
nfsd4_decode_setclientid_confirm(struct nfsd4_compoundargs *argp, struct nfsd4_setclientid_confirm *scd_c)
{
DECODE_HEAD;
READ_BUF(8 + NFS4_VERIFIER_SIZE);
COPYMEM(&scd_c->sc_clientid, 8);
COPYMEM(&scd_c->sc_confirm, NFS4_VERIFIER_SIZE);
DECODE_TAIL;
}
/* Also used for NVERIFY */
static __be32
nfsd4_decode_verify(struct nfsd4_compoundargs *argp, struct nfsd4_verify *verify)
{
DECODE_HEAD;
if ((status = nfsd4_decode_bitmap(argp, verify->ve_bmval)))
goto out;
/* For convenience's sake, we compare raw xdr'd attributes in
* nfsd4_proc_verify */
READ_BUF(4);
READ32(verify->ve_attrlen);
READ_BUF(verify->ve_attrlen);
SAVEMEM(verify->ve_attrval, verify->ve_attrlen);
DECODE_TAIL;
}
static __be32
nfsd4_decode_write(struct nfsd4_compoundargs *argp, struct nfsd4_write *write)
{
int avail;
int len;
DECODE_HEAD;
status = nfsd4_decode_stateid(argp, &write->wr_stateid);
if (status)
return status;
READ_BUF(16);
READ64(write->wr_offset);
READ32(write->wr_stable_how);
if (write->wr_stable_how > 2)
goto xdr_error;
READ32(write->wr_buflen);
/* Sorry .. no magic macros for this.. *
* READ_BUF(write->wr_buflen);
* SAVEMEM(write->wr_buf, write->wr_buflen);
*/
avail = (char*)argp->end - (char*)argp->p;
if (avail + argp->pagelen < write->wr_buflen) {
dprintk("NFSD: xdr error (%s:%d)\n",
__FILE__, __LINE__);
goto xdr_error;
}
write->wr_head.iov_base = p;
write->wr_head.iov_len = avail;
WARN_ON(avail != (XDR_QUADLEN(avail) << 2));
write->wr_pagelist = argp->pagelist;
len = XDR_QUADLEN(write->wr_buflen) << 2;
if (len >= avail) {
int pages;
len -= avail;
pages = len >> PAGE_SHIFT;
argp->pagelist += pages;
argp->pagelen -= pages * PAGE_SIZE;
len -= pages * PAGE_SIZE;
argp->p = (__be32 *)page_address(argp->pagelist[0]);
argp->end = argp->p + XDR_QUADLEN(PAGE_SIZE);
}
argp->p += XDR_QUADLEN(len);
DECODE_TAIL;
}
static __be32
nfsd4_decode_release_lockowner(struct nfsd4_compoundargs *argp, struct nfsd4_release_lockowner *rlockowner)
{
DECODE_HEAD;
READ_BUF(12);
COPYMEM(&rlockowner->rl_clientid, sizeof(clientid_t));
READ32(rlockowner->rl_owner.len);
READ_BUF(rlockowner->rl_owner.len);
READMEM(rlockowner->rl_owner.data, rlockowner->rl_owner.len);
if (argp->minorversion && !zero_clientid(&rlockowner->rl_clientid))
return nfserr_inval;
DECODE_TAIL;
}
static __be32
nfsd4_decode_exchange_id(struct nfsd4_compoundargs *argp,
struct nfsd4_exchange_id *exid)
{
int dummy, tmp;
DECODE_HEAD;
READ_BUF(NFS4_VERIFIER_SIZE);
COPYMEM(exid->verifier.data, NFS4_VERIFIER_SIZE);
status = nfsd4_decode_opaque(argp, &exid->clname);
if (status)
return nfserr_bad_xdr;
READ_BUF(4);
READ32(exid->flags);
/* Ignore state_protect4_a */
READ_BUF(4);
READ32(exid->spa_how);
switch (exid->spa_how) {
case SP4_NONE:
break;
case SP4_MACH_CRED:
/* spo_must_enforce */
READ_BUF(4);
READ32(dummy);
READ_BUF(dummy * 4);
p += dummy;
/* spo_must_allow */
READ_BUF(4);
READ32(dummy);
READ_BUF(dummy * 4);
p += dummy;
break;
case SP4_SSV:
/* ssp_ops */
READ_BUF(4);
READ32(dummy);
READ_BUF(dummy * 4);
p += dummy;
READ_BUF(4);
READ32(dummy);
READ_BUF(dummy * 4);
p += dummy;
/* ssp_hash_algs<> */
READ_BUF(4);
READ32(tmp);
while (tmp--) {
READ_BUF(4);
READ32(dummy);
READ_BUF(dummy);
p += XDR_QUADLEN(dummy);
}
/* ssp_encr_algs<> */
READ_BUF(4);
READ32(tmp);
while (tmp--) {
READ_BUF(4);
READ32(dummy);
READ_BUF(dummy);
p += XDR_QUADLEN(dummy);
}
/* ssp_window and ssp_num_gss_handles */
READ_BUF(8);
READ32(dummy);
READ32(dummy);
break;
default:
goto xdr_error;
}
/* Ignore Implementation ID */
READ_BUF(4); /* nfs_impl_id4 array length */
READ32(dummy);
if (dummy > 1)
goto xdr_error;
if (dummy == 1) {
/* nii_domain */
READ_BUF(4);
READ32(dummy);
READ_BUF(dummy);
p += XDR_QUADLEN(dummy);
/* nii_name */
READ_BUF(4);
READ32(dummy);
READ_BUF(dummy);
p += XDR_QUADLEN(dummy);
/* nii_date */
READ_BUF(12);
p += 3;
}
DECODE_TAIL;
}
static __be32
nfsd4_decode_create_session(struct nfsd4_compoundargs *argp,
struct nfsd4_create_session *sess)
{
DECODE_HEAD;
u32 dummy;
READ_BUF(16);
COPYMEM(&sess->clientid, 8);
READ32(sess->seqid);
READ32(sess->flags);
/* Fore channel attrs */
READ_BUF(28);
READ32(dummy); /* headerpadsz is always 0 */
READ32(sess->fore_channel.maxreq_sz);
READ32(sess->fore_channel.maxresp_sz);
READ32(sess->fore_channel.maxresp_cached);
READ32(sess->fore_channel.maxops);
READ32(sess->fore_channel.maxreqs);
READ32(sess->fore_channel.nr_rdma_attrs);
if (sess->fore_channel.nr_rdma_attrs == 1) {
READ_BUF(4);
READ32(sess->fore_channel.rdma_attrs);
} else if (sess->fore_channel.nr_rdma_attrs > 1) {
dprintk("Too many fore channel attr bitmaps!\n");
goto xdr_error;
}
/* Back channel attrs */
READ_BUF(28);
READ32(dummy); /* headerpadsz is always 0 */
READ32(sess->back_channel.maxreq_sz);
READ32(sess->back_channel.maxresp_sz);
READ32(sess->back_channel.maxresp_cached);
READ32(sess->back_channel.maxops);
READ32(sess->back_channel.maxreqs);
READ32(sess->back_channel.nr_rdma_attrs);
if (sess->back_channel.nr_rdma_attrs == 1) {
READ_BUF(4);
READ32(sess->back_channel.rdma_attrs);
} else if (sess->back_channel.nr_rdma_attrs > 1) {
dprintk("Too many back channel attr bitmaps!\n");
goto xdr_error;
}
READ_BUF(4);
READ32(sess->callback_prog);
nfsd4_decode_cb_sec(argp, &sess->cb_sec);
DECODE_TAIL;
}
static __be32
nfsd4_decode_destroy_session(struct nfsd4_compoundargs *argp,
struct nfsd4_destroy_session *destroy_session)
{
DECODE_HEAD;
READ_BUF(NFS4_MAX_SESSIONID_LEN);
COPYMEM(destroy_session->sessionid.data, NFS4_MAX_SESSIONID_LEN);
DECODE_TAIL;
}
static __be32
nfsd4_decode_free_stateid(struct nfsd4_compoundargs *argp,
struct nfsd4_free_stateid *free_stateid)
{
DECODE_HEAD;
READ_BUF(sizeof(stateid_t));
READ32(free_stateid->fr_stateid.si_generation);
COPYMEM(&free_stateid->fr_stateid.si_opaque, sizeof(stateid_opaque_t));
DECODE_TAIL;
}
static __be32
nfsd4_decode_sequence(struct nfsd4_compoundargs *argp,
struct nfsd4_sequence *seq)
{
DECODE_HEAD;
READ_BUF(NFS4_MAX_SESSIONID_LEN + 16);
COPYMEM(seq->sessionid.data, NFS4_MAX_SESSIONID_LEN);
READ32(seq->seqid);
READ32(seq->slotid);
READ32(seq->maxslots);
READ32(seq->cachethis);
DECODE_TAIL;
}
static __be32
nfsd4_decode_test_stateid(struct nfsd4_compoundargs *argp, struct nfsd4_test_stateid *test_stateid)
{
int i;
__be32 *p, status;
struct nfsd4_test_stateid_id *stateid;
READ_BUF(4);
test_stateid->ts_num_ids = ntohl(*p++);
INIT_LIST_HEAD(&test_stateid->ts_stateid_list);
for (i = 0; i < test_stateid->ts_num_ids; i++) {
stateid = kmalloc(sizeof(struct nfsd4_test_stateid_id), GFP_KERNEL);
if (!stateid) {
status = nfserrno(-ENOMEM);
goto out;
}
defer_free(argp, kfree, stateid);
INIT_LIST_HEAD(&stateid->ts_id_list);
list_add_tail(&stateid->ts_id_list, &test_stateid->ts_stateid_list);
status = nfsd4_decode_stateid(argp, &stateid->ts_id_stateid);
if (status)
goto out;
}
status = 0;
out:
return status;
xdr_error:
dprintk("NFSD: xdr error (%s:%d)\n", __FILE__, __LINE__);
status = nfserr_bad_xdr;
goto out;
}
static __be32 nfsd4_decode_destroy_clientid(struct nfsd4_compoundargs *argp, struct nfsd4_destroy_clientid *dc)
{
DECODE_HEAD;
READ_BUF(8);
COPYMEM(&dc->clientid, 8);
DECODE_TAIL;
}
static __be32 nfsd4_decode_reclaim_complete(struct nfsd4_compoundargs *argp, struct nfsd4_reclaim_complete *rc)
{
DECODE_HEAD;
READ_BUF(4);
READ32(rc->rca_one_fs);
DECODE_TAIL;
}
static __be32
nfsd4_decode_noop(struct nfsd4_compoundargs *argp, void *p)
{
return nfs_ok;
}
static __be32
nfsd4_decode_notsupp(struct nfsd4_compoundargs *argp, void *p)
{
return nfserr_notsupp;
}
typedef __be32(*nfsd4_dec)(struct nfsd4_compoundargs *argp, void *);
static nfsd4_dec nfsd4_dec_ops[] = {
[OP_ACCESS] = (nfsd4_dec)nfsd4_decode_access,
[OP_CLOSE] = (nfsd4_dec)nfsd4_decode_close,
[OP_COMMIT] = (nfsd4_dec)nfsd4_decode_commit,
[OP_CREATE] = (nfsd4_dec)nfsd4_decode_create,
[OP_DELEGPURGE] = (nfsd4_dec)nfsd4_decode_notsupp,
[OP_DELEGRETURN] = (nfsd4_dec)nfsd4_decode_delegreturn,
[OP_GETATTR] = (nfsd4_dec)nfsd4_decode_getattr,
[OP_GETFH] = (nfsd4_dec)nfsd4_decode_noop,
[OP_LINK] = (nfsd4_dec)nfsd4_decode_link,
[OP_LOCK] = (nfsd4_dec)nfsd4_decode_lock,
[OP_LOCKT] = (nfsd4_dec)nfsd4_decode_lockt,
[OP_LOCKU] = (nfsd4_dec)nfsd4_decode_locku,
[OP_LOOKUP] = (nfsd4_dec)nfsd4_decode_lookup,
[OP_LOOKUPP] = (nfsd4_dec)nfsd4_decode_noop,
[OP_NVERIFY] = (nfsd4_dec)nfsd4_decode_verify,
[OP_OPEN] = (nfsd4_dec)nfsd4_decode_open,
[OP_OPENATTR] = (nfsd4_dec)nfsd4_decode_notsupp,
[OP_OPEN_CONFIRM] = (nfsd4_dec)nfsd4_decode_open_confirm,
[OP_OPEN_DOWNGRADE] = (nfsd4_dec)nfsd4_decode_open_downgrade,
[OP_PUTFH] = (nfsd4_dec)nfsd4_decode_putfh,
[OP_PUTPUBFH] = (nfsd4_dec)nfsd4_decode_noop,
[OP_PUTROOTFH] = (nfsd4_dec)nfsd4_decode_noop,
[OP_READ] = (nfsd4_dec)nfsd4_decode_read,
[OP_READDIR] = (nfsd4_dec)nfsd4_decode_readdir,
[OP_READLINK] = (nfsd4_dec)nfsd4_decode_noop,
[OP_REMOVE] = (nfsd4_dec)nfsd4_decode_remove,
[OP_RENAME] = (nfsd4_dec)nfsd4_decode_rename,
[OP_RENEW] = (nfsd4_dec)nfsd4_decode_renew,
[OP_RESTOREFH] = (nfsd4_dec)nfsd4_decode_noop,
[OP_SAVEFH] = (nfsd4_dec)nfsd4_decode_noop,
[OP_SECINFO] = (nfsd4_dec)nfsd4_decode_secinfo,
[OP_SETATTR] = (nfsd4_dec)nfsd4_decode_setattr,
[OP_SETCLIENTID] = (nfsd4_dec)nfsd4_decode_setclientid,
[OP_SETCLIENTID_CONFIRM] = (nfsd4_dec)nfsd4_decode_setclientid_confirm,
[OP_VERIFY] = (nfsd4_dec)nfsd4_decode_verify,
[OP_WRITE] = (nfsd4_dec)nfsd4_decode_write,
[OP_RELEASE_LOCKOWNER] = (nfsd4_dec)nfsd4_decode_release_lockowner,
};
static nfsd4_dec nfsd41_dec_ops[] = {
[OP_ACCESS] = (nfsd4_dec)nfsd4_decode_access,
[OP_CLOSE] = (nfsd4_dec)nfsd4_decode_close,
[OP_COMMIT] = (nfsd4_dec)nfsd4_decode_commit,
[OP_CREATE] = (nfsd4_dec)nfsd4_decode_create,
[OP_DELEGPURGE] = (nfsd4_dec)nfsd4_decode_notsupp,
[OP_DELEGRETURN] = (nfsd4_dec)nfsd4_decode_delegreturn,
[OP_GETATTR] = (nfsd4_dec)nfsd4_decode_getattr,
[OP_GETFH] = (nfsd4_dec)nfsd4_decode_noop,
[OP_LINK] = (nfsd4_dec)nfsd4_decode_link,
[OP_LOCK] = (nfsd4_dec)nfsd4_decode_lock,
[OP_LOCKT] = (nfsd4_dec)nfsd4_decode_lockt,
[OP_LOCKU] = (nfsd4_dec)nfsd4_decode_locku,
[OP_LOOKUP] = (nfsd4_dec)nfsd4_decode_lookup,
[OP_LOOKUPP] = (nfsd4_dec)nfsd4_decode_noop,
[OP_NVERIFY] = (nfsd4_dec)nfsd4_decode_verify,
[OP_OPEN] = (nfsd4_dec)nfsd4_decode_open,
[OP_OPENATTR] = (nfsd4_dec)nfsd4_decode_notsupp,
[OP_OPEN_CONFIRM] = (nfsd4_dec)nfsd4_decode_notsupp,
[OP_OPEN_DOWNGRADE] = (nfsd4_dec)nfsd4_decode_open_downgrade,
[OP_PUTFH] = (nfsd4_dec)nfsd4_decode_putfh,
[OP_PUTPUBFH] = (nfsd4_dec)nfsd4_decode_notsupp,
[OP_PUTROOTFH] = (nfsd4_dec)nfsd4_decode_noop,
[OP_READ] = (nfsd4_dec)nfsd4_decode_read,
[OP_READDIR] = (nfsd4_dec)nfsd4_decode_readdir,
[OP_READLINK] = (nfsd4_dec)nfsd4_decode_noop,
[OP_REMOVE] = (nfsd4_dec)nfsd4_decode_remove,
[OP_RENAME] = (nfsd4_dec)nfsd4_decode_rename,
[OP_RENEW] = (nfsd4_dec)nfsd4_decode_notsupp,
[OP_RESTOREFH] = (nfsd4_dec)nfsd4_decode_noop,
[OP_SAVEFH] = (nfsd4_dec)nfsd4_decode_noop,
[OP_SECINFO] = (nfsd4_dec)nfsd4_decode_secinfo,
[OP_SETATTR] = (nfsd4_dec)nfsd4_decode_setattr,
[OP_SETCLIENTID] = (nfsd4_dec)nfsd4_decode_notsupp,
[OP_SETCLIENTID_CONFIRM]= (nfsd4_dec)nfsd4_decode_notsupp,
[OP_VERIFY] = (nfsd4_dec)nfsd4_decode_verify,
[OP_WRITE] = (nfsd4_dec)nfsd4_decode_write,
[OP_RELEASE_LOCKOWNER] = (nfsd4_dec)nfsd4_decode_notsupp,
/* new operations for NFSv4.1 */
[OP_BACKCHANNEL_CTL] = (nfsd4_dec)nfsd4_decode_backchannel_ctl,
[OP_BIND_CONN_TO_SESSION]= (nfsd4_dec)nfsd4_decode_bind_conn_to_session,
[OP_EXCHANGE_ID] = (nfsd4_dec)nfsd4_decode_exchange_id,
[OP_CREATE_SESSION] = (nfsd4_dec)nfsd4_decode_create_session,
[OP_DESTROY_SESSION] = (nfsd4_dec)nfsd4_decode_destroy_session,
[OP_FREE_STATEID] = (nfsd4_dec)nfsd4_decode_free_stateid,
[OP_GET_DIR_DELEGATION] = (nfsd4_dec)nfsd4_decode_notsupp,
[OP_GETDEVICEINFO] = (nfsd4_dec)nfsd4_decode_notsupp,
[OP_GETDEVICELIST] = (nfsd4_dec)nfsd4_decode_notsupp,
[OP_LAYOUTCOMMIT] = (nfsd4_dec)nfsd4_decode_notsupp,
[OP_LAYOUTGET] = (nfsd4_dec)nfsd4_decode_notsupp,
[OP_LAYOUTRETURN] = (nfsd4_dec)nfsd4_decode_notsupp,
[OP_SECINFO_NO_NAME] = (nfsd4_dec)nfsd4_decode_secinfo_no_name,
[OP_SEQUENCE] = (nfsd4_dec)nfsd4_decode_sequence,
[OP_SET_SSV] = (nfsd4_dec)nfsd4_decode_notsupp,
[OP_TEST_STATEID] = (nfsd4_dec)nfsd4_decode_test_stateid,
[OP_WANT_DELEGATION] = (nfsd4_dec)nfsd4_decode_notsupp,
[OP_DESTROY_CLIENTID] = (nfsd4_dec)nfsd4_decode_destroy_clientid,
[OP_RECLAIM_COMPLETE] = (nfsd4_dec)nfsd4_decode_reclaim_complete,
};
struct nfsd4_minorversion_ops {
nfsd4_dec *decoders;
int nops;
};
static struct nfsd4_minorversion_ops nfsd4_minorversion[] = {
[0] = { nfsd4_dec_ops, ARRAY_SIZE(nfsd4_dec_ops) },
[1] = { nfsd41_dec_ops, ARRAY_SIZE(nfsd41_dec_ops) },
};
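/*
 * Decoding dispatches on the minor version: operations that NFSv4.1
 * obsoleted (e.g. OPEN_CONFIRM, RENEW, SETCLIENTID/SETCLIENTID_CONFIRM,
 * RELEASE_LOCKOWNER) map to nfsd4_decode_notsupp in nfsd41_dec_ops,
 * while the session-oriented operations exist only in that table.
 */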
static __be32
nfsd4_decode_compound(struct nfsd4_compoundargs *argp)
{
DECODE_HEAD;
struct nfsd4_op *op;
struct nfsd4_minorversion_ops *ops;
bool cachethis = false;
int i;
READ_BUF(4);
READ32(argp->taglen);
READ_BUF(argp->taglen + 8);
SAVEMEM(argp->tag, argp->taglen);
READ32(argp->minorversion);
READ32(argp->opcnt);
if (argp->taglen > NFSD4_MAX_TAGLEN)
goto xdr_error;
if (argp->opcnt > 100)
goto xdr_error;
if (argp->opcnt > ARRAY_SIZE(argp->iops)) {
argp->ops = kmalloc(argp->opcnt * sizeof(*argp->ops), GFP_KERNEL);
if (!argp->ops) {
argp->ops = argp->iops;
dprintk("nfsd: couldn't allocate room for COMPOUND\n");
goto xdr_error;
}
}
if (argp->minorversion >= ARRAY_SIZE(nfsd4_minorversion))
argp->opcnt = 0;
ops = &nfsd4_minorversion[argp->minorversion];
for (i = 0; i < argp->opcnt; i++) {
op = &argp->ops[i];
op->replay = NULL;
READ_BUF(4);
READ32(op->opnum);
/* Bound by the size of this minorversion's decoder table, not
 * LAST_NFS4_OP, so a v4.0 compound cannot index past nfsd4_dec_ops: */
if (op->opnum >= FIRST_NFS4_OP && op->opnum < ops->nops)
op->status = ops->decoders[op->opnum](argp, &op->u);
else {
op->opnum = OP_ILLEGAL;
op->status = nfserr_op_illegal;
}
if (op->status) {
argp->opcnt = i+1;
break;
}
/*
* We'll try to cache the result in the DRC if any one
* op in the compound wants to be cached:
*/
cachethis |= nfsd4_cache_this_op(op);
}
/* Sessions make the DRC unnecessary: */
if (argp->minorversion)
cachethis = false;
argp->rqstp->rq_cachetype = cachethis ? RC_REPLBUFF : RC_NOCACHE;
DECODE_TAIL;
}
#define WRITE32(n) *p++ = htonl(n)
#define WRITE64(n) do { \
*p++ = htonl((u32)((n) >> 32)); \
*p++ = htonl((u32)(n)); \
} while (0)
#define WRITEMEM(ptr,nbytes) do { if (nbytes > 0) { \
*(p + XDR_QUADLEN(nbytes) -1) = 0; \
memcpy(p, ptr, nbytes); \
p += XDR_QUADLEN(nbytes); \
}} while (0)
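/*
 * Note that WRITEMEM zeroes the final destination word before the
 * memcpy, so any XDR pad bytes beyond @nbytes go out as zeroes rather
 * than leaking stale buffer contents onto the wire.
 */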
static void write32(__be32 **p, u32 n)
{
*(*p)++ = htonl(n);
}
static void write64(__be32 **p, u64 n)
{
write32(p, (n >> 32));
write32(p, (u32)n);
}
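/*
 * The NFSv4 change attribute: use the inode's i_version counter when
 * the filesystem maintains one, otherwise approximate it by packing
 * ctime (seconds, then nanoseconds) into the 64-bit value.
 */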
static void write_change(__be32 **p, struct kstat *stat, struct inode *inode)
{
if (IS_I_VERSION(inode)) {
write64(p, inode->i_version);
} else {
write32(p, stat->ctime.tv_sec);
write32(p, stat->ctime.tv_nsec);
}
}
static void write_cinfo(__be32 **p, struct nfsd4_change_info *c)
{
write32(p, c->atomic);
if (c->change_supported) {
write64(p, c->before_change);
write64(p, c->after_change);
} else {
write32(p, c->before_ctime_sec);
write32(p, c->before_ctime_nsec);
write32(p, c->after_ctime_sec);
write32(p, c->after_ctime_nsec);
}
}
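/*
 * The encoders below follow a reserve/write/commit pattern:
 * RESERVE_SPACE() loads the response write pointer into a local @p and
 * asserts there is room for @nbytes, the WRITE* macros advance @p, and
 * ADJUST_ARGS() commits @p back to the response.
 */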
#define RESERVE_SPACE(nbytes) do { \
p = resp->p; \
BUG_ON(p + XDR_QUADLEN(nbytes) > resp->end); \
} while (0)
#define ADJUST_ARGS() resp->p = p
/* Encode as an array of strings the string given, with components
 * separated by @sep and escaped with @esc_enter and @esc_exit; an
 * illustrative example follows.
 */
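/*
 * Example (illustrative only): with sep == ':', "host1:host2" encodes
 * as the two-element array { "host1", "host2" }. The esc_enter/esc_exit
 * pair is meant to keep separators inside an escaped span (e.g. the
 * colons of an IPv6 literal between '[' and ']') from splitting
 * components.
 */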
static __be32 nfsd4_encode_components_esc(char sep, char *components,
__be32 **pp, int *buflen,
char esc_enter, char esc_exit)
{
__be32 *p = *pp;
__be32 *countp = p;
int strlen, count=0;
char *str, *end, *next;
dprintk("nfsd4_encode_components(%s)\n", components);
if ((*buflen -= 4) < 0)
return nfserr_resource;
WRITE32(0); /* We will fill this in with @count later */
end = str = components;
while (*end) {
bool found_esc = false;
/* try to parse as esc_start, ..., esc_end, sep */
if (*str == esc_enter) {
for (; *end && (*end != esc_exit); end++)
/* find esc_exit or end of string */;
next = end + 1;
if (*end && (!*next || *next == sep)) {
str++;
found_esc = true;
}
}
if (!found_esc)
for (; *end && (*end != sep); end++)
/* find sep or end of string */;
strlen = end - str;
if (strlen) {
if ((*buflen -= ((XDR_QUADLEN(strlen) << 2) + 4)) < 0)
return nfserr_resource;
WRITE32(strlen);
WRITEMEM(str, strlen);
count++;
}
else
end++;
str = end;
}
*pp = p;
p = countp;
WRITE32(count);
return 0;
}
/* Encode as an array of strings the string given with components
 * separated by @sep.
*/
static __be32 nfsd4_encode_components(char sep, char *components,
__be32 **pp, int *buflen)
{
return nfsd4_encode_components_esc(sep, components, pp, buflen, 0, 0);
}
/*
* encode a location element of a fs_locations structure
*/
static __be32 nfsd4_encode_fs_location4(struct nfsd4_fs_location *location,
__be32 **pp, int *buflen)
{
__be32 status;
__be32 *p = *pp;
status = nfsd4_encode_components_esc(':', location->hosts, &p, buflen,
'[', ']');
if (status)
return status;
status = nfsd4_encode_components('/', location->path, &p, buflen);
if (status)
return status;
*pp = p;
return 0;
}
/*
* Encode a path in RFC3530 'pathname4' format
*/
static __be32 nfsd4_encode_path(const struct path *root,
const struct path *path, __be32 **pp, int *buflen)
{
struct path cur = {
.mnt = path->mnt,
.dentry = path->dentry,
};
__be32 *p = *pp;
struct dentry **components = NULL;
unsigned int ncomponents = 0;
__be32 err = nfserr_jukebox;
dprintk("nfsd4_encode_components(");
path_get(&cur);
/* First walk the path up to the nfsd root, and store the
* dentries/path components in an array.
*/
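/*
 * Components are collected bottom-up (leaf first) and then written out
 * in reverse, so a dentry at a/b under the nfsd root goes out as the
 * pathname4 array { "a", "b" }.
 */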
for (;;) {
if (cur.dentry == root->dentry && cur.mnt == root->mnt)
break;
if (cur.dentry == cur.mnt->mnt_root) {
if (follow_up(&cur))
continue;
goto out_free;
}
if ((ncomponents & 15) == 0) {
struct dentry **new;
new = krealloc(components,
sizeof(*new) * (ncomponents + 16),
GFP_KERNEL);
if (!new)
goto out_free;
components = new;
}
components[ncomponents++] = cur.dentry;
cur.dentry = dget_parent(cur.dentry);
}
*buflen -= 4;
if (*buflen < 0)
goto out_free;
WRITE32(ncomponents);
while (ncomponents) {
struct dentry *dentry = components[ncomponents - 1];
unsigned int len = dentry->d_name.len;
*buflen -= 4 + (XDR_QUADLEN(len) << 2);
if (*buflen < 0)
goto out_free;
WRITE32(len);
WRITEMEM(dentry->d_name.name, len);
dprintk("/%s", dentry->d_name.name);
dput(dentry);
ncomponents--;
}
*pp = p;
err = 0;
out_free:
dprintk(")\n");
while (ncomponents)
dput(components[--ncomponents]);
kfree(components);
path_put(&cur);
return err;
}
static __be32 nfsd4_encode_fsloc_fsroot(struct svc_rqst *rqstp,
const struct path *path, __be32 **pp, int *buflen)
{
struct svc_export *exp_ps;
__be32 res;
exp_ps = rqst_find_fsidzero_export(rqstp);
if (IS_ERR(exp_ps))
return nfserrno(PTR_ERR(exp_ps));
res = nfsd4_encode_path(&exp_ps->ex_path, path, pp, buflen);
exp_put(exp_ps);
return res;
}
/*
* encode a fs_locations structure
*/
static __be32 nfsd4_encode_fs_locations(struct svc_rqst *rqstp,
struct svc_export *exp,
__be32 **pp, int *buflen)
{
__be32 status;
int i;
__be32 *p = *pp;
struct nfsd4_fs_locations *fslocs = &exp->ex_fslocs;
status = nfsd4_encode_fsloc_fsroot(rqstp, &exp->ex_path, &p, buflen);
if (status)
return status;
if ((*buflen -= 4) < 0)
return nfserr_resource;
WRITE32(fslocs->locations_count);
for (i=0; i<fslocs->locations_count; i++) {
status = nfsd4_encode_fs_location4(&fslocs->locations[i],
&p, buflen);
if (status)
return status;
}
*pp = p;
return 0;
}
static u32 nfs4_file_type(umode_t mode)
{
switch (mode & S_IFMT) {
case S_IFIFO: return NF4FIFO;
case S_IFCHR: return NF4CHR;
case S_IFDIR: return NF4DIR;
case S_IFBLK: return NF4BLK;
case S_IFLNK: return NF4LNK;
case S_IFREG: return NF4REG;
case S_IFSOCK: return NF4SOCK;
default: return NF4BAD;
}
}
static __be32
nfsd4_encode_name(struct svc_rqst *rqstp, int whotype, kuid_t uid, kgid_t gid,
__be32 **p, int *buflen)
{
int status;
if (*buflen < (XDR_QUADLEN(IDMAP_NAMESZ) << 2) + 4)
return nfserr_resource;
if (whotype != NFS4_ACL_WHO_NAMED)
status = nfs4_acl_write_who(whotype, (u8 *)(*p + 1));
else if (gid_valid(gid))
status = nfsd_map_gid_to_name(rqstp, gid, (u8 *)(*p + 1));
else
status = nfsd_map_uid_to_name(rqstp, uid, (u8 *)(*p + 1));
if (status < 0)
return nfserrno(status);
*p = xdr_encode_opaque(*p, NULL, status);
*buflen -= (XDR_QUADLEN(status) << 2) + 4;
BUG_ON(*buflen < 0);
return 0;
}
static inline __be32
nfsd4_encode_user(struct svc_rqst *rqstp, kuid_t user, __be32 **p, int *buflen)
{
return nfsd4_encode_name(rqstp, NFS4_ACL_WHO_NAMED, user, INVALID_GID,
p, buflen);
}
static inline __be32
nfsd4_encode_group(struct svc_rqst *rqstp, kgid_t group, __be32 **p, int *buflen)
{
return nfsd4_encode_name(rqstp, NFS4_ACL_WHO_NAMED, INVALID_UID, group,
p, buflen);
}
static inline __be32
nfsd4_encode_aclname(struct svc_rqst *rqstp, struct nfs4_ace *ace,
__be32 **p, int *buflen)
{
kuid_t uid = INVALID_UID;
kgid_t gid = INVALID_GID;
if (ace->whotype == NFS4_ACL_WHO_NAMED) {
if (ace->flag & NFS4_ACE_IDENTIFIER_GROUP)
gid = ace->who_gid;
else
uid = ace->who_uid;
}
return nfsd4_encode_name(rqstp, ace->whotype, uid, gid, p, buflen);
}
#define WORD0_ABSENT_FS_ATTRS (FATTR4_WORD0_FS_LOCATIONS | FATTR4_WORD0_FSID | \
FATTR4_WORD0_RDATTR_ERROR)
#define WORD1_ABSENT_FS_ATTRS FATTR4_WORD1_MOUNTED_ON_FILEID
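/*
 * For an absent (migrated) filesystem only the attributes above can be
 * answered locally. Requests for anything else normally fail with
 * NFS4ERR_MOVED, but when rdattr_error or fs_locations is also asked
 * for, the error is reported through rdattr_error instead so the
 * referral can still be encoded.
 */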
static __be32 fattr_handle_absent_fs(u32 *bmval0, u32 *bmval1, u32 *rdattr_err)
{
/* As per referral draft: */
if (*bmval0 & ~WORD0_ABSENT_FS_ATTRS ||
*bmval1 & ~WORD1_ABSENT_FS_ATTRS) {
if (*bmval0 & FATTR4_WORD0_RDATTR_ERROR ||
*bmval0 & FATTR4_WORD0_FS_LOCATIONS)
*rdattr_err = NFSERR_MOVED;
else
return nfserr_moved;
}
*bmval0 &= WORD0_ABSENT_FS_ATTRS;
*bmval1 &= WORD1_ABSENT_FS_ATTRS;
return 0;
}
static int get_parent_attributes(struct svc_export *exp, struct kstat *stat)
{
struct path path = exp->ex_path;
int err;
path_get(&path);
while (follow_up(&path)) {
if (path.dentry != path.mnt->mnt_root)
break;
}
err = vfs_getattr(&path, stat);
path_put(&path);
return err;
}
/*
* Note: @fhp can be NULL; in this case, we might have to compose the filehandle
* ourselves.
*
 * @count is the buffer size in _words_
*/
__be32
nfsd4_encode_fattr(struct svc_fh *fhp, struct svc_export *exp,
struct dentry *dentry, __be32 **buffer, int count, u32 *bmval,
struct svc_rqst *rqstp, int ignore_crossmnt)
{
u32 bmval0 = bmval[0];
u32 bmval1 = bmval[1];
u32 bmval2 = bmval[2];
struct kstat stat;
struct svc_fh tempfh;
struct kstatfs statfs;
int buflen = count << 2;
__be32 *attrlenp;
u32 dummy;
u64 dummy64;
u32 rdattr_err = 0;
__be32 *p = *buffer;
__be32 status;
int err;
int aclsupport = 0;
struct nfs4_acl *acl = NULL;
struct nfsd4_compoundres *resp = rqstp->rq_resp;
u32 minorversion = resp->cstate.minorversion;
struct path path = {
.mnt = exp->ex_path.mnt,
.dentry = dentry,
};
struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
BUG_ON(bmval1 & NFSD_WRITEONLY_ATTRS_WORD1);
BUG_ON(bmval0 & ~nfsd_suppattrs0(minorversion));
BUG_ON(bmval1 & ~nfsd_suppattrs1(minorversion));
BUG_ON(bmval2 & ~nfsd_suppattrs2(minorversion));
if (exp->ex_fslocs.migrated) {
BUG_ON(bmval[2]);
status = fattr_handle_absent_fs(&bmval0, &bmval1, &rdattr_err);
if (status)
goto out;
}
err = vfs_getattr(&path, &stat);
if (err)
goto out_nfserr;
if ((bmval0 & (FATTR4_WORD0_FILES_AVAIL | FATTR4_WORD0_FILES_FREE |
FATTR4_WORD0_FILES_TOTAL | FATTR4_WORD0_MAXNAME)) ||
(bmval1 & (FATTR4_WORD1_SPACE_AVAIL | FATTR4_WORD1_SPACE_FREE |
FATTR4_WORD1_SPACE_TOTAL))) {
err = vfs_statfs(&path, &statfs);
if (err)
goto out_nfserr;
}
if ((bmval0 & (FATTR4_WORD0_FILEHANDLE | FATTR4_WORD0_FSID)) && !fhp) {
fh_init(&tempfh, NFS4_FHSIZE);
status = fh_compose(&tempfh, exp, dentry, NULL);
if (status)
goto out;
fhp = &tempfh;
}
if (bmval0 & (FATTR4_WORD0_ACL | FATTR4_WORD0_ACLSUPPORT
| FATTR4_WORD0_SUPPORTED_ATTRS)) {
err = nfsd4_get_nfs4_acl(rqstp, dentry, &acl);
aclsupport = (err == 0);
if (bmval0 & FATTR4_WORD0_ACL) {
if (err == -EOPNOTSUPP)
bmval0 &= ~FATTR4_WORD0_ACL;
else if (err == -EINVAL) {
status = nfserr_attrnotsupp;
goto out;
} else if (err != 0)
goto out_nfserr;
}
}
if (bmval2) {
if ((buflen -= 16) < 0)
goto out_resource;
WRITE32(3);
WRITE32(bmval0);
WRITE32(bmval1);
WRITE32(bmval2);
} else if (bmval1) {
if ((buflen -= 12) < 0)
goto out_resource;
WRITE32(2);
WRITE32(bmval0);
WRITE32(bmval1);
} else {
if ((buflen -= 8) < 0)
goto out_resource;
WRITE32(1);
WRITE32(bmval0);
}
attrlenp = p++; /* to be backfilled later */
if (bmval0 & FATTR4_WORD0_SUPPORTED_ATTRS) {
u32 word0 = nfsd_suppattrs0(minorversion);
u32 word1 = nfsd_suppattrs1(minorversion);
u32 word2 = nfsd_suppattrs2(minorversion);
if (!aclsupport)
word0 &= ~FATTR4_WORD0_ACL;
if (!word2) {
if ((buflen -= 12) < 0)
goto out_resource;
WRITE32(2);
WRITE32(word0);
WRITE32(word1);
} else {
if ((buflen -= 16) < 0)
goto out_resource;
WRITE32(3);
WRITE32(word0);
WRITE32(word1);
WRITE32(word2);
}
}
if (bmval0 & FATTR4_WORD0_TYPE) {
if ((buflen -= 4) < 0)
goto out_resource;
dummy = nfs4_file_type(stat.mode);
if (dummy == NF4BAD)
goto out_serverfault;
WRITE32(dummy);
}
if (bmval0 & FATTR4_WORD0_FH_EXPIRE_TYPE) {
if ((buflen -= 4) < 0)
goto out_resource;
if (exp->ex_flags & NFSEXP_NOSUBTREECHECK)
WRITE32(NFS4_FH_PERSISTENT);
else
WRITE32(NFS4_FH_PERSISTENT|NFS4_FH_VOL_RENAME);
}
if (bmval0 & FATTR4_WORD0_CHANGE) {
if ((buflen -= 8) < 0)
goto out_resource;
write_change(&p, &stat, dentry->d_inode);
}
if (bmval0 & FATTR4_WORD0_SIZE) {
if ((buflen -= 8) < 0)
goto out_resource;
WRITE64(stat.size);
}
if (bmval0 & FATTR4_WORD0_LINK_SUPPORT) {
if ((buflen -= 4) < 0)
goto out_resource;
WRITE32(1);
}
if (bmval0 & FATTR4_WORD0_SYMLINK_SUPPORT) {
if ((buflen -= 4) < 0)
goto out_resource;
WRITE32(1);
}
if (bmval0 & FATTR4_WORD0_NAMED_ATTR) {
if ((buflen -= 4) < 0)
goto out_resource;
WRITE32(0);
}
if (bmval0 & FATTR4_WORD0_FSID) {
if ((buflen -= 16) < 0)
goto out_resource;
if (exp->ex_fslocs.migrated) {
WRITE64(NFS4_REFERRAL_FSID_MAJOR);
WRITE64(NFS4_REFERRAL_FSID_MINOR);
} else switch(fsid_source(fhp)) {
case FSIDSOURCE_FSID:
WRITE64((u64)exp->ex_fsid);
WRITE64((u64)0);
break;
case FSIDSOURCE_DEV:
WRITE32(0);
WRITE32(MAJOR(stat.dev));
WRITE32(0);
WRITE32(MINOR(stat.dev));
break;
case FSIDSOURCE_UUID:
WRITEMEM(exp->ex_uuid, 16);
break;
}
}
if (bmval0 & FATTR4_WORD0_UNIQUE_HANDLES) {
if ((buflen -= 4) < 0)
goto out_resource;
WRITE32(0);
}
if (bmval0 & FATTR4_WORD0_LEASE_TIME) {
if ((buflen -= 4) < 0)
goto out_resource;
WRITE32(nn->nfsd4_lease);
}
if (bmval0 & FATTR4_WORD0_RDATTR_ERROR) {
if ((buflen -= 4) < 0)
goto out_resource;
WRITE32(rdattr_err);
}
if (bmval0 & FATTR4_WORD0_ACL) {
struct nfs4_ace *ace;
if (acl == NULL) {
if ((buflen -= 4) < 0)
goto out_resource;
WRITE32(0);
goto out_acl;
}
if ((buflen -= 4) < 0)
goto out_resource;
WRITE32(acl->naces);
for (ace = acl->aces; ace < acl->aces + acl->naces; ace++) {
if ((buflen -= 4*3) < 0)
goto out_resource;
WRITE32(ace->type);
WRITE32(ace->flag);
WRITE32(ace->access_mask & NFS4_ACE_MASK_ALL);
status = nfsd4_encode_aclname(rqstp, ace, &p, &buflen);
if (status == nfserr_resource)
goto out_resource;
if (status)
goto out;
}
}
out_acl:
if (bmval0 & FATTR4_WORD0_ACLSUPPORT) {
if ((buflen -= 4) < 0)
goto out_resource;
WRITE32(aclsupport ?
ACL4_SUPPORT_ALLOW_ACL|ACL4_SUPPORT_DENY_ACL : 0);
}
if (bmval0 & FATTR4_WORD0_CANSETTIME) {
if ((buflen -= 4) < 0)
goto out_resource;
WRITE32(1);
}
if (bmval0 & FATTR4_WORD0_CASE_INSENSITIVE) {
if ((buflen -= 4) < 0)
goto out_resource;
WRITE32(0);
}
if (bmval0 & FATTR4_WORD0_CASE_PRESERVING) {
if ((buflen -= 4) < 0)
goto out_resource;
WRITE32(1);
}
if (bmval0 & FATTR4_WORD0_CHOWN_RESTRICTED) {
if ((buflen -= 4) < 0)
goto out_resource;
WRITE32(1);
}
if (bmval0 & FATTR4_WORD0_FILEHANDLE) {
buflen -= (XDR_QUADLEN(fhp->fh_handle.fh_size) << 2) + 4;
if (buflen < 0)
goto out_resource;
WRITE32(fhp->fh_handle.fh_size);
WRITEMEM(&fhp->fh_handle.fh_base, fhp->fh_handle.fh_size);
}
if (bmval0 & FATTR4_WORD0_FILEID) {
if ((buflen -= 8) < 0)
goto out_resource;
WRITE64(stat.ino);
}
if (bmval0 & FATTR4_WORD0_FILES_AVAIL) {
if ((buflen -= 8) < 0)
goto out_resource;
WRITE64((u64) statfs.f_ffree);
}
if (bmval0 & FATTR4_WORD0_FILES_FREE) {
if ((buflen -= 8) < 0)
goto out_resource;
WRITE64((u64) statfs.f_ffree);
}
if (bmval0 & FATTR4_WORD0_FILES_TOTAL) {
if ((buflen -= 8) < 0)
goto out_resource;
WRITE64((u64) statfs.f_files);
}
if (bmval0 & FATTR4_WORD0_FS_LOCATIONS) {
status = nfsd4_encode_fs_locations(rqstp, exp, &p, &buflen);
if (status == nfserr_resource)
goto out_resource;
if (status)
goto out;
}
if (bmval0 & FATTR4_WORD0_HOMOGENEOUS) {
if ((buflen -= 4) < 0)
goto out_resource;
WRITE32(1);
}
if (bmval0 & FATTR4_WORD0_MAXFILESIZE) {
if ((buflen -= 8) < 0)
goto out_resource;
WRITE64(~(u64)0);
}
if (bmval0 & FATTR4_WORD0_MAXLINK) {
if ((buflen -= 4) < 0)
goto out_resource;
WRITE32(255);
}
if (bmval0 & FATTR4_WORD0_MAXNAME) {
if ((buflen -= 4) < 0)
goto out_resource;
WRITE32(statfs.f_namelen);
}
if (bmval0 & FATTR4_WORD0_MAXREAD) {
if ((buflen -= 8) < 0)
goto out_resource;
WRITE64((u64) svc_max_payload(rqstp));
}
if (bmval0 & FATTR4_WORD0_MAXWRITE) {
if ((buflen -= 8) < 0)
goto out_resource;
WRITE64((u64) svc_max_payload(rqstp));
}
if (bmval1 & FATTR4_WORD1_MODE) {
if ((buflen -= 4) < 0)
goto out_resource;
WRITE32(stat.mode & S_IALLUGO);
}
if (bmval1 & FATTR4_WORD1_NO_TRUNC) {
if ((buflen -= 4) < 0)
goto out_resource;
WRITE32(1);
}
if (bmval1 & FATTR4_WORD1_NUMLINKS) {
if ((buflen -= 4) < 0)
goto out_resource;
WRITE32(stat.nlink);
}
if (bmval1 & FATTR4_WORD1_OWNER) {
status = nfsd4_encode_user(rqstp, stat.uid, &p, &buflen);
if (status == nfserr_resource)
goto out_resource;
if (status)
goto out;
}
if (bmval1 & FATTR4_WORD1_OWNER_GROUP) {
status = nfsd4_encode_group(rqstp, stat.gid, &p, &buflen);
if (status == nfserr_resource)
goto out_resource;
if (status)
goto out;
}
if (bmval1 & FATTR4_WORD1_RAWDEV) {
if ((buflen -= 8) < 0)
goto out_resource;
WRITE32((u32) MAJOR(stat.rdev));
WRITE32((u32) MINOR(stat.rdev));
}
if (bmval1 & FATTR4_WORD1_SPACE_AVAIL) {
if ((buflen -= 8) < 0)
goto out_resource;
dummy64 = (u64)statfs.f_bavail * (u64)statfs.f_bsize;
WRITE64(dummy64);
}
if (bmval1 & FATTR4_WORD1_SPACE_FREE) {
if ((buflen -= 8) < 0)
goto out_resource;
dummy64 = (u64)statfs.f_bfree * (u64)statfs.f_bsize;
WRITE64(dummy64);
}
if (bmval1 & FATTR4_WORD1_SPACE_TOTAL) {
if ((buflen -= 8) < 0)
goto out_resource;
dummy64 = (u64)statfs.f_blocks * (u64)statfs.f_bsize;
WRITE64(dummy64);
}
if (bmval1 & FATTR4_WORD1_SPACE_USED) {
if ((buflen -= 8) < 0)
goto out_resource;
dummy64 = (u64)stat.blocks << 9;
WRITE64(dummy64);
}
if (bmval1 & FATTR4_WORD1_TIME_ACCESS) {
if ((buflen -= 12) < 0)
goto out_resource;
WRITE64((s64)stat.atime.tv_sec);
WRITE32(stat.atime.tv_nsec);
}
if (bmval1 & FATTR4_WORD1_TIME_DELTA) {
if ((buflen -= 12) < 0)
goto out_resource;
WRITE32(0);
WRITE32(1);
WRITE32(0);
}
if (bmval1 & FATTR4_WORD1_TIME_METADATA) {
if ((buflen -= 12) < 0)
goto out_resource;
WRITE64((s64)stat.ctime.tv_sec);
WRITE32(stat.ctime.tv_nsec);
}
if (bmval1 & FATTR4_WORD1_TIME_MODIFY) {
if ((buflen -= 12) < 0)
goto out_resource;
WRITE64((s64)stat.mtime.tv_sec);
WRITE32(stat.mtime.tv_nsec);
}
if (bmval1 & FATTR4_WORD1_MOUNTED_ON_FILEID) {
if ((buflen -= 8) < 0)
goto out_resource;
/*
* Get parent's attributes if not ignoring crossmount
* and this is the root of a cross-mounted filesystem.
*/
if (ignore_crossmnt == 0 &&
dentry == exp->ex_path.mnt->mnt_root)
get_parent_attributes(exp, &stat);
WRITE64(stat.ino);
}
if (bmval2 & FATTR4_WORD2_SUPPATTR_EXCLCREAT) {
if ((buflen -= 16) < 0)
goto out_resource;
WRITE32(3);
WRITE32(NFSD_SUPPATTR_EXCLCREAT_WORD0);
WRITE32(NFSD_SUPPATTR_EXCLCREAT_WORD1);
WRITE32(NFSD_SUPPATTR_EXCLCREAT_WORD2);
}
*attrlenp = htonl((char *)p - (char *)attrlenp - 4);
*buffer = p;
status = nfs_ok;
out:
kfree(acl);
if (fhp == &tempfh)
fh_put(&tempfh);
return status;
out_nfserr:
status = nfserrno(err);
goto out;
out_resource:
status = nfserr_resource;
goto out;
out_serverfault:
status = nfserr_serverfault;
goto out;
}
static inline int attributes_need_mount(u32 *bmval)
{
if (bmval[0] & ~(FATTR4_WORD0_RDATTR_ERROR | FATTR4_WORD0_LEASE_TIME))
return 1;
if (bmval[1] & ~FATTR4_WORD1_MOUNTED_ON_FILEID)
return 1;
return 0;
}
static __be32
nfsd4_encode_dirent_fattr(struct nfsd4_readdir *cd,
const char *name, int namlen, __be32 **p, int buflen)
{
struct svc_export *exp = cd->rd_fhp->fh_export;
struct dentry *dentry;
__be32 nfserr;
int ignore_crossmnt = 0;
dentry = lookup_one_len(name, cd->rd_fhp->fh_dentry, namlen);
if (IS_ERR(dentry))
return nfserrno(PTR_ERR(dentry));
if (!dentry->d_inode) {
/*
* nfsd_buffered_readdir drops the i_mutex between
* readdir and calling this callback, leaving a window
* where this directory entry could have gone away.
*/
dput(dentry);
return nfserr_noent;
}
exp_get(exp);
/*
* In the case of a mountpoint, the client may be asking for
* attributes that are only properties of the underlying filesystem
* as opposed to the cross-mounted file system. In such a case,
 * we will not follow the cross mount and will fill the attributes
* directly from the mountpoint dentry.
*/
if (nfsd_mountpoint(dentry, exp)) {
int err;
if (!(exp->ex_flags & NFSEXP_V4ROOT)
&& !attributes_need_mount(cd->rd_bmval)) {
ignore_crossmnt = 1;
goto out_encode;
}
/*
* Why the heck aren't we just using nfsd_lookup??
* Different "."/".." handling? Something else?
* At least, add a comment here to explain....
*/
err = nfsd_cross_mnt(cd->rd_rqstp, &dentry, &exp);
if (err) {
nfserr = nfserrno(err);
goto out_put;
}
nfserr = check_nfsd_access(exp, cd->rd_rqstp);
if (nfserr)
goto out_put;
}
out_encode:
nfserr = nfsd4_encode_fattr(NULL, exp, dentry, p, buflen, cd->rd_bmval,
cd->rd_rqstp, ignore_crossmnt);
out_put:
dput(dentry);
exp_put(exp);
return nfserr;
}
static __be32 *
nfsd4_encode_rdattr_error(__be32 *p, int buflen, __be32 nfserr)
{
__be32 *attrlenp;
if (buflen < 6)
return NULL;
*p++ = htonl(2);
*p++ = htonl(FATTR4_WORD0_RDATTR_ERROR); /* bmval0 */
*p++ = htonl(0); /* bmval1 */
attrlenp = p++;
*p++ = nfserr; /* no htonl */
*attrlenp = htonl((char *)p - (char *)attrlenp - 4);
return p;
}
static int
nfsd4_encode_dirent(void *ccdv, const char *name, int namlen,
loff_t offset, u64 ino, unsigned int d_type)
{
struct readdir_cd *ccd = ccdv;
struct nfsd4_readdir *cd = container_of(ccd, struct nfsd4_readdir, common);
int buflen;
__be32 *p = cd->buffer;
__be32 *cookiep;
__be32 nfserr = nfserr_toosmall;
/* In nfsv4, "." and ".." never make it onto the wire.. */
if (name && isdotent(name, namlen)) {
cd->common.err = nfs_ok;
return 0;
}
if (cd->offset)
xdr_encode_hyper(cd->offset, (u64) offset);
buflen = cd->buflen - 4 - XDR_QUADLEN(namlen);
if (buflen < 0)
goto fail;
*p++ = xdr_one; /* mark entry present */
cookiep = p;
p = xdr_encode_hyper(p, NFS_OFFSET_MAX); /* offset of next entry */
p = xdr_encode_array(p, name, namlen); /* name length & name */
nfserr = nfsd4_encode_dirent_fattr(cd, name, namlen, &p, buflen);
switch (nfserr) {
case nfs_ok:
break;
case nfserr_resource:
nfserr = nfserr_toosmall;
goto fail;
case nfserr_noent:
goto skip_entry;
default:
/*
* If the client requested the RDATTR_ERROR attribute,
* we stuff the error code into this attribute
* and continue. If this attribute was not requested,
* then in accordance with the spec, we fail the
* entire READDIR operation(!)
*/
if (!(cd->rd_bmval[0] & FATTR4_WORD0_RDATTR_ERROR))
goto fail;
p = nfsd4_encode_rdattr_error(p, buflen, nfserr);
if (p == NULL) {
nfserr = nfserr_toosmall;
goto fail;
}
}
cd->buflen -= (p - cd->buffer);
cd->buffer = p;
cd->offset = cookiep;
skip_entry:
cd->common.err = nfs_ok;
return 0;
fail:
cd->common.err = nfserr;
return -EINVAL;
}
static void
nfsd4_encode_stateid(struct nfsd4_compoundres *resp, stateid_t *sid)
{
__be32 *p;
RESERVE_SPACE(sizeof(stateid_t));
WRITE32(sid->si_generation);
WRITEMEM(&sid->si_opaque, sizeof(stateid_opaque_t));
ADJUST_ARGS();
}
static __be32
nfsd4_encode_access(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4_access *access)
{
__be32 *p;
if (!nfserr) {
RESERVE_SPACE(8);
WRITE32(access->ac_supported);
WRITE32(access->ac_resp_access);
ADJUST_ARGS();
}
return nfserr;
}
static __be32 nfsd4_encode_bind_conn_to_session(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4_bind_conn_to_session *bcts)
{
__be32 *p;
if (!nfserr) {
RESERVE_SPACE(NFS4_MAX_SESSIONID_LEN + 8);
WRITEMEM(bcts->sessionid.data, NFS4_MAX_SESSIONID_LEN);
WRITE32(bcts->dir);
/* Sorry, we do not yet support RDMA over 4.1: */
WRITE32(0);
ADJUST_ARGS();
}
return nfserr;
}
static __be32
nfsd4_encode_close(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4_close *close)
{
if (!nfserr)
nfsd4_encode_stateid(resp, &close->cl_stateid);
return nfserr;
}
static __be32
nfsd4_encode_commit(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4_commit *commit)
{
__be32 *p;
if (!nfserr) {
RESERVE_SPACE(NFS4_VERIFIER_SIZE);
WRITEMEM(commit->co_verf.data, NFS4_VERIFIER_SIZE);
ADJUST_ARGS();
}
return nfserr;
}
static __be32
nfsd4_encode_create(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4_create *create)
{
__be32 *p;
if (!nfserr) {
RESERVE_SPACE(32);
write_cinfo(&p, &create->cr_cinfo);
WRITE32(2);
WRITE32(create->cr_bmval[0]);
WRITE32(create->cr_bmval[1]);
ADJUST_ARGS();
}
return nfserr;
}
static __be32
nfsd4_encode_getattr(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4_getattr *getattr)
{
struct svc_fh *fhp = getattr->ga_fhp;
int buflen;
if (nfserr)
return nfserr;
buflen = resp->end - resp->p - (COMPOUND_ERR_SLACK_SPACE >> 2);
nfserr = nfsd4_encode_fattr(fhp, fhp->fh_export, fhp->fh_dentry,
&resp->p, buflen, getattr->ga_bmval,
resp->rqstp, 0);
return nfserr;
}
static __be32
nfsd4_encode_getfh(struct nfsd4_compoundres *resp, __be32 nfserr, struct svc_fh **fhpp)
{
struct svc_fh *fhp = *fhpp;
unsigned int len;
__be32 *p;
if (!nfserr) {
len = fhp->fh_handle.fh_size;
RESERVE_SPACE(len + 4);
WRITE32(len);
WRITEMEM(&fhp->fh_handle.fh_base, len);
ADJUST_ARGS();
}
return nfserr;
}
/*
* Including all fields other than the name, a LOCK4denied structure requires
* 8(clientid) + 4(namelen) + 8(offset) + 8(length) + 4(type) = 32 bytes.
*/
static void
nfsd4_encode_lock_denied(struct nfsd4_compoundres *resp, struct nfsd4_lock_denied *ld)
{
struct xdr_netobj *conf = &ld->ld_owner;
__be32 *p;
RESERVE_SPACE(32 + XDR_LEN(conf->len));
WRITE64(ld->ld_start);
WRITE64(ld->ld_length);
WRITE32(ld->ld_type);
if (conf->len) {
WRITEMEM(&ld->ld_clientid, 8);
WRITE32(conf->len);
WRITEMEM(conf->data, conf->len);
kfree(conf->data);
} else { /* non-NFSv4 lock in conflict; no clientid or owner */
WRITE64((u64)0); /* clientid */
WRITE32(0); /* length of owner name */
}
ADJUST_ARGS();
}
static __be32
nfsd4_encode_lock(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4_lock *lock)
{
if (!nfserr)
nfsd4_encode_stateid(resp, &lock->lk_resp_stateid);
else if (nfserr == nfserr_denied)
nfsd4_encode_lock_denied(resp, &lock->lk_denied);
return nfserr;
}
static __be32
nfsd4_encode_lockt(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4_lockt *lockt)
{
if (nfserr == nfserr_denied)
nfsd4_encode_lock_denied(resp, &lockt->lt_denied);
return nfserr;
}
static __be32
nfsd4_encode_locku(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4_locku *locku)
{
if (!nfserr)
nfsd4_encode_stateid(resp, &locku->lu_stateid);
return nfserr;
}
static __be32
nfsd4_encode_link(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4_link *link)
{
__be32 *p;
if (!nfserr) {
RESERVE_SPACE(20);
write_cinfo(&p, &link->li_cinfo);
ADJUST_ARGS();
}
return nfserr;
}
static __be32
nfsd4_encode_open(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4_open *open)
{
__be32 *p;
if (nfserr)
goto out;
nfsd4_encode_stateid(resp, &open->op_stateid);
RESERVE_SPACE(40);
write_cinfo(&p, &open->op_cinfo);
WRITE32(open->op_rflags);
WRITE32(2);
WRITE32(open->op_bmval[0]);
WRITE32(open->op_bmval[1]);
WRITE32(open->op_delegate_type);
ADJUST_ARGS();
switch (open->op_delegate_type) {
case NFS4_OPEN_DELEGATE_NONE:
break;
case NFS4_OPEN_DELEGATE_READ:
nfsd4_encode_stateid(resp, &open->op_delegate_stateid);
RESERVE_SPACE(20);
WRITE32(open->op_recall);
/*
* TODO: ACE's in delegations
*/
WRITE32(NFS4_ACE_ACCESS_ALLOWED_ACE_TYPE);
WRITE32(0);
WRITE32(0);
WRITE32(0); /* XXX: is NULL principal ok? */
ADJUST_ARGS();
break;
case NFS4_OPEN_DELEGATE_WRITE:
nfsd4_encode_stateid(resp, &open->op_delegate_stateid);
RESERVE_SPACE(32);
WRITE32(0);
/*
* TODO: space_limit's in delegations
*/
WRITE32(NFS4_LIMIT_SIZE);
WRITE32(~(u32)0);
WRITE32(~(u32)0);
/*
* TODO: ACE's in delegations
*/
WRITE32(NFS4_ACE_ACCESS_ALLOWED_ACE_TYPE);
WRITE32(0);
WRITE32(0);
WRITE32(0); /* XXX: is NULL principal ok? */
ADJUST_ARGS();
break;
case NFS4_OPEN_DELEGATE_NONE_EXT: /* 4.1 */
switch (open->op_why_no_deleg) {
case WND4_CONTENTION:
case WND4_RESOURCE:
RESERVE_SPACE(8);
WRITE32(open->op_why_no_deleg);
WRITE32(0); /* deleg signaling not supported yet */
break;
default:
RESERVE_SPACE(4);
WRITE32(open->op_why_no_deleg);
}
ADJUST_ARGS();
break;
default:
BUG();
}
/* XXX save filehandle here */
out:
return nfserr;
}
static __be32
nfsd4_encode_open_confirm(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4_open_confirm *oc)
{
if (!nfserr)
nfsd4_encode_stateid(resp, &oc->oc_resp_stateid);
return nfserr;
}
static __be32
nfsd4_encode_open_downgrade(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4_open_downgrade *od)
{
if (!nfserr)
nfsd4_encode_stateid(resp, &od->od_stateid);
return nfserr;
}
static __be32
nfsd4_encode_read(struct nfsd4_compoundres *resp, __be32 nfserr,
struct nfsd4_read *read)
{
u32 eof;
int v;
struct page *page;
unsigned long maxcount;
long len;
__be32 *p;
if (nfserr)
return nfserr;
if (resp->xbuf->page_len)
return nfserr_resource;
RESERVE_SPACE(8); /* eof flag and byte count */
maxcount = svc_max_payload(resp->rqstp);
if (maxcount > read->rd_length)
maxcount = read->rd_length;
len = maxcount;
v = 0;
while (len > 0) {
page = *(resp->rqstp->rq_next_page);
if (!page) { /* ran out of pages */
maxcount -= len;
break;
}
resp->rqstp->rq_vec[v].iov_base = page_address(page);
resp->rqstp->rq_vec[v].iov_len =
len < PAGE_SIZE ? len : PAGE_SIZE;
resp->rqstp->rq_next_page++;
v++;
len -= PAGE_SIZE;
}
read->rd_vlen = v;
nfserr = nfsd_read_file(read->rd_rqstp, read->rd_fhp, read->rd_filp,
read->rd_offset, resp->rqstp->rq_vec, read->rd_vlen,
&maxcount);
if (nfserr)
return nfserr;
eof = (read->rd_offset + maxcount >=
read->rd_fhp->fh_dentry->d_inode->i_size);
WRITE32(eof);
WRITE32(maxcount);
ADJUST_ARGS();
resp->xbuf->head[0].iov_len = (char*)p
- (char*)resp->xbuf->head[0].iov_base;
resp->xbuf->page_len = maxcount;
/* Use rest of head for padding and remaining ops: */
resp->xbuf->tail[0].iov_base = p;
resp->xbuf->tail[0].iov_len = 0;
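/*
 * XDR opaques are padded to a 4-byte boundary; when the data length is
 * not a multiple of four, the pad bytes are borrowed from a zeroed
 * word placed in the tail below.
 */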
if (maxcount&3) {
RESERVE_SPACE(4);
WRITE32(0);
resp->xbuf->tail[0].iov_base += maxcount&3;
resp->xbuf->tail[0].iov_len = 4 - (maxcount&3);
ADJUST_ARGS();
}
return 0;
}
static __be32
nfsd4_encode_readlink(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4_readlink *readlink)
{
int maxcount;
char *page;
__be32 *p;
if (nfserr)
return nfserr;
if (resp->xbuf->page_len)
return nfserr_resource;
if (!*resp->rqstp->rq_next_page)
return nfserr_resource;
page = page_address(*(resp->rqstp->rq_next_page++));
maxcount = PAGE_SIZE;
RESERVE_SPACE(4);
/*
* XXX: By default, the ->readlink() VFS op will truncate symlinks
* if they would overflow the buffer. Is this kosher in NFSv4? If
* not, one easy fix is: if ->readlink() precisely fills the buffer,
* assume that truncation occurred, and return NFS4ERR_RESOURCE.
*/
nfserr = nfsd_readlink(readlink->rl_rqstp, readlink->rl_fhp, page, &maxcount);
if (nfserr == nfserr_isdir)
return nfserr_inval;
if (nfserr)
return nfserr;
WRITE32(maxcount);
ADJUST_ARGS();
resp->xbuf->head[0].iov_len = (char*)p
- (char*)resp->xbuf->head[0].iov_base;
resp->xbuf->page_len = maxcount;
/* Use rest of head for padding and remaining ops: */
resp->xbuf->tail[0].iov_base = p;
resp->xbuf->tail[0].iov_len = 0;
if (maxcount&3) {
RESERVE_SPACE(4);
WRITE32(0);
resp->xbuf->tail[0].iov_base += maxcount&3;
resp->xbuf->tail[0].iov_len = 4 - (maxcount&3);
ADJUST_ARGS();
}
return 0;
}
static __be32
nfsd4_encode_readdir(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4_readdir *readdir)
{
int maxcount;
loff_t offset;
__be32 *page, *savep, *tailbase;
__be32 *p;
if (nfserr)
return nfserr;
if (resp->xbuf->page_len)
return nfserr_resource;
if (!*resp->rqstp->rq_next_page)
return nfserr_resource;
RESERVE_SPACE(NFS4_VERIFIER_SIZE);
savep = p;
/* XXX: Following NFSv3, we ignore the READDIR verifier for now. */
WRITE32(0);
WRITE32(0);
ADJUST_ARGS();
resp->xbuf->head[0].iov_len = ((char*)resp->p) - (char*)resp->xbuf->head[0].iov_base;
tailbase = p;
maxcount = PAGE_SIZE;
if (maxcount > readdir->rd_maxcount)
maxcount = readdir->rd_maxcount;
/*
* Convert from bytes to words, account for the two words already
* written, make sure to leave two words at the end for the next
* pointer and eof field.
*/
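/*
 * Worked example (illustrative): an rd_maxcount of 4096 bytes gives
 * 1024 words; subtracting the two verifier words already written and
 * the two words reserved for the trailing "no more entries"/eof pair
 * leaves 1020 words of entry space.
 */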
maxcount = (maxcount >> 2) - 4;
if (maxcount < 0) {
nfserr = nfserr_toosmall;
goto err_no_verf;
}
page = page_address(*(resp->rqstp->rq_next_page++));
readdir->common.err = 0;
readdir->buflen = maxcount;
readdir->buffer = page;
readdir->offset = NULL;
offset = readdir->rd_cookie;
nfserr = nfsd_readdir(readdir->rd_rqstp, readdir->rd_fhp,
&offset,
&readdir->common, nfsd4_encode_dirent);
if (nfserr == nfs_ok &&
readdir->common.err == nfserr_toosmall &&
readdir->buffer == page)
nfserr = nfserr_toosmall;
if (nfserr)
goto err_no_verf;
if (readdir->offset)
xdr_encode_hyper(readdir->offset, offset);
p = readdir->buffer;
*p++ = 0; /* no more entries */
*p++ = htonl(readdir->common.err == nfserr_eof);
resp->xbuf->page_len = ((char*)p) -
(char*)page_address(*(resp->rqstp->rq_next_page-1));
/* Use rest of head for padding and remaining ops: */
resp->xbuf->tail[0].iov_base = tailbase;
resp->xbuf->tail[0].iov_len = 0;
resp->p = resp->xbuf->tail[0].iov_base;
resp->end = resp->p + (PAGE_SIZE - resp->xbuf->head[0].iov_len)/4;
return 0;
err_no_verf:
p = savep;
ADJUST_ARGS();
return nfserr;
}
static __be32
nfsd4_encode_remove(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4_remove *remove)
{
__be32 *p;
if (!nfserr) {
RESERVE_SPACE(20);
write_cinfo(&p, &remove->rm_cinfo);
ADJUST_ARGS();
}
return nfserr;
}
static __be32
nfsd4_encode_rename(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4_rename *rename)
{
__be32 *p;
if (!nfserr) {
RESERVE_SPACE(40);
write_cinfo(&p, &rename->rn_sinfo);
write_cinfo(&p, &rename->rn_tinfo);
ADJUST_ARGS();
}
return nfserr;
}
static __be32
nfsd4_do_encode_secinfo(struct nfsd4_compoundres *resp,
__be32 nfserr, struct svc_export *exp)
{
u32 i, nflavs, supported;
struct exp_flavor_info *flavs;
struct exp_flavor_info def_flavs[2];
__be32 *p, *flavorsp;
static bool report = true;
if (nfserr)
goto out;
if (exp->ex_nflavors) {
flavs = exp->ex_flavors;
nflavs = exp->ex_nflavors;
} else { /* Handling of some defaults in absence of real secinfo: */
flavs = def_flavs;
if (exp->ex_client->flavour->flavour == RPC_AUTH_UNIX) {
nflavs = 2;
flavs[0].pseudoflavor = RPC_AUTH_UNIX;
flavs[1].pseudoflavor = RPC_AUTH_NULL;
} else if (exp->ex_client->flavour->flavour == RPC_AUTH_GSS) {
nflavs = 1;
flavs[0].pseudoflavor
= svcauth_gss_flavor(exp->ex_client);
} else {
nflavs = 1;
flavs[0].pseudoflavor
= exp->ex_client->flavour->flavour;
}
}
supported = 0;
RESERVE_SPACE(4);
flavorsp = p++; /* to be backfilled later */
ADJUST_ARGS();
for (i = 0; i < nflavs; i++) {
rpc_authflavor_t pf = flavs[i].pseudoflavor;
struct rpcsec_gss_info info;
if (rpcauth_get_gssinfo(pf, &info) == 0) {
supported++;
RESERVE_SPACE(4 + 4 + info.oid.len + 4 + 4);
WRITE32(RPC_AUTH_GSS);
WRITE32(info.oid.len);
WRITEMEM(info.oid.data, info.oid.len);
WRITE32(info.qop);
WRITE32(info.service);
ADJUST_ARGS();
} else if (pf < RPC_AUTH_MAXFLAVOR) {
supported++;
RESERVE_SPACE(4);
WRITE32(pf);
ADJUST_ARGS();
} else {
if (report)
pr_warn("NFS: SECINFO: security flavor %u "
"is not supported\n", pf);
}
}
if (nflavs != supported)
report = false;
*flavorsp = htonl(supported);
out:
if (exp)
exp_put(exp);
return nfserr;
}
static __be32
nfsd4_encode_secinfo(struct nfsd4_compoundres *resp, __be32 nfserr,
struct nfsd4_secinfo *secinfo)
{
return nfsd4_do_encode_secinfo(resp, nfserr, secinfo->si_exp);
}
static __be32
nfsd4_encode_secinfo_no_name(struct nfsd4_compoundres *resp, __be32 nfserr,
struct nfsd4_secinfo_no_name *secinfo)
{
return nfsd4_do_encode_secinfo(resp, nfserr, secinfo->sin_exp);
}
/*
* The SETATTR encode routine is special -- it always encodes a bitmap,
* regardless of the error status.
*/
static __be32
nfsd4_encode_setattr(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4_setattr *setattr)
{
__be32 *p;
RESERVE_SPACE(12);
if (nfserr) {
WRITE32(2);
WRITE32(0);
WRITE32(0);
}
else {
WRITE32(2);
WRITE32(setattr->sa_bmval[0]);
WRITE32(setattr->sa_bmval[1]);
}
ADJUST_ARGS();
return nfserr;
}
static __be32
nfsd4_encode_setclientid(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4_setclientid *scd)
{
__be32 *p;
if (!nfserr) {
RESERVE_SPACE(8 + NFS4_VERIFIER_SIZE);
WRITEMEM(&scd->se_clientid, 8);
WRITEMEM(&scd->se_confirm, NFS4_VERIFIER_SIZE);
ADJUST_ARGS();
}
else if (nfserr == nfserr_clid_inuse) {
RESERVE_SPACE(8);
WRITE32(0);
WRITE32(0);
ADJUST_ARGS();
}
return nfserr;
}
static __be32
nfsd4_encode_write(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4_write *write)
{
__be32 *p;
if (!nfserr) {
RESERVE_SPACE(16);
WRITE32(write->wr_bytes_written);
WRITE32(write->wr_how_written);
WRITEMEM(write->wr_verifier.data, NFS4_VERIFIER_SIZE);
ADJUST_ARGS();
}
return nfserr;
}
static __be32
nfsd4_encode_exchange_id(struct nfsd4_compoundres *resp, __be32 nfserr,
struct nfsd4_exchange_id *exid)
{
__be32 *p;
char *major_id;
char *server_scope;
int major_id_sz;
int server_scope_sz;
uint64_t minor_id = 0;
if (nfserr)
return nfserr;
major_id = utsname()->nodename;
major_id_sz = strlen(major_id);
server_scope = utsname()->nodename;
server_scope_sz = strlen(server_scope);
RESERVE_SPACE(
8 /* eir_clientid */ +
4 /* eir_sequenceid */ +
4 /* eir_flags */ +
4 /* spr_how (SP4_NONE) */ +
8 /* so_minor_id */ +
4 /* so_major_id.len */ +
(XDR_QUADLEN(major_id_sz) * 4) +
4 /* eir_server_scope.len */ +
(XDR_QUADLEN(server_scope_sz) * 4) +
4 /* eir_server_impl_id.count (0) */);
WRITEMEM(&exid->clientid, 8);
WRITE32(exid->seqid);
WRITE32(exid->flags);
/* state_protect4_r. Currently only support SP4_NONE */
BUG_ON(exid->spa_how != SP4_NONE);
WRITE32(exid->spa_how);
/* The server_owner struct */
WRITE64(minor_id); /* Minor id */
/* major id */
WRITE32(major_id_sz);
WRITEMEM(major_id, major_id_sz);
/* Server scope */
WRITE32(server_scope_sz);
WRITEMEM(server_scope, server_scope_sz);
/* Implementation id */
WRITE32(0); /* zero length nfs_impl_id4 array */
ADJUST_ARGS();
return 0;
}
static __be32
nfsd4_encode_create_session(struct nfsd4_compoundres *resp, __be32 nfserr,
struct nfsd4_create_session *sess)
{
__be32 *p;
if (nfserr)
return nfserr;
RESERVE_SPACE(24);
WRITEMEM(sess->sessionid.data, NFS4_MAX_SESSIONID_LEN);
WRITE32(sess->seqid);
WRITE32(sess->flags);
ADJUST_ARGS();
RESERVE_SPACE(28);
WRITE32(0); /* headerpadsz */
WRITE32(sess->fore_channel.maxreq_sz);
WRITE32(sess->fore_channel.maxresp_sz);
WRITE32(sess->fore_channel.maxresp_cached);
WRITE32(sess->fore_channel.maxops);
WRITE32(sess->fore_channel.maxreqs);
WRITE32(sess->fore_channel.nr_rdma_attrs);
ADJUST_ARGS();
if (sess->fore_channel.nr_rdma_attrs) {
RESERVE_SPACE(4);
WRITE32(sess->fore_channel.rdma_attrs);
ADJUST_ARGS();
}
RESERVE_SPACE(28);
WRITE32(0); /* headerpadsz */
WRITE32(sess->back_channel.maxreq_sz);
WRITE32(sess->back_channel.maxresp_sz);
WRITE32(sess->back_channel.maxresp_cached);
WRITE32(sess->back_channel.maxops);
WRITE32(sess->back_channel.maxreqs);
WRITE32(sess->back_channel.nr_rdma_attrs);
ADJUST_ARGS();
if (sess->back_channel.nr_rdma_attrs) {
RESERVE_SPACE(4);
WRITE32(sess->back_channel.rdma_attrs);
ADJUST_ARGS();
}
return 0;
}
static __be32
nfsd4_encode_destroy_session(struct nfsd4_compoundres *resp, __be32 nfserr,
struct nfsd4_destroy_session *destroy_session)
{
return nfserr;
}
static __be32
nfsd4_encode_free_stateid(struct nfsd4_compoundres *resp, __be32 nfserr,
struct nfsd4_free_stateid *free_stateid)
{
__be32 *p;
if (nfserr)
return nfserr;
RESERVE_SPACE(4);
*p++ = nfserr;
ADJUST_ARGS();
return nfserr;
}
static __be32
nfsd4_encode_sequence(struct nfsd4_compoundres *resp, __be32 nfserr,
struct nfsd4_sequence *seq)
{
__be32 *p;
if (nfserr)
return nfserr;
RESERVE_SPACE(NFS4_MAX_SESSIONID_LEN + 20);
WRITEMEM(seq->sessionid.data, NFS4_MAX_SESSIONID_LEN);
WRITE32(seq->seqid);
WRITE32(seq->slotid);
/* Note: slotids are numbered from zero: */
WRITE32(seq->maxslots - 1); /* sr_highest_slotid */
WRITE32(seq->maxslots - 1); /* sr_target_highest_slotid */
WRITE32(seq->status_flags);
ADJUST_ARGS();
resp->cstate.datap = p; /* DRC cache data pointer */
return 0;
}
static __be32
nfsd4_encode_test_stateid(struct nfsd4_compoundres *resp, __be32 nfserr,
struct nfsd4_test_stateid *test_stateid)
{
struct nfsd4_test_stateid_id *stateid, *next;
__be32 *p;
if (nfserr)
return nfserr;
RESERVE_SPACE(4 + (4 * test_stateid->ts_num_ids));
*p++ = htonl(test_stateid->ts_num_ids);
list_for_each_entry_safe(stateid, next, &test_stateid->ts_stateid_list, ts_id_list) {
*p++ = stateid->ts_id_status;
}
ADJUST_ARGS();
return nfserr;
}
static __be32
nfsd4_encode_noop(struct nfsd4_compoundres *resp, __be32 nfserr, void *p)
{
return nfserr;
}
typedef __be32(*nfsd4_enc)(struct nfsd4_compoundres *, __be32, void *);
/*
* Note: nfsd4_enc_ops vector is shared for v4.0 and v4.1
* since we don't need to filter out obsolete ops as this is
* done in the decoding phase.
*/
static nfsd4_enc nfsd4_enc_ops[] = {
[OP_ACCESS] = (nfsd4_enc)nfsd4_encode_access,
[OP_CLOSE] = (nfsd4_enc)nfsd4_encode_close,
[OP_COMMIT] = (nfsd4_enc)nfsd4_encode_commit,
[OP_CREATE] = (nfsd4_enc)nfsd4_encode_create,
[OP_DELEGPURGE] = (nfsd4_enc)nfsd4_encode_noop,
[OP_DELEGRETURN] = (nfsd4_enc)nfsd4_encode_noop,
[OP_GETATTR] = (nfsd4_enc)nfsd4_encode_getattr,
[OP_GETFH] = (nfsd4_enc)nfsd4_encode_getfh,
[OP_LINK] = (nfsd4_enc)nfsd4_encode_link,
[OP_LOCK] = (nfsd4_enc)nfsd4_encode_lock,
[OP_LOCKT] = (nfsd4_enc)nfsd4_encode_lockt,
[OP_LOCKU] = (nfsd4_enc)nfsd4_encode_locku,
[OP_LOOKUP] = (nfsd4_enc)nfsd4_encode_noop,
[OP_LOOKUPP] = (nfsd4_enc)nfsd4_encode_noop,
[OP_NVERIFY] = (nfsd4_enc)nfsd4_encode_noop,
[OP_OPEN] = (nfsd4_enc)nfsd4_encode_open,
[OP_OPENATTR] = (nfsd4_enc)nfsd4_encode_noop,
[OP_OPEN_CONFIRM] = (nfsd4_enc)nfsd4_encode_open_confirm,
[OP_OPEN_DOWNGRADE] = (nfsd4_enc)nfsd4_encode_open_downgrade,
[OP_PUTFH] = (nfsd4_enc)nfsd4_encode_noop,
[OP_PUTPUBFH] = (nfsd4_enc)nfsd4_encode_noop,
[OP_PUTROOTFH] = (nfsd4_enc)nfsd4_encode_noop,
[OP_READ] = (nfsd4_enc)nfsd4_encode_read,
[OP_READDIR] = (nfsd4_enc)nfsd4_encode_readdir,
[OP_READLINK] = (nfsd4_enc)nfsd4_encode_readlink,
[OP_REMOVE] = (nfsd4_enc)nfsd4_encode_remove,
[OP_RENAME] = (nfsd4_enc)nfsd4_encode_rename,
[OP_RENEW] = (nfsd4_enc)nfsd4_encode_noop,
[OP_RESTOREFH] = (nfsd4_enc)nfsd4_encode_noop,
[OP_SAVEFH] = (nfsd4_enc)nfsd4_encode_noop,
[OP_SECINFO] = (nfsd4_enc)nfsd4_encode_secinfo,
[OP_SETATTR] = (nfsd4_enc)nfsd4_encode_setattr,
[OP_SETCLIENTID] = (nfsd4_enc)nfsd4_encode_setclientid,
[OP_SETCLIENTID_CONFIRM] = (nfsd4_enc)nfsd4_encode_noop,
[OP_VERIFY] = (nfsd4_enc)nfsd4_encode_noop,
[OP_WRITE] = (nfsd4_enc)nfsd4_encode_write,
[OP_RELEASE_LOCKOWNER] = (nfsd4_enc)nfsd4_encode_noop,
/* NFSv4.1 operations */
[OP_BACKCHANNEL_CTL] = (nfsd4_enc)nfsd4_encode_noop,
[OP_BIND_CONN_TO_SESSION] = (nfsd4_enc)nfsd4_encode_bind_conn_to_session,
[OP_EXCHANGE_ID] = (nfsd4_enc)nfsd4_encode_exchange_id,
[OP_CREATE_SESSION] = (nfsd4_enc)nfsd4_encode_create_session,
[OP_DESTROY_SESSION] = (nfsd4_enc)nfsd4_encode_destroy_session,
[OP_FREE_STATEID] = (nfsd4_enc)nfsd4_encode_free_stateid,
[OP_GET_DIR_DELEGATION] = (nfsd4_enc)nfsd4_encode_noop,
[OP_GETDEVICEINFO] = (nfsd4_enc)nfsd4_encode_noop,
[OP_GETDEVICELIST] = (nfsd4_enc)nfsd4_encode_noop,
[OP_LAYOUTCOMMIT] = (nfsd4_enc)nfsd4_encode_noop,
[OP_LAYOUTGET] = (nfsd4_enc)nfsd4_encode_noop,
[OP_LAYOUTRETURN] = (nfsd4_enc)nfsd4_encode_noop,
[OP_SECINFO_NO_NAME] = (nfsd4_enc)nfsd4_encode_secinfo_no_name,
[OP_SEQUENCE] = (nfsd4_enc)nfsd4_encode_sequence,
[OP_SET_SSV] = (nfsd4_enc)nfsd4_encode_noop,
[OP_TEST_STATEID] = (nfsd4_enc)nfsd4_encode_test_stateid,
[OP_WANT_DELEGATION] = (nfsd4_enc)nfsd4_encode_noop,
[OP_DESTROY_CLIENTID] = (nfsd4_enc)nfsd4_encode_noop,
[OP_RECLAIM_COMPLETE] = (nfsd4_enc)nfsd4_encode_noop,
};
/*
 * Calculate the total amount of memory that the compound response has taken
 * after encoding the current operation, including pad.
 *
 * pad: if the operation is non-idempotent, pad was calculated by the
 * op_rsize_bop() hook specified in its nfsd4_operation entry; otherwise
 * pad is zero.
*
* Compare this length to the session se_fmaxresp_sz and se_fmaxresp_cached.
*
* Our se_fmaxresp_cached will always be a multiple of PAGE_SIZE, and so
* will be at least a page and will therefore hold the xdr_buf head.
*/
__be32 nfsd4_check_resp_size(struct nfsd4_compoundres *resp, u32 pad)
{
struct xdr_buf *xb = &resp->rqstp->rq_res;
struct nfsd4_session *session = NULL;
struct nfsd4_slot *slot = resp->cstate.slot;
u32 length, tlen = 0;
if (!nfsd4_has_session(&resp->cstate))
return 0;
session = resp->cstate.session;
if (session == NULL)
return 0;
if (xb->page_len == 0) {
length = (char *)resp->p - (char *)xb->head[0].iov_base + pad;
} else {
if (xb->tail[0].iov_base && xb->tail[0].iov_len > 0)
tlen = (char *)resp->p - (char *)xb->tail[0].iov_base;
length = xb->head[0].iov_len + xb->page_len + tlen + pad;
}
dprintk("%s length %u, xb->page_len %u tlen %u pad %u\n", __func__,
length, xb->page_len, tlen, pad);
if (length > session->se_fchannel.maxresp_sz)
return nfserr_rep_too_big;
if ((slot->sl_flags & NFSD4_SLOT_CACHETHIS) &&
length > session->se_fchannel.maxresp_cached)
return nfserr_rep_too_big_to_cache;
return 0;
}
void
nfsd4_encode_operation(struct nfsd4_compoundres *resp, struct nfsd4_op *op)
{
struct nfs4_stateowner *so = resp->cstate.replay_owner;
__be32 *statp;
__be32 *p;
RESERVE_SPACE(8);
WRITE32(op->opnum);
statp = p++; /* to be backfilled at the end */
ADJUST_ARGS();
if (op->opnum == OP_ILLEGAL)
goto status;
BUG_ON(op->opnum < 0 || op->opnum >= ARRAY_SIZE(nfsd4_enc_ops) ||
!nfsd4_enc_ops[op->opnum]);
op->status = nfsd4_enc_ops[op->opnum](resp, op->status, &op->u);
/* nfsd4_check_resp_size guarantees enough room for error status */
if (!op->status)
op->status = nfsd4_check_resp_size(resp, 0);
if (so) {
so->so_replay.rp_status = op->status;
so->so_replay.rp_buflen = (char *)resp->p - (char *)(statp+1);
memcpy(so->so_replay.rp_buf, statp+1, so->so_replay.rp_buflen);
}
status:
/*
* Note: We write the status directly, instead of using WRITE32(),
* since it is already in network byte order.
*/
*statp = op->status;
}
/*
* Encode the reply stored in the stateowner reply cache
*
* XDR note: do not encode rp->rp_buflen: the buffer contains the
* previously sent already encoded operation.
*
* called with nfs4_lock_state() held
*/
void
nfsd4_encode_replay(struct nfsd4_compoundres *resp, struct nfsd4_op *op)
{
__be32 *p;
struct nfs4_replay *rp = op->replay;
BUG_ON(!rp);
RESERVE_SPACE(8);
WRITE32(op->opnum);
*p++ = rp->rp_status; /* already xdr'ed */
ADJUST_ARGS();
RESERVE_SPACE(rp->rp_buflen);
WRITEMEM(rp->rp_buf, rp->rp_buflen);
ADJUST_ARGS();
}
int
nfs4svc_encode_voidres(struct svc_rqst *rqstp, __be32 *p, void *dummy)
{
return xdr_ressize_check(rqstp, p);
}
int nfsd4_release_compoundargs(void *rq, __be32 *p, void *resp)
{
struct svc_rqst *rqstp = rq;
struct nfsd4_compoundargs *args = rqstp->rq_argp;
if (args->ops != args->iops) {
kfree(args->ops);
args->ops = args->iops;
}
kfree(args->tmpp);
args->tmpp = NULL;
while (args->to_free) {
struct tmpbuf *tb = args->to_free;
args->to_free = tb->next;
tb->release(tb->buf);
kfree(tb);
}
return 1;
}
int
nfs4svc_decode_compoundargs(struct svc_rqst *rqstp, __be32 *p, struct nfsd4_compoundargs *args)
{
args->p = p;
args->end = rqstp->rq_arg.head[0].iov_base + rqstp->rq_arg.head[0].iov_len;
args->pagelist = rqstp->rq_arg.pages;
args->pagelen = rqstp->rq_arg.page_len;
args->tmpp = NULL;
args->to_free = NULL;
args->ops = args->iops;
args->rqstp = rqstp;
return !nfsd4_decode_compound(args);
}
int
nfs4svc_encode_compoundres(struct svc_rqst *rqstp, __be32 *p, struct nfsd4_compoundres *resp)
{
/*
* All that remains is to write the tag and operation count...
*/
struct nfsd4_compound_state *cs = &resp->cstate;
struct kvec *iov;
p = resp->tagp;
*p++ = htonl(resp->taglen);
memcpy(p, resp->tag, resp->taglen);
p += XDR_QUADLEN(resp->taglen);
*p++ = htonl(resp->opcnt);
if (rqstp->rq_res.page_len)
iov = &rqstp->rq_res.tail[0];
else
iov = &rqstp->rq_res.head[0];
iov->iov_len = ((char*)resp->p) - (char*)iov->iov_base;
BUG_ON(iov->iov_len > PAGE_SIZE);
if (nfsd4_has_session(cs)) {
if (cs->status != nfserr_replay_cache) {
nfsd4_store_cache_entry(resp);
cs->slot->sl_flags &= ~NFSD4_SLOT_INUSE;
}
/* Renew the clientid on success and on replay */
put_client_renew(cs->session->se_client);
nfsd4_put_session(cs->session);
}
return 1;
}
/*
* Local variables:
* c-basic-offset: 8
* End:
*/
| gpl-2.0 |
TeamHorizon/android_kernel_lge_c50 | drivers/hwmon/f75375s.c | 2215 | 26563 | /*
* f75375s.c - driver for the Fintek F75375/SP, F75373 and
* F75387SG/RG hardware monitoring features
* Copyright (C) 2006-2007 Riku Voipio
*
* Datasheets available at:
*
* f75375:
* http://www.fintek.com.tw/files/productfiles/F75375_V026P.pdf
*
* f75373:
* http://www.fintek.com.tw/files/productfiles/F75373_V025P.pdf
*
* f75387:
* http://www.fintek.com.tw/files/productfiles/F75387_V027P.pdf
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*
*/
#include <linux/module.h>
#include <linux/jiffies.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>
#include <linux/i2c.h>
#include <linux/err.h>
#include <linux/mutex.h>
#include <linux/f75375s.h>
#include <linux/slab.h>
/* Addresses to scan */
static const unsigned short normal_i2c[] = { 0x2d, 0x2e, I2C_CLIENT_END };
enum chips { f75373, f75375, f75387 };
/* Fintek F75375 registers */
#define F75375_REG_CONFIG0 0x0
#define F75375_REG_CONFIG1 0x1
#define F75375_REG_CONFIG2 0x2
#define F75375_REG_CONFIG3 0x3
#define F75375_REG_ADDR 0x4
#define F75375_REG_INTR 0x31
#define F75375_CHIP_ID 0x5A
#define F75375_REG_VERSION 0x5C
#define F75375_REG_VENDOR 0x5D
#define F75375_REG_FAN_TIMER 0x60
#define F75375_REG_VOLT(nr) (0x10 + (nr))
#define F75375_REG_VOLT_HIGH(nr) (0x20 + (nr) * 2)
#define F75375_REG_VOLT_LOW(nr) (0x21 + (nr) * 2)
#define F75375_REG_TEMP(nr) (0x14 + (nr))
#define F75387_REG_TEMP11_LSB(nr) (0x1a + (nr))
#define F75375_REG_TEMP_HIGH(nr) (0x28 + (nr) * 2)
#define F75375_REG_TEMP_HYST(nr) (0x29 + (nr) * 2)
#define F75375_REG_FAN(nr) (0x16 + (nr) * 2)
#define F75375_REG_FAN_MIN(nr) (0x2C + (nr) * 2)
#define F75375_REG_FAN_FULL(nr) (0x70 + (nr) * 0x10)
#define F75375_REG_FAN_PWM_DUTY(nr) (0x76 + (nr) * 0x10)
#define F75375_REG_FAN_PWM_CLOCK(nr) (0x7D + (nr) * 0x10)
#define F75375_REG_FAN_EXP(nr) (0x74 + (nr) * 0x10)
#define F75375_REG_FAN_B_TEMP(nr, step) ((0xA0 + (nr) * 0x10) + (step))
#define F75375_REG_FAN_B_SPEED(nr, step) \
((0xA5 + (nr) * 0x10) + (step) * 2)
#define F75375_REG_PWM1_RAISE_DUTY 0x69
#define F75375_REG_PWM2_RAISE_DUTY 0x6A
#define F75375_REG_PWM1_DROP_DUTY 0x6B
#define F75375_REG_PWM2_DROP_DUTY 0x6C
#define F75375_FAN_CTRL_LINEAR(nr) (4 + nr)
#define F75387_FAN_CTRL_LINEAR(nr) (1 + ((nr) * 4))
#define FAN_CTRL_MODE(nr) (4 + ((nr) * 2))
#define F75387_FAN_DUTY_MODE(nr) (2 + ((nr) * 4))
#define F75387_FAN_MANU_MODE(nr) ((nr) * 4)
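/*
 * The macros above compute bit positions within the fan configuration
 * and timer registers; e.g. F75387_FAN_MANU_MODE(1) is bit 4, cleared
 * or set below via fanmode &= ~(1 << ...) style updates.
 */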
/*
* Data structures and manipulation thereof
*/
struct f75375_data {
unsigned short addr;
struct device *hwmon_dev;
const char *name;
int kind;
struct mutex update_lock; /* protect register access */
char valid;
unsigned long last_updated; /* In jiffies */
unsigned long last_limits; /* In jiffies */
/* Register values */
u8 in[4];
u8 in_max[4];
u8 in_min[4];
u16 fan[2];
u16 fan_min[2];
u16 fan_max[2];
u16 fan_target[2];
u8 fan_timer;
u8 pwm[2];
u8 pwm_mode[2];
u8 pwm_enable[2];
/*
* f75387: For remote temperature reading, it uses signed 11-bit
* values with LSB = 0.125 degree Celsius, left-justified in 16-bit
 * registers. For the original 8-bit temperature readings, the LSB is simply 0.
*/
s16 temp11[2];
s8 temp_high[2];
s8 temp_max_hyst[2];
};
static int f75375_detect(struct i2c_client *client,
struct i2c_board_info *info);
static int f75375_probe(struct i2c_client *client,
const struct i2c_device_id *id);
static int f75375_remove(struct i2c_client *client);
static const struct i2c_device_id f75375_id[] = {
{ "f75373", f75373 },
{ "f75375", f75375 },
{ "f75387", f75387 },
{ }
};
MODULE_DEVICE_TABLE(i2c, f75375_id);
static struct i2c_driver f75375_driver = {
.class = I2C_CLASS_HWMON,
.driver = {
.name = "f75375",
},
.probe = f75375_probe,
.remove = f75375_remove,
.id_table = f75375_id,
.detect = f75375_detect,
.address_list = normal_i2c,
};
static inline int f75375_read8(struct i2c_client *client, u8 reg)
{
return i2c_smbus_read_byte_data(client, reg);
}
/* in most cases, should be called while holding update_lock: the two
 * byte reads are not atomic, so the lock keeps the MSB/LSB pair consistent */
static inline u16 f75375_read16(struct i2c_client *client, u8 reg)
{
return (i2c_smbus_read_byte_data(client, reg) << 8)
| i2c_smbus_read_byte_data(client, reg + 1);
}
static inline void f75375_write8(struct i2c_client *client, u8 reg,
u8 value)
{
i2c_smbus_write_byte_data(client, reg, value);
}
static inline void f75375_write16(struct i2c_client *client, u8 reg,
u16 value)
{
int err = i2c_smbus_write_byte_data(client, reg, (value >> 8));
if (err)
return;
i2c_smbus_write_byte_data(client, reg + 1, (value & 0xFF));
}
static void f75375_write_pwm(struct i2c_client *client, int nr)
{
struct f75375_data *data = i2c_get_clientdata(client);
if (data->kind == f75387)
f75375_write16(client, F75375_REG_FAN_EXP(nr), data->pwm[nr]);
else
f75375_write8(client, F75375_REG_FAN_PWM_DUTY(nr),
data->pwm[nr]);
}
static struct f75375_data *f75375_update_device(struct device *dev)
{
struct i2c_client *client = to_i2c_client(dev);
struct f75375_data *data = i2c_get_clientdata(client);
int nr;
mutex_lock(&data->update_lock);
/* Limit registers cache is refreshed after 60 seconds */
if (time_after(jiffies, data->last_limits + 60 * HZ)
|| !data->valid) {
for (nr = 0; nr < 2; nr++) {
data->temp_high[nr] =
f75375_read8(client, F75375_REG_TEMP_HIGH(nr));
data->temp_max_hyst[nr] =
f75375_read8(client, F75375_REG_TEMP_HYST(nr));
data->fan_max[nr] =
f75375_read16(client, F75375_REG_FAN_FULL(nr));
data->fan_min[nr] =
f75375_read16(client, F75375_REG_FAN_MIN(nr));
data->fan_target[nr] =
f75375_read16(client, F75375_REG_FAN_EXP(nr));
}
for (nr = 0; nr < 4; nr++) {
data->in_max[nr] =
f75375_read8(client, F75375_REG_VOLT_HIGH(nr));
data->in_min[nr] =
f75375_read8(client, F75375_REG_VOLT_LOW(nr));
}
data->fan_timer = f75375_read8(client, F75375_REG_FAN_TIMER);
data->last_limits = jiffies;
}
/* Measurement registers cache is refreshed after 2 second */
if (time_after(jiffies, data->last_updated + 2 * HZ)
|| !data->valid) {
for (nr = 0; nr < 2; nr++) {
data->pwm[nr] = f75375_read8(client,
F75375_REG_FAN_PWM_DUTY(nr));
/* assign MSB, therefore shift it by 8 bits */
data->temp11[nr] =
f75375_read8(client, F75375_REG_TEMP(nr)) << 8;
if (data->kind == f75387)
/* merge F75387's temperature LSB (11-bit) */
data->temp11[nr] |=
f75375_read8(client,
F75387_REG_TEMP11_LSB(nr));
data->fan[nr] =
f75375_read16(client, F75375_REG_FAN(nr));
}
for (nr = 0; nr < 4; nr++)
data->in[nr] =
f75375_read8(client, F75375_REG_VOLT(nr));
data->last_updated = jiffies;
data->valid = 1;
}
mutex_unlock(&data->update_lock);
return data;
}
static inline u16 rpm_from_reg(u16 reg)
{
if (reg == 0 || reg == 0xffff)
return 0;
return 1500000 / reg;
}
static inline u16 rpm_to_reg(int rpm)
{
if (rpm < 367 || rpm > 0xffff)
return 0xffff;
return 1500000 / rpm;
}
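/*
* Worked example for the conversions above (illustrative numbers): a
* register value of 3000 reads back as 1500000 / 3000 = 500 RPM, and
* rpm_to_reg(500) returns 1500000 / 500 = 3000 again. Readings of 0 or
* 0xffff, and target RPMs outside the representable range, map to the
* sentinel values (0 RPM and 0xffff respectively).
*/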
static bool duty_mode_enabled(u8 pwm_enable)
{
switch (pwm_enable) {
case 0: /* Manual, duty mode (full speed) */
case 1: /* Manual, duty mode */
case 4: /* Auto, duty mode */
return true;
case 2: /* Auto, speed mode */
case 3: /* Manual, speed mode */
return false;
default:
BUG();
return true;
}
}
static bool auto_mode_enabled(u8 pwm_enable)
{
switch (pwm_enable) {
case 0: /* Manual, duty mode (full speed) */
case 1: /* Manual, duty mode */
case 3: /* Manual, speed mode */
return false;
case 2: /* Auto, speed mode */
case 4: /* Auto, duty mode */
return true;
default:
BUG();
return false;
}
}
static ssize_t set_fan_min(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
int nr = to_sensor_dev_attr(attr)->index;
struct i2c_client *client = to_i2c_client(dev);
struct f75375_data *data = i2c_get_clientdata(client);
unsigned long val;
int err;
err = kstrtoul(buf, 10, &val);
if (err < 0)
return err;
mutex_lock(&data->update_lock);
data->fan_min[nr] = rpm_to_reg(val);
f75375_write16(client, F75375_REG_FAN_MIN(nr), data->fan_min[nr]);
mutex_unlock(&data->update_lock);
return count;
}
static ssize_t set_fan_target(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
int nr = to_sensor_dev_attr(attr)->index;
struct i2c_client *client = to_i2c_client(dev);
struct f75375_data *data = i2c_get_clientdata(client);
unsigned long val;
int err;
err = kstrtoul(buf, 10, &val);
if (err < 0)
return err;
if (auto_mode_enabled(data->pwm_enable[nr]))
return -EINVAL;
if (data->kind == f75387 && duty_mode_enabled(data->pwm_enable[nr]))
return -EINVAL;
mutex_lock(&data->update_lock);
data->fan_target[nr] = rpm_to_reg(val);
f75375_write16(client, F75375_REG_FAN_EXP(nr), data->fan_target[nr]);
mutex_unlock(&data->update_lock);
return count;
}
static ssize_t set_pwm(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
int nr = to_sensor_dev_attr(attr)->index;
struct i2c_client *client = to_i2c_client(dev);
struct f75375_data *data = i2c_get_clientdata(client);
unsigned long val;
int err;
err = kstrtoul(buf, 10, &val);
if (err < 0)
return err;
if (auto_mode_enabled(data->pwm_enable[nr]) ||
!duty_mode_enabled(data->pwm_enable[nr]))
return -EINVAL;
mutex_lock(&data->update_lock);
data->pwm[nr] = clamp_val(val, 0, 255);
f75375_write_pwm(client, nr);
mutex_unlock(&data->update_lock);
return count;
}
static ssize_t show_pwm_enable(struct device *dev, struct device_attribute
*attr, char *buf)
{
int nr = to_sensor_dev_attr(attr)->index;
struct f75375_data *data = f75375_update_device(dev);
return sprintf(buf, "%d\n", data->pwm_enable[nr]);
}
static int set_pwm_enable_direct(struct i2c_client *client, int nr, int val)
{
struct f75375_data *data = i2c_get_clientdata(client);
u8 fanmode;
if (val < 0 || val > 4)
return -EINVAL;
fanmode = f75375_read8(client, F75375_REG_FAN_TIMER);
if (data->kind == f75387) {
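/*
* Summary of the f75387 encoding implemented by the switch below
* (it mirrors the decoding in f75375_init()):
*   manu=1 duty=1 -> pwm_enable 0 or 1 (manual duty; 0 also forces 255)
*   manu=0 duty=0 -> pwm_enable 2 (automatic, speed mode)
*   manu=1 duty=0 -> pwm_enable 3 (manual, speed mode)
*   manu=0 duty=1 -> pwm_enable 4 (automatic, duty mode)
*/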
/* For now, deny dangerous toggling of duty mode */
if (duty_mode_enabled(data->pwm_enable[nr]) !=
duty_mode_enabled(val))
return -EOPNOTSUPP;
/* clear each fanX_mode bit before setting them properly */
fanmode &= ~(1 << F75387_FAN_DUTY_MODE(nr));
fanmode &= ~(1 << F75387_FAN_MANU_MODE(nr));
switch (val) {
case 0: /* full speed */
fanmode |= (1 << F75387_FAN_MANU_MODE(nr));
fanmode |= (1 << F75387_FAN_DUTY_MODE(nr));
data->pwm[nr] = 255;
break;
case 1: /* PWM */
fanmode |= (1 << F75387_FAN_MANU_MODE(nr));
fanmode |= (1 << F75387_FAN_DUTY_MODE(nr));
break;
case 2: /* Automatic, speed mode */
break;
case 3: /* fan speed */
fanmode |= (1 << F75387_FAN_MANU_MODE(nr));
break;
case 4: /* Automatic, pwm */
fanmode |= (1 << F75387_FAN_DUTY_MODE(nr));
break;
}
} else {
/* clear each fanX_mode bit before setting them properly */
fanmode &= ~(3 << FAN_CTRL_MODE(nr));
switch (val) {
case 0: /* full speed */
fanmode |= (3 << FAN_CTRL_MODE(nr));
data->pwm[nr] = 255;
break;
case 1: /* PWM */
fanmode |= (3 << FAN_CTRL_MODE(nr));
break;
case 2: /* Automatic */
fanmode |= (1 << FAN_CTRL_MODE(nr));
break;
case 3: /* fan speed */
break;
case 4: /* Automatic pwm */
return -EINVAL;
}
}
f75375_write8(client, F75375_REG_FAN_TIMER, fanmode);
data->pwm_enable[nr] = val;
if (val == 0)
f75375_write_pwm(client, nr);
return 0;
}
static ssize_t set_pwm_enable(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
int nr = to_sensor_dev_attr(attr)->index;
struct i2c_client *client = to_i2c_client(dev);
struct f75375_data *data = i2c_get_clientdata(client);
unsigned long val;
int err;
err = kstrtoul(buf, 10, &val);
if (err < 0)
return err;
mutex_lock(&data->update_lock);
err = set_pwm_enable_direct(client, nr, val);
mutex_unlock(&data->update_lock);
return err ? err : count;
}
static ssize_t set_pwm_mode(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
int nr = to_sensor_dev_attr(attr)->index;
struct i2c_client *client = to_i2c_client(dev);
struct f75375_data *data = i2c_get_clientdata(client);
unsigned long val;
int err;
u8 conf;
char reg, ctrl;
err = kstrtoul(buf, 10, &val);
if (err < 0)
return err;
if (!(val == 0 || val == 1))
return -EINVAL;
/* F75373 does not support DC (linear voltage) fan control mode */
if (data->kind == f75373 && val == 0)
return -EINVAL;
/* the two chips use different registers for this */
if (data->kind == f75387) {
reg = F75375_REG_FAN_TIMER;
ctrl = F75387_FAN_CTRL_LINEAR(nr);
} else {
reg = F75375_REG_CONFIG1;
ctrl = F75375_FAN_CTRL_LINEAR(nr);
}
mutex_lock(&data->update_lock);
conf = f75375_read8(client, reg);
conf &= ~(1 << ctrl);
if (val == 0)
conf |= (1 << ctrl);
f75375_write8(client, reg, conf);
data->pwm_mode[nr] = val;
mutex_unlock(&data->update_lock);
return count;
}
static ssize_t show_pwm(struct device *dev, struct device_attribute
*attr, char *buf)
{
int nr = to_sensor_dev_attr(attr)->index;
struct f75375_data *data = f75375_update_device(dev);
return sprintf(buf, "%d\n", data->pwm[nr]);
}
static ssize_t show_pwm_mode(struct device *dev, struct device_attribute
*attr, char *buf)
{
int nr = to_sensor_dev_attr(attr)->index;
struct f75375_data *data = f75375_update_device(dev);
return sprintf(buf, "%d\n", data->pwm_mode[nr]);
}
#define VOLT_FROM_REG(val) ((val) * 8)
#define VOLT_TO_REG(val) ((val) / 8)
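/*
* The voltage registers have an LSB of 8 mV; e.g. a raw value of 150
* reads as 1200 mV, and VOLT_TO_REG(1200) yields 150 again.
*/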
static ssize_t show_in(struct device *dev, struct device_attribute *attr,
char *buf)
{
int nr = to_sensor_dev_attr(attr)->index;
struct f75375_data *data = f75375_update_device(dev);
return sprintf(buf, "%d\n", VOLT_FROM_REG(data->in[nr]));
}
static ssize_t show_in_max(struct device *dev, struct device_attribute *attr,
char *buf)
{
int nr = to_sensor_dev_attr(attr)->index;
struct f75375_data *data = f75375_update_device(dev);
return sprintf(buf, "%d\n", VOLT_FROM_REG(data->in_max[nr]));
}
static ssize_t show_in_min(struct device *dev, struct device_attribute *attr,
char *buf)
{
int nr = to_sensor_dev_attr(attr)->index;
struct f75375_data *data = f75375_update_device(dev);
return sprintf(buf, "%d\n", VOLT_FROM_REG(data->in_min[nr]));
}
static ssize_t set_in_max(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
int nr = to_sensor_dev_attr(attr)->index;
struct i2c_client *client = to_i2c_client(dev);
struct f75375_data *data = i2c_get_clientdata(client);
unsigned long val;
int err;
err = kstrtoul(buf, 10, &val);
if (err < 0)
return err;
val = clamp_val(VOLT_TO_REG(val), 0, 0xff);
mutex_lock(&data->update_lock);
data->in_max[nr] = val;
f75375_write8(client, F75375_REG_VOLT_HIGH(nr), data->in_max[nr]);
mutex_unlock(&data->update_lock);
return count;
}
static ssize_t set_in_min(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
int nr = to_sensor_dev_attr(attr)->index;
struct i2c_client *client = to_i2c_client(dev);
struct f75375_data *data = i2c_get_clientdata(client);
unsigned long val;
int err;
err = kstrtoul(buf, 10, &val);
if (err < 0)
return err;
val = clamp_val(VOLT_TO_REG(val), 0, 0xff);
mutex_lock(&data->update_lock);
data->in_min[nr] = val;
f75375_write8(client, F75375_REG_VOLT_LOW(nr), data->in_min[nr]);
mutex_unlock(&data->update_lock);
return count;
}
#define TEMP_FROM_REG(val) ((val) * 1000)
#define TEMP_TO_REG(val) ((val) / 1000)
#define TEMP11_FROM_REG(reg) ((reg) / 32 * 125)
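/*
* Worked example for the 11-bit format (illustrative value): 25.125
* degrees C is 201 in units of 0.125 degrees C; left-justified into 16
* bits it is stored as 201 << 5 = 6432, and TEMP11_FROM_REG(6432) gives
* 6432 / 32 * 125 = 25125 millidegrees C.
*/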
static ssize_t show_temp11(struct device *dev, struct device_attribute *attr,
char *buf)
{
int nr = to_sensor_dev_attr(attr)->index;
struct f75375_data *data = f75375_update_device(dev);
return sprintf(buf, "%d\n", TEMP11_FROM_REG(data->temp11[nr]));
}
static ssize_t show_temp_max(struct device *dev, struct device_attribute *attr,
char *buf)
{
int nr = to_sensor_dev_attr(attr)->index;
struct f75375_data *data = f75375_update_device(dev);
return sprintf(buf, "%d\n", TEMP_FROM_REG(data->temp_high[nr]));
}
static ssize_t show_temp_max_hyst(struct device *dev,
struct device_attribute *attr, char *buf)
{
int nr = to_sensor_dev_attr(attr)->index;
struct f75375_data *data = f75375_update_device(dev);
return sprintf(buf, "%d\n", TEMP_FROM_REG(data->temp_max_hyst[nr]));
}
static ssize_t set_temp_max(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
int nr = to_sensor_dev_attr(attr)->index;
struct i2c_client *client = to_i2c_client(dev);
struct f75375_data *data = i2c_get_clientdata(client);
unsigned long val;
int err;
err = kstrtoul(buf, 10, &val);
if (err < 0)
return err;
val = clamp_val(TEMP_TO_REG(val), 0, 127);
mutex_lock(&data->update_lock);
data->temp_high[nr] = val;
f75375_write8(client, F75375_REG_TEMP_HIGH(nr), data->temp_high[nr]);
mutex_unlock(&data->update_lock);
return count;
}
static ssize_t set_temp_max_hyst(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
int nr = to_sensor_dev_attr(attr)->index;
struct i2c_client *client = to_i2c_client(dev);
struct f75375_data *data = i2c_get_clientdata(client);
unsigned long val;
int err;
err = kstrtoul(buf, 10, &val);
if (err < 0)
return err;
val = clamp_val(TEMP_TO_REG(val), 0, 127);
mutex_lock(&data->update_lock);
data->temp_max_hyst[nr] = val;
f75375_write8(client, F75375_REG_TEMP_HYST(nr),
data->temp_max_hyst[nr]);
mutex_unlock(&data->update_lock);
return count;
}
#define show_fan(thing) \
static ssize_t show_##thing(struct device *dev, struct device_attribute *attr, \
char *buf)\
{\
int nr = to_sensor_dev_attr(attr)->index;\
struct f75375_data *data = f75375_update_device(dev); \
return sprintf(buf, "%d\n", rpm_from_reg(data->thing[nr])); \
}
show_fan(fan);
show_fan(fan_min);
show_fan(fan_max);
show_fan(fan_target);
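/*
* The show_fan() macro above stamps out one sysfs show helper per
* field; e.g. show_fan(fan_min) expands to show_fan_min(), which
* reports rpm_from_reg(data->fan_min[nr]).
*/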
static SENSOR_DEVICE_ATTR(in0_input, S_IRUGO, show_in, NULL, 0);
static SENSOR_DEVICE_ATTR(in0_max, S_IRUGO|S_IWUSR,
show_in_max, set_in_max, 0);
static SENSOR_DEVICE_ATTR(in0_min, S_IRUGO|S_IWUSR,
show_in_min, set_in_min, 0);
static SENSOR_DEVICE_ATTR(in1_input, S_IRUGO, show_in, NULL, 1);
static SENSOR_DEVICE_ATTR(in1_max, S_IRUGO|S_IWUSR,
show_in_max, set_in_max, 1);
static SENSOR_DEVICE_ATTR(in1_min, S_IRUGO|S_IWUSR,
show_in_min, set_in_min, 1);
static SENSOR_DEVICE_ATTR(in2_input, S_IRUGO, show_in, NULL, 2);
static SENSOR_DEVICE_ATTR(in2_max, S_IRUGO|S_IWUSR,
show_in_max, set_in_max, 2);
static SENSOR_DEVICE_ATTR(in2_min, S_IRUGO|S_IWUSR,
show_in_min, set_in_min, 2);
static SENSOR_DEVICE_ATTR(in3_input, S_IRUGO, show_in, NULL, 3);
static SENSOR_DEVICE_ATTR(in3_max, S_IRUGO|S_IWUSR,
show_in_max, set_in_max, 3);
static SENSOR_DEVICE_ATTR(in3_min, S_IRUGO|S_IWUSR,
show_in_min, set_in_min, 3);
static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, show_temp11, NULL, 0);
static SENSOR_DEVICE_ATTR(temp1_max_hyst, S_IRUGO|S_IWUSR,
show_temp_max_hyst, set_temp_max_hyst, 0);
static SENSOR_DEVICE_ATTR(temp1_max, S_IRUGO|S_IWUSR,
show_temp_max, set_temp_max, 0);
static SENSOR_DEVICE_ATTR(temp2_input, S_IRUGO, show_temp11, NULL, 1);
static SENSOR_DEVICE_ATTR(temp2_max_hyst, S_IRUGO|S_IWUSR,
show_temp_max_hyst, set_temp_max_hyst, 1);
static SENSOR_DEVICE_ATTR(temp2_max, S_IRUGO|S_IWUSR,
show_temp_max, set_temp_max, 1);
static SENSOR_DEVICE_ATTR(fan1_input, S_IRUGO, show_fan, NULL, 0);
static SENSOR_DEVICE_ATTR(fan1_max, S_IRUGO, show_fan_max, NULL, 0);
static SENSOR_DEVICE_ATTR(fan1_min, S_IRUGO|S_IWUSR,
show_fan_min, set_fan_min, 0);
static SENSOR_DEVICE_ATTR(fan1_target, S_IRUGO|S_IWUSR,
show_fan_target, set_fan_target, 0);
static SENSOR_DEVICE_ATTR(fan2_input, S_IRUGO, show_fan, NULL, 1);
static SENSOR_DEVICE_ATTR(fan2_max, S_IRUGO, show_fan_max, NULL, 1);
static SENSOR_DEVICE_ATTR(fan2_min, S_IRUGO|S_IWUSR,
show_fan_min, set_fan_min, 1);
static SENSOR_DEVICE_ATTR(fan2_target, S_IRUGO|S_IWUSR,
show_fan_target, set_fan_target, 1);
static SENSOR_DEVICE_ATTR(pwm1, S_IRUGO|S_IWUSR,
show_pwm, set_pwm, 0);
static SENSOR_DEVICE_ATTR(pwm1_enable, S_IRUGO|S_IWUSR,
show_pwm_enable, set_pwm_enable, 0);
static SENSOR_DEVICE_ATTR(pwm1_mode, S_IRUGO,
show_pwm_mode, set_pwm_mode, 0);
static SENSOR_DEVICE_ATTR(pwm2, S_IRUGO|S_IWUSR,
show_pwm, set_pwm, 1);
static SENSOR_DEVICE_ATTR(pwm2_enable, S_IRUGO|S_IWUSR,
show_pwm_enable, set_pwm_enable, 1);
static SENSOR_DEVICE_ATTR(pwm2_mode, S_IRUGO,
show_pwm_mode, set_pwm_mode, 1);
static struct attribute *f75375_attributes[] = {
&sensor_dev_attr_temp1_input.dev_attr.attr,
&sensor_dev_attr_temp1_max.dev_attr.attr,
&sensor_dev_attr_temp1_max_hyst.dev_attr.attr,
&sensor_dev_attr_temp2_input.dev_attr.attr,
&sensor_dev_attr_temp2_max.dev_attr.attr,
&sensor_dev_attr_temp2_max_hyst.dev_attr.attr,
&sensor_dev_attr_fan1_input.dev_attr.attr,
&sensor_dev_attr_fan1_max.dev_attr.attr,
&sensor_dev_attr_fan1_min.dev_attr.attr,
&sensor_dev_attr_fan1_target.dev_attr.attr,
&sensor_dev_attr_fan2_input.dev_attr.attr,
&sensor_dev_attr_fan2_max.dev_attr.attr,
&sensor_dev_attr_fan2_min.dev_attr.attr,
&sensor_dev_attr_fan2_target.dev_attr.attr,
&sensor_dev_attr_pwm1.dev_attr.attr,
&sensor_dev_attr_pwm1_enable.dev_attr.attr,
&sensor_dev_attr_pwm1_mode.dev_attr.attr,
&sensor_dev_attr_pwm2.dev_attr.attr,
&sensor_dev_attr_pwm2_enable.dev_attr.attr,
&sensor_dev_attr_pwm2_mode.dev_attr.attr,
&sensor_dev_attr_in0_input.dev_attr.attr,
&sensor_dev_attr_in0_max.dev_attr.attr,
&sensor_dev_attr_in0_min.dev_attr.attr,
&sensor_dev_attr_in1_input.dev_attr.attr,
&sensor_dev_attr_in1_max.dev_attr.attr,
&sensor_dev_attr_in1_min.dev_attr.attr,
&sensor_dev_attr_in2_input.dev_attr.attr,
&sensor_dev_attr_in2_max.dev_attr.attr,
&sensor_dev_attr_in2_min.dev_attr.attr,
&sensor_dev_attr_in3_input.dev_attr.attr,
&sensor_dev_attr_in3_max.dev_attr.attr,
&sensor_dev_attr_in3_min.dev_attr.attr,
NULL
};
static const struct attribute_group f75375_group = {
.attrs = f75375_attributes,
};
static void f75375_init(struct i2c_client *client, struct f75375_data *data,
struct f75375s_platform_data *f75375s_pdata)
{
int nr;
if (!f75375s_pdata) {
u8 conf, mode;
conf = f75375_read8(client, F75375_REG_CONFIG1);
mode = f75375_read8(client, F75375_REG_FAN_TIMER);
for (nr = 0; nr < 2; nr++) {
if (data->kind == f75387) {
bool manu, duty;
if (!(mode & (1 << F75387_FAN_CTRL_LINEAR(nr))))
data->pwm_mode[nr] = 1;
manu = ((mode >> F75387_FAN_MANU_MODE(nr)) & 1);
duty = ((mode >> F75387_FAN_DUTY_MODE(nr)) & 1);
if (!manu && duty)
/* auto, pwm */
data->pwm_enable[nr] = 4;
else if (manu && !duty)
/* manual, speed */
data->pwm_enable[nr] = 3;
else if (!manu && !duty)
/* automatic, speed */
data->pwm_enable[nr] = 2;
else
/* manual, pwm */
data->pwm_enable[nr] = 1;
} else {
if (!(conf & (1 << F75375_FAN_CTRL_LINEAR(nr))))
data->pwm_mode[nr] = 1;
switch ((mode >> FAN_CTRL_MODE(nr)) & 3) {
case 0: /* speed */
data->pwm_enable[nr] = 3;
break;
case 1: /* automatic */
data->pwm_enable[nr] = 2;
break;
default: /* manual */
data->pwm_enable[nr] = 1;
break;
}
}
}
return;
}
set_pwm_enable_direct(client, 0, f75375s_pdata->pwm_enable[0]);
set_pwm_enable_direct(client, 1, f75375s_pdata->pwm_enable[1]);
for (nr = 0; nr < 2; nr++) {
if (auto_mode_enabled(f75375s_pdata->pwm_enable[nr]) ||
!duty_mode_enabled(f75375s_pdata->pwm_enable[nr]))
continue;
data->pwm[nr] = clamp_val(f75375s_pdata->pwm[nr], 0, 255);
f75375_write_pwm(client, nr);
}
}
static int f75375_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
struct f75375_data *data;
struct f75375s_platform_data *f75375s_pdata = client->dev.platform_data;
int err;
if (!i2c_check_functionality(client->adapter,
I2C_FUNC_SMBUS_BYTE_DATA))
return -EIO;
data = devm_kzalloc(&client->dev, sizeof(struct f75375_data),
GFP_KERNEL);
if (!data)
return -ENOMEM;
i2c_set_clientdata(client, data);
mutex_init(&data->update_lock);
data->kind = id->driver_data;
err = sysfs_create_group(&client->dev.kobj, &f75375_group);
if (err)
return err;
if (data->kind != f75373) {
err = sysfs_chmod_file(&client->dev.kobj,
&sensor_dev_attr_pwm1_mode.dev_attr.attr,
S_IRUGO | S_IWUSR);
if (err)
goto exit_remove;
err = sysfs_chmod_file(&client->dev.kobj,
&sensor_dev_attr_pwm2_mode.dev_attr.attr,
S_IRUGO | S_IWUSR);
if (err)
goto exit_remove;
}
data->hwmon_dev = hwmon_device_register(&client->dev);
if (IS_ERR(data->hwmon_dev)) {
err = PTR_ERR(data->hwmon_dev);
goto exit_remove;
}
f75375_init(client, data, f75375s_pdata);
return 0;
exit_remove:
sysfs_remove_group(&client->dev.kobj, &f75375_group);
return err;
}
static int f75375_remove(struct i2c_client *client)
{
struct f75375_data *data = i2c_get_clientdata(client);
hwmon_device_unregister(data->hwmon_dev);
sysfs_remove_group(&client->dev.kobj, &f75375_group);
return 0;
}
/* Return 0 if detection is successful, -ENODEV otherwise */
static int f75375_detect(struct i2c_client *client,
struct i2c_board_info *info)
{
struct i2c_adapter *adapter = client->adapter;
u16 vendid, chipid;
u8 version;
const char *name;
vendid = f75375_read16(client, F75375_REG_VENDOR);
chipid = f75375_read16(client, F75375_CHIP_ID);
if (vendid != 0x1934)
return -ENODEV;
if (chipid == 0x0306)
name = "f75375";
else if (chipid == 0x0204)
name = "f75373";
else if (chipid == 0x0410)
name = "f75387";
else
return -ENODEV;
version = f75375_read8(client, F75375_REG_VERSION);
dev_info(&adapter->dev, "found %s version: %02X\n", name, version);
strlcpy(info->type, name, I2C_NAME_SIZE);
return 0;
}
module_i2c_driver(f75375_driver);
MODULE_AUTHOR("Riku Voipio");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("F75373/F75375/F75387 hardware monitoring driver");
| gpl-2.0 |
Mirenk/marshmallow_kernel_b2wlj | drivers/acpi/video.c | 2727 | 47012 | /*
* video.c - ACPI Video Driver ($Revision:$)
*
* Copyright (C) 2004 Luming Yu <luming.yu@intel.com>
* Copyright (C) 2004 Bruno Ducrot <ducrot@poupinou.org>
* Copyright (C) 2006 Thomas Tuttle <linux-kernel@ttuttle.net>
*
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or (at
* your option) any later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
*
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/input.h>
#include <linux/backlight.h>
#include <linux/thermal.h>
#include <linux/sort.h>
#include <linux/pci.h>
#include <linux/pci_ids.h>
#include <linux/slab.h>
#include <asm/uaccess.h>
#include <linux/dmi.h>
#include <acpi/acpi_bus.h>
#include <acpi/acpi_drivers.h>
#include <linux/suspend.h>
#include <acpi/video.h>
#define PREFIX "ACPI: "
#define ACPI_VIDEO_BUS_NAME "Video Bus"
#define ACPI_VIDEO_DEVICE_NAME "Video Device"
#define ACPI_VIDEO_NOTIFY_SWITCH 0x80
#define ACPI_VIDEO_NOTIFY_PROBE 0x81
#define ACPI_VIDEO_NOTIFY_CYCLE 0x82
#define ACPI_VIDEO_NOTIFY_NEXT_OUTPUT 0x83
#define ACPI_VIDEO_NOTIFY_PREV_OUTPUT 0x84
#define ACPI_VIDEO_NOTIFY_CYCLE_BRIGHTNESS 0x85
#define ACPI_VIDEO_NOTIFY_INC_BRIGHTNESS 0x86
#define ACPI_VIDEO_NOTIFY_DEC_BRIGHTNESS 0x87
#define ACPI_VIDEO_NOTIFY_ZERO_BRIGHTNESS 0x88
#define ACPI_VIDEO_NOTIFY_DISPLAY_OFF 0x89
#define MAX_NAME_LEN 20
#define _COMPONENT ACPI_VIDEO_COMPONENT
ACPI_MODULE_NAME("video");
MODULE_AUTHOR("Bruno Ducrot");
MODULE_DESCRIPTION("ACPI Video Driver");
MODULE_LICENSE("GPL");
static bool brightness_switch_enabled = true;
module_param(brightness_switch_enabled, bool, 0644);
/*
* By default, we don't allow duplicate ACPI video bus devices
* under the same VGA controller
*/
static bool allow_duplicates;
module_param(allow_duplicates, bool, 0644);
/*
* Some BIOSes claim they use the minimum backlight at boot,
* which may leave the screen dimmed after boot.
*/
static bool use_bios_initial_backlight = true;
module_param(use_bios_initial_backlight, bool, 0644);
static int register_count = 0;
static int acpi_video_bus_add(struct acpi_device *device);
static int acpi_video_bus_remove(struct acpi_device *device, int type);
static void acpi_video_bus_notify(struct acpi_device *device, u32 event);
static const struct acpi_device_id video_device_ids[] = {
{ACPI_VIDEO_HID, 0},
{"", 0},
};
MODULE_DEVICE_TABLE(acpi, video_device_ids);
static struct acpi_driver acpi_video_bus = {
.name = "video",
.class = ACPI_VIDEO_CLASS,
.ids = video_device_ids,
.ops = {
.add = acpi_video_bus_add,
.remove = acpi_video_bus_remove,
.notify = acpi_video_bus_notify,
},
};
struct acpi_video_bus_flags {
u8 multihead:1; /* can switch video heads */
u8 rom:1; /* can retrieve a video rom */
u8 post:1; /* can configure the head to */
u8 reserved:5;
};
struct acpi_video_bus_cap {
u8 _DOS:1; /*Enable/Disable output switching */
u8 _DOD:1; /*Enumerate all devices attached to display adapter */
u8 _ROM:1; /*Get ROM Data */
u8 _GPD:1; /*Get POST Device */
u8 _SPD:1; /*Set POST Device */
u8 _VPO:1; /*Video POST Options */
u8 reserved:2;
};
struct acpi_video_device_attrib {
u32 display_index:4; /* A zero-based instance of the Display */
u32 display_port_attachment:4; /*This field differentiates the display type */
u32 display_type:4; /*Describe the specific type in use */
u32 vendor_specific:4; /*Chipset Vendor Specific */
u32 bios_can_detect:1; /*BIOS can detect the device */
u32 depend_on_vga:1; /*Non-VGA output device whose power is related to
the VGA device. */
u32 pipe_id:3; /*For VGA multiple-head devices. */
u32 reserved:10; /*Must be 0 */
u32 device_id_scheme:1; /*Device ID Scheme */
};
struct acpi_video_enumerated_device {
union {
u32 int_val;
struct acpi_video_device_attrib attrib;
} value;
struct acpi_video_device *bind_info;
};
struct acpi_video_bus {
struct acpi_device *device;
u8 dos_setting;
struct acpi_video_enumerated_device *attached_array;
u8 attached_count;
struct acpi_video_bus_cap cap;
struct acpi_video_bus_flags flags;
struct list_head video_device_list;
struct mutex device_list_lock; /* protects video_device_list */
struct input_dev *input;
char phys[32]; /* for input device */
struct notifier_block pm_nb;
};
struct acpi_video_device_flags {
u8 crt:1;
u8 lcd:1;
u8 tvout:1;
u8 dvi:1;
u8 bios:1;
u8 unknown:1;
u8 reserved:2;
};
struct acpi_video_device_cap {
u8 _ADR:1; /*Return the unique ID */
u8 _BCL:1; /*Query list of brightness control levels supported */
u8 _BCM:1; /*Set the brightness level */
u8 _BQC:1; /* Get current brightness level */
u8 _BCQ:1; /* Some buggy BIOS uses _BCQ instead of _BQC */
u8 _DDC:1; /*Return the EDID for this device */
};
struct acpi_video_brightness_flags {
u8 _BCL_no_ac_battery_levels:1; /* no AC/Battery levels in _BCL */
u8 _BCL_reversed:1; /* _BCL package is in a reversed order*/
u8 _BCL_use_index:1; /* levels in _BCL are index values */
u8 _BCM_use_index:1; /* input of _BCM is an index value */
u8 _BQC_use_index:1; /* _BQC returns an index value */
};
struct acpi_video_device_brightness {
int curr;
int count;
int *levels;
struct acpi_video_brightness_flags flags;
};
struct acpi_video_device {
unsigned long device_id;
struct acpi_video_device_flags flags;
struct acpi_video_device_cap cap;
struct list_head entry;
struct acpi_video_bus *video;
struct acpi_device *dev;
struct acpi_video_device_brightness *brightness;
struct backlight_device *backlight;
struct thermal_cooling_device *cooling_dev;
};
static const char device_decode[][30] = {
"motherboard VGA device",
"PCI VGA device",
"AGP VGA device",
"UNKNOWN",
};
static void acpi_video_device_notify(acpi_handle handle, u32 event, void *data);
static void acpi_video_device_rebind(struct acpi_video_bus *video);
static void acpi_video_device_bind(struct acpi_video_bus *video,
struct acpi_video_device *device);
static int acpi_video_device_enumerate(struct acpi_video_bus *video);
static int acpi_video_device_lcd_set_level(struct acpi_video_device *device,
int level);
static int acpi_video_device_lcd_get_level_current(
struct acpi_video_device *device,
unsigned long long *level, int init);
static int acpi_video_get_next_level(struct acpi_video_device *device,
u32 level_current, u32 event);
static int acpi_video_switch_brightness(struct acpi_video_device *device,
int event);
/*backlight device sysfs support*/
static int acpi_video_get_brightness(struct backlight_device *bd)
{
unsigned long long cur_level;
int i;
struct acpi_video_device *vd =
(struct acpi_video_device *)bl_get_data(bd);
if (acpi_video_device_lcd_get_level_current(vd, &cur_level, 0))
return -EINVAL;
for (i = 2; i < vd->brightness->count; i++) {
if (vd->brightness->levels[i] == cur_level)
/* The first two entries are special - see page 575
of the ACPI spec 3.0 */
return i-2;
}
return 0;
}
static int acpi_video_set_brightness(struct backlight_device *bd)
{
int request_level = bd->props.brightness + 2;
struct acpi_video_device *vd =
(struct acpi_video_device *)bl_get_data(bd);
return acpi_video_device_lcd_set_level(vd,
vd->brightness->levels[request_level]);
}
static const struct backlight_ops acpi_backlight_ops = {
.get_brightness = acpi_video_get_brightness,
.update_status = acpi_video_set_brightness,
};
/* thermal cooling device callbacks */
static int video_get_max_state(struct thermal_cooling_device *cooling_dev, unsigned
long *state)
{
struct acpi_device *device = cooling_dev->devdata;
struct acpi_video_device *video = acpi_driver_data(device);
*state = video->brightness->count - 3;
return 0;
}
static int video_get_cur_state(struct thermal_cooling_device *cooling_dev, unsigned
long *state)
{
struct acpi_device *device = cooling_dev->devdata;
struct acpi_video_device *video = acpi_driver_data(device);
unsigned long long level;
int offset;
if (acpi_video_device_lcd_get_level_current(video, &level, 0))
return -EINVAL;
for (offset = 2; offset < video->brightness->count; offset++)
if (level == video->brightness->levels[offset]) {
*state = video->brightness->count - offset - 1;
return 0;
}
return -EINVAL;
}
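/*
* Worked example for the mapping below (assuming brightness->count = 10,
* i.e. 8 real levels at indices 2..9): cooling state 0 selects
* levels[10 - 0 - 1] = levels[9] (brightest), and the deepest valid
* state, 7, selects levels[10 - 7 - 1] = levels[2] (dimmest).
*/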
static int
video_set_cur_state(struct thermal_cooling_device *cooling_dev, unsigned long state)
{
struct acpi_device *device = cooling_dev->devdata;
struct acpi_video_device *video = acpi_driver_data(device);
int level;
if (state >= video->brightness->count - 2)
return -EINVAL;
state = video->brightness->count - state;
level = video->brightness->levels[state - 1];
return acpi_video_device_lcd_set_level(video, level);
}
static const struct thermal_cooling_device_ops video_cooling_ops = {
.get_max_state = video_get_max_state,
.get_cur_state = video_get_cur_state,
.set_cur_state = video_set_cur_state,
};
/* --------------------------------------------------------------------------
Video Management
-------------------------------------------------------------------------- */
static int
acpi_video_device_lcd_query_levels(struct acpi_video_device *device,
union acpi_object **levels)
{
int status;
struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
union acpi_object *obj;
*levels = NULL;
status = acpi_evaluate_object(device->dev->handle, "_BCL", NULL, &buffer);
if (!ACPI_SUCCESS(status))
return status;
obj = (union acpi_object *)buffer.pointer;
if (!obj || (obj->type != ACPI_TYPE_PACKAGE)) {
printk(KERN_ERR PREFIX "Invalid _BCL data\n");
status = -EFAULT;
goto err;
}
*levels = obj;
return 0;
err:
kfree(buffer.pointer);
return status;
}
static int
acpi_video_device_lcd_set_level(struct acpi_video_device *device, int level)
{
int status;
union acpi_object arg0 = { ACPI_TYPE_INTEGER };
struct acpi_object_list args = { 1, &arg0 };
int state;
arg0.integer.value = level;
status = acpi_evaluate_object(device->dev->handle, "_BCM",
&args, NULL);
if (ACPI_FAILURE(status)) {
ACPI_ERROR((AE_INFO, "Evaluating _BCM failed"));
return -EIO;
}
device->brightness->curr = level;
for (state = 2; state < device->brightness->count; state++)
if (level == device->brightness->levels[state]) {
if (device->backlight)
device->backlight->props.brightness = state - 2;
return 0;
}
ACPI_ERROR((AE_INFO, "Current brightness invalid"));
return -EINVAL;
}
/*
* For some buggy _BQC methods, we need to add a constant value to
* the _BQC return value to get the actual current brightness level
*/
static int bqc_offset_aml_bug_workaround;
static int __init video_set_bqc_offset(const struct dmi_system_id *d)
{
bqc_offset_aml_bug_workaround = 9;
return 0;
}
static struct dmi_system_id video_dmi_table[] __initdata = {
/*
* Broken _BQC workaround http://bugzilla.kernel.org/show_bug.cgi?id=13121
*/
{
.callback = video_set_bqc_offset,
.ident = "Acer Aspire 5720",
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "Acer"),
DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5720"),
},
},
{
.callback = video_set_bqc_offset,
.ident = "Acer Aspire 5710Z",
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "Acer"),
DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5710Z"),
},
},
{
.callback = video_set_bqc_offset,
.ident = "eMachines E510",
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "EMACHINES"),
DMI_MATCH(DMI_PRODUCT_NAME, "eMachines E510"),
},
},
{
.callback = video_set_bqc_offset,
.ident = "Acer Aspire 5315",
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "Acer"),
DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5315"),
},
},
{
.callback = video_set_bqc_offset,
.ident = "Acer Aspire 7720",
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "Acer"),
DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 7720"),
},
},
{}
};
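/*
* Worked example of the _BQC index fix-ups below (assuming a _BCL with
* count = 10, i.e. 8 real levels at indices 2..9): if _BQC returns
* index 0 and the package was reversed, the index is first mirrored to
* 10 - 3 - 0 = 7 and then mapped to levels[7 + 2] = levels[9].
*/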
static int
acpi_video_device_lcd_get_level_current(struct acpi_video_device *device,
unsigned long long *level, int init)
{
acpi_status status = AE_OK;
int i;
if (device->cap._BQC || device->cap._BCQ) {
char *buf = device->cap._BQC ? "_BQC" : "_BCQ";
status = acpi_evaluate_integer(device->dev->handle, buf,
NULL, level);
if (ACPI_SUCCESS(status)) {
if (device->brightness->flags._BQC_use_index) {
if (device->brightness->flags._BCL_reversed)
*level = device->brightness->count
- 3 - (*level);
*level = device->brightness->levels[*level + 2];
}
*level += bqc_offset_aml_bug_workaround;
for (i = 2; i < device->brightness->count; i++)
if (device->brightness->levels[i] == *level) {
device->brightness->curr = *level;
return 0;
}
if (!init) {
/*
* BQC returned an invalid level.
* Stop using it.
*/
ACPI_WARNING((AE_INFO,
"%s returned an invalid level",
buf));
device->cap._BQC = device->cap._BCQ = 0;
}
} else {
/* Fixme:
* should we return an error or ignore this failure?
* dev->brightness->curr is a cached value which stores
* the correct current backlight level in most cases.
* ACPI video backlight still works w/ buggy _BQC.
* http://bugzilla.kernel.org/show_bug.cgi?id=12233
*/
ACPI_WARNING((AE_INFO, "Evaluating %s failed", buf));
device->cap._BQC = device->cap._BCQ = 0;
}
}
*level = device->brightness->curr;
return 0;
}
static int
acpi_video_device_EDID(struct acpi_video_device *device,
union acpi_object **edid, ssize_t length)
{
int status;
struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
union acpi_object *obj;
union acpi_object arg0 = { ACPI_TYPE_INTEGER };
struct acpi_object_list args = { 1, &arg0 };
*edid = NULL;
if (!device)
return -ENODEV;
if (length == 128)
arg0.integer.value = 1;
else if (length == 256)
arg0.integer.value = 2;
else
return -EINVAL;
status = acpi_evaluate_object(device->dev->handle, "_DDC", &args, &buffer);
if (ACPI_FAILURE(status))
return -ENODEV;
obj = buffer.pointer;
if (obj && obj->type == ACPI_TYPE_BUFFER)
*edid = obj;
else {
printk(KERN_ERR PREFIX "Invalid _DDC data\n");
status = -EFAULT;
kfree(obj);
}
return status;
}
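/*
* _DDC takes the requested EDID size as its argument: 1 requests a
* 128-byte EDID, 2 a 256-byte one. acpi_video_get_edid() below relies
* on this by trying length 256 first and falling back to 128.
*/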
/* bus */
/*
* Arg:
* video : video bus device pointer
* bios_flag :
* 0. The system BIOS should NOT automatically switch (toggle)
* the active display output.
* 1. The system BIOS should automatically switch (toggle) the
* active display output. No switch event.
* 2. The _DGS value should be locked.
* 3. The system BIOS should not automatically switch (toggle) the
* active display output, but instead generate the display switch
* event notify code.
* lcd_flag :
* 0. The system BIOS should automatically control the brightness level
* of the LCD when the power changes from AC to DC
* 1. The system BIOS should NOT automatically control the brightness
* level of the LCD when the power changes from AC to DC.
* Return Value:
* -EINVAL wrong arg.
*/
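/*
* Example of the encoding below: acpi_video_bus_stop_devices() calls
* _DOS with bios_flag = 0 and lcd_flag = 1, i.e.
* arg0 = (1 << 2) | 0 = 4: no automatic output switching, and the BIOS
* leaves the LCD brightness alone on AC/DC changes.
*/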
static int
acpi_video_bus_DOS(struct acpi_video_bus *video, int bios_flag, int lcd_flag)
{
acpi_status status;
union acpi_object arg0 = { ACPI_TYPE_INTEGER };
struct acpi_object_list args = { 1, &arg0 };
if (bios_flag < 0 || bios_flag > 3 || lcd_flag < 0 || lcd_flag > 1)
return -EINVAL;
arg0.integer.value = (lcd_flag << 2) | bios_flag;
video->dos_setting = arg0.integer.value;
status = acpi_evaluate_object(video->device->handle, "_DOS",
&args, NULL);
if (ACPI_FAILURE(status))
return -EIO;
return 0;
}
/*
* Simple comparison function used to sort backlight levels.
*/
static int
acpi_video_cmp_level(const void *a, const void *b)
{
return *(int *)a - *(int *)b;
}
/*
* Arg:
* device : video output device (LCD, CRT, ..)
*
* Return Value:
* Maximum brightness level
*
* Allocate and initialize device->brightness.
*/
static int
acpi_video_init_brightness(struct acpi_video_device *device)
{
union acpi_object *obj = NULL;
int i, max_level = 0, count = 0, level_ac_battery = 0;
unsigned long long level, level_old;
union acpi_object *o;
struct acpi_video_device_brightness *br = NULL;
int result = -EINVAL;
if (!ACPI_SUCCESS(acpi_video_device_lcd_query_levels(device, &obj))) {
ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Could not query available "
"LCD brightness level\n"));
goto out;
}
if (obj->package.count < 2)
goto out;
br = kzalloc(sizeof(*br), GFP_KERNEL);
if (!br) {
printk(KERN_ERR "can't allocate memory\n");
result = -ENOMEM;
goto out;
}
br->levels = kmalloc((obj->package.count + 2) * sizeof *(br->levels),
GFP_KERNEL);
if (!br->levels) {
result = -ENOMEM;
goto out_free;
}
for (i = 0; i < obj->package.count; i++) {
o = (union acpi_object *)&obj->package.elements[i];
if (o->type != ACPI_TYPE_INTEGER) {
printk(KERN_ERR PREFIX "Invalid data\n");
continue;
}
br->levels[count] = (u32) o->integer.value;
if (br->levels[count] > max_level)
max_level = br->levels[count];
count++;
}
/*
* Some buggy BIOSes don't include the AC/battery levels in the
* _BCL package. In that case the first two elements of _BCL are
* regular supported brightness levels that the OS must handle too.
*/
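/*
* Example of the fix-up below: a buggy _BCL of {20, 40, 60, 80, 100}
* lacks the AC/battery entries, so neither levels[0] nor levels[1]
* re-appears later and level_ac_battery stays 0. The array is then
* shifted up by two to {20, 40, 20, 40, 60, 80, 100} so the real
* levels start at index 2 as the rest of the driver expects.
*/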
for (i = 2; i < count; i++) {
if (br->levels[i] == br->levels[0])
level_ac_battery++;
if (br->levels[i] == br->levels[1])
level_ac_battery++;
}
if (level_ac_battery < 2) {
level_ac_battery = 2 - level_ac_battery;
br->flags._BCL_no_ac_battery_levels = 1;
for (i = (count - 1 + level_ac_battery); i >= 2; i--)
br->levels[i] = br->levels[i - level_ac_battery];
count += level_ac_battery;
} else if (level_ac_battery > 2)
ACPI_ERROR((AE_INFO, "Too many duplicates in _BCL package\n"));
/* Check if the _BCL package is in a reversed order */
if (max_level == br->levels[2]) {
br->flags._BCL_reversed = 1;
sort(&br->levels[2], count - 2, sizeof(br->levels[2]),
acpi_video_cmp_level, NULL);
} else if (max_level != br->levels[count - 1])
ACPI_ERROR((AE_INFO,
"Found unordered _BCL package\n"));
br->count = count;
device->brightness = br;
/* Check the input/output of _BQC/_BCL/_BCM */
if ((max_level < 100) && (max_level <= (count - 2)))
br->flags._BCL_use_index = 1;
/*
* _BCM is always consistent with _BCL,
* at least for all the laptops we have ever seen.
*/
br->flags._BCM_use_index = br->flags._BCL_use_index;
/* _BQC uses INDEX while _BCL uses VALUE in some laptops */
br->curr = level = max_level;
if (!device->cap._BQC)
goto set_level;
result = acpi_video_device_lcd_get_level_current(device, &level_old, 1);
if (result)
goto out_free_levels;
/*
* Set the level to maximum and check if _BQC uses indexed value
*/
result = acpi_video_device_lcd_set_level(device, max_level);
if (result)
goto out_free_levels;
result = acpi_video_device_lcd_get_level_current(device, &level, 0);
if (result)
goto out_free_levels;
br->flags._BQC_use_index = (level == max_level ? 0 : 1);
if (!br->flags._BQC_use_index) {
/*
* Set the backlight to the initial state.
* On some buggy laptops, _BQC returns an uninitialized value
* when invoked for the first time, i.e. level_old is invalid.
* Set the backlight to max_level in this case.
*/
if (use_bios_initial_backlight) {
for (i = 2; i < br->count; i++)
if (level_old == br->levels[i])
level = level_old;
}
goto set_level;
}
if (br->flags._BCL_reversed)
level_old = (br->count - 1) - level_old;
level = br->levels[level_old];
set_level:
result = acpi_video_device_lcd_set_level(device, level);
if (result)
goto out_free_levels;
ACPI_DEBUG_PRINT((ACPI_DB_INFO,
"found %d brightness levels\n", count - 2));
kfree(obj);
return result;
out_free_levels:
kfree(br->levels);
out_free:
kfree(br);
out:
device->brightness = NULL;
kfree(obj);
return result;
}
/*
* Arg:
* device : video output device (LCD, CRT, ..)
*
* Return Value:
* None
*
* Find out all required AML methods defined under the output
* device.
*/
static void acpi_video_device_find_cap(struct acpi_video_device *device)
{
acpi_handle h_dummy1;
if (ACPI_SUCCESS(acpi_get_handle(device->dev->handle, "_ADR", &h_dummy1))) {
device->cap._ADR = 1;
}
if (ACPI_SUCCESS(acpi_get_handle(device->dev->handle, "_BCL", &h_dummy1))) {
device->cap._BCL = 1;
}
if (ACPI_SUCCESS(acpi_get_handle(device->dev->handle, "_BCM", &h_dummy1))) {
device->cap._BCM = 1;
}
if (ACPI_SUCCESS(acpi_get_handle(device->dev->handle,"_BQC",&h_dummy1)))
device->cap._BQC = 1;
else if (ACPI_SUCCESS(acpi_get_handle(device->dev->handle, "_BCQ",
&h_dummy1))) {
printk(KERN_WARNING FW_BUG "_BCQ is used instead of _BQC\n");
device->cap._BCQ = 1;
}
if (ACPI_SUCCESS(acpi_get_handle(device->dev->handle, "_DDC", &h_dummy1))) {
device->cap._DDC = 1;
}
if (acpi_video_backlight_support()) {
struct backlight_properties props;
struct pci_dev *pdev;
acpi_handle acpi_parent;
struct device *parent = NULL;
int result;
static int count = 0;
char *name;
result = acpi_video_init_brightness(device);
if (result)
return;
name = kasprintf(GFP_KERNEL, "acpi_video%d", count);
if (!name)
return;
count++;
acpi_get_parent(device->dev->handle, &acpi_parent);
pdev = acpi_get_pci_dev(acpi_parent);
if (pdev) {
parent = &pdev->dev;
pci_dev_put(pdev);
}
memset(&props, 0, sizeof(struct backlight_properties));
props.type = BACKLIGHT_FIRMWARE;
props.max_brightness = device->brightness->count - 3;
device->backlight = backlight_device_register(name,
parent,
device,
&acpi_backlight_ops,
&props);
kfree(name);
if (IS_ERR(device->backlight))
return;
/*
* Save current brightness level in case we have to restore it
* before acpi_video_device_lcd_set_level() is called next time.
*/
device->backlight->props.brightness =
acpi_video_get_brightness(device->backlight);
device->cooling_dev = thermal_cooling_device_register("LCD",
device->dev, &video_cooling_ops);
if (IS_ERR(device->cooling_dev)) {
/*
* Set cooling_dev to NULL so we don't crash trying to
* free it.
* Also, why the hell are we returning early and
* not attempting to register the video output if cooling
* device registration failed?
* -- dtor
*/
device->cooling_dev = NULL;
return;
}
dev_info(&device->dev->dev, "registered as cooling_device%d\n",
device->cooling_dev->id);
result = sysfs_create_link(&device->dev->dev.kobj,
&device->cooling_dev->device.kobj,
"thermal_cooling");
if (result)
printk(KERN_ERR PREFIX "Create sysfs link\n");
result = sysfs_create_link(&device->cooling_dev->device.kobj,
&device->dev->dev.kobj, "device");
if (result)
printk(KERN_ERR PREFIX "Create sysfs link\n");
}
}
/*
* Arg:
* device : video output device (VGA)
*
* Return Value:
* None
*
* Find out all required AML methods defined under the video bus device.
*/
static void acpi_video_bus_find_cap(struct acpi_video_bus *video)
{
acpi_handle h_dummy1;
if (ACPI_SUCCESS(acpi_get_handle(video->device->handle, "_DOS", &h_dummy1))) {
video->cap._DOS = 1;
}
if (ACPI_SUCCESS(acpi_get_handle(video->device->handle, "_DOD", &h_dummy1))) {
video->cap._DOD = 1;
}
if (ACPI_SUCCESS(acpi_get_handle(video->device->handle, "_ROM", &h_dummy1))) {
video->cap._ROM = 1;
}
if (ACPI_SUCCESS(acpi_get_handle(video->device->handle, "_GPD", &h_dummy1))) {
video->cap._GPD = 1;
}
if (ACPI_SUCCESS(acpi_get_handle(video->device->handle, "_SPD", &h_dummy1))) {
video->cap._SPD = 1;
}
if (ACPI_SUCCESS(acpi_get_handle(video->device->handle, "_VPO", &h_dummy1))) {
video->cap._VPO = 1;
}
}
/*
* Check whether the video bus device has required AML method to
* support the desired features
*/
static int acpi_video_bus_check(struct acpi_video_bus *video)
{
int status = -ENOENT;
struct pci_dev *dev;
if (!video)
return -EINVAL;
dev = acpi_get_pci_dev(video->device->handle);
if (!dev)
return -ENODEV;
pci_dev_put(dev);
/* Since there is no HID, CID, etc. for the VGA driver, we have
* to check for well-known required nodes.
*/
/* Does this device support video switching? */
if (video->cap._DOS || video->cap._DOD) {
if (!video->cap._DOS) {
printk(KERN_WARNING FW_BUG
"ACPI(%s) defines _DOD but not _DOS\n",
acpi_device_bid(video->device));
}
video->flags.multihead = 1;
status = 0;
}
/* Does this device support retrieving a video ROM? */
if (video->cap._ROM) {
video->flags.rom = 1;
status = 0;
}
/* Does this device support configuring which video device to POST? */
if (video->cap._GPD && video->cap._SPD && video->cap._VPO) {
video->flags.post = 1;
status = 0;
}
return status;
}
/* --------------------------------------------------------------------------
Driver Interface
-------------------------------------------------------------------------- */
/* device interface */
static struct acpi_video_device_attrib*
acpi_video_get_device_attr(struct acpi_video_bus *video, unsigned long device_id)
{
struct acpi_video_enumerated_device *ids;
int i;
for (i = 0; i < video->attached_count; i++) {
ids = &video->attached_array[i];
if ((ids->value.int_val & 0xffff) == device_id)
return &ids->value.attrib;
}
return NULL;
}
static int
acpi_video_get_device_type(struct acpi_video_bus *video,
unsigned long device_id)
{
struct acpi_video_enumerated_device *ids;
int i;
for (i = 0; i < video->attached_count; i++) {
ids = &video->attached_array[i];
if ((ids->value.int_val & 0xffff) == device_id)
return ids->value.int_val;
}
return 0;
}
static int
acpi_video_bus_get_one_device(struct acpi_device *device,
struct acpi_video_bus *video)
{
unsigned long long device_id;
int status, device_type;
struct acpi_video_device *data;
struct acpi_video_device_attrib* attribute;
if (!device || !video)
return -EINVAL;
status =
acpi_evaluate_integer(device->handle, "_ADR", NULL, &device_id);
if (ACPI_SUCCESS(status)) {
data = kzalloc(sizeof(struct acpi_video_device), GFP_KERNEL);
if (!data)
return -ENOMEM;
strcpy(acpi_device_name(device), ACPI_VIDEO_DEVICE_NAME);
strcpy(acpi_device_class(device), ACPI_VIDEO_CLASS);
device->driver_data = data;
data->device_id = device_id;
data->video = video;
data->dev = device;
attribute = acpi_video_get_device_attr(video, device_id);
if((attribute != NULL) && attribute->device_id_scheme) {
switch (attribute->display_type) {
case ACPI_VIDEO_DISPLAY_CRT:
data->flags.crt = 1;
break;
case ACPI_VIDEO_DISPLAY_TV:
data->flags.tvout = 1;
break;
case ACPI_VIDEO_DISPLAY_DVI:
data->flags.dvi = 1;
break;
case ACPI_VIDEO_DISPLAY_LCD:
data->flags.lcd = 1;
break;
default:
data->flags.unknown = 1;
break;
}
if(attribute->bios_can_detect)
data->flags.bios = 1;
} else {
/* Check for legacy IDs */
device_type = acpi_video_get_device_type(video,
device_id);
/* Ignore bits 16 and 18-20 */
switch (device_type & 0xffe2ffff) {
case ACPI_VIDEO_DISPLAY_LEGACY_MONITOR:
data->flags.crt = 1;
break;
case ACPI_VIDEO_DISPLAY_LEGACY_PANEL:
data->flags.lcd = 1;
break;
case ACPI_VIDEO_DISPLAY_LEGACY_TV:
data->flags.tvout = 1;
break;
default:
data->flags.unknown = 1;
}
}
acpi_video_device_bind(video, data);
acpi_video_device_find_cap(data);
status = acpi_install_notify_handler(device->handle,
ACPI_DEVICE_NOTIFY,
acpi_video_device_notify,
data);
if (ACPI_FAILURE(status)) {
printk(KERN_ERR PREFIX
"Error installing notify handler\n");
if(data->brightness)
kfree(data->brightness->levels);
kfree(data->brightness);
kfree(data);
return -ENODEV;
}
mutex_lock(&video->device_list_lock);
list_add_tail(&data->entry, &video->video_device_list);
mutex_unlock(&video->device_list_lock);
return 0;
}
return -ENOENT;
}
/*
* Arg:
* video : video bus device
*
* Return:
* none
*
* Enumerate the video device list of the video bus,
* bind the ids with the corresponding video devices
* under the video bus.
*/
static void acpi_video_device_rebind(struct acpi_video_bus *video)
{
struct acpi_video_device *dev;
mutex_lock(&video->device_list_lock);
list_for_each_entry(dev, &video->video_device_list, entry)
acpi_video_device_bind(video, dev);
mutex_unlock(&video->device_list_lock);
}
/*
* Arg:
* video : video bus device
* device : video output device under the video
* bus
*
* Return:
* none
*
* Bind the ids with the corresponding video devices
* under the video bus.
*/
static void
acpi_video_device_bind(struct acpi_video_bus *video,
struct acpi_video_device *device)
{
struct acpi_video_enumerated_device *ids;
int i;
for (i = 0; i < video->attached_count; i++) {
ids = &video->attached_array[i];
if (device->device_id == (ids->value.int_val & 0xffff)) {
ids->bind_info = device;
ACPI_DEBUG_PRINT((ACPI_DB_INFO, "device_bind %d\n", i));
}
}
}
/*
* Arg:
* video : video bus device
*
* Return:
* < 0 : error
*
* Call _DOD to enumerate all devices attached to display adapter
*
*/
static int acpi_video_device_enumerate(struct acpi_video_bus *video)
{
int status;
int count;
int i;
struct acpi_video_enumerated_device *active_list;
struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
union acpi_object *dod = NULL;
union acpi_object *obj;
status = acpi_evaluate_object(video->device->handle, "_DOD", NULL, &buffer);
if (!ACPI_SUCCESS(status)) {
ACPI_EXCEPTION((AE_INFO, status, "Evaluating _DOD"));
return status;
}
dod = buffer.pointer;
if (!dod || (dod->type != ACPI_TYPE_PACKAGE)) {
ACPI_EXCEPTION((AE_INFO, status, "Invalid _DOD data"));
status = -EFAULT;
goto out;
}
ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found %d video heads in _DOD\n",
dod->package.count));
active_list = kcalloc(1 + dod->package.count,
sizeof(struct acpi_video_enumerated_device),
GFP_KERNEL);
if (!active_list) {
status = -ENOMEM;
goto out;
}
count = 0;
for (i = 0; i < dod->package.count; i++) {
obj = &dod->package.elements[i];
if (obj->type != ACPI_TYPE_INTEGER) {
printk(KERN_ERR PREFIX
"Invalid _DOD data in element %d\n", i);
continue;
}
active_list[count].value.int_val = obj->integer.value;
active_list[count].bind_info = NULL;
ACPI_DEBUG_PRINT((ACPI_DB_INFO, "dod element[%d] = %d\n", i,
(int)obj->integer.value));
count++;
}
kfree(video->attached_array);
video->attached_array = active_list;
video->attached_count = count;
out:
kfree(buffer.pointer);
return status;
}
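/*
* Worked example for acpi_video_get_next_level() below (levels[2..6] =
* {20, 40, 60, 80, 100}, level_current = 50): the closest level is 40
* (delta = -10), giving min_above = 60 and max_below = 20; an
* INC_BRIGHTNESS event then returns 60 and DEC_BRIGHTNESS returns 20.
*/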
static int
acpi_video_get_next_level(struct acpi_video_device *device,
u32 level_current, u32 event)
{
int min, max, min_above, max_below, i, l, delta = 255;
max = max_below = 0;
min = min_above = 255;
/* Find closest level to level_current */
for (i = 2; i < device->brightness->count; i++) {
l = device->brightness->levels[i];
if (abs(l - level_current) < abs(delta)) {
delta = l - level_current;
if (!delta)
break;
}
}
/* Adjust level_current to the closest available level */
level_current += delta;
for (i = 2; i < device->brightness->count; i++) {
l = device->brightness->levels[i];
if (l < min)
min = l;
if (l > max)
max = l;
if (l < min_above && l > level_current)
min_above = l;
if (l > max_below && l < level_current)
max_below = l;
}
switch (event) {
case ACPI_VIDEO_NOTIFY_CYCLE_BRIGHTNESS:
return (level_current < max) ? min_above : min;
case ACPI_VIDEO_NOTIFY_INC_BRIGHTNESS:
return (level_current < max) ? min_above : max;
case ACPI_VIDEO_NOTIFY_DEC_BRIGHTNESS:
return (level_current > min) ? max_below : min;
case ACPI_VIDEO_NOTIFY_ZERO_BRIGHTNESS:
case ACPI_VIDEO_NOTIFY_DISPLAY_OFF:
return 0;
default:
return level_current;
}
}
static int
acpi_video_switch_brightness(struct acpi_video_device *device, int event)
{
unsigned long long level_current, level_next;
int result = -EINVAL;
/* no warning message if acpi_backlight=vendor is used */
if (!acpi_video_backlight_support())
return 0;
if (!device->brightness)
goto out;
result = acpi_video_device_lcd_get_level_current(device,
&level_current, 0);
if (result)
goto out;
level_next = acpi_video_get_next_level(device, level_current, event);
result = acpi_video_device_lcd_set_level(device, level_next);
if (!result)
backlight_force_update(device->backlight,
BACKLIGHT_UPDATE_HOTKEY);
out:
if (result)
printk(KERN_ERR PREFIX "Failed to switch the brightness\n");
return result;
}
int acpi_video_get_edid(struct acpi_device *device, int type, int device_id,
void **edid)
{
struct acpi_video_bus *video;
struct acpi_video_device *video_device;
union acpi_object *buffer = NULL;
acpi_status status;
int i, length;
if (!device || !acpi_driver_data(device))
return -EINVAL;
video = acpi_driver_data(device);
for (i = 0; i < video->attached_count; i++) {
video_device = video->attached_array[i].bind_info;
length = 256;
if (!video_device)
continue;
if (!video_device->cap._DDC)
continue;
if (type) {
switch (type) {
case ACPI_VIDEO_DISPLAY_CRT:
if (!video_device->flags.crt)
continue;
break;
case ACPI_VIDEO_DISPLAY_TV:
if (!video_device->flags.tvout)
continue;
break;
case ACPI_VIDEO_DISPLAY_DVI:
if (!video_device->flags.dvi)
continue;
break;
case ACPI_VIDEO_DISPLAY_LCD:
if (!video_device->flags.lcd)
continue;
break;
}
} else if (video_device->device_id != device_id) {
continue;
}
status = acpi_video_device_EDID(video_device, &buffer, length);
if (ACPI_FAILURE(status) || !buffer ||
buffer->type != ACPI_TYPE_BUFFER) {
length = 128;
status = acpi_video_device_EDID(video_device, &buffer,
length);
if (ACPI_FAILURE(status) || !buffer ||
buffer->type != ACPI_TYPE_BUFFER) {
continue;
}
}
*edid = buffer->buffer.pointer;
return length;
}
return -ENODEV;
}
EXPORT_SYMBOL(acpi_video_get_edid);
static int
acpi_video_bus_get_devices(struct acpi_video_bus *video,
struct acpi_device *device)
{
int status;
struct acpi_device *dev;
status = acpi_video_device_enumerate(video);
if (status)
return status;
list_for_each_entry(dev, &device->children, node) {
status = acpi_video_bus_get_one_device(dev, video);
if (status) {
printk(KERN_WARNING PREFIX
"Can't attach device\n");
continue;
}
}
return status;
}
static int acpi_video_bus_put_one_device(struct acpi_video_device *device)
{
acpi_status status;
if (!device || !device->video)
return -ENOENT;
status = acpi_remove_notify_handler(device->dev->handle,
ACPI_DEVICE_NOTIFY,
acpi_video_device_notify);
if (ACPI_FAILURE(status)) {
printk(KERN_WARNING PREFIX
"Can't remove video notify handler\n");
}
if (device->backlight) {
backlight_device_unregister(device->backlight);
device->backlight = NULL;
}
if (device->cooling_dev) {
sysfs_remove_link(&device->dev->dev.kobj,
"thermal_cooling");
sysfs_remove_link(&device->cooling_dev->device.kobj,
"device");
thermal_cooling_device_unregister(device->cooling_dev);
device->cooling_dev = NULL;
}
return 0;
}
static int acpi_video_bus_put_devices(struct acpi_video_bus *video)
{
int status;
struct acpi_video_device *dev, *next;
mutex_lock(&video->device_list_lock);
list_for_each_entry_safe(dev, next, &video->video_device_list, entry) {
status = acpi_video_bus_put_one_device(dev);
if (ACPI_FAILURE(status))
printk(KERN_WARNING PREFIX
"hhuuhhuu bug in acpi video driver.\n");
if (dev->brightness) {
kfree(dev->brightness->levels);
kfree(dev->brightness);
}
list_del(&dev->entry);
kfree(dev);
}
mutex_unlock(&video->device_list_lock);
return 0;
}
/* acpi_video interface */
static int acpi_video_bus_start_devices(struct acpi_video_bus *video)
{
return acpi_video_bus_DOS(video, 0, 0);
}
static int acpi_video_bus_stop_devices(struct acpi_video_bus *video)
{
return acpi_video_bus_DOS(video, 0, 1);
}
static void acpi_video_bus_notify(struct acpi_device *device, u32 event)
{
struct acpi_video_bus *video = acpi_driver_data(device);
struct input_dev *input;
int keycode = 0;
if (!video)
return;
input = video->input;
switch (event) {
case ACPI_VIDEO_NOTIFY_SWITCH: /* User requested a switch,
* most likely via hotkey. */
acpi_bus_generate_proc_event(device, event, 0);
if (!acpi_notifier_call_chain(device, event, 0))
keycode = KEY_SWITCHVIDEOMODE;
break;
case ACPI_VIDEO_NOTIFY_PROBE: /* User plugged in or removed a video
* connector. */
acpi_video_device_enumerate(video);
acpi_video_device_rebind(video);
acpi_bus_generate_proc_event(device, event, 0);
keycode = KEY_SWITCHVIDEOMODE;
break;
case ACPI_VIDEO_NOTIFY_CYCLE: /* Cycle Display output hotkey pressed. */
acpi_bus_generate_proc_event(device, event, 0);
keycode = KEY_SWITCHVIDEOMODE;
break;
case ACPI_VIDEO_NOTIFY_NEXT_OUTPUT: /* Next Display output hotkey pressed. */
acpi_bus_generate_proc_event(device, event, 0);
keycode = KEY_VIDEO_NEXT;
break;
case ACPI_VIDEO_NOTIFY_PREV_OUTPUT: /* previous Display output hotkey pressed. */
acpi_bus_generate_proc_event(device, event, 0);
keycode = KEY_VIDEO_PREV;
break;
default:
ACPI_DEBUG_PRINT((ACPI_DB_INFO,
"Unsupported event [0x%x]\n", event));
break;
}
if (event != ACPI_VIDEO_NOTIFY_SWITCH)
acpi_notifier_call_chain(device, event, 0);
if (keycode) {
input_report_key(input, keycode, 1);
input_sync(input);
input_report_key(input, keycode, 0);
input_sync(input);
}
return;
}
static void acpi_video_device_notify(acpi_handle handle, u32 event, void *data)
{
struct acpi_video_device *video_device = data;
struct acpi_device *device = NULL;
struct acpi_video_bus *bus;
struct input_dev *input;
int keycode = 0;
if (!video_device)
return;
device = video_device->dev;
bus = video_device->video;
input = bus->input;
switch (event) {
case ACPI_VIDEO_NOTIFY_CYCLE_BRIGHTNESS: /* Cycle brightness */
if (brightness_switch_enabled)
acpi_video_switch_brightness(video_device, event);
acpi_bus_generate_proc_event(device, event, 0);
keycode = KEY_BRIGHTNESS_CYCLE;
break;
case ACPI_VIDEO_NOTIFY_INC_BRIGHTNESS: /* Increase brightness */
if (brightness_switch_enabled)
acpi_video_switch_brightness(video_device, event);
acpi_bus_generate_proc_event(device, event, 0);
keycode = KEY_BRIGHTNESSUP;
break;
case ACPI_VIDEO_NOTIFY_DEC_BRIGHTNESS: /* Decrease brightness */
if (brightness_switch_enabled)
acpi_video_switch_brightness(video_device, event);
acpi_bus_generate_proc_event(device, event, 0);
keycode = KEY_BRIGHTNESSDOWN;
break;
case ACPI_VIDEO_NOTIFY_ZERO_BRIGHTNESS: /* zero brightness */
if (brightness_switch_enabled)
acpi_video_switch_brightness(video_device, event);
acpi_bus_generate_proc_event(device, event, 0);
keycode = KEY_BRIGHTNESS_ZERO;
break;
case ACPI_VIDEO_NOTIFY_DISPLAY_OFF: /* display device off */
if (brightness_switch_enabled)
acpi_video_switch_brightness(video_device, event);
acpi_bus_generate_proc_event(device, event, 0);
keycode = KEY_DISPLAY_OFF;
break;
default:
ACPI_DEBUG_PRINT((ACPI_DB_INFO,
"Unsupported event [0x%x]\n", event));
break;
}
acpi_notifier_call_chain(device, event, 0);
if (keycode) {
input_report_key(input, keycode, 1);
input_sync(input);
input_report_key(input, keycode, 0);
input_sync(input);
}
return;
}
static int acpi_video_resume(struct notifier_block *nb,
unsigned long val, void *ign)
{
struct acpi_video_bus *video;
struct acpi_video_device *video_device;
int i;
switch (val) {
case PM_HIBERNATION_PREPARE:
case PM_SUSPEND_PREPARE:
case PM_RESTORE_PREPARE:
return NOTIFY_DONE;
}
video = container_of(nb, struct acpi_video_bus, pm_nb);
dev_info(&video->device->dev, "Restoring backlight state\n");
for (i = 0; i < video->attached_count; i++) {
video_device = video->attached_array[i].bind_info;
if (video_device && video_device->backlight)
acpi_video_set_brightness(video_device->backlight);
}
return NOTIFY_OK;
}
static acpi_status
acpi_video_bus_match(acpi_handle handle, u32 level, void *context,
void **return_value)
{
struct acpi_device *device = context;
struct acpi_device *sibling;
int result;
if (handle == device->handle)
return AE_CTRL_TERMINATE;
result = acpi_bus_get_device(handle, &sibling);
if (result)
return AE_OK;
if (!strcmp(acpi_device_name(sibling), ACPI_VIDEO_BUS_NAME))
return AE_ALREADY_EXISTS;
return AE_OK;
}
static int instance;
static int acpi_video_bus_add(struct acpi_device *device)
{
struct acpi_video_bus *video;
struct input_dev *input;
int error;
acpi_status status;
status = acpi_walk_namespace(ACPI_TYPE_DEVICE,
device->parent->handle, 1,
acpi_video_bus_match, NULL,
device, NULL);
if (status == AE_ALREADY_EXISTS) {
printk(KERN_WARNING FW_BUG
"Duplicate ACPI video bus devices for the"
" same VGA controller, please try module "
"parameter \"video.allow_duplicates=1\""
"if the current driver doesn't work.\n");
if (!allow_duplicates)
return -ENODEV;
}
video = kzalloc(sizeof(struct acpi_video_bus), GFP_KERNEL);
if (!video)
return -ENOMEM;
/* a hack to fix the duplicate name "VID" problem on T61 */
if (!strcmp(device->pnp.bus_id, "VID")) {
if (instance)
device->pnp.bus_id[3] = '0' + instance;
instance++;
}
/* a hack to fix the duplicate name "VGA" problem on Pa 3553 */
if (!strcmp(device->pnp.bus_id, "VGA")) {
if (instance)
device->pnp.bus_id[3] = '0' + instance;
instance++;
}
video->device = device;
strcpy(acpi_device_name(device), ACPI_VIDEO_BUS_NAME);
strcpy(acpi_device_class(device), ACPI_VIDEO_CLASS);
device->driver_data = video;
acpi_video_bus_find_cap(video);
error = acpi_video_bus_check(video);
if (error)
goto err_free_video;
mutex_init(&video->device_list_lock);
INIT_LIST_HEAD(&video->video_device_list);
error = acpi_video_bus_get_devices(video, device);
if (error)
goto err_free_video;
video->input = input = input_allocate_device();
if (!input) {
error = -ENOMEM;
goto err_put_video;
}
error = acpi_video_bus_start_devices(video);
if (error)
goto err_free_input_dev;
snprintf(video->phys, sizeof(video->phys),
"%s/video/input0", acpi_device_hid(video->device));
input->name = acpi_device_name(video->device);
input->phys = video->phys;
input->id.bustype = BUS_HOST;
input->id.product = 0x06;
input->dev.parent = &device->dev;
input->evbit[0] = BIT(EV_KEY);
set_bit(KEY_SWITCHVIDEOMODE, input->keybit);
set_bit(KEY_VIDEO_NEXT, input->keybit);
set_bit(KEY_VIDEO_PREV, input->keybit);
set_bit(KEY_BRIGHTNESS_CYCLE, input->keybit);
set_bit(KEY_BRIGHTNESSUP, input->keybit);
set_bit(KEY_BRIGHTNESSDOWN, input->keybit);
set_bit(KEY_BRIGHTNESS_ZERO, input->keybit);
set_bit(KEY_DISPLAY_OFF, input->keybit);
error = input_register_device(input);
if (error)
goto err_stop_video;
printk(KERN_INFO PREFIX "%s [%s] (multi-head: %s rom: %s post: %s)\n",
ACPI_VIDEO_DEVICE_NAME, acpi_device_bid(device),
video->flags.multihead ? "yes" : "no",
video->flags.rom ? "yes" : "no",
video->flags.post ? "yes" : "no");
video->pm_nb.notifier_call = acpi_video_resume;
video->pm_nb.priority = 0;
error = register_pm_notifier(&video->pm_nb);
if (error)
goto err_unregister_input_dev;
return 0;
err_unregister_input_dev:
input_unregister_device(input);
err_stop_video:
acpi_video_bus_stop_devices(video);
err_free_input_dev:
input_free_device(input);
err_put_video:
acpi_video_bus_put_devices(video);
kfree(video->attached_array);
err_free_video:
kfree(video);
device->driver_data = NULL;
return error;
}
static int acpi_video_bus_remove(struct acpi_device *device, int type)
{
struct acpi_video_bus *video = NULL;
if (!device || !acpi_driver_data(device))
return -EINVAL;
video = acpi_driver_data(device);
unregister_pm_notifier(&video->pm_nb);
acpi_video_bus_stop_devices(video);
acpi_video_bus_put_devices(video);
input_unregister_device(video->input);
kfree(video->attached_array);
kfree(video);
return 0;
}
static int __init intel_opregion_present(void)
{
#if defined(CONFIG_DRM_I915) || defined(CONFIG_DRM_I915_MODULE)
struct pci_dev *dev = NULL;
u32 address;
for_each_pci_dev(dev) {
if ((dev->class >> 8) != PCI_CLASS_DISPLAY_VGA)
continue;
if (dev->vendor != PCI_VENDOR_ID_INTEL)
continue;
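/* config offset 0xfc is ASLS, which holds the ACPI OpRegion base */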
pci_read_config_dword(dev, 0xfc, &address);
if (!address)
continue;
return 1;
}
#endif
return 0;
}
int acpi_video_register(void)
{
int result = 0;
if (register_count) {
/*
* if acpi_video_register() has already been called,
* don't register the acpi_video_bus again and return no error.
*/
return 0;
}
result = acpi_bus_register_driver(&acpi_video_bus);
if (result < 0)
return -ENODEV;
/*
* When the acpi_video_bus is loaded successfully, increase
* the reference counter.
*/
register_count = 1;
return 0;
}
EXPORT_SYMBOL(acpi_video_register);
void acpi_video_unregister(void)
{
if (!register_count) {
/*
* If the acpi video bus is already unloaded, don't
* unload it again and return directly.
*/
return;
}
acpi_bus_unregister_driver(&acpi_video_bus);
register_count = 0;
return;
}
EXPORT_SYMBOL(acpi_video_unregister);
/*
* This is kind of nasty. Hardware using Intel chipsets may require
* the video opregion code to be run first in order to initialise
* state before any ACPI video calls are made. To handle this we defer
* registration of the video class until the opregion code has run.
*/
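/*
* The resulting call order is therefore (a sketch; the i915 side
* lives in its opregion setup code):
*
* acpi_video_init() - sees the opregion and backs off
* i915 opregion initialisation - runs first
* acpi_video_register() - called by i915 afterwards
*/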
static int __init acpi_video_init(void)
{
dmi_check_system(video_dmi_table);
if (intel_opregion_present())
return 0;
return acpi_video_register();
}
static void __exit acpi_video_exit(void)
{
acpi_video_unregister();
return;
}
module_init(acpi_video_init);
module_exit(acpi_video_exit);
| gpl-2.0 |
AAccount/android_kernel_samsung_p4 | drivers/oprofile/event_buffer.c | 3239 | 4624 | /**
* @file event_buffer.c
*
* @remark Copyright 2002 OProfile authors
* @remark Read the file COPYING
*
* @author John Levon <levon@movementarian.org>
*
* This is the global event buffer that the user-space
* daemon reads from. The event buffer is an untyped array
* of unsigned longs. Entries are prefixed by the
* escape value ESCAPE_CODE followed by an identifying code.
*/
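/*
* For illustration, a sketch of how one typed record lands in the
* buffer (CPU_SWITCH_CODE is one of the identifying codes from the
* oprofile headers):
*
* add_event_entry(ESCAPE_CODE);
* add_event_entry(CPU_SWITCH_CODE);
* add_event_entry(cpu);
*
* Plain samples are written as raw unsigned longs; the daemon
* re-synchronizes on ESCAPE_CODE when decoding typed records.
*/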
#include <linux/vmalloc.h>
#include <linux/oprofile.h>
#include <linux/sched.h>
#include <linux/capability.h>
#include <linux/dcookies.h>
#include <linux/fs.h>
#include <asm/uaccess.h>
#include "oprof.h"
#include "event_buffer.h"
#include "oprofile_stats.h"
DEFINE_MUTEX(buffer_mutex);
static unsigned long buffer_opened;
static DECLARE_WAIT_QUEUE_HEAD(buffer_wait);
static unsigned long *event_buffer;
static unsigned long buffer_size;
static unsigned long buffer_watershed;
static size_t buffer_pos;
/* atomic_t because wait_event checks it outside of buffer_mutex */
static atomic_t buffer_ready = ATOMIC_INIT(0);
/*
* Add an entry to the event buffer. When we get near to the end we
* wake up the process sleeping on the read() of the file. To protect
* the event_buffer, this function may only be called while buffer_mutex
* is held.
*/
void add_event_entry(unsigned long value)
{
/*
* This shouldn't happen since all workqueues or handlers are
* canceled or flushed before the event buffer is freed.
*/
if (!event_buffer) {
WARN_ON_ONCE(1);
return;
}
if (buffer_pos == buffer_size) {
atomic_inc(&oprofile_stats.event_lost_overflow);
return;
}
event_buffer[buffer_pos] = value;
if (++buffer_pos == buffer_size - buffer_watershed) {
atomic_set(&buffer_ready, 1);
wake_up(&buffer_wait);
}
}
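/*
* A minimal usage sketch: producers take buffer_mutex around a whole
* record so its entries stay contiguous, e.g.
*
* mutex_lock(&buffer_mutex);
* add_event_entry(ESCAPE_CODE);
* add_event_entry(MODULE_LOADED_CODE);
* mutex_unlock(&buffer_mutex);
*/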
/* Wake up the waiting process if any. This happens
* on "echo 0 >/dev/oprofile/enable" so the daemon
* processes the data remaining in the event buffer.
*/
void wake_up_buffer_waiter(void)
{
mutex_lock(&buffer_mutex);
atomic_set(&buffer_ready, 1);
wake_up(&buffer_wait);
mutex_unlock(&buffer_mutex);
}
int alloc_event_buffer(void)
{
unsigned long flags;
spin_lock_irqsave(&oprofilefs_lock, flags);
buffer_size = oprofile_buffer_size;
buffer_watershed = oprofile_buffer_watershed;
spin_unlock_irqrestore(&oprofilefs_lock, flags);
if (buffer_watershed >= buffer_size)
return -EINVAL;
buffer_pos = 0;
event_buffer = vmalloc(sizeof(unsigned long) * buffer_size);
if (!event_buffer)
return -ENOMEM;
return 0;
}
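/*
* Worked example, assuming the usual defaults of buffer_size = 131072
* and buffer_watershed = 32768 entries: the reader is woken once
* buffer_pos reaches 131072 - 32768 = 98304 entries, leaving the
* watershed as headroom for events arriving before the daemon drains
* the buffer.
*/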
void free_event_buffer(void)
{
mutex_lock(&buffer_mutex);
vfree(event_buffer);
buffer_pos = 0;
event_buffer = NULL;
mutex_unlock(&buffer_mutex);
}
static int event_buffer_open(struct inode *inode, struct file *file)
{
int err = -EPERM;
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
if (test_and_set_bit_lock(0, &buffer_opened))
return -EBUSY;
/* Register as a user of dcookies
* to ensure they persist for the lifetime of
* the open event file
*/
err = -EINVAL;
file->private_data = dcookie_register();
if (!file->private_data)
goto out;
if ((err = oprofile_setup()))
goto fail;
/* NB: the actual start happens from userspace
* echo 1 >/dev/oprofile/enable
*/
return nonseekable_open(inode, file);
fail:
dcookie_unregister(file->private_data);
out:
__clear_bit_unlock(0, &buffer_opened);
return err;
}
static int event_buffer_release(struct inode *inode, struct file *file)
{
oprofile_stop();
oprofile_shutdown();
dcookie_unregister(file->private_data);
buffer_pos = 0;
atomic_set(&buffer_ready, 0);
__clear_bit_unlock(0, &buffer_opened);
return 0;
}
static ssize_t event_buffer_read(struct file *file, char __user *buf,
size_t count, loff_t *offset)
{
int retval = -EINVAL;
size_t const max = buffer_size * sizeof(unsigned long);
/* handling partial reads is more trouble than it's worth */
if (count != max || *offset)
return -EINVAL;
wait_event_interruptible(buffer_wait, atomic_read(&buffer_ready));
if (signal_pending(current))
return -EINTR;
/* can't currently happen */
if (!atomic_read(&buffer_ready))
return -EAGAIN;
mutex_lock(&buffer_mutex);
/* May happen if the buffer is freed during pending reads. */
if (!event_buffer) {
retval = -EINTR;
goto out;
}
atomic_set(&buffer_ready, 0);
retval = -EFAULT;
count = buffer_pos * sizeof(unsigned long);
if (copy_to_user(buf, event_buffer, count))
goto out;
retval = count;
buffer_pos = 0;
out:
mutex_unlock(&buffer_mutex);
return retval;
}
const struct file_operations event_buffer_fops = {
.open = event_buffer_open,
.release = event_buffer_release,
.read = event_buffer_read,
.llseek = no_llseek,
};
| gpl-2.0 |
Tkkg1994/Hulk-Kernel | drivers/cdrom/cdrom.c | 3495 | 100490 | /* linux/drivers/cdrom/cdrom.c
Copyright (c) 1996, 1997 David A. van Leeuwen.
Copyright (c) 1997, 1998 Erik Andersen <andersee@debian.org>
Copyright (c) 1998, 1999 Jens Axboe <axboe@image.dk>
May be copied or modified under the terms of the GNU General Public
License. See linux/COPYING for more information.
Uniform CD-ROM driver for Linux.
See Documentation/cdrom/cdrom-standard.tex for usage information.
The routines in the file provide a uniform interface between the
software that uses CD-ROMs and the various low-level drivers that
actually talk to the hardware. Suggestions are welcome.
Patches that work are more welcome though. ;-)
To Do List:
----------------------------------
-- Modify sysctl/proc interface. I plan on having one directory per
drive, with entries for outputting general drive information, and sysctl
based tunable parameters such as whether the tray should auto-close for
that drive. Suggestions (or patches) for this welcome!
Revision History
----------------------------------
1.00 Date Unknown -- David van Leeuwen <david@tm.tno.nl>
-- Initial version by David A. van Leeuwen. I don't have a detailed
changelog for the 1.x series, David?
2.00 Dec 2, 1997 -- Erik Andersen <andersee@debian.org>
-- New maintainer! As David A. van Leeuwen has been too busy to actively
maintain and improve this driver, I am now carrying on the torch. If
you have a problem with this driver, please feel free to contact me.
-- Added (rudimentary) sysctl interface. I realize this is really weak
right now, and is _very_ badly implemented. It will be improved...
-- Modified CDROM_DISC_STATUS so that it is now incorporated into
the Uniform CD-ROM driver via the cdrom_count_tracks function.
The cdrom_count_tracks function helps resolve some of the false
assumptions of the CDROM_DISC_STATUS ioctl, and is also used to check
for the correct media type when mounting or playing audio from a CD.
-- Remove the calls to verify_area and only use the copy_from_user and
copy_to_user stuff, since these calls now provide their own memory
checking with the 2.1.x kernels.
-- Major update to return codes so that errors from low-level drivers
are passed on through (thanks to Gerd Knorr for pointing out this
problem).
-- Made it so if a function isn't implemented in a low-level driver,
ENOSYS is now returned instead of EINVAL.
-- Simplified some complex logic so that the source code is easier to read.
-- Other stuff I probably forgot to mention (lots of changes).
2.01 to 2.11 Dec 1997-Jan 1998
-- TO-DO! Write changelogs for 2.01 to 2.12.
2.12 Jan 24, 1998 -- Erik Andersen <andersee@debian.org>
-- Fixed a bug in the IOCTL_IN and IOCTL_OUT macros. It turns out that
copy_*_user does not return EFAULT on error, but instead returns the number
of bytes not copied. I was returning whatever non-zero stuff came back from
the copy_*_user functions directly, which would result in strange errors.
2.13 July 17, 1998 -- Erik Andersen <andersee@debian.org>
-- Fixed a bug in CDROM_SELECT_SPEED where you couldn't lower the speed
of the drive. Thanks to Tobias Ringstr|m <tori@prosolvia.se> for pointing
this out and providing a simple fix.
-- Fixed the procfs-unload-module bug with the fill_inode procfs callback.
thanks to Andrea Arcangeli
-- Fixed it so that the /proc entry now also shows up when cdrom is
compiled into the kernel. Before it only worked when loaded as a module.
2.14 August 17, 1998 -- Erik Andersen <andersee@debian.org>
-- Fixed a bug in cdrom_media_changed and handling of reporting that
the media had changed for devices that _don't_ implement media_changed.
Thanks to Grant R. Guenther <grant@torque.net> for spotting this bug.
-- Made a few things more pedantically correct.
2.50 Oct 19, 1998 - Jens Axboe <axboe@image.dk>
-- New maintainers! Erik was too busy to continue the work on the driver,
so now Chris Zwilling <chris@cloudnet.com> and Jens Axboe <axboe@image.dk>
will do their best to follow in his footsteps
2.51 Dec 20, 1998 - Jens Axboe <axboe@image.dk>
-- Check if drive is capable of doing what we ask before blindly changing
cdi->options in various ioctl.
-- Added version to proc entry.
2.52 Jan 16, 1999 - Jens Axboe <axboe@image.dk>
-- Fixed an error in open_for_data where we would sometimes not return
the correct error value. Thanks Huba Gaspar <huba@softcell.hu>.
-- Fixed module usage count - usage was based on /proc/sys/dev
instead of /proc/sys/dev/cdrom. This could lead to an oops when other
modules had entries in dev. Feb 02 - real bug was in sysctl.c where
dev would be removed even though it was used. cdrom.c just illuminated
that bug.
2.53 Feb 22, 1999 - Jens Axboe <axboe@image.dk>
-- Fixup of several ioctl calls, in particular CDROM_SET_OPTIONS has
been "rewritten" because capabilities and options aren't in sync. They
should be...
-- Added CDROM_LOCKDOOR ioctl. Locks the door and keeps it that way.
-- Added CDROM_RESET ioctl.
-- Added CDROM_DEBUG ioctl. Enable debug messages on-the-fly.
-- Added CDROM_GET_CAPABILITY ioctl. This relieves userspace programs
from parsing /proc/sys/dev/cdrom/info.
2.54 Mar 15, 1999 - Jens Axboe <axboe@image.dk>
-- Check capability mask from low level driver when counting tracks as
per suggestion from Corey J. Scotts <cstotts@blue.weeg.uiowa.edu>.
2.55 Apr 25, 1999 - Jens Axboe <axboe@image.dk>
-- autoclose was mistakenly checked against CDC_OPEN_TRAY instead of
CDC_CLOSE_TRAY.
-- proc info didn't mask against capabilities mask.
3.00 Aug 5, 1999 - Jens Axboe <axboe@image.dk>
-- Unified audio ioctl handling across CD-ROM drivers. A lot of the
code was duplicated before. Drives that support the generic packet
interface are now being fed packets from here instead.
-- First attempt at adding support for MMC2 commands - for DVD and
CD-R(W) drives. Only the DVD parts are in now - the interface used is
the same as for the audio ioctls.
-- ioctl cleanups. if a drive couldn't play audio, it didn't get
a chance to perform device specific ioctls as well.
-- Defined CDROM_CAN(CDC_XXX) for checking the capabilities.
-- Put in sysctl files for autoclose, autoeject, check_media, debug,
and lock.
-- /proc/sys/dev/cdrom/info has been updated to also contain info about
CD-Rx and DVD capabilities.
-- Now default to checking media type.
-- CDROM_SEND_PACKET ioctl added. The infrastructure was in place for
doing this anyway, with the generic_packet addition.
3.01 Aug 6, 1999 - Jens Axboe <axboe@image.dk>
-- Fix up the sysctl handling so that the option flags get set
correctly.
-- Fix up ioctl handling so the device specific ones actually get
called :).
3.02 Aug 8, 1999 - Jens Axboe <axboe@image.dk>
-- Fixed volume control on SCSI drives (or others with longer audio
page).
-- Fixed a couple of DVD minors. Thanks to Andrew T. Veliath
<andrewtv@usa.net> for telling me and for having defined the various
DVD structures and ioctls in the first place! He designed the original
DVD patches for ide-cd and while I rearranged and unified them, the
interface is still the same.
3.03 Sep 1, 1999 - Jens Axboe <axboe@image.dk>
-- Moved the rest of the audio ioctls from the CD-ROM drivers here. Only
CDROMREADTOCENTRY and CDROMREADTOCHDR are left.
-- Moved the CDROMREADxxx ioctls in here.
-- Defined the cdrom_get_last_written and cdrom_get_next_block as ioctls
and exported functions.
-- Erik Andersen <andersen@xmission.com> modified all SCMD_ commands
to now read GPCMD_ for the new generic packet interface. All low level
drivers are updated as well.
-- Various other cleanups.
3.04 Sep 12, 1999 - Jens Axboe <axboe@image.dk>
-- Fixed a couple of possible memory leaks (if an operation failed and
we didn't free the buffer before returning the error).
-- Integrated Uniform CD Changer handling from Richard Sharman
<rsharman@pobox.com>.
-- Defined CD_DVD and CD_CHANGER log levels.
-- Fixed the CDROMREADxxx ioctls.
-- CDROMPLAYTRKIND uses the GPCMD_PLAY_AUDIO_MSF command - too few
drives supported it. We lose the index part, however.
-- Small modifications to accommodate opens of /dev/hdc1, required
for ide-cd to handle multisession discs.
-- Export cdrom_mode_sense and cdrom_mode_select.
-- init_cdrom_command() for setting up a cgc command.
3.05 Oct 24, 1999 - Jens Axboe <axboe@image.dk>
-- Changed the interface for CDROM_SEND_PACKET. Before it was virtually
impossible to send the drive data in a sensible way.
-- Lowered stack usage in mmc_ioctl(), dvd_read_disckey(), and
dvd_read_manufact.
-- Added setup of write mode for packet writing.
-- Fixed CDDA ripping with cdda2wav - accept much larger requests of
number of frames and split the reads in blocks of 8.
3.06 Dec 13, 1999 - Jens Axboe <axboe@image.dk>
-- Added support for changing the region of DVD drives.
-- Added sense data to generic command.
3.07 Feb 2, 2000 - Jens Axboe <axboe@suse.de>
-- Do same "read header length" trick in cdrom_get_disc_info() as
we do in cdrom_get_track_info() -- some drives don't obey specs and
fail if they can't supply the full Mt Fuji size table.
-- Deleted stuff related to setting up write modes. It has a different
home now.
-- Clear header length in mode_select unconditionally.
-- Removed the register_disk() that was added, not needed here.
3.08 May 1, 2000 - Jens Axboe <axboe@suse.de>
-- Fix direction flag in setup_send_key and setup_report_key. This
gave some SCSI adapters problems.
-- Always return -EROFS for write opens
-- Convert to module_init/module_exit style init and remove some
of the #ifdef MODULE stuff
-- Fix several dvd errors - DVD_LU_SEND_ASF should pass agid,
DVD_HOST_SEND_RPC_STATE did not set buffer size in cdb, and
dvd_do_auth passed uninitialized data to drive because init_cdrom_command
did not clear a 0 sized buffer.
3.09 May 12, 2000 - Jens Axboe <axboe@suse.de>
-- Fix Video-CD on SCSI drives that don't support READ_CD command. In
that case switch block size and issue plain READ_10 again, then switch
back.
3.10 Jun 10, 2000 - Jens Axboe <axboe@suse.de>
-- Fix volume control on CD's - old SCSI-II drives now use their own
code, as doing MODE6 stuff in here is really not my intention.
-- Use READ_DISC_INFO for more reliable end-of-disc.
3.11 Jun 12, 2000 - Jens Axboe <axboe@suse.de>
-- Fix bug in getting rpc phase 2 region info.
-- Reinstate "correct" CDROMPLAYTRKIND
3.12 Oct 18, 2000 - Jens Axboe <axboe@suse.de>
-- Use quiet bit on packet commands not known to work
3.20 Dec 17, 2003 - Jens Axboe <axboe@suse.de>
-- Various fixes and lots of cleanups not listed :-)
-- Locking fixes
-- Mt Rainier support
-- DVD-RAM write open fixes
Nov 5 2001, Aug 8 2002. Modified by Andy Polyakov
<appro@fy.chalmers.se> to support MMC-3 compliant DVD+RW units.
Modified by Nigel Kukard <nkukard@lbsd.net> - support DVD+RW
2.4.x patch by Andy Polyakov <appro@fy.chalmers.se>
-------------------------------------------------------------------------*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#define REVISION "Revision: 3.20"
#define VERSION "Id: cdrom.c 3.20 2003/12/17"
/* I use an error-log mask to give fine grain control over the type of
messages dumped to the system logs. The available masks include: */
#define CD_NOTHING 0x0
#define CD_WARNING 0x1
#define CD_REG_UNREG 0x2
#define CD_DO_IOCTL 0x4
#define CD_OPEN 0x8
#define CD_CLOSE 0x10
#define CD_COUNT_TRACKS 0x20
#define CD_CHANGER 0x40
#define CD_DVD 0x80
/* Define this to remove _all_ the debugging messages */
/* #define ERRLOGMASK CD_NOTHING */
#define ERRLOGMASK CD_WARNING
/* #define ERRLOGMASK (CD_WARNING|CD_OPEN|CD_COUNT_TRACKS|CD_CLOSE) */
/* #define ERRLOGMASK (CD_WARNING|CD_REG_UNREG|CD_DO_IOCTL|CD_OPEN|CD_CLOSE|CD_COUNT_TRACKS) */
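/*
* For example, with the default mask a call such as
* cdinfo(CD_OPEN, "door locked.\n");
* is only printed when the debug module parameter is set, while
* cdinfo(CD_WARNING, "open failed.\n");
* is always printed, since CD_WARNING is in ERRLOGMASK.
*/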
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/major.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/cdrom.h>
#include <linux/sysctl.h>
#include <linux/proc_fs.h>
#include <linux/blkpg.h>
#include <linux/init.h>
#include <linux/fcntl.h>
#include <linux/blkdev.h>
#include <linux/times.h>
#include <asm/uaccess.h>
/* used to tell the module to turn on full debugging messages */
static bool debug;
/* default compatibility mode */
static bool autoclose=1;
static bool autoeject;
static bool lockdoor = 1;
/* will we ever get to use this... sigh. */
static bool check_media_type;
/* automatically restart mrw format */
static bool mrw_format_restart = 1;
module_param(debug, bool, 0);
module_param(autoclose, bool, 0);
module_param(autoeject, bool, 0);
module_param(lockdoor, bool, 0);
module_param(check_media_type, bool, 0);
module_param(mrw_format_restart, bool, 0);
static DEFINE_MUTEX(cdrom_mutex);
static const char *mrw_format_status[] = {
"not mrw",
"bgformat inactive",
"bgformat active",
"mrw complete",
};
static const char *mrw_address_space[] = { "DMA", "GAA" };
#if (ERRLOGMASK!=CD_NOTHING)
#define cdinfo(type, fmt, args...) \
do { \
if ((ERRLOGMASK & type) || debug == 1) \
pr_info(fmt, ##args); \
} while (0)
#else
#define cdinfo(type, fmt, args...) \
do { \
if (0 && (ERRLOGMASK & type) || debug == 1) \
pr_info(fmt, ##args); \
} while (0)
#endif
/* These are used to simplify getting data in from and back to user land */
#define IOCTL_IN(arg, type, in) \
if (copy_from_user(&(in), (type __user *) (arg), sizeof (in))) \
return -EFAULT;
#define IOCTL_OUT(arg, type, out) \
if (copy_to_user((type __user *) (arg), &(out), sizeof (out))) \
return -EFAULT;
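/* e.g. inside an ioctl handler:
* struct cdrom_volctrl volctrl;
* IOCTL_IN(arg, struct cdrom_volctrl, volctrl);
* ... adjust volctrl ...
* IOCTL_OUT(arg, struct cdrom_volctrl, volctrl);
*/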
/* The (cdo->capability & ~cdi->mask & CDC_XXX) construct was used in
a lot of places. This macro makes the code more clear. */
#define CDROM_CAN(type) (cdi->ops->capability & ~cdi->mask & (type))
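/* e.g. CDROM_CAN(CDC_CLOSE_TRAY) is non-zero only if the low-level
* driver advertises CDC_CLOSE_TRAY and it has not been masked off in
* cdi->mask. */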
/* used in the audio ioctls */
#define CHECKAUDIO if ((ret=check_for_audio_disc(cdi, cdo))) return ret
/*
* Another popular OS uses 7 seconds as the hard timeout for default
* commands, so it is a good choice for us as well.
*/
#define CDROM_DEF_TIMEOUT (7 * HZ)
/* Not-exported routines. */
static int open_for_data(struct cdrom_device_info * cdi);
static int check_for_audio_disc(struct cdrom_device_info * cdi,
struct cdrom_device_ops * cdo);
static void sanitize_format(union cdrom_addr *addr,
u_char * curr, u_char requested);
static int mmc_ioctl(struct cdrom_device_info *cdi, unsigned int cmd,
unsigned long arg);
int cdrom_get_last_written(struct cdrom_device_info *, long *);
static int cdrom_get_next_writable(struct cdrom_device_info *, long *);
static void cdrom_count_tracks(struct cdrom_device_info *, tracktype*);
static int cdrom_mrw_exit(struct cdrom_device_info *cdi);
static int cdrom_get_disc_info(struct cdrom_device_info *cdi, disc_information *di);
static void cdrom_sysctl_register(void);
static LIST_HEAD(cdrom_list);
static int cdrom_dummy_generic_packet(struct cdrom_device_info *cdi,
struct packet_command *cgc)
{
if (cgc->sense) {
cgc->sense->sense_key = 0x05;
cgc->sense->asc = 0x20;
cgc->sense->ascq = 0x00;
}
cgc->stat = -EIO;
return -EIO;
}
/* This macro makes sure we don't have to check on cdrom_device_ops
* existence in the run-time routines below. Change_capability is a
* hack to have the capability flags defined const, while we can still
* change it here without gcc complaining at every line.
*/
#define ENSURE(call, bits) if (cdo->call == NULL) *change_capability &= ~(bits)
int register_cdrom(struct cdrom_device_info *cdi)
{
static char banner_printed;
struct cdrom_device_ops *cdo = cdi->ops;
int *change_capability = (int *)&cdo->capability; /* hack */
cdinfo(CD_OPEN, "entering register_cdrom\n");
if (cdo->open == NULL || cdo->release == NULL)
return -EINVAL;
if (!banner_printed) {
pr_info("Uniform CD-ROM driver " REVISION "\n");
banner_printed = 1;
cdrom_sysctl_register();
}
ENSURE(drive_status, CDC_DRIVE_STATUS );
if (cdo->check_events == NULL && cdo->media_changed == NULL)
*change_capability &= ~(CDC_MEDIA_CHANGED | CDC_SELECT_DISC);
ENSURE(tray_move, CDC_CLOSE_TRAY | CDC_OPEN_TRAY);
ENSURE(lock_door, CDC_LOCK);
ENSURE(select_speed, CDC_SELECT_SPEED);
ENSURE(get_last_session, CDC_MULTI_SESSION);
ENSURE(get_mcn, CDC_MCN);
ENSURE(reset, CDC_RESET);
ENSURE(generic_packet, CDC_GENERIC_PACKET);
cdi->mc_flags = 0;
cdo->n_minors = 0;
cdi->options = CDO_USE_FFLAGS;
if (autoclose==1 && CDROM_CAN(CDC_CLOSE_TRAY))
cdi->options |= (int) CDO_AUTO_CLOSE;
if (autoeject==1 && CDROM_CAN(CDC_OPEN_TRAY))
cdi->options |= (int) CDO_AUTO_EJECT;
if (lockdoor==1)
cdi->options |= (int) CDO_LOCK;
if (check_media_type==1)
cdi->options |= (int) CDO_CHECK_TYPE;
if (CDROM_CAN(CDC_MRW_W))
cdi->exit = cdrom_mrw_exit;
if (cdi->disk)
cdi->cdda_method = CDDA_BPC_FULL;
else
cdi->cdda_method = CDDA_OLD;
if (!cdo->generic_packet)
cdo->generic_packet = cdrom_dummy_generic_packet;
cdinfo(CD_REG_UNREG, "drive \"/dev/%s\" registered\n", cdi->name);
mutex_lock(&cdrom_mutex);
list_add(&cdi->list, &cdrom_list);
mutex_unlock(&cdrom_mutex);
return 0;
}
#undef ENSURE
void unregister_cdrom(struct cdrom_device_info *cdi)
{
cdinfo(CD_OPEN, "entering unregister_cdrom\n");
mutex_lock(&cdrom_mutex);
list_del(&cdi->list);
mutex_unlock(&cdrom_mutex);
if (cdi->exit)
cdi->exit(cdi);
cdi->ops->n_minors--;
cdinfo(CD_REG_UNREG, "drive \"/dev/%s\" unregistered\n", cdi->name);
}
int cdrom_get_media_event(struct cdrom_device_info *cdi,
struct media_event_desc *med)
{
struct packet_command cgc;
unsigned char buffer[8];
struct event_header *eh = (struct event_header *) buffer;
init_cdrom_command(&cgc, buffer, sizeof(buffer), CGC_DATA_READ);
cgc.cmd[0] = GPCMD_GET_EVENT_STATUS_NOTIFICATION;
cgc.cmd[1] = 1; /* IMMED */
cgc.cmd[4] = 1 << 4; /* media event */
cgc.cmd[8] = sizeof(buffer);
cgc.quiet = 1;
if (cdi->ops->generic_packet(cdi, &cgc))
return 1;
if (be16_to_cpu(eh->data_len) < sizeof(*med))
return 1;
if (eh->nea || eh->notification_class != 0x4)
return 1;
memcpy(med, &buffer[sizeof(*eh)], sizeof(*med));
return 0;
}
/*
* the first prototypes used 0x2c as the page code for the mrw mode page,
* subsequently this was changed to 0x03. probe the one used by this drive
*/
static int cdrom_mrw_probe_pc(struct cdrom_device_info *cdi)
{
struct packet_command cgc;
char buffer[16];
init_cdrom_command(&cgc, buffer, sizeof(buffer), CGC_DATA_READ);
cgc.timeout = HZ;
cgc.quiet = 1;
if (!cdrom_mode_sense(cdi, &cgc, MRW_MODE_PC, 0)) {
cdi->mrw_mode_page = MRW_MODE_PC;
return 0;
} else if (!cdrom_mode_sense(cdi, &cgc, MRW_MODE_PC_PRE1, 0)) {
cdi->mrw_mode_page = MRW_MODE_PC_PRE1;
return 0;
}
return 1;
}
static int cdrom_is_mrw(struct cdrom_device_info *cdi, int *write)
{
struct packet_command cgc;
struct mrw_feature_desc *mfd;
unsigned char buffer[16];
int ret;
*write = 0;
init_cdrom_command(&cgc, buffer, sizeof(buffer), CGC_DATA_READ);
cgc.cmd[0] = GPCMD_GET_CONFIGURATION;
cgc.cmd[3] = CDF_MRW;
cgc.cmd[8] = sizeof(buffer);
cgc.quiet = 1;
if ((ret = cdi->ops->generic_packet(cdi, &cgc)))
return ret;
mfd = (struct mrw_feature_desc *)&buffer[sizeof(struct feature_header)];
if (be16_to_cpu(mfd->feature_code) != CDF_MRW)
return 1;
*write = mfd->write;
if ((ret = cdrom_mrw_probe_pc(cdi))) {
*write = 0;
return ret;
}
return 0;
}
static int cdrom_mrw_bgformat(struct cdrom_device_info *cdi, int cont)
{
struct packet_command cgc;
unsigned char buffer[12];
int ret;
pr_info("%sstarting format\n", cont ? "Re" : "");
/*
* FmtData bit set (bit 4), format type is 1
*/
init_cdrom_command(&cgc, buffer, sizeof(buffer), CGC_DATA_WRITE);
cgc.cmd[0] = GPCMD_FORMAT_UNIT;
cgc.cmd[1] = (1 << 4) | 1;
cgc.timeout = 5 * 60 * HZ;
/*
* 4 byte format list header, 8 byte format list descriptor
*/
buffer[1] = 1 << 1;
buffer[3] = 8;
/*
* nr_blocks field
*/
buffer[4] = 0xff;
buffer[5] = 0xff;
buffer[6] = 0xff;
buffer[7] = 0xff;
buffer[8] = 0x24 << 2;
buffer[11] = cont;
ret = cdi->ops->generic_packet(cdi, &cgc);
if (ret)
pr_info("bgformat failed\n");
return ret;
}
static int cdrom_mrw_bgformat_susp(struct cdrom_device_info *cdi, int immed)
{
struct packet_command cgc;
init_cdrom_command(&cgc, NULL, 0, CGC_DATA_NONE);
cgc.cmd[0] = GPCMD_CLOSE_TRACK;
/*
* Session = 1, Track = 0
*/
cgc.cmd[1] = !!immed;
cgc.cmd[2] = 1 << 1;
cgc.timeout = 5 * 60 * HZ;
return cdi->ops->generic_packet(cdi, &cgc);
}
static int cdrom_flush_cache(struct cdrom_device_info *cdi)
{
struct packet_command cgc;
init_cdrom_command(&cgc, NULL, 0, CGC_DATA_NONE);
cgc.cmd[0] = GPCMD_FLUSH_CACHE;
cgc.timeout = 5 * 60 * HZ;
return cdi->ops->generic_packet(cdi, &cgc);
}
static int cdrom_mrw_exit(struct cdrom_device_info *cdi)
{
disc_information di;
int ret;
ret = cdrom_get_disc_info(cdi, &di);
if (ret < 0 || ret < (int)offsetof(typeof(di),disc_type))
return 1;
ret = 0;
if (di.mrw_status == CDM_MRW_BGFORMAT_ACTIVE) {
pr_info("issuing MRW background format suspend\n");
ret = cdrom_mrw_bgformat_susp(cdi, 0);
}
if (!ret && cdi->media_written)
ret = cdrom_flush_cache(cdi);
return ret;
}
static int cdrom_mrw_set_lba_space(struct cdrom_device_info *cdi, int space)
{
struct packet_command cgc;
struct mode_page_header *mph;
char buffer[16];
int ret, offset, size;
init_cdrom_command(&cgc, buffer, sizeof(buffer), CGC_DATA_READ);
cgc.buffer = buffer;
cgc.buflen = sizeof(buffer);
if ((ret = cdrom_mode_sense(cdi, &cgc, cdi->mrw_mode_page, 0)))
return ret;
mph = (struct mode_page_header *) buffer;
offset = be16_to_cpu(mph->desc_length);
size = be16_to_cpu(mph->mode_data_length) + 2;
buffer[offset + 3] = space;
cgc.buflen = size;
if ((ret = cdrom_mode_select(cdi, &cgc)))
return ret;
pr_info("%s: mrw address space %s selected\n",
cdi->name, mrw_address_space[space]);
return 0;
}
static int cdrom_get_random_writable(struct cdrom_device_info *cdi,
struct rwrt_feature_desc *rfd)
{
struct packet_command cgc;
char buffer[24];
int ret;
init_cdrom_command(&cgc, buffer, sizeof(buffer), CGC_DATA_READ);
cgc.cmd[0] = GPCMD_GET_CONFIGURATION; /* often 0x46 */
cgc.cmd[3] = CDF_RWRT; /* often 0x0020 */
cgc.cmd[8] = sizeof(buffer); /* often 0x18 */
cgc.quiet = 1;
if ((ret = cdi->ops->generic_packet(cdi, &cgc)))
return ret;
memcpy(rfd, &buffer[sizeof(struct feature_header)], sizeof (*rfd));
return 0;
}
static int cdrom_has_defect_mgt(struct cdrom_device_info *cdi)
{
struct packet_command cgc;
char buffer[16];
__be16 *feature_code;
int ret;
init_cdrom_command(&cgc, buffer, sizeof(buffer), CGC_DATA_READ);
cgc.cmd[0] = GPCMD_GET_CONFIGURATION;
cgc.cmd[3] = CDF_HWDM;
cgc.cmd[8] = sizeof(buffer);
cgc.quiet = 1;
if ((ret = cdi->ops->generic_packet(cdi, &cgc)))
return ret;
feature_code = (__be16 *) &buffer[sizeof(struct feature_header)];
if (be16_to_cpu(*feature_code) == CDF_HWDM)
return 0;
return 1;
}
static int cdrom_is_random_writable(struct cdrom_device_info *cdi, int *write)
{
struct rwrt_feature_desc rfd;
int ret;
*write = 0;
if ((ret = cdrom_get_random_writable(cdi, &rfd)))
return ret;
if (CDF_RWRT == be16_to_cpu(rfd.feature_code))
*write = 1;
return 0;
}
static int cdrom_media_erasable(struct cdrom_device_info *cdi)
{
disc_information di;
int ret;
ret = cdrom_get_disc_info(cdi, &di);
if (ret < 0 || ret < offsetof(typeof(di), n_first_track))
return -1;
return di.erasable;
}
/*
* FIXME: check RO bit
*/
static int cdrom_dvdram_open_write(struct cdrom_device_info *cdi)
{
int ret = cdrom_media_erasable(cdi);
/*
* allow writable open if media info read worked and media is
* erasable, _or_ if it fails since not all drives support it
*/
if (!ret)
return 1;
return 0;
}
static int cdrom_mrw_open_write(struct cdrom_device_info *cdi)
{
disc_information di;
int ret;
/*
* always reset to DMA lba space on open
*/
if (cdrom_mrw_set_lba_space(cdi, MRW_LBA_DMA)) {
pr_err("failed setting lba address space\n");
return 1;
}
ret = cdrom_get_disc_info(cdi, &di);
if (ret < 0 || ret < offsetof(typeof(di),disc_type))
return 1;
if (!di.erasable)
return 1;
/*
* mrw_status
* 0 - not MRW formatted
* 1 - MRW bgformat started, but not running or complete
* 2 - MRW bgformat in progress
* 3 - MRW formatting complete
*/
ret = 0;
pr_info("open: mrw_status '%s'\n", mrw_format_status[di.mrw_status]);
if (!di.mrw_status)
ret = 1;
else if (di.mrw_status == CDM_MRW_BGFORMAT_INACTIVE &&
mrw_format_restart)
ret = cdrom_mrw_bgformat(cdi, 1);
return ret;
}
static int mo_open_write(struct cdrom_device_info *cdi)
{
struct packet_command cgc;
char buffer[255];
int ret;
init_cdrom_command(&cgc, &buffer, 4, CGC_DATA_READ);
cgc.quiet = 1;
/*
* obtain write protect information as per
* drivers/scsi/sd.c:sd_read_write_protect_flag
*/
ret = cdrom_mode_sense(cdi, &cgc, GPMODE_ALL_PAGES, 0);
if (ret)
ret = cdrom_mode_sense(cdi, &cgc, GPMODE_VENDOR_PAGE, 0);
if (ret) {
cgc.buflen = 255;
ret = cdrom_mode_sense(cdi, &cgc, GPMODE_ALL_PAGES, 0);
}
/* drive gave us no info, let the user go ahead */
if (ret)
return 0;
return buffer[3] & 0x80;
}
static int cdrom_ram_open_write(struct cdrom_device_info *cdi)
{
struct rwrt_feature_desc rfd;
int ret;
if ((ret = cdrom_has_defect_mgt(cdi)))
return ret;
if ((ret = cdrom_get_random_writable(cdi, &rfd)))
return ret;
else if (CDF_RWRT == be16_to_cpu(rfd.feature_code))
ret = !rfd.curr;
cdinfo(CD_OPEN, "can open for random write\n");
return ret;
}
static void cdrom_mmc3_profile(struct cdrom_device_info *cdi)
{
struct packet_command cgc;
char buffer[32];
int ret, mmc3_profile;
init_cdrom_command(&cgc, buffer, sizeof(buffer), CGC_DATA_READ);
cgc.cmd[0] = GPCMD_GET_CONFIGURATION;
cgc.cmd[1] = 0;
cgc.cmd[2] = cgc.cmd[3] = 0; /* Starting Feature Number */
cgc.cmd[8] = sizeof(buffer); /* Allocation Length */
cgc.quiet = 1;
if ((ret = cdi->ops->generic_packet(cdi, &cgc)))
mmc3_profile = 0xffff;
else
mmc3_profile = (buffer[6] << 8) | buffer[7];
cdi->mmc3_profile = mmc3_profile;
}
static int cdrom_is_dvd_rw(struct cdrom_device_info *cdi)
{
switch (cdi->mmc3_profile) {
case 0x12: /* DVD-RAM */
case 0x1A: /* DVD+RW */
return 0;
default:
return 1;
}
}
/*
* returns 0 for ok to open write, non-0 to disallow
*/
static int cdrom_open_write(struct cdrom_device_info *cdi)
{
int mrw, mrw_write, ram_write;
int ret = 1;
mrw = 0;
if (!cdrom_is_mrw(cdi, &mrw_write))
mrw = 1;
if (CDROM_CAN(CDC_MO_DRIVE))
ram_write = 1;
else
(void) cdrom_is_random_writable(cdi, &ram_write);
if (mrw)
cdi->mask &= ~CDC_MRW;
else
cdi->mask |= CDC_MRW;
if (mrw_write)
cdi->mask &= ~CDC_MRW_W;
else
cdi->mask |= CDC_MRW_W;
if (ram_write)
cdi->mask &= ~CDC_RAM;
else
cdi->mask |= CDC_RAM;
if (CDROM_CAN(CDC_MRW_W))
ret = cdrom_mrw_open_write(cdi);
else if (CDROM_CAN(CDC_DVD_RAM))
ret = cdrom_dvdram_open_write(cdi);
else if (CDROM_CAN(CDC_RAM) &&
!CDROM_CAN(CDC_CD_R|CDC_CD_RW|CDC_DVD|CDC_DVD_R|CDC_MRW|CDC_MO_DRIVE))
ret = cdrom_ram_open_write(cdi);
else if (CDROM_CAN(CDC_MO_DRIVE))
ret = mo_open_write(cdi);
else if (!cdrom_is_dvd_rw(cdi))
ret = 0;
return ret;
}
static void cdrom_dvd_rw_close_write(struct cdrom_device_info *cdi)
{
struct packet_command cgc;
if (cdi->mmc3_profile != 0x1a) {
cdinfo(CD_CLOSE, "%s: No DVD+RW\n", cdi->name);
return;
}
if (!cdi->media_written) {
cdinfo(CD_CLOSE, "%s: DVD+RW media clean\n", cdi->name);
return;
}
pr_info("%s: dirty DVD+RW media, \"finalizing\"\n", cdi->name);
init_cdrom_command(&cgc, NULL, 0, CGC_DATA_NONE);
cgc.cmd[0] = GPCMD_FLUSH_CACHE;
cgc.timeout = 30*HZ;
cdi->ops->generic_packet(cdi, &cgc);
init_cdrom_command(&cgc, NULL, 0, CGC_DATA_NONE);
cgc.cmd[0] = GPCMD_CLOSE_TRACK;
cgc.timeout = 3000*HZ;
cgc.quiet = 1;
cdi->ops->generic_packet(cdi, &cgc);
init_cdrom_command(&cgc, NULL, 0, CGC_DATA_NONE);
cgc.cmd[0] = GPCMD_CLOSE_TRACK;
cgc.cmd[2] = 2; /* Close session */
cgc.quiet = 1;
cgc.timeout = 3000*HZ;
cdi->ops->generic_packet(cdi, &cgc);
cdi->media_written = 0;
}
static int cdrom_close_write(struct cdrom_device_info *cdi)
{
#if 0
return cdrom_flush_cache(cdi);
#else
return 0;
#endif
}
/* We use the open-option O_NONBLOCK to indicate that the
* purpose of opening is only for subsequent ioctl() calls; no device
* integrity checks are performed.
*
* We hope that all cd-player programs will adopt this convention. It
* is in their own interest: device control becomes a lot easier
* this way.
*/
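/*
* A user-space sketch of that convention (illustrative, not part of
* this file):
*
* int fd = open("/dev/cdrom", O_RDONLY | O_NONBLOCK);
* ioctl(fd, CDROMEJECT, 0);
*
* The O_NONBLOCK open skips the disc checks in open_for_data(), so
* such ioctls work even with an empty or open tray.
*/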
int cdrom_open(struct cdrom_device_info *cdi, struct block_device *bdev, fmode_t mode)
{
int ret;
cdinfo(CD_OPEN, "entering cdrom_open\n");
/* open is event synchronization point, check events first */
check_disk_change(bdev);
/* if this was a O_NONBLOCK open and we should honor the flags,
* do a quick open without drive/disc integrity checks. */
cdi->use_count++;
if ((mode & FMODE_NDELAY) && (cdi->options & CDO_USE_FFLAGS)) {
ret = cdi->ops->open(cdi, 1);
} else {
ret = open_for_data(cdi);
if (ret)
goto err;
cdrom_mmc3_profile(cdi);
if (mode & FMODE_WRITE) {
ret = -EROFS;
if (cdrom_open_write(cdi))
goto err_release;
if (!CDROM_CAN(CDC_RAM))
goto err_release;
ret = 0;
cdi->media_written = 0;
}
}
if (ret)
goto err;
cdinfo(CD_OPEN, "Use count for \"/dev/%s\" now %d\n",
cdi->name, cdi->use_count);
return 0;
err_release:
if (CDROM_CAN(CDC_LOCK) && cdi->options & CDO_LOCK) {
cdi->ops->lock_door(cdi, 0);
cdinfo(CD_OPEN, "door unlocked.\n");
}
cdi->ops->release(cdi);
err:
cdi->use_count--;
return ret;
}
static
int open_for_data(struct cdrom_device_info * cdi)
{
int ret;
struct cdrom_device_ops *cdo = cdi->ops;
tracktype tracks;
cdinfo(CD_OPEN, "entering open_for_data\n");
/* Check if the driver can report drive status. If it can, we
can do clever things. If it can't, well, we at least tried! */
if (cdo->drive_status != NULL) {
ret = cdo->drive_status(cdi, CDSL_CURRENT);
cdinfo(CD_OPEN, "drive_status=%d\n", ret);
if (ret == CDS_TRAY_OPEN) {
cdinfo(CD_OPEN, "the tray is open...\n");
/* can/may i close it? */
if (CDROM_CAN(CDC_CLOSE_TRAY) &&
cdi->options & CDO_AUTO_CLOSE) {
cdinfo(CD_OPEN, "trying to close the tray.\n");
ret=cdo->tray_move(cdi,0);
if (ret) {
cdinfo(CD_OPEN, "bummer. tried to close the tray but failed.\n");
/* Ignore the error from the low
level driver. We don't care why it
couldn't close the tray. We only care
that there is no disc in the drive,
since that is the _REAL_ problem here.*/
ret=-ENOMEDIUM;
goto clean_up_and_return;
}
} else {
cdinfo(CD_OPEN, "bummer. this drive can't close the tray.\n");
ret=-ENOMEDIUM;
goto clean_up_and_return;
}
/* Ok, the door should be closed now. Check again */
ret = cdo->drive_status(cdi, CDSL_CURRENT);
if ((ret == CDS_NO_DISC) || (ret==CDS_TRAY_OPEN)) {
cdinfo(CD_OPEN, "bummer. the tray is still not closed.\n");
cdinfo(CD_OPEN, "tray might not contain a medium.\n");
ret=-ENOMEDIUM;
goto clean_up_and_return;
}
cdinfo(CD_OPEN, "the tray is now closed.\n");
}
/* the door should be closed now, check for the disc */
ret = cdo->drive_status(cdi, CDSL_CURRENT);
if (ret!=CDS_DISC_OK) {
ret = -ENOMEDIUM;
goto clean_up_and_return;
}
}
cdrom_count_tracks(cdi, &tracks);
if (tracks.error == CDS_NO_DISC) {
cdinfo(CD_OPEN, "bummer. no disc.\n");
ret=-ENOMEDIUM;
goto clean_up_and_return;
}
/* CD-Players which don't use O_NONBLOCK, workman
* for example, need bit CDO_CHECK_TYPE cleared! */
if (tracks.data==0) {
if (cdi->options & CDO_CHECK_TYPE) {
/* give people a warning shot, now that CDO_CHECK_TYPE
is the default case! */
cdinfo(CD_OPEN, "bummer. wrong media type.\n");
cdinfo(CD_WARNING, "pid %d must open device O_NONBLOCK!\n",
(unsigned int)task_pid_nr(current));
ret=-EMEDIUMTYPE;
goto clean_up_and_return;
}
else {
cdinfo(CD_OPEN, "wrong media type, but CDO_CHECK_TYPE not set.\n");
}
}
cdinfo(CD_OPEN, "all seems well, opening the device.\n");
/* all seems well, we can open the device */
ret = cdo->open(cdi, 0); /* open for data */
cdinfo(CD_OPEN, "opening the device gave me %d.\n", ret);
/* After all this careful checking, we shouldn't have problems
opening the device, but we don't want the device locked if
this somehow fails... */
if (ret) {
cdinfo(CD_OPEN, "open device failed.\n");
goto clean_up_and_return;
}
if (CDROM_CAN(CDC_LOCK) && (cdi->options & CDO_LOCK)) {
cdo->lock_door(cdi, 1);
cdinfo(CD_OPEN, "door locked.\n");
}
cdinfo(CD_OPEN, "device opened successfully.\n");
return ret;
/* Something failed. Try to unlock the drive, because some drivers
(notably ide-cd) lock the drive after every command. This produced
a nasty bug where after mount failed, the drive would remain locked!
This ensures that the drive gets unlocked after a mount fails. This
is a goto to avoid bloating the driver with redundant code. */
clean_up_and_return:
cdinfo(CD_OPEN, "open failed.\n");
if (CDROM_CAN(CDC_LOCK) && cdi->options & CDO_LOCK) {
cdo->lock_door(cdi, 0);
cdinfo(CD_OPEN, "door unlocked.\n");
}
return ret;
}
/* This code is similar to that in open_for_data. The routine is called
whenever an audio play operation is requested.
*/
static int check_for_audio_disc(struct cdrom_device_info * cdi,
struct cdrom_device_ops * cdo)
{
int ret;
tracktype tracks;
cdinfo(CD_OPEN, "entering check_for_audio_disc\n");
if (!(cdi->options & CDO_CHECK_TYPE))
return 0;
if (cdo->drive_status != NULL) {
ret = cdo->drive_status(cdi, CDSL_CURRENT);
cdinfo(CD_OPEN, "drive_status=%d\n", ret);
if (ret == CDS_TRAY_OPEN) {
cdinfo(CD_OPEN, "the tray is open...\n");
/* can/may i close it? */
if (CDROM_CAN(CDC_CLOSE_TRAY) &&
cdi->options & CDO_AUTO_CLOSE) {
cdinfo(CD_OPEN, "trying to close the tray.\n");
ret=cdo->tray_move(cdi,0);
if (ret) {
cdinfo(CD_OPEN, "bummer. tried to close tray but failed.\n");
/* Ignore the error from the low
level driver. We don't care why it
couldn't close the tray. We only care
that there is no disc in the drive,
since that is the _REAL_ problem here.*/
return -ENOMEDIUM;
}
} else {
cdinfo(CD_OPEN, "bummer. this driver can't close the tray.\n");
return -ENOMEDIUM;
}
/* Ok, the door should be closed now. Check again */
ret = cdo->drive_status(cdi, CDSL_CURRENT);
if ((ret == CDS_NO_DISC) || (ret==CDS_TRAY_OPEN)) {
cdinfo(CD_OPEN, "bummer. the tray is still not closed.\n");
return -ENOMEDIUM;
}
if (ret!=CDS_DISC_OK) {
cdinfo(CD_OPEN, "bummer. disc isn't ready.\n");
return -EIO;
}
cdinfo(CD_OPEN, "the tray is now closed.\n");
}
}
cdrom_count_tracks(cdi, &tracks);
if (tracks.error)
return(tracks.error);
if (tracks.audio==0)
return -EMEDIUMTYPE;
return 0;
}
void cdrom_release(struct cdrom_device_info *cdi, fmode_t mode)
{
struct cdrom_device_ops *cdo = cdi->ops;
int opened_for_data;
cdinfo(CD_CLOSE, "entering cdrom_release\n");
if (cdi->use_count > 0)
cdi->use_count--;
if (cdi->use_count == 0) {
cdinfo(CD_CLOSE, "Use count for \"/dev/%s\" now zero\n", cdi->name);
cdrom_dvd_rw_close_write(cdi);
if ((cdo->capability & CDC_LOCK) && !cdi->keeplocked) {
cdinfo(CD_CLOSE, "Unlocking door!\n");
cdo->lock_door(cdi, 0);
}
}
opened_for_data = !(cdi->options & CDO_USE_FFLAGS) ||
!(mode & FMODE_NDELAY);
/*
* flush cache on last write release
*/
if (CDROM_CAN(CDC_RAM) && !cdi->use_count && cdi->for_data)
cdrom_close_write(cdi);
cdo->release(cdi);
if (cdi->use_count == 0) { /* last process that closes dev*/
if (opened_for_data &&
cdi->options & CDO_AUTO_EJECT && CDROM_CAN(CDC_OPEN_TRAY))
cdo->tray_move(cdi, 1);
}
}
static int cdrom_read_mech_status(struct cdrom_device_info *cdi,
struct cdrom_changer_info *buf)
{
struct packet_command cgc;
struct cdrom_device_ops *cdo = cdi->ops;
int length;
/*
* Sanyo changer isn't spec compliant (doesn't use the regular
* LOAD_UNLOAD command, and it doesn't implement the mech status
* command below)
*/
if (cdi->sanyo_slot) {
buf->hdr.nslots = 3;
buf->hdr.curslot = cdi->sanyo_slot == 3 ? 0 : cdi->sanyo_slot;
for (length = 0; length < 3; length++) {
buf->slots[length].disc_present = 1;
buf->slots[length].change = 0;
}
return 0;
}
length = sizeof(struct cdrom_mechstat_header) +
cdi->capacity * sizeof(struct cdrom_slot);
init_cdrom_command(&cgc, buf, length, CGC_DATA_READ);
cgc.cmd[0] = GPCMD_MECHANISM_STATUS;
cgc.cmd[8] = (length >> 8) & 0xff;
cgc.cmd[9] = length & 0xff;
return cdo->generic_packet(cdi, &cgc);
}
static int cdrom_slot_status(struct cdrom_device_info *cdi, int slot)
{
struct cdrom_changer_info *info;
int ret;
cdinfo(CD_CHANGER, "entering cdrom_slot_status()\n");
if (cdi->sanyo_slot)
return CDS_NO_INFO;
info = kmalloc(sizeof(*info), GFP_KERNEL);
if (!info)
return -ENOMEM;
if ((ret = cdrom_read_mech_status(cdi, info)))
goto out_free;
if (info->slots[slot].disc_present)
ret = CDS_DISC_OK;
else
ret = CDS_NO_DISC;
out_free:
kfree(info);
return ret;
}
/* Return the number of slots for an ATAPI/SCSI cdrom,
* return 1 if not a changer.
*/
int cdrom_number_of_slots(struct cdrom_device_info *cdi)
{
int status;
int nslots = 1;
struct cdrom_changer_info *info;
cdinfo(CD_CHANGER, "entering cdrom_number_of_slots()\n");
/* cdrom_read_mech_status requires a valid value for capacity: */
cdi->capacity = 0;
info = kmalloc(sizeof(*info), GFP_KERNEL);
if (!info)
return -ENOMEM;
if ((status = cdrom_read_mech_status(cdi, info)) == 0)
nslots = info->hdr.nslots;
kfree(info);
return nslots;
}
/* If SLOT < 0, unload the current slot. Otherwise, try to load SLOT. */
static int cdrom_load_unload(struct cdrom_device_info *cdi, int slot)
{
struct packet_command cgc;
cdinfo(CD_CHANGER, "entering cdrom_load_unload()\n");
if (cdi->sanyo_slot && slot < 0)
return 0;
init_cdrom_command(&cgc, NULL, 0, CGC_DATA_NONE);
cgc.cmd[0] = GPCMD_LOAD_UNLOAD;
cgc.cmd[4] = 2 + (slot >= 0);
cgc.cmd[8] = slot;
cgc.timeout = 60 * HZ;
/* The Sanyo 3 CD changer uses byte 7 of the
GPCMD_TEST_UNIT_READY command to switch CDs instead of
using the GPCMD_LOAD_UNLOAD opcode. */
if (cdi->sanyo_slot && -1 < slot) {
cgc.cmd[0] = GPCMD_TEST_UNIT_READY;
cgc.cmd[7] = slot;
cgc.cmd[4] = cgc.cmd[8] = 0;
cdi->sanyo_slot = slot ? slot : 3;
}
return cdi->ops->generic_packet(cdi, &cgc);
}
static int cdrom_select_disc(struct cdrom_device_info *cdi, int slot)
{
struct cdrom_changer_info *info;
int curslot;
int ret;
cdinfo(CD_CHANGER, "entering cdrom_select_disc()\n");
if (!CDROM_CAN(CDC_SELECT_DISC))
return -EDRIVE_CANT_DO_THIS;
if (cdi->ops->check_events)
cdi->ops->check_events(cdi, 0, slot);
else
cdi->ops->media_changed(cdi, slot);
if (slot == CDSL_NONE) {
/* set media changed bits, on both queues */
cdi->mc_flags = 0x3;
return cdrom_load_unload(cdi, -1);
}
info = kmalloc(sizeof(*info), GFP_KERNEL);
if (!info)
return -ENOMEM;
if ((ret = cdrom_read_mech_status(cdi, info))) {
kfree(info);
return ret;
}
curslot = info->hdr.curslot;
kfree(info);
if (cdi->use_count > 1 || cdi->keeplocked) {
if (slot == CDSL_CURRENT) {
return curslot;
} else {
return -EBUSY;
}
}
/* Specifying CDSL_CURRENT will attempt to load the current slot,
which is useful if it had been previously unloaded.
Whether it can or not, it returns the current slot.
Similarly, if slot happens to be the current one, we still
try and load it. */
if (slot == CDSL_CURRENT)
slot = curslot;
/* set media changed bits on both queues */
cdi->mc_flags = 0x3;
if ((ret = cdrom_load_unload(cdi, slot)))
return ret;
return slot;
}
/*
* As cdrom implements an extra ioctl consumer for media changed
* event, it needs to buffer ->check_events() output, such that event
* is not lost for both the usual VFS and ioctl paths.
* cdi->{vfs|ioctl}_events are used to buffer pending events for each
* path.
*
* XXX: Locking is non-existent. cdi->ops->check_events() can be
* called in parallel and buffering fields are accessed without any
* exclusion. The original media_changed code had the same problem.
* It might be better to simply deprecate CDROM_MEDIA_CHANGED ioctl
* and remove this cruft altogether. It doesn't have much usefulness
* at this point.
*/
static void cdrom_update_events(struct cdrom_device_info *cdi,
unsigned int clearing)
{
unsigned int events;
events = cdi->ops->check_events(cdi, clearing, CDSL_CURRENT);
cdi->vfs_events |= events;
cdi->ioctl_events |= events;
}
unsigned int cdrom_check_events(struct cdrom_device_info *cdi,
unsigned int clearing)
{
unsigned int events;
cdrom_update_events(cdi, clearing);
events = cdi->vfs_events;
cdi->vfs_events = 0;
return events;
}
EXPORT_SYMBOL(cdrom_check_events);
/* We want to make media_changed accessible to the user through an
* ioctl. The main problem now is that we must double-buffer the
* low-level implementation, to assure that the VFS and the user both
* see a medium change once.
*/
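/*
* Concretely: queue 0 is the VFS path (cdrom_media_changed()) and
* queue 1 the ioctl path. A change sets both bits in cdi->mc_flags
* (0x3) and each path clears only its own bit, so each consumer sees
* the event exactly once.
*/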
static
int media_changed(struct cdrom_device_info *cdi, int queue)
{
unsigned int mask = (1 << (queue & 1));
int ret = !!(cdi->mc_flags & mask);
bool changed;
if (!CDROM_CAN(CDC_MEDIA_CHANGED))
return ret;
/* changed since last call? */
if (cdi->ops->check_events) {
BUG_ON(!queue); /* shouldn't be called from VFS path */
cdrom_update_events(cdi, DISK_EVENT_MEDIA_CHANGE);
changed = cdi->ioctl_events & DISK_EVENT_MEDIA_CHANGE;
cdi->ioctl_events = 0;
} else
changed = cdi->ops->media_changed(cdi, CDSL_CURRENT);
if (changed) {
cdi->mc_flags = 0x3; /* set bit on both queues */
ret |= 1;
cdi->media_written = 0;
}
cdi->mc_flags &= ~mask; /* clear bit */
return ret;
}
int cdrom_media_changed(struct cdrom_device_info *cdi)
{
/* This talks to the VFS, which doesn't like errors - just 1 or 0.
* Returning "0" is always safe (media hasn't been changed). Do that
* if the low-level cdrom driver doesn't support media changed. */
if (cdi == NULL || cdi->ops->media_changed == NULL)
return 0;
if (!CDROM_CAN(CDC_MEDIA_CHANGED))
return 0;
return media_changed(cdi, 0);
}
/* badly broken, I know. Is due for a fixup anytime. */
static void cdrom_count_tracks(struct cdrom_device_info *cdi, tracktype* tracks)
{
struct cdrom_tochdr header;
struct cdrom_tocentry entry;
int ret, i;
tracks->data=0;
tracks->audio=0;
tracks->cdi=0;
tracks->xa=0;
tracks->error=0;
cdinfo(CD_COUNT_TRACKS, "entering cdrom_count_tracks\n");
/* Grab the TOC header so we can see how many tracks there are */
if ((ret = cdi->ops->audio_ioctl(cdi, CDROMREADTOCHDR, &header))) {
if (ret == -ENOMEDIUM)
tracks->error = CDS_NO_DISC;
else
tracks->error = CDS_NO_INFO;
return;
}
/* check what type of tracks are on this disc */
entry.cdte_format = CDROM_MSF;
for (i = header.cdth_trk0; i <= header.cdth_trk1; i++) {
entry.cdte_track = i;
if (cdi->ops->audio_ioctl(cdi, CDROMREADTOCENTRY, &entry)) {
tracks->error=CDS_NO_INFO;
return;
}
if (entry.cdte_ctrl & CDROM_DATA_TRACK) {
if (entry.cdte_format == 0x10)
tracks->cdi++;
else if (entry.cdte_format == 0x20)
tracks->xa++;
else
tracks->data++;
} else
tracks->audio++;
cdinfo(CD_COUNT_TRACKS, "track %d: format=%d, ctrl=%d\n",
i, entry.cdte_format, entry.cdte_ctrl);
}
cdinfo(CD_COUNT_TRACKS, "disc has %d tracks: %d=audio %d=data %d=Cd-I %d=XA\n",
header.cdth_trk1, tracks->audio, tracks->data,
tracks->cdi, tracks->xa);
}
/* Requests to the low-level drivers will /always/ be done in the
following format convention:
CDROM_LBA: all data-related requests.
CDROM_MSF: all audio-related requests.
However, a low-level implementation is allowed to refuse this
request, and return information in its own favorite format.
It doesn't make sense /at all/ to ask for a play_audio in LBA
format, or ask for multi-session info in MSF format. However, for
backward compatibility these format requests will be satisfied, but
the requests to the low-level drivers will be sanitized in the more
meaningful format indicated above.
*/
static
void sanitize_format(union cdrom_addr *addr,
u_char * curr, u_char requested)
{
if (*curr == requested)
return; /* nothing to be done! */
if (requested == CDROM_LBA) {
addr->lba = (int) addr->msf.frame +
75 * (addr->msf.second - 2 + 60 * addr->msf.minute);
} else { /* CDROM_MSF */
int lba = addr->lba;
addr->msf.frame = lba % 75;
lba /= 75;
lba += 2;
addr->msf.second = lba % 60;
addr->msf.minute = lba / 60;
}
*curr = requested;
}
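/*
* Worked example of the conversion above: MSF 0:02:00 maps to LBA 0
* (0 + 75 * (2 - 2 + 60 * 0)) and MSF 0:03:10 maps to LBA 85
* (10 + 75 * (3 - 2)); the two-second offset is the standard lead-in
* gap at the start of the disc.
*/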
void init_cdrom_command(struct packet_command *cgc, void *buf, int len,
int type)
{
memset(cgc, 0, sizeof(struct packet_command));
if (buf)
memset(buf, 0, len);
cgc->buffer = (char *) buf;
cgc->buflen = len;
cgc->data_direction = type;
cgc->timeout = CDROM_DEF_TIMEOUT;
}
/* DVD handling */
#define copy_key(dest,src) memcpy((dest), (src), sizeof(dvd_key))
#define copy_chal(dest,src) memcpy((dest), (src), sizeof(dvd_challenge))
static void setup_report_key(struct packet_command *cgc, unsigned agid, unsigned type)
{
cgc->cmd[0] = GPCMD_REPORT_KEY;
cgc->cmd[10] = type | (agid << 6);
switch (type) {
case 0: case 8: case 5: {
cgc->buflen = 8;
break;
}
case 1: {
cgc->buflen = 16;
break;
}
case 2: case 4: {
cgc->buflen = 12;
break;
}
}
cgc->cmd[9] = cgc->buflen;
cgc->data_direction = CGC_DATA_READ;
}
static void setup_send_key(struct packet_command *cgc, unsigned agid, unsigned type)
{
cgc->cmd[0] = GPCMD_SEND_KEY;
cgc->cmd[10] = type | (agid << 6);
switch (type) {
case 1: {
cgc->buflen = 16;
break;
}
case 3: {
cgc->buflen = 12;
break;
}
case 6: {
cgc->buflen = 8;
break;
}
}
cgc->cmd[9] = cgc->buflen;
cgc->data_direction = CGC_DATA_WRITE;
}
static int dvd_do_auth(struct cdrom_device_info *cdi, dvd_authinfo *ai)
{
int ret;
u_char buf[20];
struct packet_command cgc;
struct cdrom_device_ops *cdo = cdi->ops;
rpc_state_t rpc_state;
memset(buf, 0, sizeof(buf));
init_cdrom_command(&cgc, buf, 0, CGC_DATA_READ);
switch (ai->type) {
/* LU data send */
case DVD_LU_SEND_AGID:
cdinfo(CD_DVD, "entering DVD_LU_SEND_AGID\n");
cgc.quiet = 1;
setup_report_key(&cgc, ai->lsa.agid, 0);
if ((ret = cdo->generic_packet(cdi, &cgc)))
return ret;
ai->lsa.agid = buf[7] >> 6;
/* Returning data, let host change state */
break;
case DVD_LU_SEND_KEY1:
cdinfo(CD_DVD, "entering DVD_LU_SEND_KEY1\n");
setup_report_key(&cgc, ai->lsk.agid, 2);
if ((ret = cdo->generic_packet(cdi, &cgc)))
return ret;
copy_key(ai->lsk.key, &buf[4]);
/* Returning data, let host change state */
break;
case DVD_LU_SEND_CHALLENGE:
cdinfo(CD_DVD, "entering DVD_LU_SEND_CHALLENGE\n");
setup_report_key(&cgc, ai->lsc.agid, 1);
if ((ret = cdo->generic_packet(cdi, &cgc)))
return ret;
copy_chal(ai->lsc.chal, &buf[4]);
/* Returning data, let host change state */
break;
/* Post-auth key */
case DVD_LU_SEND_TITLE_KEY:
cdinfo(CD_DVD, "entering DVD_LU_SEND_TITLE_KEY\n");
cgc.quiet = 1;
setup_report_key(&cgc, ai->lstk.agid, 4);
cgc.cmd[5] = ai->lstk.lba;
cgc.cmd[4] = ai->lstk.lba >> 8;
cgc.cmd[3] = ai->lstk.lba >> 16;
cgc.cmd[2] = ai->lstk.lba >> 24;
if ((ret = cdo->generic_packet(cdi, &cgc)))
return ret;
ai->lstk.cpm = (buf[4] >> 7) & 1;
ai->lstk.cp_sec = (buf[4] >> 6) & 1;
ai->lstk.cgms = (buf[4] >> 4) & 3;
copy_key(ai->lstk.title_key, &buf[5]);
/* Returning data, let host change state */
break;
case DVD_LU_SEND_ASF:
cdinfo(CD_DVD, "entering DVD_LU_SEND_ASF\n");
setup_report_key(&cgc, ai->lsasf.agid, 5);
if ((ret = cdo->generic_packet(cdi, &cgc)))
return ret;
ai->lsasf.asf = buf[7] & 1;
break;
/* LU data receive (LU changes state) */
case DVD_HOST_SEND_CHALLENGE:
cdinfo(CD_DVD, "entering DVD_HOST_SEND_CHALLENGE\n");
setup_send_key(&cgc, ai->hsc.agid, 1);
buf[1] = 0xe;
copy_chal(&buf[4], ai->hsc.chal);
if ((ret = cdo->generic_packet(cdi, &cgc)))
return ret;
ai->type = DVD_LU_SEND_KEY1;
break;
case DVD_HOST_SEND_KEY2:
cdinfo(CD_DVD, "entering DVD_HOST_SEND_KEY2\n");
setup_send_key(&cgc, ai->hsk.agid, 3);
buf[1] = 0xa;
copy_key(&buf[4], ai->hsk.key);
if ((ret = cdo->generic_packet(cdi, &cgc))) {
ai->type = DVD_AUTH_FAILURE;
return ret;
}
ai->type = DVD_AUTH_ESTABLISHED;
break;
/* Misc */
case DVD_INVALIDATE_AGID:
cgc.quiet = 1;
cdinfo(CD_DVD, "entering DVD_INVALIDATE_AGID\n");
setup_report_key(&cgc, ai->lsa.agid, 0x3f);
if ((ret = cdo->generic_packet(cdi, &cgc)))
return ret;
break;
/* Get region settings */
case DVD_LU_SEND_RPC_STATE:
cdinfo(CD_DVD, "entering DVD_LU_SEND_RPC_STATE\n");
setup_report_key(&cgc, 0, 8);
memset(&rpc_state, 0, sizeof(rpc_state_t));
cgc.buffer = (char *) &rpc_state;
if ((ret = cdo->generic_packet(cdi, &cgc)))
return ret;
ai->lrpcs.type = rpc_state.type_code;
ai->lrpcs.vra = rpc_state.vra;
ai->lrpcs.ucca = rpc_state.ucca;
ai->lrpcs.region_mask = rpc_state.region_mask;
ai->lrpcs.rpc_scheme = rpc_state.rpc_scheme;
break;
/* Set region settings */
case DVD_HOST_SEND_RPC_STATE:
cdinfo(CD_DVD, "entering DVD_HOST_SEND_RPC_STATE\n");
setup_send_key(&cgc, 0, 6);
buf[1] = 6;
buf[4] = ai->hrpcs.pdrc;
if ((ret = cdo->generic_packet(cdi, &cgc)))
return ret;
break;
default:
cdinfo(CD_WARNING, "Invalid DVD key ioctl (%d)\n", ai->type);
return -ENOTTY;
}
return 0;
}
static int dvd_read_physical(struct cdrom_device_info *cdi, dvd_struct *s,
struct packet_command *cgc)
{
unsigned char buf[21], *base;
struct dvd_layer *layer;
struct cdrom_device_ops *cdo = cdi->ops;
int ret, layer_num = s->physical.layer_num;
if (layer_num >= DVD_LAYERS)
return -EINVAL;
init_cdrom_command(cgc, buf, sizeof(buf), CGC_DATA_READ);
cgc->cmd[0] = GPCMD_READ_DVD_STRUCTURE;
cgc->cmd[6] = layer_num;
cgc->cmd[7] = s->type;
cgc->cmd[9] = cgc->buflen & 0xff;
/*
* refrain from reporting errors on non-existing layers (mainly)
*/
cgc->quiet = 1;
ret = cdo->generic_packet(cdi, cgc);
if (ret)
return ret;
base = &buf[4];
layer = &s->physical.layer[layer_num];
/*
* place the data... really ugly, but at least we won't have to
* worry about endianness in userspace.
*/
memset(layer, 0, sizeof(*layer));
layer->book_version = base[0] & 0xf;
layer->book_type = base[0] >> 4;
layer->min_rate = base[1] & 0xf;
layer->disc_size = base[1] >> 4;
layer->layer_type = base[2] & 0xf;
layer->track_path = (base[2] >> 4) & 1;
layer->nlayers = (base[2] >> 5) & 3;
layer->track_density = base[3] & 0xf;
layer->linear_density = base[3] >> 4;
layer->start_sector = base[5] << 16 | base[6] << 8 | base[7];
layer->end_sector = base[9] << 16 | base[10] << 8 | base[11];
layer->end_sector_l0 = base[13] << 16 | base[14] << 8 | base[15];
layer->bca = base[16] >> 7;
return 0;
}
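/*
 * The unpacking above follows the READ DVD STRUCTURE format 0 layer
 * descriptor: for instance base[2] holds the layer type in bits 0-3,
 * the track path in bit 4 and the number of layers in bits 5-6,
 * matching the shifts and masks applied to it.
 */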
static int dvd_read_copyright(struct cdrom_device_info *cdi, dvd_struct *s,
struct packet_command *cgc)
{
int ret;
u_char buf[8];
struct cdrom_device_ops *cdo = cdi->ops;
init_cdrom_command(cgc, buf, sizeof(buf), CGC_DATA_READ);
cgc->cmd[0] = GPCMD_READ_DVD_STRUCTURE;
cgc->cmd[6] = s->copyright.layer_num;
cgc->cmd[7] = s->type;
cgc->cmd[8] = cgc->buflen >> 8;
cgc->cmd[9] = cgc->buflen & 0xff;
ret = cdo->generic_packet(cdi, cgc);
if (ret)
return ret;
s->copyright.cpst = buf[4];
s->copyright.rmi = buf[5];
return 0;
}
static int dvd_read_disckey(struct cdrom_device_info *cdi, dvd_struct *s,
struct packet_command *cgc)
{
int ret, size;
u_char *buf;
struct cdrom_device_ops *cdo = cdi->ops;
size = sizeof(s->disckey.value) + 4;
buf = kmalloc(size, GFP_KERNEL);
if (!buf)
return -ENOMEM;
init_cdrom_command(cgc, buf, size, CGC_DATA_READ);
cgc->cmd[0] = GPCMD_READ_DVD_STRUCTURE;
cgc->cmd[7] = s->type;
cgc->cmd[8] = size >> 8;
cgc->cmd[9] = size & 0xff;
cgc->cmd[10] = s->disckey.agid << 6;
ret = cdo->generic_packet(cdi, cgc);
if (!ret)
memcpy(s->disckey.value, &buf[4], sizeof(s->disckey.value));
kfree(buf);
return ret;
}
static int dvd_read_bca(struct cdrom_device_info *cdi, dvd_struct *s,
struct packet_command *cgc)
{
int ret, size = 4 + 188;
u_char *buf;
struct cdrom_device_ops *cdo = cdi->ops;
buf = kmalloc(size, GFP_KERNEL);
if (!buf)
return -ENOMEM;
init_cdrom_command(cgc, buf, size, CGC_DATA_READ);
cgc->cmd[0] = GPCMD_READ_DVD_STRUCTURE;
cgc->cmd[7] = s->type;
cgc->cmd[9] = cgc->buflen & 0xff;
ret = cdo->generic_packet(cdi, cgc);
if (ret)
goto out;
s->bca.len = buf[0] << 8 | buf[1];
if (s->bca.len < 12 || s->bca.len > 188) {
cdinfo(CD_WARNING, "Received invalid BCA length (%d)\n", s->bca.len);
ret = -EIO;
goto out;
}
memcpy(s->bca.value, &buf[4], s->bca.len);
ret = 0;
out:
kfree(buf);
return ret;
}
static int dvd_read_manufact(struct cdrom_device_info *cdi, dvd_struct *s,
struct packet_command *cgc)
{
int ret = 0, size;
u_char *buf;
struct cdrom_device_ops *cdo = cdi->ops;
size = sizeof(s->manufact.value) + 4;
buf = kmalloc(size, GFP_KERNEL);
if (!buf)
return -ENOMEM;
init_cdrom_command(cgc, buf, size, CGC_DATA_READ);
cgc->cmd[0] = GPCMD_READ_DVD_STRUCTURE;
cgc->cmd[7] = s->type;
cgc->cmd[8] = size >> 8;
cgc->cmd[9] = size & 0xff;
ret = cdo->generic_packet(cdi, cgc);
if (ret)
goto out;
s->manufact.len = buf[0] << 8 | buf[1];
/* len is a __u32 and so can never be negative; only the upper
bound needs checking */
if (s->manufact.len > 2048) {
cdinfo(CD_WARNING, "Received invalid manufacture info "
"length (%d): truncating to 2048\n",
s->manufact.len);
s->manufact.len = 2048;
}
memcpy(s->manufact.value, &buf[4], s->manufact.len);
out:
kfree(buf);
return ret;
}
static int dvd_read_struct(struct cdrom_device_info *cdi, dvd_struct *s,
struct packet_command *cgc)
{
switch (s->type) {
case DVD_STRUCT_PHYSICAL:
return dvd_read_physical(cdi, s, cgc);
case DVD_STRUCT_COPYRIGHT:
return dvd_read_copyright(cdi, s, cgc);
case DVD_STRUCT_DISCKEY:
return dvd_read_disckey(cdi, s, cgc);
case DVD_STRUCT_BCA:
return dvd_read_bca(cdi, s, cgc);
case DVD_STRUCT_MANUFACT:
return dvd_read_manufact(cdi, s, cgc);
default:
cdinfo(CD_WARNING, ": Invalid DVD structure read requested (%d)\n",
s->type);
return -EINVAL;
}
}
int cdrom_mode_sense(struct cdrom_device_info *cdi,
struct packet_command *cgc,
int page_code, int page_control)
{
struct cdrom_device_ops *cdo = cdi->ops;
memset(cgc->cmd, 0, sizeof(cgc->cmd));
cgc->cmd[0] = GPCMD_MODE_SENSE_10;
cgc->cmd[2] = page_code | (page_control << 6);
cgc->cmd[7] = cgc->buflen >> 8;
cgc->cmd[8] = cgc->buflen & 0xff;
cgc->data_direction = CGC_DATA_READ;
return cdo->generic_packet(cdi, cgc);
}
int cdrom_mode_select(struct cdrom_device_info *cdi,
struct packet_command *cgc)
{
struct cdrom_device_ops *cdo = cdi->ops;
memset(cgc->cmd, 0, sizeof(cgc->cmd));
memset(cgc->buffer, 0, 2);
cgc->cmd[0] = GPCMD_MODE_SELECT_10;
cgc->cmd[1] = 0x10; /* PF */
cgc->cmd[7] = cgc->buflen >> 8;
cgc->cmd[8] = cgc->buflen & 0xff;
cgc->data_direction = CGC_DATA_WRITE;
return cdo->generic_packet(cdi, cgc);
}
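/*
 * Illustrative sketch, not built anywhere: the usual sense/select
 * round trip reads a mode page, patches it, and writes it back.
 * cdrom_mode_select() clears the two mode-data-length header bytes
 * unconditionally before issuing the command. Buffer size and page
 * choice below are example values.
 */
#if 0
static int example_mode_roundtrip(struct cdrom_device_info *cdi)
{
struct packet_command cgc;
unsigned char buffer[32];
int ret;
init_cdrom_command(&cgc, buffer, sizeof(buffer), CGC_DATA_UNKNOWN);
ret = cdrom_mode_sense(cdi, &cgc, GPMODE_AUDIO_CTL_PAGE, 0);
if (ret)
return ret;
/* ... modify the page bytes in buffer here ... */
return cdrom_mode_select(cdi, &cgc);
}
#endif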
static int cdrom_read_subchannel(struct cdrom_device_info *cdi,
struct cdrom_subchnl *subchnl, int mcn)
{
struct cdrom_device_ops *cdo = cdi->ops;
struct packet_command cgc;
char buffer[32];
int ret;
init_cdrom_command(&cgc, buffer, 16, CGC_DATA_READ);
cgc.cmd[0] = GPCMD_READ_SUBCHANNEL;
cgc.cmd[1] = 2; /* MSF addressing */
cgc.cmd[2] = 0x40; /* request subQ data */
cgc.cmd[3] = mcn ? 2 : 1;
cgc.cmd[8] = 16;
if ((ret = cdo->generic_packet(cdi, &cgc)))
return ret;
subchnl->cdsc_audiostatus = cgc.buffer[1];
subchnl->cdsc_format = CDROM_MSF;
subchnl->cdsc_ctrl = cgc.buffer[5] & 0xf;
subchnl->cdsc_trk = cgc.buffer[6];
subchnl->cdsc_ind = cgc.buffer[7];
subchnl->cdsc_reladdr.msf.minute = cgc.buffer[13];
subchnl->cdsc_reladdr.msf.second = cgc.buffer[14];
subchnl->cdsc_reladdr.msf.frame = cgc.buffer[15];
subchnl->cdsc_absaddr.msf.minute = cgc.buffer[9];
subchnl->cdsc_absaddr.msf.second = cgc.buffer[10];
subchnl->cdsc_absaddr.msf.frame = cgc.buffer[11];
return 0;
}
/*
* Specific READ_10 interface
*/
static int cdrom_read_cd(struct cdrom_device_info *cdi,
struct packet_command *cgc, int lba,
int blocksize, int nblocks)
{
struct cdrom_device_ops *cdo = cdi->ops;
memset(&cgc->cmd, 0, sizeof(cgc->cmd));
cgc->cmd[0] = GPCMD_READ_10;
cgc->cmd[2] = (lba >> 24) & 0xff;
cgc->cmd[3] = (lba >> 16) & 0xff;
cgc->cmd[4] = (lba >> 8) & 0xff;
cgc->cmd[5] = lba & 0xff;
cgc->cmd[6] = (nblocks >> 16) & 0xff;
cgc->cmd[7] = (nblocks >> 8) & 0xff;
cgc->cmd[8] = nblocks & 0xff;
cgc->buflen = blocksize * nblocks;
return cdo->generic_packet(cdi, cgc);
}
/* very generic interface for reading the various types of blocks */
static int cdrom_read_block(struct cdrom_device_info *cdi,
struct packet_command *cgc,
int lba, int nblocks, int format, int blksize)
{
struct cdrom_device_ops *cdo = cdi->ops;
memset(&cgc->cmd, 0, sizeof(cgc->cmd));
cgc->cmd[0] = GPCMD_READ_CD;
/* expected sector size - cdda,mode1,etc. */
cgc->cmd[1] = format << 2;
/* starting address */
cgc->cmd[2] = (lba >> 24) & 0xff;
cgc->cmd[3] = (lba >> 16) & 0xff;
cgc->cmd[4] = (lba >> 8) & 0xff;
cgc->cmd[5] = lba & 0xff;
/* number of blocks */
cgc->cmd[6] = (nblocks >> 16) & 0xff;
cgc->cmd[7] = (nblocks >> 8) & 0xff;
cgc->cmd[8] = nblocks & 0xff;
cgc->buflen = blksize * nblocks;
/* set the header info returned */
switch (blksize) {
case CD_FRAMESIZE_RAW0 : cgc->cmd[9] = 0x58; break;
case CD_FRAMESIZE_RAW1 : cgc->cmd[9] = 0x78; break;
case CD_FRAMESIZE_RAW : cgc->cmd[9] = 0xf8; break;
default : cgc->cmd[9] = 0x10;
}
return cdo->generic_packet(cdi, cgc);
}
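/*
 * Illustrative sketch, not built anywhere: reading one CDDA frame at
 * a given LBA via the helper above (expected sector type 1 = CD-DA,
 * the same value cdrom_read_cdda_old() passes).
 */
#if 0
static int example_read_one_cdda_frame(struct cdrom_device_info *cdi,
void *buf, int lba)
{
struct packet_command cgc;
init_cdrom_command(&cgc, buf, CD_FRAMESIZE_RAW, CGC_DATA_READ);
return cdrom_read_block(cdi, &cgc, lba, 1, 1, CD_FRAMESIZE_RAW);
}
#endif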
static int cdrom_read_cdda_old(struct cdrom_device_info *cdi, __u8 __user *ubuf,
int lba, int nframes)
{
struct packet_command cgc;
int ret = 0;
int nr;
cdi->last_sense = 0;
memset(&cgc, 0, sizeof(cgc));
/*
* start with a buffer big enough for all nframes; halve the size
* until the allocation succeeds
*/
nr = nframes;
do {
cgc.buffer = kmalloc(CD_FRAMESIZE_RAW * nr, GFP_KERNEL);
if (cgc.buffer)
break;
nr >>= 1;
} while (nr);
if (!nr)
return -ENOMEM;
cgc.data_direction = CGC_DATA_READ;
while (nframes > 0) {
if (nr > nframes)
nr = nframes;
ret = cdrom_read_block(cdi, &cgc, lba, nr, 1, CD_FRAMESIZE_RAW);
if (ret)
break;
if (copy_to_user(ubuf, cgc.buffer, CD_FRAMESIZE_RAW * nr)) {
ret = -EFAULT;
break;
}
ubuf += CD_FRAMESIZE_RAW * nr;
nframes -= nr;
lba += nr;
}
kfree(cgc.buffer);
return ret;
}
static int cdrom_read_cdda_bpc(struct cdrom_device_info *cdi, __u8 __user *ubuf,
int lba, int nframes)
{
struct request_queue *q = cdi->disk->queue;
struct request *rq;
struct bio *bio;
unsigned int len;
int nr, ret = 0;
if (!q)
return -ENXIO;
cdi->last_sense = 0;
while (nframes) {
nr = nframes;
if (cdi->cdda_method == CDDA_BPC_SINGLE)
nr = 1;
if (nr * CD_FRAMESIZE_RAW > (queue_max_sectors(q) << 9))
nr = (queue_max_sectors(q) << 9) / CD_FRAMESIZE_RAW;
len = nr * CD_FRAMESIZE_RAW;
rq = blk_get_request(q, READ, GFP_KERNEL);
if (!rq) {
ret = -ENOMEM;
break;
}
ret = blk_rq_map_user(q, rq, NULL, ubuf, len, GFP_KERNEL);
if (ret) {
blk_put_request(rq);
break;
}
rq->cmd[0] = GPCMD_READ_CD;
rq->cmd[1] = 1 << 2;
rq->cmd[2] = (lba >> 24) & 0xff;
rq->cmd[3] = (lba >> 16) & 0xff;
rq->cmd[4] = (lba >> 8) & 0xff;
rq->cmd[5] = lba & 0xff;
rq->cmd[6] = (nr >> 16) & 0xff;
rq->cmd[7] = (nr >> 8) & 0xff;
rq->cmd[8] = nr & 0xff;
rq->cmd[9] = 0xf8;
rq->cmd_len = 12;
rq->cmd_type = REQ_TYPE_BLOCK_PC;
rq->timeout = 60 * HZ;
bio = rq->bio;
if (blk_execute_rq(q, cdi->disk, rq, 0)) {
struct request_sense *s = rq->sense;
ret = -EIO;
cdi->last_sense = s->sense_key;
}
if (blk_rq_unmap_user(bio))
ret = -EFAULT;
blk_put_request(rq);
if (ret)
break;
nframes -= nr;
lba += nr;
ubuf += len;
}
return ret;
}
static int cdrom_read_cdda(struct cdrom_device_info *cdi, __u8 __user *ubuf,
int lba, int nframes)
{
int ret;
if (cdi->cdda_method == CDDA_OLD)
return cdrom_read_cdda_old(cdi, ubuf, lba, nframes);
retry:
/*
* only an I/O error triggers the fallback logic below; success and
* any other error are returned to the caller as-is
*/
ret = cdrom_read_cdda_bpc(cdi, ubuf, lba, nframes);
if (ret != -EIO)
return ret;
/*
* I've seen drives get sense 4/8/3 udma crc errors on multi
* frame dma, so drop to single frame dma if we need to
*/
if (cdi->cdda_method == CDDA_BPC_FULL && nframes > 1) {
pr_info("dropping to single frame dma\n");
cdi->cdda_method = CDDA_BPC_SINGLE;
goto retry;
}
/*
* so we have an io error of some sort with multi frame dma. only
* fall back to old-style cdda for hardware-type problems (sense
* keys HARDWARE ERROR and ABORTED COMMAND), not for any error
*/
if (cdi->last_sense != 0x04 && cdi->last_sense != 0x0b)
return ret;
pr_info("dropping to old style cdda (sense=%x)\n", cdi->last_sense);
cdi->cdda_method = CDDA_OLD;
return cdrom_read_cdda_old(cdi, ubuf, lba, nframes);
}
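/*
 * To summarize the fallback ladder above: start with multi-frame
 * block-pc DMA (CDDA_BPC_FULL), drop to single-frame DMA
 * (CDDA_BPC_SINGLE) on any -EIO, and only fall back to the old
 * bounce-buffer path (CDDA_OLD) when the sense data indicates a
 * hardware-type failure.
 */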
static int cdrom_ioctl_multisession(struct cdrom_device_info *cdi,
void __user *argp)
{
struct cdrom_multisession ms_info;
u8 requested_format;
int ret;
cdinfo(CD_DO_IOCTL, "entering CDROMMULTISESSION\n");
if (!(cdi->ops->capability & CDC_MULTI_SESSION))
return -ENOSYS;
if (copy_from_user(&ms_info, argp, sizeof(ms_info)))
return -EFAULT;
requested_format = ms_info.addr_format;
if (requested_format != CDROM_MSF && requested_format != CDROM_LBA)
return -EINVAL;
ms_info.addr_format = CDROM_LBA;
ret = cdi->ops->get_last_session(cdi, &ms_info);
if (ret)
return ret;
sanitize_format(&ms_info.addr, &ms_info.addr_format, requested_format);
if (copy_to_user(argp, &ms_info, sizeof(ms_info)))
return -EFAULT;
cdinfo(CD_DO_IOCTL, "CDROMMULTISESSION successful\n");
return 0;
}
static int cdrom_ioctl_eject(struct cdrom_device_info *cdi)
{
cdinfo(CD_DO_IOCTL, "entering CDROMEJECT\n");
if (!CDROM_CAN(CDC_OPEN_TRAY))
return -ENOSYS;
if (cdi->use_count != 1 || cdi->keeplocked)
return -EBUSY;
if (CDROM_CAN(CDC_LOCK)) {
int ret = cdi->ops->lock_door(cdi, 0);
if (ret)
return ret;
}
return cdi->ops->tray_move(cdi, 1);
}
static int cdrom_ioctl_closetray(struct cdrom_device_info *cdi)
{
cdinfo(CD_DO_IOCTL, "entering CDROMCLOSETRAY\n");
if (!CDROM_CAN(CDC_CLOSE_TRAY))
return -ENOSYS;
return cdi->ops->tray_move(cdi, 0);
}
static int cdrom_ioctl_eject_sw(struct cdrom_device_info *cdi,
unsigned long arg)
{
cdinfo(CD_DO_IOCTL, "entering CDROMEJECT_SW\n");
if (!CDROM_CAN(CDC_OPEN_TRAY))
return -ENOSYS;
if (cdi->keeplocked)
return -EBUSY;
cdi->options &= ~(CDO_AUTO_CLOSE | CDO_AUTO_EJECT);
if (arg)
cdi->options |= CDO_AUTO_CLOSE | CDO_AUTO_EJECT;
return 0;
}
static int cdrom_ioctl_media_changed(struct cdrom_device_info *cdi,
unsigned long arg)
{
struct cdrom_changer_info *info;
int ret;
cdinfo(CD_DO_IOCTL, "entering CDROM_MEDIA_CHANGED\n");
if (!CDROM_CAN(CDC_MEDIA_CHANGED))
return -ENOSYS;
/* cannot select disc, or the request is for the current disc */
if (!CDROM_CAN(CDC_SELECT_DISC) || arg == CDSL_CURRENT)
return media_changed(cdi, 1);
if ((unsigned int)arg >= cdi->capacity)
return -EINVAL;
info = kmalloc(sizeof(*info), GFP_KERNEL);
if (!info)
return -ENOMEM;
ret = cdrom_read_mech_status(cdi, info);
if (!ret)
ret = info->slots[arg].change;
kfree(info);
return ret;
}
static int cdrom_ioctl_set_options(struct cdrom_device_info *cdi,
unsigned long arg)
{
cdinfo(CD_DO_IOCTL, "entering CDROM_SET_OPTIONS\n");
/*
* Options need to be in sync with capability.
* Too late for that, so we have to check each one separately.
*/
switch (arg) {
case CDO_USE_FFLAGS:
case CDO_CHECK_TYPE:
break;
case CDO_LOCK:
if (!CDROM_CAN(CDC_LOCK))
return -ENOSYS;
break;
case 0:
return cdi->options;
/* default is basically CDO_[AUTO_CLOSE|AUTO_EJECT] */
default:
if (!CDROM_CAN(arg))
return -ENOSYS;
}
cdi->options |= (int) arg;
return cdi->options;
}
static int cdrom_ioctl_clear_options(struct cdrom_device_info *cdi,
unsigned long arg)
{
cdinfo(CD_DO_IOCTL, "entering CDROM_CLEAR_OPTIONS\n");
cdi->options &= ~(int) arg;
return cdi->options;
}
static int cdrom_ioctl_select_speed(struct cdrom_device_info *cdi,
unsigned long arg)
{
cdinfo(CD_DO_IOCTL, "entering CDROM_SELECT_SPEED\n");
if (!CDROM_CAN(CDC_SELECT_SPEED))
return -ENOSYS;
return cdi->ops->select_speed(cdi, arg);
}
static int cdrom_ioctl_select_disc(struct cdrom_device_info *cdi,
unsigned long arg)
{
cdinfo(CD_DO_IOCTL, "entering CDROM_SELECT_DISC\n");
if (!CDROM_CAN(CDC_SELECT_DISC))
return -ENOSYS;
if (arg != CDSL_CURRENT && arg != CDSL_NONE) {
if ((int)arg >= cdi->capacity)
return -EINVAL;
}
/*
* ->select_disc is a hook to allow a driver-specific way of
* selecting a disc. However, since there is no equivalent hook for
* cdrom_slot_status this may not actually be useful...
*/
if (cdi->ops->select_disc)
return cdi->ops->select_disc(cdi, arg);
cdinfo(CD_CHANGER, "Using generic cdrom_select_disc()\n");
return cdrom_select_disc(cdi, arg);
}
static int cdrom_ioctl_reset(struct cdrom_device_info *cdi,
struct block_device *bdev)
{
cdinfo(CD_DO_IOCTL, "entering CDROM_RESET\n");
if (!capable(CAP_SYS_ADMIN))
return -EACCES;
if (!CDROM_CAN(CDC_RESET))
return -ENOSYS;
invalidate_bdev(bdev);
return cdi->ops->reset(cdi);
}
static int cdrom_ioctl_lock_door(struct cdrom_device_info *cdi,
unsigned long arg)
{
cdinfo(CD_DO_IOCTL, "%socking door.\n", arg ? "L" : "Unl");
if (!CDROM_CAN(CDC_LOCK))
return -EDRIVE_CANT_DO_THIS;
cdi->keeplocked = arg ? 1 : 0;
/*
* Don't unlock the door on multiple opens by default, but allow
* root to do so.
*/
if (cdi->use_count != 1 && !arg && !capable(CAP_SYS_ADMIN))
return -EBUSY;
return cdi->ops->lock_door(cdi, arg);
}
static int cdrom_ioctl_debug(struct cdrom_device_info *cdi,
unsigned long arg)
{
cdinfo(CD_DO_IOCTL, "%sabling debug.\n", arg ? "En" : "Dis");
if (!capable(CAP_SYS_ADMIN))
return -EACCES;
debug = arg ? 1 : 0;
return debug;
}
static int cdrom_ioctl_get_capability(struct cdrom_device_info *cdi)
{
cdinfo(CD_DO_IOCTL, "entering CDROM_GET_CAPABILITY\n");
return (cdi->ops->capability & ~cdi->mask);
}
/*
* The following function is implemented, although very few audio
* discs give Universal Product Code information, which should just be
* the Medium Catalog Number on the box. Note that the way the code
* is written on the CD is /not/ uniform across all discs!
*/
static int cdrom_ioctl_get_mcn(struct cdrom_device_info *cdi,
void __user *argp)
{
struct cdrom_mcn mcn;
int ret;
cdinfo(CD_DO_IOCTL, "entering CDROM_GET_MCN\n");
if (!(cdi->ops->capability & CDC_MCN))
return -ENOSYS;
ret = cdi->ops->get_mcn(cdi, &mcn);
if (ret)
return ret;
if (copy_to_user(argp, &mcn, sizeof(mcn)))
return -EFAULT;
cdinfo(CD_DO_IOCTL, "CDROM_GET_MCN successful\n");
return 0;
}
static int cdrom_ioctl_drive_status(struct cdrom_device_info *cdi,
unsigned long arg)
{
cdinfo(CD_DO_IOCTL, "entering CDROM_DRIVE_STATUS\n");
if (!(cdi->ops->capability & CDC_DRIVE_STATUS))
return -ENOSYS;
if (!CDROM_CAN(CDC_SELECT_DISC) ||
(arg == CDSL_CURRENT || arg == CDSL_NONE))
return cdi->ops->drive_status(cdi, CDSL_CURRENT);
if ((int)arg >= cdi->capacity)
return -EINVAL;
return cdrom_slot_status(cdi, arg);
}
/*
* Ok, this is where problems start. The current interface for the
* CDROM_DISC_STATUS ioctl is flawed. It makes the false assumption that
* CDs are all CDS_DATA_1 or all CDS_AUDIO, etc. Unfortunately, while this
* is often the case, it is also very common for CDs to have some tracks
* with data, and some tracks with audio. Just because I feel like it,
* I declare the following to be the best way to cope. If the CD has ANY
* data tracks on it, it will be returned as a data CD. If it has any XA
* tracks, I will return it as that. Now I could simplify this interface
* by combining these returns with the above, but this more clearly
* demonstrates the problem with the current interface. Too bad this
* wasn't designed to use bitmasks... -Erik
*
* Well, now we have the option CDS_MIXED: a mixed-type CD.
* User level programmers might feel the ioctl is not very useful.
* ---david
*/
static int cdrom_ioctl_disc_status(struct cdrom_device_info *cdi)
{
tracktype tracks;
cdinfo(CD_DO_IOCTL, "entering CDROM_DISC_STATUS\n");
cdrom_count_tracks(cdi, &tracks);
if (tracks.error)
return tracks.error;
/* Policy mode on */
if (tracks.audio > 0) {
if (!tracks.data && !tracks.cdi && !tracks.xa)
return CDS_AUDIO;
else
return CDS_MIXED;
}
if (tracks.cdi > 0)
return CDS_XA_2_2;
if (tracks.xa > 0)
return CDS_XA_2_1;
if (tracks.data > 0)
return CDS_DATA_1;
/* Policy mode off */
cdinfo(CD_WARNING,"This disc doesn't have any tracks I recognize!\n");
return CDS_NO_INFO;
}
static int cdrom_ioctl_changer_nslots(struct cdrom_device_info *cdi)
{
cdinfo(CD_DO_IOCTL, "entering CDROM_CHANGER_NSLOTS\n");
return cdi->capacity;
}
static int cdrom_ioctl_get_subchnl(struct cdrom_device_info *cdi,
void __user *argp)
{
struct cdrom_subchnl q;
u8 requested, back;
int ret;
/* cdinfo(CD_DO_IOCTL,"entering CDROMSUBCHNL\n");*/
if (copy_from_user(&q, argp, sizeof(q)))
return -EFAULT;
requested = q.cdsc_format;
if (requested != CDROM_MSF && requested != CDROM_LBA)
return -EINVAL;
q.cdsc_format = CDROM_MSF;
ret = cdi->ops->audio_ioctl(cdi, CDROMSUBCHNL, &q);
if (ret)
return ret;
back = q.cdsc_format; /* local copy */
sanitize_format(&q.cdsc_absaddr, &back, requested);
sanitize_format(&q.cdsc_reladdr, &q.cdsc_format, requested);
if (copy_to_user(argp, &q, sizeof(q)))
return -EFAULT;
/* cdinfo(CD_DO_IOCTL, "CDROMSUBCHNL successful\n"); */
return 0;
}
static int cdrom_ioctl_read_tochdr(struct cdrom_device_info *cdi,
void __user *argp)
{
struct cdrom_tochdr header;
int ret;
/* cdinfo(CD_DO_IOCTL, "entering CDROMREADTOCHDR\n"); */
if (copy_from_user(&header, argp, sizeof(header)))
return -EFAULT;
ret = cdi->ops->audio_ioctl(cdi, CDROMREADTOCHDR, &header);
if (ret)
return ret;
if (copy_to_user(argp, &header, sizeof(header)))
return -EFAULT;
/* cdinfo(CD_DO_IOCTL, "CDROMREADTOCHDR successful\n"); */
return 0;
}
static int cdrom_ioctl_read_tocentry(struct cdrom_device_info *cdi,
void __user *argp)
{
struct cdrom_tocentry entry;
u8 requested_format;
int ret;
/* cdinfo(CD_DO_IOCTL, "entering CDROMREADTOCENTRY\n"); */
if (copy_from_user(&entry, argp, sizeof(entry)))
return -EFAULT;
requested_format = entry.cdte_format;
if (requested_format != CDROM_MSF && requested_format != CDROM_LBA)
return -EINVAL;
/* make interface to low-level uniform */
entry.cdte_format = CDROM_MSF;
ret = cdi->ops->audio_ioctl(cdi, CDROMREADTOCENTRY, &entry);
if (ret)
return ret;
sanitize_format(&entry.cdte_addr, &entry.cdte_format, requested_format);
if (copy_to_user(argp, &entry, sizeof(entry)))
return -EFAULT;
/* cdinfo(CD_DO_IOCTL, "CDROMREADTOCENTRY successful\n"); */
return 0;
}
static int cdrom_ioctl_play_msf(struct cdrom_device_info *cdi,
void __user *argp)
{
struct cdrom_msf msf;
cdinfo(CD_DO_IOCTL, "entering CDROMPLAYMSF\n");
if (!CDROM_CAN(CDC_PLAY_AUDIO))
return -ENOSYS;
if (copy_from_user(&msf, argp, sizeof(msf)))
return -EFAULT;
return cdi->ops->audio_ioctl(cdi, CDROMPLAYMSF, &msf);
}
static int cdrom_ioctl_play_trkind(struct cdrom_device_info *cdi,
void __user *argp)
{
struct cdrom_ti ti;
int ret;
cdinfo(CD_DO_IOCTL, "entering CDROMPLAYTRKIND\n");
if (!CDROM_CAN(CDC_PLAY_AUDIO))
return -ENOSYS;
if (copy_from_user(&ti, argp, sizeof(ti)))
return -EFAULT;
ret = check_for_audio_disc(cdi, cdi->ops);
if (ret)
return ret;
return cdi->ops->audio_ioctl(cdi, CDROMPLAYTRKIND, &ti);
}
static int cdrom_ioctl_volctrl(struct cdrom_device_info *cdi,
void __user *argp)
{
struct cdrom_volctrl volume;
cdinfo(CD_DO_IOCTL, "entering CDROMVOLCTRL\n");
if (!CDROM_CAN(CDC_PLAY_AUDIO))
return -ENOSYS;
if (copy_from_user(&volume, argp, sizeof(volume)))
return -EFAULT;
return cdi->ops->audio_ioctl(cdi, CDROMVOLCTRL, &volume);
}
static int cdrom_ioctl_volread(struct cdrom_device_info *cdi,
void __user *argp)
{
struct cdrom_volctrl volume;
int ret;
cdinfo(CD_DO_IOCTL, "entering CDROMVOLREAD\n");
if (!CDROM_CAN(CDC_PLAY_AUDIO))
return -ENOSYS;
ret = cdi->ops->audio_ioctl(cdi, CDROMVOLREAD, &volume);
if (ret)
return ret;
if (copy_to_user(argp, &volume, sizeof(volume)))
return -EFAULT;
return 0;
}
static int cdrom_ioctl_audioctl(struct cdrom_device_info *cdi,
unsigned int cmd)
{
int ret;
cdinfo(CD_DO_IOCTL, "doing audio ioctl (start/stop/pause/resume)\n");
if (!CDROM_CAN(CDC_PLAY_AUDIO))
return -ENOSYS;
ret = check_for_audio_disc(cdi, cdi->ops);
if (ret)
return ret;
return cdi->ops->audio_ioctl(cdi, cmd, NULL);
}
/*
* Just about every imaginable ioctl is supported in the Uniform layer
* these days.
* ATAPI / SCSI specific code now mainly resides in mmc_ioctl().
*/
int cdrom_ioctl(struct cdrom_device_info *cdi, struct block_device *bdev,
fmode_t mode, unsigned int cmd, unsigned long arg)
{
void __user *argp = (void __user *)arg;
int ret;
/*
* Try the generic SCSI command ioctl's first.
*/
ret = scsi_cmd_blk_ioctl(bdev, mode, cmd, argp);
if (ret != -ENOTTY)
return ret;
switch (cmd) {
case CDROMMULTISESSION:
return cdrom_ioctl_multisession(cdi, argp);
case CDROMEJECT:
return cdrom_ioctl_eject(cdi);
case CDROMCLOSETRAY:
return cdrom_ioctl_closetray(cdi);
case CDROMEJECT_SW:
return cdrom_ioctl_eject_sw(cdi, arg);
case CDROM_MEDIA_CHANGED:
return cdrom_ioctl_media_changed(cdi, arg);
case CDROM_SET_OPTIONS:
return cdrom_ioctl_set_options(cdi, arg);
case CDROM_CLEAR_OPTIONS:
return cdrom_ioctl_clear_options(cdi, arg);
case CDROM_SELECT_SPEED:
return cdrom_ioctl_select_speed(cdi, arg);
case CDROM_SELECT_DISC:
return cdrom_ioctl_select_disc(cdi, arg);
case CDROMRESET:
return cdrom_ioctl_reset(cdi, bdev);
case CDROM_LOCKDOOR:
return cdrom_ioctl_lock_door(cdi, arg);
case CDROM_DEBUG:
return cdrom_ioctl_debug(cdi, arg);
case CDROM_GET_CAPABILITY:
return cdrom_ioctl_get_capability(cdi);
case CDROM_GET_MCN:
return cdrom_ioctl_get_mcn(cdi, argp);
case CDROM_DRIVE_STATUS:
return cdrom_ioctl_drive_status(cdi, arg);
case CDROM_DISC_STATUS:
return cdrom_ioctl_disc_status(cdi);
case CDROM_CHANGER_NSLOTS:
return cdrom_ioctl_changer_nslots(cdi);
}
/*
* Use the ioctls that are implemented through the generic_packet()
* interface. This may look a bit funny, but if -ENOTTY is
* returned that particular ioctl is not implemented and we
* let it go through the device specific ones.
*/
if (CDROM_CAN(CDC_GENERIC_PACKET)) {
ret = mmc_ioctl(cdi, cmd, arg);
if (ret != -ENOTTY)
return ret;
}
/*
* Note: most of the cdinfo() calls are commented out here,
* because they fill up the sys log when CD players poll
* the drive.
*/
switch (cmd) {
case CDROMSUBCHNL:
return cdrom_ioctl_get_subchnl(cdi, argp);
case CDROMREADTOCHDR:
return cdrom_ioctl_read_tochdr(cdi, argp);
case CDROMREADTOCENTRY:
return cdrom_ioctl_read_tocentry(cdi, argp);
case CDROMPLAYMSF:
return cdrom_ioctl_play_msf(cdi, argp);
case CDROMPLAYTRKIND:
return cdrom_ioctl_play_trkind(cdi, argp);
case CDROMVOLCTRL:
return cdrom_ioctl_volctrl(cdi, argp);
case CDROMVOLREAD:
return cdrom_ioctl_volread(cdi, argp);
case CDROMSTART:
case CDROMSTOP:
case CDROMPAUSE:
case CDROMRESUME:
return cdrom_ioctl_audioctl(cdi, cmd);
}
return -ENOSYS;
}
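/*
 * Illustrative userspace sketch (not kernel code; assumes a device
 * node such as /dev/sr0):
 *
 * int fd = open("/dev/sr0", O_RDONLY | O_NONBLOCK);
 * int status = ioctl(fd, CDROM_DRIVE_STATUS, CDSL_CURRENT);
 * if (status == CDS_DISC_OK)
 * ioctl(fd, CDROMEJECT, 0);
 */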
/*
* Required when we need to use READ_10 to issue other than 2048 block
* reads
*/
static int cdrom_switch_blocksize(struct cdrom_device_info *cdi, int size)
{
struct cdrom_device_ops *cdo = cdi->ops;
struct packet_command cgc;
struct modesel_head mh;
memset(&mh, 0, sizeof(mh));
mh.block_desc_length = 0x08;
mh.block_length_med = (size >> 8) & 0xff;
mh.block_length_lo = size & 0xff;
memset(&cgc, 0, sizeof(cgc));
cgc.cmd[0] = 0x15;
cgc.cmd[1] = 1 << 4;
cgc.cmd[4] = 12;
cgc.buflen = sizeof(mh);
cgc.buffer = (char *) &mh;
cgc.data_direction = CGC_DATA_WRITE;
return cdo->generic_packet(cdi, &cgc);
}
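/*
 * The 12 bytes sent above are a MODE SELECT(6) (opcode 0x15) header
 * followed by a single block descriptor; only the descriptor's
 * block-length field is filled in, which is what actually switches
 * the drive's logical block size for the plain READ_10 path.
 */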
static noinline int mmc_ioctl_cdrom_read_data(struct cdrom_device_info *cdi,
void __user *arg,
struct packet_command *cgc,
int cmd)
{
struct request_sense sense;
struct cdrom_msf msf;
int blocksize = 0, format = 0, lba;
int ret;
switch (cmd) {
case CDROMREADRAW:
blocksize = CD_FRAMESIZE_RAW;
break;
case CDROMREADMODE1:
blocksize = CD_FRAMESIZE;
format = 2;
break;
case CDROMREADMODE2:
blocksize = CD_FRAMESIZE_RAW0;
break;
}
IOCTL_IN(arg, struct cdrom_msf, msf);
lba = msf_to_lba(msf.cdmsf_min0, msf.cdmsf_sec0, msf.cdmsf_frame0);
/* FIXME: we need upper bound checking, too!! */
if (lba < 0)
return -EINVAL;
cgc->buffer = kzalloc(blocksize, GFP_KERNEL);
if (cgc->buffer == NULL)
return -ENOMEM;
memset(&sense, 0, sizeof(sense));
cgc->sense = &sense;
cgc->data_direction = CGC_DATA_READ;
ret = cdrom_read_block(cdi, cgc, lba, 1, format, blocksize);
if (ret && sense.sense_key == 0x05 &&
sense.asc == 0x20 &&
sense.ascq == 0x00) {
/*
* SCSI-II devices are not required to support
* READ_CD, so let's try switching block size
*/
/* FIXME: switch back again... */
ret = cdrom_switch_blocksize(cdi, blocksize);
if (ret)
goto out;
cgc->sense = NULL;
ret = cdrom_read_cd(cdi, cgc, lba, blocksize, 1);
ret |= cdrom_switch_blocksize(cdi, blocksize);
}
if (!ret && copy_to_user(arg, cgc->buffer, blocksize))
ret = -EFAULT;
out:
kfree(cgc->buffer);
return ret;
}
static noinline int mmc_ioctl_cdrom_read_audio(struct cdrom_device_info *cdi,
void __user *arg)
{
struct cdrom_read_audio ra;
int lba;
IOCTL_IN(arg, struct cdrom_read_audio, ra);
if (ra.addr_format == CDROM_MSF)
lba = msf_to_lba(ra.addr.msf.minute,
ra.addr.msf.second,
ra.addr.msf.frame);
else if (ra.addr_format == CDROM_LBA)
lba = ra.addr.lba;
else
return -EINVAL;
/* FIXME: we need upper bound checking, too!! */
if (lba < 0 || ra.nframes <= 0 || ra.nframes > CD_FRAMES)
return -EINVAL;
return cdrom_read_cdda(cdi, ra.buf, lba, ra.nframes);
}
static noinline int mmc_ioctl_cdrom_subchannel(struct cdrom_device_info *cdi,
void __user *arg)
{
int ret;
struct cdrom_subchnl q;
u_char requested, back;
IOCTL_IN(arg, struct cdrom_subchnl, q);
requested = q.cdsc_format;
if (!((requested == CDROM_MSF) ||
(requested == CDROM_LBA)))
return -EINVAL;
q.cdsc_format = CDROM_MSF;
ret = cdrom_read_subchannel(cdi, &q, 0);
if (ret)
return ret;
back = q.cdsc_format; /* local copy */
sanitize_format(&q.cdsc_absaddr, &back, requested);
sanitize_format(&q.cdsc_reladdr, &q.cdsc_format, requested);
IOCTL_OUT(arg, struct cdrom_subchnl, q);
/* cdinfo(CD_DO_IOCTL, "CDROMSUBCHNL successful\n"); */
return 0;
}
static noinline int mmc_ioctl_cdrom_play_msf(struct cdrom_device_info *cdi,
void __user *arg,
struct packet_command *cgc)
{
struct cdrom_device_ops *cdo = cdi->ops;
struct cdrom_msf msf;
cdinfo(CD_DO_IOCTL, "entering CDROMPLAYMSF\n");
IOCTL_IN(arg, struct cdrom_msf, msf);
cgc->cmd[0] = GPCMD_PLAY_AUDIO_MSF;
cgc->cmd[3] = msf.cdmsf_min0;
cgc->cmd[4] = msf.cdmsf_sec0;
cgc->cmd[5] = msf.cdmsf_frame0;
cgc->cmd[6] = msf.cdmsf_min1;
cgc->cmd[7] = msf.cdmsf_sec1;
cgc->cmd[8] = msf.cdmsf_frame1;
cgc->data_direction = CGC_DATA_NONE;
return cdo->generic_packet(cdi, cgc);
}
static noinline int mmc_ioctl_cdrom_play_blk(struct cdrom_device_info *cdi,
void __user *arg,
struct packet_command *cgc)
{
struct cdrom_device_ops *cdo = cdi->ops;
struct cdrom_blk blk;
cdinfo(CD_DO_IOCTL, "entering CDROMPLAYBLK\n");
IOCTL_IN(arg, struct cdrom_blk, blk);
cgc->cmd[0] = GPCMD_PLAY_AUDIO_10;
cgc->cmd[2] = (blk.from >> 24) & 0xff;
cgc->cmd[3] = (blk.from >> 16) & 0xff;
cgc->cmd[4] = (blk.from >> 8) & 0xff;
cgc->cmd[5] = blk.from & 0xff;
cgc->cmd[7] = (blk.len >> 8) & 0xff;
cgc->cmd[8] = blk.len & 0xff;
cgc->data_direction = CGC_DATA_NONE;
return cdo->generic_packet(cdi, cgc);
}
static noinline int mmc_ioctl_cdrom_volume(struct cdrom_device_info *cdi,
void __user *arg,
struct packet_command *cgc,
unsigned int cmd)
{
struct cdrom_volctrl volctrl;
unsigned char buffer[32];
char mask[sizeof(buffer)];
unsigned short offset;
int ret;
cdinfo(CD_DO_IOCTL, "entering CDROMVOLUME\n");
IOCTL_IN(arg, struct cdrom_volctrl, volctrl);
cgc->buffer = buffer;
cgc->buflen = 24;
ret = cdrom_mode_sense(cdi, cgc, GPMODE_AUDIO_CTL_PAGE, 0);
if (ret)
return ret;
/* originally the code depended on buffer[1] to determine
how much data is available for transfer. buffer[1] is
unfortunately ambiguous and the only reliable way seems
to be to simply skip over the block descriptor... */
offset = 8 + be16_to_cpu(*(__be16 *)(buffer + 6));
if (offset + 16 > sizeof(buffer))
return -E2BIG;
if (offset + 16 > cgc->buflen) {
cgc->buflen = offset + 16;
ret = cdrom_mode_sense(cdi, cgc,
GPMODE_AUDIO_CTL_PAGE, 0);
if (ret)
return ret;
}
/* sanity check */
if ((buffer[offset] & 0x3f) != GPMODE_AUDIO_CTL_PAGE ||
buffer[offset + 1] < 14)
return -EINVAL;
/* now we have the current volume settings. if it was only
a CDROMVOLREAD, return these values */
if (cmd == CDROMVOLREAD) {
volctrl.channel0 = buffer[offset+9];
volctrl.channel1 = buffer[offset+11];
volctrl.channel2 = buffer[offset+13];
volctrl.channel3 = buffer[offset+15];
IOCTL_OUT(arg, struct cdrom_volctrl, volctrl);
return 0;
}
/* get the volume mask */
cgc->buffer = mask;
ret = cdrom_mode_sense(cdi, cgc, GPMODE_AUDIO_CTL_PAGE, 1);
if (ret)
return ret;
buffer[offset + 9] = volctrl.channel0 & mask[offset + 9];
buffer[offset + 11] = volctrl.channel1 & mask[offset + 11];
buffer[offset + 13] = volctrl.channel2 & mask[offset + 13];
buffer[offset + 15] = volctrl.channel3 & mask[offset + 15];
/* set volume */
cgc->buffer = buffer + offset - 8;
memset(cgc->buffer, 0, 8);
return cdrom_mode_select(cdi, cgc);
}
static noinline int mmc_ioctl_cdrom_start_stop(struct cdrom_device_info *cdi,
struct packet_command *cgc,
int cmd)
{
struct cdrom_device_ops *cdo = cdi->ops;
cdinfo(CD_DO_IOCTL, "entering CDROMSTART/CDROMSTOP\n");
cgc->cmd[0] = GPCMD_START_STOP_UNIT;
cgc->cmd[1] = 1;
cgc->cmd[4] = (cmd == CDROMSTART) ? 1 : 0;
cgc->data_direction = CGC_DATA_NONE;
return cdo->generic_packet(cdi, cgc);
}
static noinline int mmc_ioctl_cdrom_pause_resume(struct cdrom_device_info *cdi,
struct packet_command *cgc,
int cmd)
{
struct cdrom_device_ops *cdo = cdi->ops;
cdinfo(CD_DO_IOCTL, "entering CDROMPAUSE/CDROMRESUME\n");
cgc->cmd[0] = GPCMD_PAUSE_RESUME;
cgc->cmd[8] = (cmd == CDROMRESUME) ? 1 : 0;
cgc->data_direction = CGC_DATA_NONE;
return cdo->generic_packet(cdi, cgc);
}
static noinline int mmc_ioctl_dvd_read_struct(struct cdrom_device_info *cdi,
void __user *arg,
struct packet_command *cgc)
{
int ret;
dvd_struct *s;
int size = sizeof(dvd_struct);
if (!CDROM_CAN(CDC_DVD))
return -ENOSYS;
s = kmalloc(size, GFP_KERNEL);
if (!s)
return -ENOMEM;
cdinfo(CD_DO_IOCTL, "entering DVD_READ_STRUCT\n");
if (copy_from_user(s, arg, size)) {
kfree(s);
return -EFAULT;
}
ret = dvd_read_struct(cdi, s, cgc);
if (ret)
goto out;
if (copy_to_user(arg, s, size))
ret = -EFAULT;
out:
kfree(s);
return ret;
}
static noinline int mmc_ioctl_dvd_auth(struct cdrom_device_info *cdi,
void __user *arg)
{
int ret;
dvd_authinfo ai;
if (!CDROM_CAN(CDC_DVD))
return -ENOSYS;
cdinfo(CD_DO_IOCTL, "entering DVD_AUTH\n");
IOCTL_IN(arg, dvd_authinfo, ai);
ret = dvd_do_auth(cdi, &ai);
if (ret)
return ret;
IOCTL_OUT(arg, dvd_authinfo, ai);
return 0;
}
static noinline int mmc_ioctl_cdrom_next_writable(struct cdrom_device_info *cdi,
void __user *arg)
{
int ret;
long next = 0;
cdinfo(CD_DO_IOCTL, "entering CDROM_NEXT_WRITABLE\n");
ret = cdrom_get_next_writable(cdi, &next);
if (ret)
return ret;
IOCTL_OUT(arg, long, next);
return 0;
}
static noinline int mmc_ioctl_cdrom_last_written(struct cdrom_device_info *cdi,
void __user *arg)
{
int ret;
long last = 0;
cdinfo(CD_DO_IOCTL, "entering CDROM_LAST_WRITTEN\n");
ret = cdrom_get_last_written(cdi, &last);
if (ret)
return ret;
IOCTL_OUT(arg, long, last);
return 0;
}
static int mmc_ioctl(struct cdrom_device_info *cdi, unsigned int cmd,
unsigned long arg)
{
struct packet_command cgc;
void __user *userptr = (void __user *)arg;
memset(&cgc, 0, sizeof(cgc));
/* build a unified command and queue it through
cdo->generic_packet() */
switch (cmd) {
case CDROMREADRAW:
case CDROMREADMODE1:
case CDROMREADMODE2:
return mmc_ioctl_cdrom_read_data(cdi, userptr, &cgc, cmd);
case CDROMREADAUDIO:
return mmc_ioctl_cdrom_read_audio(cdi, userptr);
case CDROMSUBCHNL:
return mmc_ioctl_cdrom_subchannel(cdi, userptr);
case CDROMPLAYMSF:
return mmc_ioctl_cdrom_play_msf(cdi, userptr, &cgc);
case CDROMPLAYBLK:
return mmc_ioctl_cdrom_play_blk(cdi, userptr, &cgc);
case CDROMVOLCTRL:
case CDROMVOLREAD:
return mmc_ioctl_cdrom_volume(cdi, userptr, &cgc, cmd);
case CDROMSTART:
case CDROMSTOP:
return mmc_ioctl_cdrom_start_stop(cdi, &cgc, cmd);
case CDROMPAUSE:
case CDROMRESUME:
return mmc_ioctl_cdrom_pause_resume(cdi, &cgc, cmd);
case DVD_READ_STRUCT:
return mmc_ioctl_dvd_read_struct(cdi, userptr, &cgc);
case DVD_AUTH:
return mmc_ioctl_dvd_auth(cdi, userptr);
case CDROM_NEXT_WRITABLE:
return mmc_ioctl_cdrom_next_writable(cdi, userptr);
case CDROM_LAST_WRITTEN:
return mmc_ioctl_cdrom_last_written(cdi, userptr);
}
return -ENOTTY;
}
static int cdrom_get_track_info(struct cdrom_device_info *cdi, __u16 track, __u8 type,
track_information *ti)
{
struct cdrom_device_ops *cdo = cdi->ops;
struct packet_command cgc;
int ret, buflen;
init_cdrom_command(&cgc, ti, 8, CGC_DATA_READ);
cgc.cmd[0] = GPCMD_READ_TRACK_RZONE_INFO;
cgc.cmd[1] = type & 3;
cgc.cmd[4] = (track & 0xff00) >> 8;
cgc.cmd[5] = track & 0xff;
cgc.cmd[8] = 8;
cgc.quiet = 1;
if ((ret = cdo->generic_packet(cdi, &cgc)))
return ret;
buflen = be16_to_cpu(ti->track_information_length) +
sizeof(ti->track_information_length);
if (buflen > sizeof(track_information))
buflen = sizeof(track_information);
cgc.cmd[8] = cgc.buflen = buflen;
if ((ret = cdo->generic_packet(cdi, &cgc)))
return ret;
/* return actual fill size */
return buflen;
}
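/*
 * Like cdrom_get_disc_info() below, this issues the command twice:
 * first with a minimal 8-byte buffer to learn how much data the drive
 * can supply, then again with the advertised (and clamped) length.
 */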
/* requires CD R/RW */
static int cdrom_get_disc_info(struct cdrom_device_info *cdi, disc_information *di)
{
struct cdrom_device_ops *cdo = cdi->ops;
struct packet_command cgc;
int ret, buflen;
/* set up command and get the disc info */
init_cdrom_command(&cgc, di, sizeof(*di), CGC_DATA_READ);
cgc.cmd[0] = GPCMD_READ_DISC_INFO;
cgc.cmd[8] = cgc.buflen = 2;
cgc.quiet = 1;
if ((ret = cdo->generic_packet(cdi, &cgc)))
return ret;
/* not all drives have the same disc_info length, so requeue
* packet with the length the drive tells us it can supply
*/
buflen = be16_to_cpu(di->disc_information_length) +
sizeof(di->disc_information_length);
if (buflen > sizeof(disc_information))
buflen = sizeof(disc_information);
cgc.cmd[8] = cgc.buflen = buflen;
if ((ret = cdo->generic_packet(cdi, &cgc)))
return ret;
/* return actual fill size */
return buflen;
}
/* return the last written block on the CD-R media. this is for the UDF
file system. */
int cdrom_get_last_written(struct cdrom_device_info *cdi, long *last_written)
{
struct cdrom_tocentry toc;
disc_information di;
track_information ti;
__u32 last_track;
int ret = -1, ti_size;
if (!CDROM_CAN(CDC_GENERIC_PACKET))
goto use_toc;
ret = cdrom_get_disc_info(cdi, &di);
if (ret < (int)(offsetof(typeof(di), last_track_lsb)
+ sizeof(di.last_track_lsb)))
goto use_toc;
/* if unit didn't return msb, it's zeroed by cdrom_get_disc_info */
last_track = (di.last_track_msb << 8) | di.last_track_lsb;
ti_size = cdrom_get_track_info(cdi, last_track, 1, &ti);
if (ti_size < (int)offsetof(typeof(ti), track_start))
goto use_toc;
/* if this track is blank, try the previous. */
if (ti.blank) {
if (last_track==1)
goto use_toc;
last_track--;
ti_size = cdrom_get_track_info(cdi, last_track, 1, &ti);
}
if (ti_size < (int)(offsetof(typeof(ti), track_size)
+ sizeof(ti.track_size)))
goto use_toc;
/* if last recorded field is valid, return it. */
if (ti.lra_v && ti_size >= (int)(offsetof(typeof(ti), last_rec_address)
+ sizeof(ti.last_rec_address))) {
*last_written = be32_to_cpu(ti.last_rec_address);
} else {
/* make it up instead */
*last_written = be32_to_cpu(ti.track_start) +
be32_to_cpu(ti.track_size);
if (ti.free_blocks)
*last_written -= (be32_to_cpu(ti.free_blocks) + 7);
}
return 0;
/* this is where we end up if the drive can't do
GPCMD_READ_DISC_INFO or GPCMD_READ_TRACK_RZONE_INFO, or if
they don't give enough information or fail. then we return
the toc contents. */
use_toc:
toc.cdte_format = CDROM_MSF;
toc.cdte_track = CDROM_LEADOUT;
if ((ret = cdi->ops->audio_ioctl(cdi, CDROMREADTOCENTRY, &toc)))
return ret;
sanitize_format(&toc.cdte_addr, &toc.cdte_format, CDROM_LBA);
*last_written = toc.cdte_addr.lba;
return 0;
}
/* return the next writable block. also for the UDF file system. */
static int cdrom_get_next_writable(struct cdrom_device_info *cdi, long *next_writable)
{
disc_information di;
track_information ti;
__u16 last_track;
int ret, ti_size;
if (!CDROM_CAN(CDC_GENERIC_PACKET))
goto use_last_written;
ret = cdrom_get_disc_info(cdi, &di);
if (ret < 0 || ret < offsetof(typeof(di), last_track_lsb)
+ sizeof(di.last_track_lsb))
goto use_last_written;
/* if unit didn't return msb, it's zeroed by cdrom_get_disc_info */
last_track = (di.last_track_msb << 8) | di.last_track_lsb;
ti_size = cdrom_get_track_info(cdi, last_track, 1, &ti);
if (ti_size < 0 || ti_size < offsetof(typeof(ti), track_start))
goto use_last_written;
/* if this track is blank, try the previous. */
if (ti.blank) {
if (last_track == 1)
goto use_last_written;
last_track--;
ti_size = cdrom_get_track_info(cdi, last_track, 1, &ti);
if (ti_size < 0)
goto use_last_written;
}
/* if next recordable address field is valid, use it. */
if (ti.nwa_v && ti_size >= offsetof(typeof(ti), next_writable)
+ sizeof(ti.next_writable)) {
*next_writable = be32_to_cpu(ti.next_writable);
return 0;
}
use_last_written:
if ((ret = cdrom_get_last_written(cdi, next_writable))) {
*next_writable = 0;
return ret;
} else {
*next_writable += 7;
return 0;
}
}
EXPORT_SYMBOL(cdrom_get_last_written);
EXPORT_SYMBOL(register_cdrom);
EXPORT_SYMBOL(unregister_cdrom);
EXPORT_SYMBOL(cdrom_open);
EXPORT_SYMBOL(cdrom_release);
EXPORT_SYMBOL(cdrom_ioctl);
EXPORT_SYMBOL(cdrom_media_changed);
EXPORT_SYMBOL(cdrom_number_of_slots);
EXPORT_SYMBOL(cdrom_mode_select);
EXPORT_SYMBOL(cdrom_mode_sense);
EXPORT_SYMBOL(init_cdrom_command);
EXPORT_SYMBOL(cdrom_get_media_event);
#ifdef CONFIG_SYSCTL
#define CDROM_STR_SIZE 1000
static struct cdrom_sysctl_settings {
char info[CDROM_STR_SIZE]; /* general info */
int autoclose; /* close tray upon mount, etc */
int autoeject; /* eject on umount */
int debug; /* turn on debugging messages */
int lock; /* lock the door on device open */
int check; /* check media type */
} cdrom_sysctl_settings;
enum cdrom_print_option {
CTL_NAME,
CTL_SPEED,
CTL_SLOTS,
CTL_CAPABILITY
};
static int cdrom_print_info(const char *header, int val, char *info,
int *pos, enum cdrom_print_option option)
{
const int max_size = sizeof(cdrom_sysctl_settings.info);
struct cdrom_device_info *cdi;
int ret;
ret = scnprintf(info + *pos, max_size - *pos, "%s", header);
if (!ret)
return 1;
*pos += ret;
list_for_each_entry(cdi, &cdrom_list, list) {
switch (option) {
case CTL_NAME:
ret = scnprintf(info + *pos, max_size - *pos,
"\t%s", cdi->name);
break;
case CTL_SPEED:
ret = scnprintf(info + *pos, max_size - *pos,
"\t%d", cdi->speed);
break;
case CTL_SLOTS:
ret = scnprintf(info + *pos, max_size - *pos,
"\t%d", cdi->capacity);
break;
case CTL_CAPABILITY:
ret = scnprintf(info + *pos, max_size - *pos,
"\t%d", CDROM_CAN(val) != 0);
break;
default:
pr_info("invalid option%d\n", option);
return 1;
}
if (!ret)
return 1;
*pos += ret;
}
return 0;
}
static int cdrom_sysctl_info(ctl_table *ctl, int write,
void __user *buffer, size_t *lenp, loff_t *ppos)
{
int pos;
char *info = cdrom_sysctl_settings.info;
const int max_size = sizeof(cdrom_sysctl_settings.info);
if (!*lenp || (*ppos && !write)) {
*lenp = 0;
return 0;
}
mutex_lock(&cdrom_mutex);
pos = sprintf(info, "CD-ROM information, " VERSION "\n");
if (cdrom_print_info("\ndrive name:\t", 0, info, &pos, CTL_NAME))
goto done;
if (cdrom_print_info("\ndrive speed:\t", 0, info, &pos, CTL_SPEED))
goto done;
if (cdrom_print_info("\ndrive # of slots:", 0, info, &pos, CTL_SLOTS))
goto done;
if (cdrom_print_info("\nCan close tray:\t",
CDC_CLOSE_TRAY, info, &pos, CTL_CAPABILITY))
goto done;
if (cdrom_print_info("\nCan open tray:\t",
CDC_OPEN_TRAY, info, &pos, CTL_CAPABILITY))
goto done;
if (cdrom_print_info("\nCan lock tray:\t",
CDC_LOCK, info, &pos, CTL_CAPABILITY))
goto done;
if (cdrom_print_info("\nCan change speed:",
CDC_SELECT_SPEED, info, &pos, CTL_CAPABILITY))
goto done;
if (cdrom_print_info("\nCan select disk:",
CDC_SELECT_DISC, info, &pos, CTL_CAPABILITY))
goto done;
if (cdrom_print_info("\nCan read multisession:",
CDC_MULTI_SESSION, info, &pos, CTL_CAPABILITY))
goto done;
if (cdrom_print_info("\nCan read MCN:\t",
CDC_MCN, info, &pos, CTL_CAPABILITY))
goto done;
if (cdrom_print_info("\nReports media changed:",
CDC_MEDIA_CHANGED, info, &pos, CTL_CAPABILITY))
goto done;
if (cdrom_print_info("\nCan play audio:\t",
CDC_PLAY_AUDIO, info, &pos, CTL_CAPABILITY))
goto done;
if (cdrom_print_info("\nCan write CD-R:\t",
CDC_CD_R, info, &pos, CTL_CAPABILITY))
goto done;
if (cdrom_print_info("\nCan write CD-RW:",
CDC_CD_RW, info, &pos, CTL_CAPABILITY))
goto done;
if (cdrom_print_info("\nCan read DVD:\t",
CDC_DVD, info, &pos, CTL_CAPABILITY))
goto done;
if (cdrom_print_info("\nCan write DVD-R:",
CDC_DVD_R, info, &pos, CTL_CAPABILITY))
goto done;
if (cdrom_print_info("\nCan write DVD-RAM:",
CDC_DVD_RAM, info, &pos, CTL_CAPABILITY))
goto done;
if (cdrom_print_info("\nCan read MRW:\t",
CDC_MRW, info, &pos, CTL_CAPABILITY))
goto done;
if (cdrom_print_info("\nCan write MRW:\t",
CDC_MRW_W, info, &pos, CTL_CAPABILITY))
goto done;
if (cdrom_print_info("\nCan write RAM:\t",
CDC_RAM, info, &pos, CTL_CAPABILITY))
goto done;
if (!scnprintf(info + pos, max_size - pos, "\n\n"))
goto done;
doit:
mutex_unlock(&cdrom_mutex);
return proc_dostring(ctl, write, buffer, lenp, ppos);
done:
pr_info("info buffer too small\n");
goto doit;
}
/* Unfortunately, per device settings are not implemented through
procfs/sysctl yet. When they are, this will naturally disappear. For now
just update all drives. Later this will become the template on which
new registered drives will be based. */
static void cdrom_update_settings(void)
{
struct cdrom_device_info *cdi;
mutex_lock(&cdrom_mutex);
list_for_each_entry(cdi, &cdrom_list, list) {
if (autoclose && CDROM_CAN(CDC_CLOSE_TRAY))
cdi->options |= CDO_AUTO_CLOSE;
else if (!autoclose)
cdi->options &= ~CDO_AUTO_CLOSE;
if (autoeject && CDROM_CAN(CDC_OPEN_TRAY))
cdi->options |= CDO_AUTO_EJECT;
else if (!autoeject)
cdi->options &= ~CDO_AUTO_EJECT;
if (lockdoor && CDROM_CAN(CDC_LOCK))
cdi->options |= CDO_LOCK;
else if (!lockdoor)
cdi->options &= ~CDO_LOCK;
if (check_media_type)
cdi->options |= CDO_CHECK_TYPE;
else
cdi->options &= ~CDO_CHECK_TYPE;
}
mutex_unlock(&cdrom_mutex);
}
static int cdrom_sysctl_handler(ctl_table *ctl, int write,
void __user *buffer, size_t *lenp, loff_t *ppos)
{
int ret;
ret = proc_dointvec(ctl, write, buffer, lenp, ppos);
if (write) {
/* we only care for 1 or 0. */
autoclose = !!cdrom_sysctl_settings.autoclose;
autoeject = !!cdrom_sysctl_settings.autoeject;
debug = !!cdrom_sysctl_settings.debug;
lockdoor = !!cdrom_sysctl_settings.lock;
check_media_type = !!cdrom_sysctl_settings.check;
/* update the option flags according to the changes. we
don't have per device options through sysctl yet,
but we will have and then this will disappear. */
cdrom_update_settings();
}
return ret;
}
/* Place files in /proc/sys/dev/cdrom */
static ctl_table cdrom_table[] = {
{
.procname = "info",
.data = &cdrom_sysctl_settings.info,
.maxlen = CDROM_STR_SIZE,
.mode = 0444,
.proc_handler = cdrom_sysctl_info,
},
{
.procname = "autoclose",
.data = &cdrom_sysctl_settings.autoclose,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = cdrom_sysctl_handler,
},
{
.procname = "autoeject",
.data = &cdrom_sysctl_settings.autoeject,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = cdrom_sysctl_handler,
},
{
.procname = "debug",
.data = &cdrom_sysctl_settings.debug,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = cdrom_sysctl_handler,
},
{
.procname = "lock",
.data = &cdrom_sysctl_settings.lock,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = cdrom_sysctl_handler,
},
{
.procname = "check_media",
.data = &cdrom_sysctl_settings.check,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = cdrom_sysctl_handler
},
{ }
};
static ctl_table cdrom_cdrom_table[] = {
{
.procname = "cdrom",
.maxlen = 0,
.mode = 0555,
.child = cdrom_table,
},
{ }
};
/* Make sure that /proc/sys/dev is there */
static ctl_table cdrom_root_table[] = {
{
.procname = "dev",
.maxlen = 0,
.mode = 0555,
.child = cdrom_cdrom_table,
},
{ }
};
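/*
 * The tables above materialize as
 * /proc/sys/dev/cdrom/{info,autoclose,autoeject,debug,lock,check_media}.
 * For example (shell, illustrative):
 *
 * cat /proc/sys/dev/cdrom/info
 * echo 1 > /proc/sys/dev/cdrom/autoclose
 */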
static struct ctl_table_header *cdrom_sysctl_header;
static void cdrom_sysctl_register(void)
{
static int initialized;
if (initialized == 1)
return;
cdrom_sysctl_header = register_sysctl_table(cdrom_root_table);
/* set the defaults */
cdrom_sysctl_settings.autoclose = autoclose;
cdrom_sysctl_settings.autoeject = autoeject;
cdrom_sysctl_settings.debug = debug;
cdrom_sysctl_settings.lock = lockdoor;
cdrom_sysctl_settings.check = check_media_type;
initialized = 1;
}
static void cdrom_sysctl_unregister(void)
{
if (cdrom_sysctl_header)
unregister_sysctl_table(cdrom_sysctl_header);
}
#else /* CONFIG_SYSCTL */
static void cdrom_sysctl_register(void)
{
}
static void cdrom_sysctl_unregister(void)
{
}
#endif /* CONFIG_SYSCTL */
static int __init cdrom_init(void)
{
cdrom_sysctl_register();
return 0;
}
static void __exit cdrom_exit(void)
{
pr_info("Uniform CD-ROM driver unloaded\n");
cdrom_sysctl_unregister();
}
module_init(cdrom_init);
module_exit(cdrom_exit);
MODULE_LICENSE("GPL");
| gpl-2.0 |
Fevax/kernel_samsung_exynos5422 | drivers/cdrom/cdrom.c | 3495 | 100490 | /* linux/drivers/cdrom/cdrom.c
Copyright (c) 1996, 1997 David A. van Leeuwen.
Copyright (c) 1997, 1998 Erik Andersen <andersee@debian.org>
Copyright (c) 1998, 1999 Jens Axboe <axboe@image.dk>
May be copied or modified under the terms of the GNU General Public
License. See linux/COPYING for more information.
Uniform CD-ROM driver for Linux.
See Documentation/cdrom/cdrom-standard.tex for usage information.
The routines in the file provide a uniform interface between the
software that uses CD-ROMs and the various low-level drivers that
actually talk to the hardware. Suggestions are welcome.
Patches that work are more welcome though. ;-)
To Do List:
----------------------------------
-- Modify sysctl/proc interface. I plan on having one directory per
drive, with entries for outputting general drive information, and sysctl
based tunable parameters such as whether the tray should auto-close for
that drive. Suggestions (or patches) for this welcome!
Revision History
----------------------------------
1.00 Date Unknown -- David van Leeuwen <david@tm.tno.nl>
-- Initial version by David A. van Leeuwen. I don't have a detailed
changelog for the 1.x series, David?
2.00 Dec 2, 1997 -- Erik Andersen <andersee@debian.org>
-- New maintainer! As David A. van Leeuwen has been too busy to actively
maintain and improve this driver, I am now carrying on the torch. If
you have a problem with this driver, please feel free to contact me.
-- Added (rudimentary) sysctl interface. I realize this is really weak
right now, and is _very_ badly implemented. It will be improved...
-- Modified CDROM_DISC_STATUS so that it is now incorporated into
the Uniform CD-ROM driver via the cdrom_count_tracks function.
The cdrom_count_tracks function helps resolve some of the false
assumptions of the CDROM_DISC_STATUS ioctl, and is also used to check
for the correct media type when mounting or playing audio from a CD.
-- Remove the calls to verify_area and only use the copy_from_user and
copy_to_user stuff, since these calls now provide their own memory
checking with the 2.1.x kernels.
-- Major update to return codes so that errors from low-level drivers
are passed on through (thanks to Gerd Knorr for pointing out this
problem).
-- Made it so if a function isn't implemented in a low-level driver,
ENOSYS is now returned instead of EINVAL.
-- Simplified some complex logic so that the source code is easier to read.
-- Other stuff I probably forgot to mention (lots of changes).
2.01 to 2.11 Dec 1997-Jan 1998
-- TO-DO! Write changelogs for 2.01 to 2.12.
2.12 Jan 24, 1998 -- Erik Andersen <andersee@debian.org>
-- Fixed a bug in the IOCTL_IN and IOCTL_OUT macros. It turns out that
copy_*_user does not return EFAULT on error, but instead returns the number
of bytes not copied. I was returning whatever non-zero stuff came back from
the copy_*_user functions directly, which would result in strange errors.
2.13 July 17, 1998 -- Erik Andersen <andersee@debian.org>
-- Fixed a bug in CDROM_SELECT_SPEED where you couldn't lower the speed
of the drive. Thanks to Tobias Ringstr|m <tori@prosolvia.se> for pointing
this out and providing a simple fix.
-- Fixed the procfs-unload-module bug with the fill_inode procfs callback.
thanks to Andrea Arcangeli
-- Fixed it so that the /proc entry now also shows up when cdrom is
compiled into the kernel. Before it only worked when loaded as a module.
2.14 August 17, 1998 -- Erik Andersen <andersee@debian.org>
-- Fixed a bug in cdrom_media_changed and handling of reporting that
the media had changed for devices that _don't_ implement media_changed.
Thanks to Grant R. Guenther <grant@torque.net> for spotting this bug.
-- Made a few things more pedantically correct.
2.50 Oct 19, 1998 - Jens Axboe <axboe@image.dk>
-- New maintainers! Erik was too busy to continue the work on the driver,
so now Chris Zwilling <chris@cloudnet.com> and Jens Axboe <axboe@image.dk>
will do their best to follow in his footsteps
2.51 Dec 20, 1998 - Jens Axboe <axboe@image.dk>
-- Check if drive is capable of doing what we ask before blindly changing
cdi->options in various ioctl.
-- Added version to proc entry.
2.52 Jan 16, 1999 - Jens Axboe <axboe@image.dk>
-- Fixed an error in open_for_data where we would sometimes not return
the correct error value. Thanks Huba Gaspar <huba@softcell.hu>.
-- Fixed module usage count - usage was based on /proc/sys/dev
instead of /proc/sys/dev/cdrom. This could lead to an oops when other
modules had entries in dev. Feb 02 - real bug was in sysctl.c where
dev would be removed even though it was used. cdrom.c just illuminated
that bug.
2.53 Feb 22, 1999 - Jens Axboe <axboe@image.dk>
-- Fixup of several ioctl calls, in particular CDROM_SET_OPTIONS has
been "rewritten" because capabilities and options aren't in sync. They
should be...
-- Added CDROM_LOCKDOOR ioctl. Locks the door and keeps it that way.
-- Added CDROM_RESET ioctl.
-- Added CDROM_DEBUG ioctl. Enable debug messages on-the-fly.
-- Added CDROM_GET_CAPABILITY ioctl. This relieves userspace programs
from parsing /proc/sys/dev/cdrom/info.
2.54 Mar 15, 1999 - Jens Axboe <axboe@image.dk>
-- Check capability mask from low level driver when counting tracks as
per suggestion from Corey J. Scotts <cstotts@blue.weeg.uiowa.edu>.
2.55 Apr 25, 1999 - Jens Axboe <axboe@image.dk>
-- autoclose was mistakenly checked against CDC_OPEN_TRAY instead of
CDC_CLOSE_TRAY.
-- proc info didn't mask against capabilities mask.
3.00 Aug 5, 1999 - Jens Axboe <axboe@image.dk>
-- Unified audio ioctl handling across CD-ROM drivers. A lot of the
code was duplicated before. Drives that support the generic packet
interface are now being fed packets from here instead.
-- First attempt at adding support for MMC2 commands - for DVD and
CD-R(W) drives. Only the DVD parts are in now - the interface used is
the same as for the audio ioctls.
-- ioctl cleanups. if a drive couldn't play audio, it didn't get
a chance to perform device specific ioctls as well.
-- Defined CDROM_CAN(CDC_XXX) for checking the capabilities.
-- Put in sysctl files for autoclose, autoeject, check_media, debug,
and lock.
-- /proc/sys/dev/cdrom/info has been updated to also contain info about
CD-Rx and DVD capabilities.
-- Now default to checking media type.
-- CDROM_SEND_PACKET ioctl added. The infrastructure was in place for
doing this anyway, with the generic_packet addition.
3.01 Aug 6, 1999 - Jens Axboe <axboe@image.dk>
-- Fix up the sysctl handling so that the option flags get set
correctly.
-- Fix up ioctl handling so the device specific ones actually get
called :).
3.02 Aug 8, 1999 - Jens Axboe <axboe@image.dk>
-- Fixed volume control on SCSI drives (or others with longer audio
page).
-- Fixed a couple of DVD minors. Thanks to Andrew T. Veliath
<andrewtv@usa.net> for telling me and for having defined the various
DVD structures and ioctls in the first place! He designed the original
DVD patches for ide-cd and while I rearranged and unified them, the
interface is still the same.
3.03 Sep 1, 1999 - Jens Axboe <axboe@image.dk>
-- Moved the rest of the audio ioctls from the CD-ROM drivers here. Only
CDROMREADTOCENTRY and CDROMREADTOCHDR are left.
-- Moved the CDROMREADxxx ioctls in here.
-- Defined the cdrom_get_last_written and cdrom_get_next_block as ioctls
and exported functions.
-- Erik Andersen <andersen@xmission.com> modified all SCMD_ commands
to now read GPCMD_ for the new generic packet interface. All low level
drivers are updated as well.
-- Various other cleanups.
3.04 Sep 12, 1999 - Jens Axboe <axboe@image.dk>
-- Fixed a couple of possible memory leaks (if an operation failed and
we didn't free the buffer before returning the error).
-- Integrated Uniform CD Changer handling from Richard Sharman
<rsharman@pobox.com>.
-- Defined CD_DVD and CD_CHANGER log levels.
-- Fixed the CDROMREADxxx ioctls.
-- CDROMPLAYTRKIND uses the GPCMD_PLAY_AUDIO_MSF command - too few
drives supported it. We lose the index part, however.
-- Small modifications to accommodate opens of /dev/hdc1, required
for ide-cd to handle multisession discs.
-- Export cdrom_mode_sense and cdrom_mode_select.
-- init_cdrom_command() for setting up a cgc command.
3.05 Oct 24, 1999 - Jens Axboe <axboe@image.dk>
-- Changed the interface for CDROM_SEND_PACKET. Before it was virtually
impossible to send the drive data in a sensible way.
-- Lowered stack usage in mmc_ioctl(), dvd_read_disckey(), and
dvd_read_manufact.
-- Added setup of write mode for packet writing.
-- Fixed CDDA ripping with cdda2wav - accept much larger requests of
number of frames and split the reads in blocks of 8.
3.06 Dec 13, 1999 - Jens Axboe <axboe@image.dk>
-- Added support for changing the region of DVD drives.
-- Added sense data to generic command.
3.07 Feb 2, 2000 - Jens Axboe <axboe@suse.de>
-- Do same "read header length" trick in cdrom_get_disc_info() as
we do in cdrom_get_track_info() -- some drives don't obey specs and
fail if they can't supply the full Mt Fuji size table.
-- Deleted stuff related to setting up write modes. It has a different
home now.
-- Clear header length in mode_select unconditionally.
-- Removed the register_disk() that was added, not needed here.
3.08 May 1, 2000 - Jens Axboe <axboe@suse.de>
-- Fix direction flag in setup_send_key and setup_report_key. This
gave some SCSI adapters problems.
-- Always return -EROFS for write opens
-- Convert to module_init/module_exit style init and remove some
of the #ifdef MODULE stuff
-- Fix several dvd errors - DVD_LU_SEND_ASF should pass agid,
DVD_HOST_SEND_RPC_STATE did not set buffer size in cdb, and
dvd_do_auth passed uninitialized data to drive because init_cdrom_command
did not clear a 0 sized buffer.
3.09 May 12, 2000 - Jens Axboe <axboe@suse.de>
-- Fix Video-CD on SCSI drives that don't support READ_CD command. In
that case switch block size and issue plain READ_10 again, then switch
back.
3.10 Jun 10, 2000 - Jens Axboe <axboe@suse.de>
-- Fix volume control on CDs - old SCSI-II drives now use their own
code, as doing MODE6 stuff in here is really not my intention.
-- Use READ_DISC_INFO for more reliable end-of-disc.
3.11 Jun 12, 2000 - Jens Axboe <axboe@suse.de>
-- Fix bug in getting rpc phase 2 region info.
-- Reinstate "correct" CDROMPLAYTRKIND
3.12 Oct 18, 2000 - Jens Axboe <axboe@suse.de>
-- Use quiet bit on packet commands not known to work
3.20 Dec 17, 2003 - Jens Axboe <axboe@suse.de>
-- Various fixes and lots of cleanups not listed :-)
-- Locking fixes
-- Mt Rainier support
-- DVD-RAM write open fixes
Nov 5 2001, Aug 8 2002. Modified by Andy Polyakov
<appro@fy.chalmers.se> to support MMC-3 compliant DVD+RW units.
Modified by Nigel Kukard <nkukard@lbsd.net> - support DVD+RW
2.4.x patch by Andy Polyakov <appro@fy.chalmers.se>
-------------------------------------------------------------------------*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#define REVISION "Revision: 3.20"
#define VERSION "Id: cdrom.c 3.20 2003/12/17"
/* I use an error-log mask to give fine grain control over the type of
messages dumped to the system logs. The available masks include: */
#define CD_NOTHING 0x0
#define CD_WARNING 0x1
#define CD_REG_UNREG 0x2
#define CD_DO_IOCTL 0x4
#define CD_OPEN 0x8
#define CD_CLOSE 0x10
#define CD_COUNT_TRACKS 0x20
#define CD_CHANGER 0x40
#define CD_DVD 0x80
/* Define this to remove _all_ the debugging messages */
/* #define ERRLOGMASK CD_NOTHING */
#define ERRLOGMASK CD_WARNING
/* #define ERRLOGMASK (CD_WARNING|CD_OPEN|CD_COUNT_TRACKS|CD_CLOSE) */
/* #define ERRLOGMASK (CD_WARNING|CD_REG_UNREG|CD_DO_IOCTL|CD_OPEN|CD_CLOSE|CD_COUNT_TRACKS) */
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/major.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/cdrom.h>
#include <linux/sysctl.h>
#include <linux/proc_fs.h>
#include <linux/blkpg.h>
#include <linux/init.h>
#include <linux/fcntl.h>
#include <linux/blkdev.h>
#include <linux/times.h>
#include <asm/uaccess.h>
/* used to tell the module to turn on full debugging messages */
static bool debug;
/* default compatibility mode */
static bool autoclose = 1;
static bool autoeject;
static bool lockdoor = 1;
/* will we ever get to use this... sigh. */
static bool check_media_type;
/* automatically restart mrw format */
static bool mrw_format_restart = 1;
module_param(debug, bool, 0);
module_param(autoclose, bool, 0);
module_param(autoeject, bool, 0);
module_param(lockdoor, bool, 0);
module_param(check_media_type, bool, 0);
module_param(mrw_format_restart, bool, 0);
static DEFINE_MUTEX(cdrom_mutex);
static const char *mrw_format_status[] = {
"not mrw",
"bgformat inactive",
"bgformat active",
"mrw complete",
};
static const char *mrw_address_space[] = { "DMA", "GAA" };
#if (ERRLOGMASK!=CD_NOTHING)
#define cdinfo(type, fmt, args...) \
do { \
if ((ERRLOGMASK & type) || debug == 1) \
pr_info(fmt, ##args); \
} while (0)
#else
#define cdinfo(type, fmt, args...) \
do { \
if ((0 && (ERRLOGMASK & type)) || debug == 1) \
pr_info(fmt, ##args); \
} while (0)
#endif
/* These are used to simplify getting data in from and back to user land */
#define IOCTL_IN(arg, type, in) \
if (copy_from_user(&(in), (type __user *) (arg), sizeof (in))) \
return -EFAULT;
#define IOCTL_OUT(arg, type, out) \
if (copy_to_user((type __user *) (arg), &(out), sizeof (out))) \
return -EFAULT;
/* The (cdo->capability & ~cdi->mask & CDC_XXX) construct was used in
a lot of places. This macro makes the code more clear. */
#define CDROM_CAN(type) (cdi->ops->capability & ~cdi->mask & (type))
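/*
 * Illustrative sketch only: CDROM_CAN() expects a "cdi" pointer to be
 * in scope, the way the ioctl handlers further down use it, e.g.:
 *
 *	if (CDROM_CAN(CDC_OPEN_TRAY))
 *		ret = cdi->ops->tray_move(cdi, 1);
 */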
/* used in the audio ioctls */
#define CHECKAUDIO if ((ret=check_for_audio_disc(cdi, cdo))) return ret
/*
* Another popular OS uses 7 seconds as the hard timeout for default
* commands, so it is a good choice for us as well.
*/
#define CDROM_DEF_TIMEOUT (7 * HZ)
/* Not-exported routines. */
static int open_for_data(struct cdrom_device_info * cdi);
static int check_for_audio_disc(struct cdrom_device_info * cdi,
struct cdrom_device_ops * cdo);
static void sanitize_format(union cdrom_addr *addr,
u_char * curr, u_char requested);
static int mmc_ioctl(struct cdrom_device_info *cdi, unsigned int cmd,
unsigned long arg);
int cdrom_get_last_written(struct cdrom_device_info *, long *);
static int cdrom_get_next_writable(struct cdrom_device_info *, long *);
static void cdrom_count_tracks(struct cdrom_device_info *, tracktype*);
static int cdrom_mrw_exit(struct cdrom_device_info *cdi);
static int cdrom_get_disc_info(struct cdrom_device_info *cdi, disc_information *di);
static void cdrom_sysctl_register(void);
static LIST_HEAD(cdrom_list);
static int cdrom_dummy_generic_packet(struct cdrom_device_info *cdi,
struct packet_command *cgc)
{
if (cgc->sense) {
cgc->sense->sense_key = 0x05;
cgc->sense->asc = 0x20;
cgc->sense->ascq = 0x00;
}
cgc->stat = -EIO;
return -EIO;
}
/* This macro makes sure we don't have to check on cdrom_device_ops
* existence in the run-time routines below. Change_capability is a
* hack to have the capability flags defined const, while we can still
* change it here without gcc complaining at every line.
*/
#define ENSURE(call, bits) if (cdo->call == NULL) *change_capability &= ~(bits)
int register_cdrom(struct cdrom_device_info *cdi)
{
static char banner_printed;
struct cdrom_device_ops *cdo = cdi->ops;
int *change_capability = (int *)&cdo->capability; /* hack */
cdinfo(CD_OPEN, "entering register_cdrom\n");
if (cdo->open == NULL || cdo->release == NULL)
return -EINVAL;
if (!banner_printed) {
pr_info("Uniform CD-ROM driver " REVISION "\n");
banner_printed = 1;
cdrom_sysctl_register();
}
ENSURE(drive_status, CDC_DRIVE_STATUS );
if (cdo->check_events == NULL && cdo->media_changed == NULL)
*change_capability &= ~(CDC_MEDIA_CHANGED | CDC_SELECT_DISC);
ENSURE(tray_move, CDC_CLOSE_TRAY | CDC_OPEN_TRAY);
ENSURE(lock_door, CDC_LOCK);
ENSURE(select_speed, CDC_SELECT_SPEED);
ENSURE(get_last_session, CDC_MULTI_SESSION);
ENSURE(get_mcn, CDC_MCN);
ENSURE(reset, CDC_RESET);
ENSURE(generic_packet, CDC_GENERIC_PACKET);
cdi->mc_flags = 0;
cdo->n_minors = 0;
cdi->options = CDO_USE_FFLAGS;
if (autoclose==1 && CDROM_CAN(CDC_CLOSE_TRAY))
cdi->options |= (int) CDO_AUTO_CLOSE;
if (autoeject==1 && CDROM_CAN(CDC_OPEN_TRAY))
cdi->options |= (int) CDO_AUTO_EJECT;
if (lockdoor==1)
cdi->options |= (int) CDO_LOCK;
if (check_media_type==1)
cdi->options |= (int) CDO_CHECK_TYPE;
if (CDROM_CAN(CDC_MRW_W))
cdi->exit = cdrom_mrw_exit;
if (cdi->disk)
cdi->cdda_method = CDDA_BPC_FULL;
else
cdi->cdda_method = CDDA_OLD;
if (!cdo->generic_packet)
cdo->generic_packet = cdrom_dummy_generic_packet;
cdinfo(CD_REG_UNREG, "drive \"/dev/%s\" registered\n", cdi->name);
mutex_lock(&cdrom_mutex);
list_add(&cdi->list, &cdrom_list);
mutex_unlock(&cdrom_mutex);
return 0;
}
#undef ENSURE
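/*
 * Minimal registration sketch (illustrative only; the mycd_* names are
 * hypothetical). register_cdrom() rejects drivers that omit open or
 * release, and substitutes cdrom_dummy_generic_packet when no
 * generic_packet routine is supplied:
 *
 *	static struct cdrom_device_ops mycd_dops = {
 *		.open		= mycd_open,
 *		.release	= mycd_release,
 *		.drive_status	= mycd_drive_status,
 *		.capability	= CDC_OPEN_TRAY | CDC_CLOSE_TRAY | CDC_LOCK,
 *	};
 *
 *	mycd_info.ops = &mycd_dops;
 *	if (register_cdrom(&mycd_info))
 *		return -EINVAL;
 */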
void unregister_cdrom(struct cdrom_device_info *cdi)
{
cdinfo(CD_OPEN, "entering unregister_cdrom\n");
mutex_lock(&cdrom_mutex);
list_del(&cdi->list);
mutex_unlock(&cdrom_mutex);
if (cdi->exit)
cdi->exit(cdi);
cdi->ops->n_minors--;
cdinfo(CD_REG_UNREG, "drive \"/dev/%s\" unregistered\n", cdi->name);
}
int cdrom_get_media_event(struct cdrom_device_info *cdi,
struct media_event_desc *med)
{
struct packet_command cgc;
unsigned char buffer[8];
struct event_header *eh = (struct event_header *) buffer;
init_cdrom_command(&cgc, buffer, sizeof(buffer), CGC_DATA_READ);
cgc.cmd[0] = GPCMD_GET_EVENT_STATUS_NOTIFICATION;
cgc.cmd[1] = 1; /* IMMED */
cgc.cmd[4] = 1 << 4; /* media event */
cgc.cmd[8] = sizeof(buffer);
cgc.quiet = 1;
if (cdi->ops->generic_packet(cdi, &cgc))
return 1;
if (be16_to_cpu(eh->data_len) < sizeof(*med))
return 1;
if (eh->nea || eh->notification_class != 0x4)
return 1;
memcpy(med, &buffer[sizeof(*eh)], sizeof(*med));
return 0;
}
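/*
 * Caller sketch (illustrative): poll for a media event and test the
 * descriptor bits; the field names are those of struct
 * media_event_desc:
 *
 *	struct media_event_desc med;
 *
 *	if (!cdrom_get_media_event(cdi, &med) && med.media_present)
 *		... a medium is present in the drive ...
 */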
/*
* the first prototypes used 0x2c as the page code for the mrw mode page,
* subsequently this was changed to 0x03. probe the one used by this drive
*/
static int cdrom_mrw_probe_pc(struct cdrom_device_info *cdi)
{
struct packet_command cgc;
char buffer[16];
init_cdrom_command(&cgc, buffer, sizeof(buffer), CGC_DATA_READ);
cgc.timeout = HZ;
cgc.quiet = 1;
if (!cdrom_mode_sense(cdi, &cgc, MRW_MODE_PC, 0)) {
cdi->mrw_mode_page = MRW_MODE_PC;
return 0;
} else if (!cdrom_mode_sense(cdi, &cgc, MRW_MODE_PC_PRE1, 0)) {
cdi->mrw_mode_page = MRW_MODE_PC_PRE1;
return 0;
}
return 1;
}
static int cdrom_is_mrw(struct cdrom_device_info *cdi, int *write)
{
struct packet_command cgc;
struct mrw_feature_desc *mfd;
unsigned char buffer[16];
int ret;
*write = 0;
init_cdrom_command(&cgc, buffer, sizeof(buffer), CGC_DATA_READ);
cgc.cmd[0] = GPCMD_GET_CONFIGURATION;
cgc.cmd[3] = CDF_MRW;
cgc.cmd[8] = sizeof(buffer);
cgc.quiet = 1;
if ((ret = cdi->ops->generic_packet(cdi, &cgc)))
return ret;
mfd = (struct mrw_feature_desc *)&buffer[sizeof(struct feature_header)];
if (be16_to_cpu(mfd->feature_code) != CDF_MRW)
return 1;
*write = mfd->write;
if ((ret = cdrom_mrw_probe_pc(cdi))) {
*write = 0;
return ret;
}
return 0;
}
static int cdrom_mrw_bgformat(struct cdrom_device_info *cdi, int cont)
{
struct packet_command cgc;
unsigned char buffer[12];
int ret;
pr_info("%sstarting format\n", cont ? "Re" : "");
/*
* FmtData bit set (bit 4), format type is 1
*/
init_cdrom_command(&cgc, buffer, sizeof(buffer), CGC_DATA_WRITE);
cgc.cmd[0] = GPCMD_FORMAT_UNIT;
cgc.cmd[1] = (1 << 4) | 1;
cgc.timeout = 5 * 60 * HZ;
/*
* 4 byte format list header, 8 byte format list descriptor
*/
buffer[1] = 1 << 1;
buffer[3] = 8;
/*
* nr_blocks field
*/
buffer[4] = 0xff;
buffer[5] = 0xff;
buffer[6] = 0xff;
buffer[7] = 0xff;
buffer[8] = 0x24 << 2;
buffer[11] = cont;
ret = cdi->ops->generic_packet(cdi, &cgc);
if (ret)
pr_info("bgformat failed\n");
return ret;
}
static int cdrom_mrw_bgformat_susp(struct cdrom_device_info *cdi, int immed)
{
struct packet_command cgc;
init_cdrom_command(&cgc, NULL, 0, CGC_DATA_NONE);
cgc.cmd[0] = GPCMD_CLOSE_TRACK;
/*
* Session = 1, Track = 0
*/
cgc.cmd[1] = !!immed;
cgc.cmd[2] = 1 << 1;
cgc.timeout = 5 * 60 * HZ;
return cdi->ops->generic_packet(cdi, &cgc);
}
static int cdrom_flush_cache(struct cdrom_device_info *cdi)
{
struct packet_command cgc;
init_cdrom_command(&cgc, NULL, 0, CGC_DATA_NONE);
cgc.cmd[0] = GPCMD_FLUSH_CACHE;
cgc.timeout = 5 * 60 * HZ;
return cdi->ops->generic_packet(cdi, &cgc);
}
static int cdrom_mrw_exit(struct cdrom_device_info *cdi)
{
disc_information di;
int ret;
ret = cdrom_get_disc_info(cdi, &di);
if (ret < 0 || ret < (int)offsetof(typeof(di),disc_type))
return 1;
ret = 0;
if (di.mrw_status == CDM_MRW_BGFORMAT_ACTIVE) {
pr_info("issuing MRW background format suspend\n");
ret = cdrom_mrw_bgformat_susp(cdi, 0);
}
if (!ret && cdi->media_written)
ret = cdrom_flush_cache(cdi);
return ret;
}
static int cdrom_mrw_set_lba_space(struct cdrom_device_info *cdi, int space)
{
struct packet_command cgc;
struct mode_page_header *mph;
char buffer[16];
int ret, offset, size;
init_cdrom_command(&cgc, buffer, sizeof(buffer), CGC_DATA_READ);
cgc.buffer = buffer;
cgc.buflen = sizeof(buffer);
if ((ret = cdrom_mode_sense(cdi, &cgc, cdi->mrw_mode_page, 0)))
return ret;
mph = (struct mode_page_header *) buffer;
offset = be16_to_cpu(mph->desc_length);
size = be16_to_cpu(mph->mode_data_length) + 2;
buffer[offset + 3] = space;
cgc.buflen = size;
if ((ret = cdrom_mode_select(cdi, &cgc)))
return ret;
pr_info("%s: mrw address space %s selected\n",
cdi->name, mrw_address_space[space]);
return 0;
}
static int cdrom_get_random_writable(struct cdrom_device_info *cdi,
struct rwrt_feature_desc *rfd)
{
struct packet_command cgc;
char buffer[24];
int ret;
init_cdrom_command(&cgc, buffer, sizeof(buffer), CGC_DATA_READ);
cgc.cmd[0] = GPCMD_GET_CONFIGURATION; /* often 0x46 */
cgc.cmd[3] = CDF_RWRT; /* often 0x0020 */
cgc.cmd[8] = sizeof(buffer); /* often 0x18 */
cgc.quiet = 1;
if ((ret = cdi->ops->generic_packet(cdi, &cgc)))
return ret;
memcpy(rfd, &buffer[sizeof(struct feature_header)], sizeof (*rfd));
return 0;
}
static int cdrom_has_defect_mgt(struct cdrom_device_info *cdi)
{
struct packet_command cgc;
char buffer[16];
__be16 *feature_code;
int ret;
init_cdrom_command(&cgc, buffer, sizeof(buffer), CGC_DATA_READ);
cgc.cmd[0] = GPCMD_GET_CONFIGURATION;
cgc.cmd[3] = CDF_HWDM;
cgc.cmd[8] = sizeof(buffer);
cgc.quiet = 1;
if ((ret = cdi->ops->generic_packet(cdi, &cgc)))
return ret;
feature_code = (__be16 *) &buffer[sizeof(struct feature_header)];
if (be16_to_cpu(*feature_code) == CDF_HWDM)
return 0;
return 1;
}
static int cdrom_is_random_writable(struct cdrom_device_info *cdi, int *write)
{
struct rwrt_feature_desc rfd;
int ret;
*write = 0;
if ((ret = cdrom_get_random_writable(cdi, &rfd)))
return ret;
if (CDF_RWRT == be16_to_cpu(rfd.feature_code))
*write = 1;
return 0;
}
static int cdrom_media_erasable(struct cdrom_device_info *cdi)
{
disc_information di;
int ret;
ret = cdrom_get_disc_info(cdi, &di);
if (ret < 0 || ret < offsetof(typeof(di), n_first_track))
return -1;
return di.erasable;
}
/*
* FIXME: check RO bit
*/
static int cdrom_dvdram_open_write(struct cdrom_device_info *cdi)
{
int ret = cdrom_media_erasable(cdi);
/*
* allow writable open if media info read worked and media is
* erasable, _or_ if it fails since not all drives support it
*/
if (!ret)
return 1;
return 0;
}
static int cdrom_mrw_open_write(struct cdrom_device_info *cdi)
{
disc_information di;
int ret;
/*
* always reset to DMA lba space on open
*/
if (cdrom_mrw_set_lba_space(cdi, MRW_LBA_DMA)) {
pr_err("failed setting lba address space\n");
return 1;
}
ret = cdrom_get_disc_info(cdi, &di);
if (ret < 0 || ret < offsetof(typeof(di),disc_type))
return 1;
if (!di.erasable)
return 1;
/*
* mrw_status
* 0 - not MRW formatted
* 1 - MRW bgformat started, but not running or complete
* 2 - MRW bgformat in progress
* 3 - MRW formatting complete
*/
ret = 0;
pr_info("open: mrw_status '%s'\n", mrw_format_status[di.mrw_status]);
if (!di.mrw_status)
ret = 1;
else if (di.mrw_status == CDM_MRW_BGFORMAT_INACTIVE &&
mrw_format_restart)
ret = cdrom_mrw_bgformat(cdi, 1);
return ret;
}
static int mo_open_write(struct cdrom_device_info *cdi)
{
struct packet_command cgc;
char buffer[255];
int ret;
init_cdrom_command(&cgc, &buffer, 4, CGC_DATA_READ);
cgc.quiet = 1;
/*
* obtain write protect information as per
* drivers/scsi/sd.c:sd_read_write_protect_flag
*/
ret = cdrom_mode_sense(cdi, &cgc, GPMODE_ALL_PAGES, 0);
if (ret)
ret = cdrom_mode_sense(cdi, &cgc, GPMODE_VENDOR_PAGE, 0);
if (ret) {
cgc.buflen = 255;
ret = cdrom_mode_sense(cdi, &cgc, GPMODE_ALL_PAGES, 0);
}
/* drive gave us no info, let the user go ahead */
if (ret)
return 0;
return buffer[3] & 0x80;
}
static int cdrom_ram_open_write(struct cdrom_device_info *cdi)
{
struct rwrt_feature_desc rfd;
int ret;
if ((ret = cdrom_has_defect_mgt(cdi)))
return ret;
if ((ret = cdrom_get_random_writable(cdi, &rfd)))
return ret;
else if (CDF_RWRT == be16_to_cpu(rfd.feature_code))
ret = !rfd.curr;
cdinfo(CD_OPEN, "can open for random write\n");
return ret;
}
static void cdrom_mmc3_profile(struct cdrom_device_info *cdi)
{
struct packet_command cgc;
char buffer[32];
int ret, mmc3_profile;
init_cdrom_command(&cgc, buffer, sizeof(buffer), CGC_DATA_READ);
cgc.cmd[0] = GPCMD_GET_CONFIGURATION;
cgc.cmd[1] = 0;
cgc.cmd[2] = cgc.cmd[3] = 0; /* Starting Feature Number */
cgc.cmd[8] = sizeof(buffer); /* Allocation Length */
cgc.quiet = 1;
if ((ret = cdi->ops->generic_packet(cdi, &cgc)))
mmc3_profile = 0xffff;
else
mmc3_profile = (buffer[6] << 8) | buffer[7];
cdi->mmc3_profile = mmc3_profile;
}
static int cdrom_is_dvd_rw(struct cdrom_device_info *cdi)
{
switch (cdi->mmc3_profile) {
case 0x12: /* DVD-RAM */
case 0x1A: /* DVD+RW */
return 0;
default:
return 1;
}
}
/*
* returns 0 for ok to open write, non-0 to disallow
*/
static int cdrom_open_write(struct cdrom_device_info *cdi)
{
int mrw, mrw_write, ram_write;
int ret = 1;
mrw = 0;
if (!cdrom_is_mrw(cdi, &mrw_write))
mrw = 1;
if (CDROM_CAN(CDC_MO_DRIVE))
ram_write = 1;
else
(void) cdrom_is_random_writable(cdi, &ram_write);
if (mrw)
cdi->mask &= ~CDC_MRW;
else
cdi->mask |= CDC_MRW;
if (mrw_write)
cdi->mask &= ~CDC_MRW_W;
else
cdi->mask |= CDC_MRW_W;
if (ram_write)
cdi->mask &= ~CDC_RAM;
else
cdi->mask |= CDC_RAM;
if (CDROM_CAN(CDC_MRW_W))
ret = cdrom_mrw_open_write(cdi);
else if (CDROM_CAN(CDC_DVD_RAM))
ret = cdrom_dvdram_open_write(cdi);
else if (CDROM_CAN(CDC_RAM) &&
!CDROM_CAN(CDC_CD_R|CDC_CD_RW|CDC_DVD|CDC_DVD_R|CDC_MRW|CDC_MO_DRIVE))
ret = cdrom_ram_open_write(cdi);
else if (CDROM_CAN(CDC_MO_DRIVE))
ret = mo_open_write(cdi);
else if (!cdrom_is_dvd_rw(cdi))
ret = 0;
return ret;
}
static void cdrom_dvd_rw_close_write(struct cdrom_device_info *cdi)
{
struct packet_command cgc;
if (cdi->mmc3_profile != 0x1a) {
cdinfo(CD_CLOSE, "%s: No DVD+RW\n", cdi->name);
return;
}
if (!cdi->media_written) {
cdinfo(CD_CLOSE, "%s: DVD+RW media clean\n", cdi->name);
return;
}
pr_info("%s: dirty DVD+RW media, \"finalizing\"\n", cdi->name);
init_cdrom_command(&cgc, NULL, 0, CGC_DATA_NONE);
cgc.cmd[0] = GPCMD_FLUSH_CACHE;
cgc.timeout = 30*HZ;
cdi->ops->generic_packet(cdi, &cgc);
init_cdrom_command(&cgc, NULL, 0, CGC_DATA_NONE);
cgc.cmd[0] = GPCMD_CLOSE_TRACK;
cgc.timeout = 3000*HZ;
cgc.quiet = 1;
cdi->ops->generic_packet(cdi, &cgc);
init_cdrom_command(&cgc, NULL, 0, CGC_DATA_NONE);
cgc.cmd[0] = GPCMD_CLOSE_TRACK;
cgc.cmd[2] = 2; /* Close session */
cgc.quiet = 1;
cgc.timeout = 3000*HZ;
cdi->ops->generic_packet(cdi, &cgc);
cdi->media_written = 0;
}
static int cdrom_close_write(struct cdrom_device_info *cdi)
{
#if 0
return cdrom_flush_cache(cdi);
#else
return 0;
#endif
}
/* We use the open-option O_NONBLOCK to indicate that the
* purpose of opening is only for subsequent ioctl() calls; no device
* integrity checks are performed.
*
* We hope that all cd-player programs will adopt this convention. It
* is in their own interest: device control becomes a lot easier
* this way.
*/
int cdrom_open(struct cdrom_device_info *cdi, struct block_device *bdev, fmode_t mode)
{
int ret;
cdinfo(CD_OPEN, "entering cdrom_open\n");
/* open is event synchronization point, check events first */
check_disk_change(bdev);
/* if this was a O_NONBLOCK open and we should honor the flags,
* do a quick open without drive/disc integrity checks. */
cdi->use_count++;
if ((mode & FMODE_NDELAY) && (cdi->options & CDO_USE_FFLAGS)) {
ret = cdi->ops->open(cdi, 1);
} else {
ret = open_for_data(cdi);
if (ret)
goto err;
cdrom_mmc3_profile(cdi);
if (mode & FMODE_WRITE) {
ret = -EROFS;
if (cdrom_open_write(cdi))
goto err_release;
if (!CDROM_CAN(CDC_RAM))
goto err_release;
ret = 0;
cdi->media_written = 0;
}
}
if (ret)
goto err;
cdinfo(CD_OPEN, "Use count for \"/dev/%s\" now %d\n",
cdi->name, cdi->use_count);
return 0;
err_release:
if (CDROM_CAN(CDC_LOCK) && cdi->options & CDO_LOCK) {
cdi->ops->lock_door(cdi, 0);
cdinfo(CD_OPEN, "door unlocked.\n");
}
cdi->ops->release(cdi);
err:
cdi->use_count--;
return ret;
}
static
int open_for_data(struct cdrom_device_info * cdi)
{
int ret;
struct cdrom_device_ops *cdo = cdi->ops;
tracktype tracks;
cdinfo(CD_OPEN, "entering open_for_data\n");
/* Check if the driver can report drive status. If it can, we
can do clever things. If it can't, well, we at least tried! */
if (cdo->drive_status != NULL) {
ret = cdo->drive_status(cdi, CDSL_CURRENT);
cdinfo(CD_OPEN, "drive_status=%d\n", ret);
if (ret == CDS_TRAY_OPEN) {
cdinfo(CD_OPEN, "the tray is open...\n");
/* can/may i close it? */
if (CDROM_CAN(CDC_CLOSE_TRAY) &&
cdi->options & CDO_AUTO_CLOSE) {
cdinfo(CD_OPEN, "trying to close the tray.\n");
ret=cdo->tray_move(cdi,0);
if (ret) {
cdinfo(CD_OPEN, "bummer. tried to close the tray but failed.\n");
/* Ignore the error from the low
level driver. We don't care why it
couldn't close the tray. We only care
that there is no disc in the drive,
since that is the _REAL_ problem here.*/
ret=-ENOMEDIUM;
goto clean_up_and_return;
}
} else {
cdinfo(CD_OPEN, "bummer. this drive can't close the tray.\n");
ret=-ENOMEDIUM;
goto clean_up_and_return;
}
/* Ok, the door should be closed now.. Check again */
ret = cdo->drive_status(cdi, CDSL_CURRENT);
if ((ret == CDS_NO_DISC) || (ret==CDS_TRAY_OPEN)) {
cdinfo(CD_OPEN, "bummer. the tray is still not closed.\n");
cdinfo(CD_OPEN, "tray might not contain a medium.\n");
ret=-ENOMEDIUM;
goto clean_up_and_return;
}
cdinfo(CD_OPEN, "the tray is now closed.\n");
}
/* the door should be closed now, check for the disc */
ret = cdo->drive_status(cdi, CDSL_CURRENT);
if (ret!=CDS_DISC_OK) {
ret = -ENOMEDIUM;
goto clean_up_and_return;
}
}
cdrom_count_tracks(cdi, &tracks);
if (tracks.error == CDS_NO_DISC) {
cdinfo(CD_OPEN, "bummer. no disc.\n");
ret=-ENOMEDIUM;
goto clean_up_and_return;
}
/* CD-Players which don't use O_NONBLOCK, workman
* for example, need bit CDO_CHECK_TYPE cleared! */
if (tracks.data==0) {
if (cdi->options & CDO_CHECK_TYPE) {
/* give people a warning shot, now that CDO_CHECK_TYPE
is the default case! */
cdinfo(CD_OPEN, "bummer. wrong media type.\n");
cdinfo(CD_WARNING, "pid %d must open device O_NONBLOCK!\n",
(unsigned int)task_pid_nr(current));
ret=-EMEDIUMTYPE;
goto clean_up_and_return;
}
else {
cdinfo(CD_OPEN, "wrong media type, but CDO_CHECK_TYPE not set.\n");
}
}
cdinfo(CD_OPEN, "all seems well, opening the device.\n");
/* all seems well, we can open the device */
ret = cdo->open(cdi, 0); /* open for data */
cdinfo(CD_OPEN, "opening the device gave me %d.\n", ret);
/* After all this careful checking, we shouldn't have problems
opening the device, but we don't want the device locked if
this somehow fails... */
if (ret) {
cdinfo(CD_OPEN, "open device failed.\n");
goto clean_up_and_return;
}
if (CDROM_CAN(CDC_LOCK) && (cdi->options & CDO_LOCK)) {
cdo->lock_door(cdi, 1);
cdinfo(CD_OPEN, "door locked.\n");
}
cdinfo(CD_OPEN, "device opened successfully.\n");
return ret;
/* Something failed. Try to unlock the drive, because some drivers
(notably ide-cd) lock the drive after every command. This produced
a nasty bug where after mount failed, the drive would remain locked!
This ensures that the drive gets unlocked after a mount fails. This
is a goto to avoid bloating the driver with redundant code. */
clean_up_and_return:
cdinfo(CD_OPEN, "open failed.\n");
if (CDROM_CAN(CDC_LOCK) && cdi->options & CDO_LOCK) {
cdo->lock_door(cdi, 0);
cdinfo(CD_OPEN, "door unlocked.\n");
}
return ret;
}
/* This code is similar to that in open_for_data. The routine is called
whenever an audio play operation is requested.
*/
static int check_for_audio_disc(struct cdrom_device_info * cdi,
struct cdrom_device_ops * cdo)
{
int ret;
tracktype tracks;
cdinfo(CD_OPEN, "entering check_for_audio_disc\n");
if (!(cdi->options & CDO_CHECK_TYPE))
return 0;
if (cdo->drive_status != NULL) {
ret = cdo->drive_status(cdi, CDSL_CURRENT);
cdinfo(CD_OPEN, "drive_status=%d\n", ret);
if (ret == CDS_TRAY_OPEN) {
cdinfo(CD_OPEN, "the tray is open...\n");
/* can/may i close it? */
if (CDROM_CAN(CDC_CLOSE_TRAY) &&
cdi->options & CDO_AUTO_CLOSE) {
cdinfo(CD_OPEN, "trying to close the tray.\n");
ret=cdo->tray_move(cdi,0);
if (ret) {
cdinfo(CD_OPEN, "bummer. tried to close tray but failed.\n");
/* Ignore the error from the low
level driver. We don't care why it
couldn't close the tray. We only care
that there is no disc in the drive,
since that is the _REAL_ problem here.*/
return -ENOMEDIUM;
}
} else {
cdinfo(CD_OPEN, "bummer. this driver can't close the tray.\n");
return -ENOMEDIUM;
}
/* Ok, the door should be closed now.. Check again */
ret = cdo->drive_status(cdi, CDSL_CURRENT);
if ((ret == CDS_NO_DISC) || (ret==CDS_TRAY_OPEN)) {
cdinfo(CD_OPEN, "bummer. the tray is still not closed.\n");
return -ENOMEDIUM;
}
if (ret!=CDS_DISC_OK) {
cdinfo(CD_OPEN, "bummer. disc isn't ready.\n");
return -EIO;
}
cdinfo(CD_OPEN, "the tray is now closed.\n");
}
}
cdrom_count_tracks(cdi, &tracks);
if (tracks.error)
return(tracks.error);
if (tracks.audio==0)
return -EMEDIUMTYPE;
return 0;
}
void cdrom_release(struct cdrom_device_info *cdi, fmode_t mode)
{
struct cdrom_device_ops *cdo = cdi->ops;
int opened_for_data;
cdinfo(CD_CLOSE, "entering cdrom_release\n");
if (cdi->use_count > 0)
cdi->use_count--;
if (cdi->use_count == 0) {
cdinfo(CD_CLOSE, "Use count for \"/dev/%s\" now zero\n", cdi->name);
cdrom_dvd_rw_close_write(cdi);
if ((cdo->capability & CDC_LOCK) && !cdi->keeplocked) {
cdinfo(CD_CLOSE, "Unlocking door!\n");
cdo->lock_door(cdi, 0);
}
}
opened_for_data = !(cdi->options & CDO_USE_FFLAGS) ||
!(mode & FMODE_NDELAY);
/*
* flush cache on last write release
*/
if (CDROM_CAN(CDC_RAM) && !cdi->use_count && cdi->for_data)
cdrom_close_write(cdi);
cdo->release(cdi);
if (cdi->use_count == 0) { /* last process that closes dev*/
if (opened_for_data &&
cdi->options & CDO_AUTO_EJECT && CDROM_CAN(CDC_OPEN_TRAY))
cdo->tray_move(cdi, 1);
}
}
static int cdrom_read_mech_status(struct cdrom_device_info *cdi,
struct cdrom_changer_info *buf)
{
struct packet_command cgc;
struct cdrom_device_ops *cdo = cdi->ops;
int length;
/*
* Sanyo changer isn't spec compliant (doesn't use the regular
* LOAD_UNLOAD command to change discs, and doesn't implement the
* mech status command below)
*/
if (cdi->sanyo_slot) {
buf->hdr.nslots = 3;
buf->hdr.curslot = cdi->sanyo_slot == 3 ? 0 : cdi->sanyo_slot;
for (length = 0; length < 3; length++) {
buf->slots[length].disc_present = 1;
buf->slots[length].change = 0;
}
return 0;
}
length = sizeof(struct cdrom_mechstat_header) +
cdi->capacity * sizeof(struct cdrom_slot);
init_cdrom_command(&cgc, buf, length, CGC_DATA_READ);
cgc.cmd[0] = GPCMD_MECHANISM_STATUS;
cgc.cmd[8] = (length >> 8) & 0xff;
cgc.cmd[9] = length & 0xff;
return cdo->generic_packet(cdi, &cgc);
}
static int cdrom_slot_status(struct cdrom_device_info *cdi, int slot)
{
struct cdrom_changer_info *info;
int ret;
cdinfo(CD_CHANGER, "entering cdrom_slot_status()\n");
if (cdi->sanyo_slot)
return CDS_NO_INFO;
info = kmalloc(sizeof(*info), GFP_KERNEL);
if (!info)
return -ENOMEM;
if ((ret = cdrom_read_mech_status(cdi, info)))
goto out_free;
if (info->slots[slot].disc_present)
ret = CDS_DISC_OK;
else
ret = CDS_NO_DISC;
out_free:
kfree(info);
return ret;
}
/* Return the number of slots for an ATAPI/SCSI cdrom,
* return 1 if not a changer.
*/
int cdrom_number_of_slots(struct cdrom_device_info *cdi)
{
int status;
int nslots = 1;
struct cdrom_changer_info *info;
cdinfo(CD_CHANGER, "entering cdrom_number_of_slots()\n");
/* cdrom_read_mech_status requires a valid value for capacity: */
cdi->capacity = 0;
info = kmalloc(sizeof(*info), GFP_KERNEL);
if (!info)
return -ENOMEM;
if ((status = cdrom_read_mech_status(cdi, info)) == 0)
nslots = info->hdr.nslots;
kfree(info);
return nslots;
}
/* If SLOT < 0, unload the current slot. Otherwise, try to load SLOT. */
static int cdrom_load_unload(struct cdrom_device_info *cdi, int slot)
{
struct packet_command cgc;
cdinfo(CD_CHANGER, "entering cdrom_load_unload()\n");
if (cdi->sanyo_slot && slot < 0)
return 0;
init_cdrom_command(&cgc, NULL, 0, CGC_DATA_NONE);
cgc.cmd[0] = GPCMD_LOAD_UNLOAD;
cgc.cmd[4] = 2 + (slot >= 0);
cgc.cmd[8] = slot;
cgc.timeout = 60 * HZ;
/* The Sanyo 3 CD changer uses byte 7 of the
GPCMD_TEST_UNIT_READY command to switch CDs instead of
using the GPCMD_LOAD_UNLOAD opcode. */
if (cdi->sanyo_slot && -1 < slot) {
cgc.cmd[0] = GPCMD_TEST_UNIT_READY;
cgc.cmd[7] = slot;
cgc.cmd[4] = cgc.cmd[8] = 0;
cdi->sanyo_slot = slot ? slot : 3;
}
return cdi->ops->generic_packet(cdi, &cgc);
}
static int cdrom_select_disc(struct cdrom_device_info *cdi, int slot)
{
struct cdrom_changer_info *info;
int curslot;
int ret;
cdinfo(CD_CHANGER, "entering cdrom_select_disc()\n");
if (!CDROM_CAN(CDC_SELECT_DISC))
return -EDRIVE_CANT_DO_THIS;
if (cdi->ops->check_events)
cdi->ops->check_events(cdi, 0, slot);
else
cdi->ops->media_changed(cdi, slot);
if (slot == CDSL_NONE) {
/* set media changed bits, on both queues */
cdi->mc_flags = 0x3;
return cdrom_load_unload(cdi, -1);
}
info = kmalloc(sizeof(*info), GFP_KERNEL);
if (!info)
return -ENOMEM;
if ((ret = cdrom_read_mech_status(cdi, info))) {
kfree(info);
return ret;
}
curslot = info->hdr.curslot;
kfree(info);
if (cdi->use_count > 1 || cdi->keeplocked) {
if (slot == CDSL_CURRENT) {
return curslot;
} else {
return -EBUSY;
}
}
/* Specifying CDSL_CURRENT will attempt to load the current slot,
which is useful if it had been previously unloaded.
Whether it can or not, it returns the current slot.
Similarly, if slot happens to be the current one, we still
try and load it. */
if (slot == CDSL_CURRENT)
slot = curslot;
/* set media changed bits on both queues */
cdi->mc_flags = 0x3;
if ((ret = cdrom_load_unload(cdi, slot)))
return ret;
return slot;
}
/*
* As cdrom implements an extra ioctl consumer for media changed
* event, it needs to buffer ->check_events() output, such that event
* is not lost for both the usual VFS and ioctl paths.
* cdi->{vfs|ioctl}_events are used to buffer pending events for each
* path.
*
* XXX: Locking is non-existent. cdi->ops->check_events() can be
* called in parallel and buffering fields are accessed without any
* exclusion. The original media_changed code had the same problem.
* It might be better to simply deprecate CDROM_MEDIA_CHANGED ioctl
* and remove this cruft altogether. It doesn't have much usefulness
* at this point.
*/
static void cdrom_update_events(struct cdrom_device_info *cdi,
unsigned int clearing)
{
unsigned int events;
events = cdi->ops->check_events(cdi, clearing, CDSL_CURRENT);
cdi->vfs_events |= events;
cdi->ioctl_events |= events;
}
unsigned int cdrom_check_events(struct cdrom_device_info *cdi,
unsigned int clearing)
{
unsigned int events;
cdrom_update_events(cdi, clearing);
events = cdi->vfs_events;
cdi->vfs_events = 0;
return events;
}
EXPORT_SYMBOL(cdrom_check_events);
/* We want to make media_changed accessible to the user through an
* ioctl. The main problem now is that we must double-buffer the
* low-level implementation, to assure that the VFS and the user both
* see a medium change once.
*/
static
int media_changed(struct cdrom_device_info *cdi, int queue)
{
unsigned int mask = (1 << (queue & 1));
int ret = !!(cdi->mc_flags & mask);
bool changed;
if (!CDROM_CAN(CDC_MEDIA_CHANGED))
return ret;
/* changed since last call? */
if (cdi->ops->check_events) {
BUG_ON(!queue); /* shouldn't be called from VFS path */
cdrom_update_events(cdi, DISK_EVENT_MEDIA_CHANGE);
changed = cdi->ioctl_events & DISK_EVENT_MEDIA_CHANGE;
cdi->ioctl_events = 0;
} else
changed = cdi->ops->media_changed(cdi, CDSL_CURRENT);
if (changed) {
cdi->mc_flags = 0x3; /* set bit on both queues */
ret |= 1;
cdi->media_written = 0;
}
cdi->mc_flags &= ~mask; /* clear bit */
return ret;
}
int cdrom_media_changed(struct cdrom_device_info *cdi)
{
/* This talks to the VFS, which doesn't like errors - just 1 or 0.
* Returning "0" is always safe (media hasn't been changed). Do that
* if the low-level cdrom driver doesn't support media changed. */
if (cdi == NULL || cdi->ops->media_changed == NULL)
return 0;
if (!CDROM_CAN(CDC_MEDIA_CHANGED))
return 0;
return media_changed(cdi, 0);
}
/* badly broken, I know. Is due for a fixup anytime. */
static void cdrom_count_tracks(struct cdrom_device_info *cdi, tracktype* tracks)
{
struct cdrom_tochdr header;
struct cdrom_tocentry entry;
int ret, i;
tracks->data=0;
tracks->audio=0;
tracks->cdi=0;
tracks->xa=0;
tracks->error=0;
cdinfo(CD_COUNT_TRACKS, "entering cdrom_count_tracks\n");
/* Grab the TOC header so we can see how many tracks there are */
if ((ret = cdi->ops->audio_ioctl(cdi, CDROMREADTOCHDR, &header))) {
if (ret == -ENOMEDIUM)
tracks->error = CDS_NO_DISC;
else
tracks->error = CDS_NO_INFO;
return;
}
/* check what type of tracks are on this disc */
entry.cdte_format = CDROM_MSF;
for (i = header.cdth_trk0; i <= header.cdth_trk1; i++) {
entry.cdte_track = i;
if (cdi->ops->audio_ioctl(cdi, CDROMREADTOCENTRY, &entry)) {
tracks->error=CDS_NO_INFO;
return;
}
if (entry.cdte_ctrl & CDROM_DATA_TRACK) {
if (entry.cdte_format == 0x10)
tracks->cdi++;
else if (entry.cdte_format == 0x20)
tracks->xa++;
else
tracks->data++;
} else
tracks->audio++;
cdinfo(CD_COUNT_TRACKS, "track %d: format=%d, ctrl=%d\n",
i, entry.cdte_format, entry.cdte_ctrl);
}
cdinfo(CD_COUNT_TRACKS, "disc has %d tracks: %d=audio %d=data %d=Cd-I %d=XA\n",
header.cdth_trk1, tracks->audio, tracks->data,
tracks->cdi, tracks->xa);
}
/* Requests to the low-level drivers will /always/ be done in the
following format convention:
CDROM_LBA: all data-related requests.
CDROM_MSF: all audio-related requests.
However, a low-level implementation is allowed to refuse this
request, and return information in its own favorite format.
It doesn't make sense /at all/ to ask for a play_audio in LBA
format, or ask for multi-session info in MSF format. However, for
backward compatibility these format requests will be satisfied, but
the requests to the low-level drivers will be sanitized in the more
meaningful format indicated above.
*/
static
void sanitize_format(union cdrom_addr *addr,
u_char * curr, u_char requested)
{
if (*curr == requested)
return; /* nothing to be done! */
if (requested == CDROM_LBA) {
addr->lba = (int) addr->msf.frame +
75 * (addr->msf.second - 2 + 60 * addr->msf.minute);
} else { /* CDROM_MSF */
int lba = addr->lba;
addr->msf.frame = lba % 75;
lba /= 75;
lba += 2;
addr->msf.second = lba % 60;
addr->msf.minute = lba / 60;
}
*curr = requested;
}
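/*
 * Worked example of the conversion above: MSF 02:00:00 (minute 2,
 * second 0, frame 0) gives LBA 0 + 75 * (0 - 2 + 60 * 2) = 8850.
 * Converting 8850 back: frame = 8850 % 75 = 0, then 118 + 2 = 120,
 * so second = 0 and minute = 2 again. The +/-2 second bias accounts
 * for the 150-frame (two second) lead-in offset.
 */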
void init_cdrom_command(struct packet_command *cgc, void *buf, int len,
int type)
{
memset(cgc, 0, sizeof(struct packet_command));
if (buf)
memset(buf, 0, len);
cgc->buffer = (char *) buf;
cgc->buflen = len;
cgc->data_direction = type;
cgc->timeout = CDROM_DEF_TIMEOUT;
}
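/*
 * Typical call pattern, as used throughout this file: zero the cgc,
 * point it at a local buffer, fill in the cdb and hand the command
 * to the low-level driver:
 *
 *	struct packet_command cgc;
 *	char buffer[16];
 *
 *	init_cdrom_command(&cgc, buffer, sizeof(buffer), CGC_DATA_READ);
 *	cgc.cmd[0] = GPCMD_GET_CONFIGURATION;
 *	cgc.cmd[8] = sizeof(buffer);
 *	ret = cdi->ops->generic_packet(cdi, &cgc);
 */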
/* DVD handling */
#define copy_key(dest,src) memcpy((dest), (src), sizeof(dvd_key))
#define copy_chal(dest,src) memcpy((dest), (src), sizeof(dvd_challenge))
static void setup_report_key(struct packet_command *cgc, unsigned agid, unsigned type)
{
cgc->cmd[0] = GPCMD_REPORT_KEY;
cgc->cmd[10] = type | (agid << 6);
switch (type) {
case 0: case 8: case 5: {
cgc->buflen = 8;
break;
}
case 1: {
cgc->buflen = 16;
break;
}
case 2: case 4: {
cgc->buflen = 12;
break;
}
}
cgc->cmd[9] = cgc->buflen;
cgc->data_direction = CGC_DATA_READ;
}
static void setup_send_key(struct packet_command *cgc, unsigned agid, unsigned type)
{
cgc->cmd[0] = GPCMD_SEND_KEY;
cgc->cmd[10] = type | (agid << 6);
switch (type) {
case 1: {
cgc->buflen = 16;
break;
}
case 3: {
cgc->buflen = 12;
break;
}
case 6: {
cgc->buflen = 8;
break;
}
}
cgc->cmd[9] = cgc->buflen;
cgc->data_direction = CGC_DATA_WRITE;
}
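/*
 * The type arguments taken above are the MMC "key format" values, as
 * exercised by dvd_do_auth() below: for REPORT KEY, 0 = AGID,
 * 1 = challenge, 2 = KEY1, 4 = title key, 5 = ASF, 8 = RPC state and
 * 0x3f invalidates the AGID; for SEND KEY, 1 = challenge, 3 = KEY2
 * and 6 = RPC state. The buffer lengths in the switches follow from
 * those formats.
 */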
static int dvd_do_auth(struct cdrom_device_info *cdi, dvd_authinfo *ai)
{
int ret;
u_char buf[20];
struct packet_command cgc;
struct cdrom_device_ops *cdo = cdi->ops;
rpc_state_t rpc_state;
memset(buf, 0, sizeof(buf));
init_cdrom_command(&cgc, buf, 0, CGC_DATA_READ);
switch (ai->type) {
/* LU data send */
case DVD_LU_SEND_AGID:
cdinfo(CD_DVD, "entering DVD_LU_SEND_AGID\n");
cgc.quiet = 1;
setup_report_key(&cgc, ai->lsa.agid, 0);
if ((ret = cdo->generic_packet(cdi, &cgc)))
return ret;
ai->lsa.agid = buf[7] >> 6;
/* Returning data, let host change state */
break;
case DVD_LU_SEND_KEY1:
cdinfo(CD_DVD, "entering DVD_LU_SEND_KEY1\n");
setup_report_key(&cgc, ai->lsk.agid, 2);
if ((ret = cdo->generic_packet(cdi, &cgc)))
return ret;
copy_key(ai->lsk.key, &buf[4]);
/* Returning data, let host change state */
break;
case DVD_LU_SEND_CHALLENGE:
cdinfo(CD_DVD, "entering DVD_LU_SEND_CHALLENGE\n");
setup_report_key(&cgc, ai->lsc.agid, 1);
if ((ret = cdo->generic_packet(cdi, &cgc)))
return ret;
copy_chal(ai->lsc.chal, &buf[4]);
/* Returning data, let host change state */
break;
/* Post-auth key */
case DVD_LU_SEND_TITLE_KEY:
cdinfo(CD_DVD, "entering DVD_LU_SEND_TITLE_KEY\n");
cgc.quiet = 1;
setup_report_key(&cgc, ai->lstk.agid, 4);
cgc.cmd[5] = ai->lstk.lba;
cgc.cmd[4] = ai->lstk.lba >> 8;
cgc.cmd[3] = ai->lstk.lba >> 16;
cgc.cmd[2] = ai->lstk.lba >> 24;
if ((ret = cdo->generic_packet(cdi, &cgc)))
return ret;
ai->lstk.cpm = (buf[4] >> 7) & 1;
ai->lstk.cp_sec = (buf[4] >> 6) & 1;
ai->lstk.cgms = (buf[4] >> 4) & 3;
copy_key(ai->lstk.title_key, &buf[5]);
/* Returning data, let host change state */
break;
case DVD_LU_SEND_ASF:
cdinfo(CD_DVD, "entering DVD_LU_SEND_ASF\n");
setup_report_key(&cgc, ai->lsasf.agid, 5);
if ((ret = cdo->generic_packet(cdi, &cgc)))
return ret;
ai->lsasf.asf = buf[7] & 1;
break;
/* LU data receive (LU changes state) */
case DVD_HOST_SEND_CHALLENGE:
cdinfo(CD_DVD, "entering DVD_HOST_SEND_CHALLENGE\n");
setup_send_key(&cgc, ai->hsc.agid, 1);
buf[1] = 0xe;
copy_chal(&buf[4], ai->hsc.chal);
if ((ret = cdo->generic_packet(cdi, &cgc)))
return ret;
ai->type = DVD_LU_SEND_KEY1;
break;
case DVD_HOST_SEND_KEY2:
cdinfo(CD_DVD, "entering DVD_HOST_SEND_KEY2\n");
setup_send_key(&cgc, ai->hsk.agid, 3);
buf[1] = 0xa;
copy_key(&buf[4], ai->hsk.key);
if ((ret = cdo->generic_packet(cdi, &cgc))) {
ai->type = DVD_AUTH_FAILURE;
return ret;
}
ai->type = DVD_AUTH_ESTABLISHED;
break;
/* Misc */
case DVD_INVALIDATE_AGID:
cgc.quiet = 1;
cdinfo(CD_DVD, "entering DVD_INVALIDATE_AGID\n");
setup_report_key(&cgc, ai->lsa.agid, 0x3f);
if ((ret = cdo->generic_packet(cdi, &cgc)))
return ret;
break;
/* Get region settings */
case DVD_LU_SEND_RPC_STATE:
cdinfo(CD_DVD, "entering DVD_LU_SEND_RPC_STATE\n");
setup_report_key(&cgc, 0, 8);
memset(&rpc_state, 0, sizeof(rpc_state_t));
cgc.buffer = (char *) &rpc_state;
if ((ret = cdo->generic_packet(cdi, &cgc)))
return ret;
ai->lrpcs.type = rpc_state.type_code;
ai->lrpcs.vra = rpc_state.vra;
ai->lrpcs.ucca = rpc_state.ucca;
ai->lrpcs.region_mask = rpc_state.region_mask;
ai->lrpcs.rpc_scheme = rpc_state.rpc_scheme;
break;
/* Set region settings */
case DVD_HOST_SEND_RPC_STATE:
cdinfo(CD_DVD, "entering DVD_HOST_SEND_RPC_STATE\n");
setup_send_key(&cgc, 0, 6);
buf[1] = 6;
buf[4] = ai->hrpcs.pdrc;
if ((ret = cdo->generic_packet(cdi, &cgc)))
return ret;
break;
default:
cdinfo(CD_WARNING, "Invalid DVD key ioctl (%d)\n", ai->type);
return -ENOTTY;
}
return 0;
}
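/*
 * For orientation (illustrative ordering, not enforced here): a CSS
 * negotiation driven through the DVD_AUTH ioctl typically walks the
 * cases above as DVD_LU_SEND_AGID -> DVD_HOST_SEND_CHALLENGE ->
 * DVD_LU_SEND_KEY1 -> DVD_LU_SEND_CHALLENGE -> DVD_HOST_SEND_KEY2,
 * at which point the state is DVD_AUTH_ESTABLISHED and title keys
 * can be fetched with DVD_LU_SEND_TITLE_KEY.
 */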
static int dvd_read_physical(struct cdrom_device_info *cdi, dvd_struct *s,
struct packet_command *cgc)
{
unsigned char buf[21], *base;
struct dvd_layer *layer;
struct cdrom_device_ops *cdo = cdi->ops;
int ret, layer_num = s->physical.layer_num;
if (layer_num >= DVD_LAYERS)
return -EINVAL;
init_cdrom_command(cgc, buf, sizeof(buf), CGC_DATA_READ);
cgc->cmd[0] = GPCMD_READ_DVD_STRUCTURE;
cgc->cmd[6] = layer_num;
cgc->cmd[7] = s->type;
cgc->cmd[9] = cgc->buflen & 0xff;
/*
* refrain from reporting errors on non-existing layers (mainly)
*/
cgc->quiet = 1;
ret = cdo->generic_packet(cdi, cgc);
if (ret)
return ret;
base = &buf[4];
layer = &s->physical.layer[layer_num];
/*
* place the data... really ugly, but at least we won't have to
* worry about endianness in userspace.
*/
memset(layer, 0, sizeof(*layer));
layer->book_version = base[0] & 0xf;
layer->book_type = base[0] >> 4;
layer->min_rate = base[1] & 0xf;
layer->disc_size = base[1] >> 4;
layer->layer_type = base[2] & 0xf;
layer->track_path = (base[2] >> 4) & 1;
layer->nlayers = (base[2] >> 5) & 3;
layer->track_density = base[3] & 0xf;
layer->linear_density = base[3] >> 4;
layer->start_sector = base[5] << 16 | base[6] << 8 | base[7];
layer->end_sector = base[9] << 16 | base[10] << 8 | base[11];
layer->end_sector_l0 = base[13] << 16 | base[14] << 8 | base[15];
layer->bca = base[16] >> 7;
return 0;
}
static int dvd_read_copyright(struct cdrom_device_info *cdi, dvd_struct *s,
struct packet_command *cgc)
{
int ret;
u_char buf[8];
struct cdrom_device_ops *cdo = cdi->ops;
init_cdrom_command(cgc, buf, sizeof(buf), CGC_DATA_READ);
cgc->cmd[0] = GPCMD_READ_DVD_STRUCTURE;
cgc->cmd[6] = s->copyright.layer_num;
cgc->cmd[7] = s->type;
cgc->cmd[8] = cgc->buflen >> 8;
cgc->cmd[9] = cgc->buflen & 0xff;
ret = cdo->generic_packet(cdi, cgc);
if (ret)
return ret;
s->copyright.cpst = buf[4];
s->copyright.rmi = buf[5];
return 0;
}
static int dvd_read_disckey(struct cdrom_device_info *cdi, dvd_struct *s,
struct packet_command *cgc)
{
int ret, size;
u_char *buf;
struct cdrom_device_ops *cdo = cdi->ops;
size = sizeof(s->disckey.value) + 4;
buf = kmalloc(size, GFP_KERNEL);
if (!buf)
return -ENOMEM;
init_cdrom_command(cgc, buf, size, CGC_DATA_READ);
cgc->cmd[0] = GPCMD_READ_DVD_STRUCTURE;
cgc->cmd[7] = s->type;
cgc->cmd[8] = size >> 8;
cgc->cmd[9] = size & 0xff;
cgc->cmd[10] = s->disckey.agid << 6;
ret = cdo->generic_packet(cdi, cgc);
if (!ret)
memcpy(s->disckey.value, &buf[4], sizeof(s->disckey.value));
kfree(buf);
return ret;
}
static int dvd_read_bca(struct cdrom_device_info *cdi, dvd_struct *s,
struct packet_command *cgc)
{
int ret, size = 4 + 188;
u_char *buf;
struct cdrom_device_ops *cdo = cdi->ops;
buf = kmalloc(size, GFP_KERNEL);
if (!buf)
return -ENOMEM;
init_cdrom_command(cgc, buf, size, CGC_DATA_READ);
cgc->cmd[0] = GPCMD_READ_DVD_STRUCTURE;
cgc->cmd[7] = s->type;
cgc->cmd[9] = cgc->buflen & 0xff;
ret = cdo->generic_packet(cdi, cgc);
if (ret)
goto out;
s->bca.len = buf[0] << 8 | buf[1];
if (s->bca.len < 12 || s->bca.len > 188) {
cdinfo(CD_WARNING, "Received invalid BCA length (%d)\n", s->bca.len);
ret = -EIO;
goto out;
}
memcpy(s->bca.value, &buf[4], s->bca.len);
ret = 0;
out:
kfree(buf);
return ret;
}
static int dvd_read_manufact(struct cdrom_device_info *cdi, dvd_struct *s,
struct packet_command *cgc)
{
int ret = 0, size;
u_char *buf;
struct cdrom_device_ops *cdo = cdi->ops;
size = sizeof(s->manufact.value) + 4;
buf = kmalloc(size, GFP_KERNEL);
if (!buf)
return -ENOMEM;
init_cdrom_command(cgc, buf, size, CGC_DATA_READ);
cgc->cmd[0] = GPCMD_READ_DVD_STRUCTURE;
cgc->cmd[7] = s->type;
cgc->cmd[8] = size >> 8;
cgc->cmd[9] = size & 0xff;
ret = cdo->generic_packet(cdi, cgc);
if (ret)
goto out;
s->manufact.len = buf[0] << 8 | buf[1];
if (s->manufact.len < 0) {
cdinfo(CD_WARNING, "Received invalid manufacture info length"
" (%d)\n", s->manufact.len);
ret = -EIO;
} else {
if (s->manufact.len > 2048) {
cdinfo(CD_WARNING, "Received invalid manufacture info "
"length (%d): truncating to 2048\n",
s->manufact.len);
s->manufact.len = 2048;
}
memcpy(s->manufact.value, &buf[4], s->manufact.len);
}
out:
kfree(buf);
return ret;
}
static int dvd_read_struct(struct cdrom_device_info *cdi, dvd_struct *s,
struct packet_command *cgc)
{
switch (s->type) {
case DVD_STRUCT_PHYSICAL:
return dvd_read_physical(cdi, s, cgc);
case DVD_STRUCT_COPYRIGHT:
return dvd_read_copyright(cdi, s, cgc);
case DVD_STRUCT_DISCKEY:
return dvd_read_disckey(cdi, s, cgc);
case DVD_STRUCT_BCA:
return dvd_read_bca(cdi, s, cgc);
case DVD_STRUCT_MANUFACT:
return dvd_read_manufact(cdi, s, cgc);
default:
cdinfo(CD_WARNING, ": Invalid DVD structure read requested (%d)\n",
s->type);
return -EINVAL;
}
}
int cdrom_mode_sense(struct cdrom_device_info *cdi,
struct packet_command *cgc,
int page_code, int page_control)
{
struct cdrom_device_ops *cdo = cdi->ops;
memset(cgc->cmd, 0, sizeof(cgc->cmd));
cgc->cmd[0] = GPCMD_MODE_SENSE_10;
cgc->cmd[2] = page_code | (page_control << 6);
cgc->cmd[7] = cgc->buflen >> 8;
cgc->cmd[8] = cgc->buflen & 0xff;
cgc->data_direction = CGC_DATA_READ;
return cdo->generic_packet(cdi, cgc);
}
int cdrom_mode_select(struct cdrom_device_info *cdi,
struct packet_command *cgc)
{
struct cdrom_device_ops *cdo = cdi->ops;
memset(cgc->cmd, 0, sizeof(cgc->cmd));
memset(cgc->buffer, 0, 2);
cgc->cmd[0] = GPCMD_MODE_SELECT_10;
cgc->cmd[1] = 0x10; /* PF */
cgc->cmd[7] = cgc->buflen >> 8;
cgc->cmd[8] = cgc->buflen & 0xff;
cgc->data_direction = CGC_DATA_WRITE;
return cdo->generic_packet(cdi, cgc);
}
static int cdrom_read_subchannel(struct cdrom_device_info *cdi,
struct cdrom_subchnl *subchnl, int mcn)
{
struct cdrom_device_ops *cdo = cdi->ops;
struct packet_command cgc;
char buffer[32];
int ret;
init_cdrom_command(&cgc, buffer, 16, CGC_DATA_READ);
cgc.cmd[0] = GPCMD_READ_SUBCHANNEL;
cgc.cmd[1] = 2; /* MSF addressing */
cgc.cmd[2] = 0x40; /* request subQ data */
cgc.cmd[3] = mcn ? 2 : 1;
cgc.cmd[8] = 16;
if ((ret = cdo->generic_packet(cdi, &cgc)))
return ret;
subchnl->cdsc_audiostatus = cgc.buffer[1];
subchnl->cdsc_format = CDROM_MSF;
subchnl->cdsc_ctrl = cgc.buffer[5] & 0xf;
subchnl->cdsc_trk = cgc.buffer[6];
subchnl->cdsc_ind = cgc.buffer[7];
subchnl->cdsc_reladdr.msf.minute = cgc.buffer[13];
subchnl->cdsc_reladdr.msf.second = cgc.buffer[14];
subchnl->cdsc_reladdr.msf.frame = cgc.buffer[15];
subchnl->cdsc_absaddr.msf.minute = cgc.buffer[9];
subchnl->cdsc_absaddr.msf.second = cgc.buffer[10];
subchnl->cdsc_absaddr.msf.frame = cgc.buffer[11];
return 0;
}
/*
* Specific READ_10 interface
*/
static int cdrom_read_cd(struct cdrom_device_info *cdi,
struct packet_command *cgc, int lba,
int blocksize, int nblocks)
{
struct cdrom_device_ops *cdo = cdi->ops;
memset(&cgc->cmd, 0, sizeof(cgc->cmd));
cgc->cmd[0] = GPCMD_READ_10;
cgc->cmd[2] = (lba >> 24) & 0xff;
cgc->cmd[3] = (lba >> 16) & 0xff;
cgc->cmd[4] = (lba >> 8) & 0xff;
cgc->cmd[5] = lba & 0xff;
cgc->cmd[6] = (nblocks >> 16) & 0xff;
cgc->cmd[7] = (nblocks >> 8) & 0xff;
cgc->cmd[8] = nblocks & 0xff;
cgc->buflen = blocksize * nblocks;
return cdo->generic_packet(cdi, cgc);
}
/* very generic interface for reading the various types of blocks */
static int cdrom_read_block(struct cdrom_device_info *cdi,
struct packet_command *cgc,
int lba, int nblocks, int format, int blksize)
{
struct cdrom_device_ops *cdo = cdi->ops;
memset(&cgc->cmd, 0, sizeof(cgc->cmd));
cgc->cmd[0] = GPCMD_READ_CD;
/* expected sector size - cdda,mode1,etc. */
cgc->cmd[1] = format << 2;
/* starting address */
cgc->cmd[2] = (lba >> 24) & 0xff;
cgc->cmd[3] = (lba >> 16) & 0xff;
cgc->cmd[4] = (lba >> 8) & 0xff;
cgc->cmd[5] = lba & 0xff;
/* number of blocks */
cgc->cmd[6] = (nblocks >> 16) & 0xff;
cgc->cmd[7] = (nblocks >> 8) & 0xff;
cgc->cmd[8] = nblocks & 0xff;
cgc->buflen = blksize * nblocks;
/* set the header info returned */
switch (blksize) {
case CD_FRAMESIZE_RAW0 : cgc->cmd[9] = 0x58; break;
case CD_FRAMESIZE_RAW1 : cgc->cmd[9] = 0x78; break;
case CD_FRAMESIZE_RAW : cgc->cmd[9] = 0xf8; break;
default : cgc->cmd[9] = 0x10;
}
return cdo->generic_packet(cdi, cgc);
}
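/*
 * A note on the cmd[9] values above (my reading of the READ CD cdb;
 * verify against the MMC spec before leaning on it): the byte selects
 * which parts of a raw frame the drive returns (sync mark, headers,
 * user data, EDC/ECC), matching the three raw frame sizes, while the
 * default 0x10 requests user data only.
 */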
static int cdrom_read_cdda_old(struct cdrom_device_info *cdi, __u8 __user *ubuf,
int lba, int nframes)
{
struct packet_command cgc;
int ret = 0;
int nr;
cdi->last_sense = 0;
memset(&cgc, 0, sizeof(cgc));
/*
* start with a buffer for the full ra.nframes request, back down if alloc fails
*/
nr = nframes;
do {
cgc.buffer = kmalloc(CD_FRAMESIZE_RAW * nr, GFP_KERNEL);
if (cgc.buffer)
break;
nr >>= 1;
} while (nr);
if (!nr)
return -ENOMEM;
cgc.data_direction = CGC_DATA_READ;
while (nframes > 0) {
if (nr > nframes)
nr = nframes;
ret = cdrom_read_block(cdi, &cgc, lba, nr, 1, CD_FRAMESIZE_RAW);
if (ret)
break;
if (copy_to_user(ubuf, cgc.buffer, CD_FRAMESIZE_RAW * nr)) {
ret = -EFAULT;
break;
}
ubuf += CD_FRAMESIZE_RAW * nr;
nframes -= nr;
lba += nr;
}
kfree(cgc.buffer);
return ret;
}
static int cdrom_read_cdda_bpc(struct cdrom_device_info *cdi, __u8 __user *ubuf,
int lba, int nframes)
{
struct request_queue *q = cdi->disk->queue;
struct request *rq;
struct bio *bio;
unsigned int len;
int nr, ret = 0;
if (!q)
return -ENXIO;
cdi->last_sense = 0;
while (nframes) {
nr = nframes;
if (cdi->cdda_method == CDDA_BPC_SINGLE)
nr = 1;
if (nr * CD_FRAMESIZE_RAW > (queue_max_sectors(q) << 9))
nr = (queue_max_sectors(q) << 9) / CD_FRAMESIZE_RAW;
len = nr * CD_FRAMESIZE_RAW;
rq = blk_get_request(q, READ, GFP_KERNEL);
if (!rq) {
ret = -ENOMEM;
break;
}
ret = blk_rq_map_user(q, rq, NULL, ubuf, len, GFP_KERNEL);
if (ret) {
blk_put_request(rq);
break;
}
rq->cmd[0] = GPCMD_READ_CD;
rq->cmd[1] = 1 << 2;
rq->cmd[2] = (lba >> 24) & 0xff;
rq->cmd[3] = (lba >> 16) & 0xff;
rq->cmd[4] = (lba >> 8) & 0xff;
rq->cmd[5] = lba & 0xff;
rq->cmd[6] = (nr >> 16) & 0xff;
rq->cmd[7] = (nr >> 8) & 0xff;
rq->cmd[8] = nr & 0xff;
rq->cmd[9] = 0xf8;
rq->cmd_len = 12;
rq->cmd_type = REQ_TYPE_BLOCK_PC;
rq->timeout = 60 * HZ;
bio = rq->bio;
if (blk_execute_rq(q, cdi->disk, rq, 0)) {
struct request_sense *s = rq->sense;
ret = -EIO;
cdi->last_sense = s->sense_key;
}
if (blk_rq_unmap_user(bio))
ret = -EFAULT;
blk_put_request(rq);
if (ret)
break;
nframes -= nr;
lba += nr;
ubuf += len;
}
return ret;
}
static int cdrom_read_cdda(struct cdrom_device_info *cdi, __u8 __user *ubuf,
int lba, int nframes)
{
int ret;
if (cdi->cdda_method == CDDA_OLD)
return cdrom_read_cdda_old(cdi, ubuf, lba, nframes);
retry:
/*
* only an io error (-EIO) is worth retrying; success or any other error is returned directly
*/
ret = cdrom_read_cdda_bpc(cdi, ubuf, lba, nframes);
if (!ret || ret != -EIO)
return ret;
/*
* I've seen drives get sense 4/8/3 udma crc errors on multi
* frame dma, so drop to single frame dma if we need to
*/
if (cdi->cdda_method == CDDA_BPC_FULL && nframes > 1) {
pr_info("dropping to single frame dma\n");
cdi->cdda_method = CDDA_BPC_SINGLE;
goto retry;
}
/*
* so we have an io error of some sort with multi frame dma. only
* fall back to old style cdda if the sense points at a hardware
* error (0x04) or aborted command (0x0b), not for any error
*/
if (cdi->last_sense != 0x04 && cdi->last_sense != 0x0b)
return ret;
pr_info("dropping to old style cdda (sense=%x)\n", cdi->last_sense);
cdi->cdda_method = CDDA_OLD;
return cdrom_read_cdda_old(cdi, ubuf, lba, nframes);
}
static int cdrom_ioctl_multisession(struct cdrom_device_info *cdi,
void __user *argp)
{
struct cdrom_multisession ms_info;
u8 requested_format;
int ret;
cdinfo(CD_DO_IOCTL, "entering CDROMMULTISESSION\n");
if (!(cdi->ops->capability & CDC_MULTI_SESSION))
return -ENOSYS;
if (copy_from_user(&ms_info, argp, sizeof(ms_info)))
return -EFAULT;
requested_format = ms_info.addr_format;
if (requested_format != CDROM_MSF && requested_format != CDROM_LBA)
return -EINVAL;
ms_info.addr_format = CDROM_LBA;
ret = cdi->ops->get_last_session(cdi, &ms_info);
if (ret)
return ret;
sanitize_format(&ms_info.addr, &ms_info.addr_format, requested_format);
if (copy_to_user(argp, &ms_info, sizeof(ms_info)))
return -EFAULT;
cdinfo(CD_DO_IOCTL, "CDROMMULTISESSION successful\n");
return 0;
}
static int cdrom_ioctl_eject(struct cdrom_device_info *cdi)
{
cdinfo(CD_DO_IOCTL, "entering CDROMEJECT\n");
if (!CDROM_CAN(CDC_OPEN_TRAY))
return -ENOSYS;
if (cdi->use_count != 1 || cdi->keeplocked)
return -EBUSY;
if (CDROM_CAN(CDC_LOCK)) {
int ret = cdi->ops->lock_door(cdi, 0);
if (ret)
return ret;
}
return cdi->ops->tray_move(cdi, 1);
}
static int cdrom_ioctl_closetray(struct cdrom_device_info *cdi)
{
cdinfo(CD_DO_IOCTL, "entering CDROMCLOSETRAY\n");
if (!CDROM_CAN(CDC_CLOSE_TRAY))
return -ENOSYS;
return cdi->ops->tray_move(cdi, 0);
}
static int cdrom_ioctl_eject_sw(struct cdrom_device_info *cdi,
unsigned long arg)
{
cdinfo(CD_DO_IOCTL, "entering CDROMEJECT_SW\n");
if (!CDROM_CAN(CDC_OPEN_TRAY))
return -ENOSYS;
if (cdi->keeplocked)
return -EBUSY;
cdi->options &= ~(CDO_AUTO_CLOSE | CDO_AUTO_EJECT);
if (arg)
cdi->options |= CDO_AUTO_CLOSE | CDO_AUTO_EJECT;
return 0;
}
static int cdrom_ioctl_media_changed(struct cdrom_device_info *cdi,
unsigned long arg)
{
struct cdrom_changer_info *info;
int ret;
cdinfo(CD_DO_IOCTL, "entering CDROM_MEDIA_CHANGED\n");
if (!CDROM_CAN(CDC_MEDIA_CHANGED))
return -ENOSYS;
/* cannot select disc or select current disc */
if (!CDROM_CAN(CDC_SELECT_DISC) || arg == CDSL_CURRENT)
return media_changed(cdi, 1);
if ((unsigned int)arg >= cdi->capacity)
return -EINVAL;
info = kmalloc(sizeof(*info), GFP_KERNEL);
if (!info)
return -ENOMEM;
ret = cdrom_read_mech_status(cdi, info);
if (!ret)
ret = info->slots[arg].change;
kfree(info);
return ret;
}
static int cdrom_ioctl_set_options(struct cdrom_device_info *cdi,
unsigned long arg)
{
cdinfo(CD_DO_IOCTL, "entering CDROM_SET_OPTIONS\n");
/*
* Options need to be in sync with capability.
* Too late for that, so we have to check each one separately.
*/
switch (arg) {
case CDO_USE_FFLAGS:
case CDO_CHECK_TYPE:
break;
case CDO_LOCK:
if (!CDROM_CAN(CDC_LOCK))
return -ENOSYS;
break;
case 0:
return cdi->options;
/* default is basically CDO_[AUTO_CLOSE|AUTO_EJECT] */
default:
if (!CDROM_CAN(arg))
return -ENOSYS;
}
cdi->options |= (int) arg;
return cdi->options;
}
static int cdrom_ioctl_clear_options(struct cdrom_device_info *cdi,
unsigned long arg)
{
cdinfo(CD_DO_IOCTL, "entering CDROM_CLEAR_OPTIONS\n");
cdi->options &= ~(int) arg;
return cdi->options;
}
static int cdrom_ioctl_select_speed(struct cdrom_device_info *cdi,
unsigned long arg)
{
cdinfo(CD_DO_IOCTL, "entering CDROM_SELECT_SPEED\n");
if (!CDROM_CAN(CDC_SELECT_SPEED))
return -ENOSYS;
return cdi->ops->select_speed(cdi, arg);
}
static int cdrom_ioctl_select_disc(struct cdrom_device_info *cdi,
unsigned long arg)
{
cdinfo(CD_DO_IOCTL, "entering CDROM_SELECT_DISC\n");
if (!CDROM_CAN(CDC_SELECT_DISC))
return -ENOSYS;
if (arg != CDSL_CURRENT && arg != CDSL_NONE) {
if ((int)arg >= cdi->capacity)
return -EINVAL;
}
/*
* ->select_disc is a hook to allow a driver-specific way of
* selecting a disc. However, since there is no equivalent hook for
* cdrom_slot_status this may not actually be useful...
*/
if (cdi->ops->select_disc)
return cdi->ops->select_disc(cdi, arg);
cdinfo(CD_CHANGER, "Using generic cdrom_select_disc()\n");
return cdrom_select_disc(cdi, arg);
}
static int cdrom_ioctl_reset(struct cdrom_device_info *cdi,
struct block_device *bdev)
{
cdinfo(CD_DO_IOCTL, "entering CDROM_RESET\n");
if (!capable(CAP_SYS_ADMIN))
return -EACCES;
if (!CDROM_CAN(CDC_RESET))
return -ENOSYS;
invalidate_bdev(bdev);
return cdi->ops->reset(cdi);
}
static int cdrom_ioctl_lock_door(struct cdrom_device_info *cdi,
unsigned long arg)
{
cdinfo(CD_DO_IOCTL, "%socking door.\n", arg ? "L" : "Unl");
if (!CDROM_CAN(CDC_LOCK))
return -EDRIVE_CANT_DO_THIS;
cdi->keeplocked = arg ? 1 : 0;
/*
* Don't unlock the door on multiple opens by default, but allow
* root to do so.
*/
if (cdi->use_count != 1 && !arg && !capable(CAP_SYS_ADMIN))
return -EBUSY;
return cdi->ops->lock_door(cdi, arg);
}
static int cdrom_ioctl_debug(struct cdrom_device_info *cdi,
unsigned long arg)
{
cdinfo(CD_DO_IOCTL, "%sabling debug.\n", arg ? "En" : "Dis");
if (!capable(CAP_SYS_ADMIN))
return -EACCES;
debug = arg ? 1 : 0;
return debug;
}
static int cdrom_ioctl_get_capability(struct cdrom_device_info *cdi)
{
cdinfo(CD_DO_IOCTL, "entering CDROM_GET_CAPABILITY\n");
return (cdi->ops->capability & ~cdi->mask);
}
/*
* The following function is implemented, although very few audio
* discs give Universal Product Code information, which should just be
* the Medium Catalog Number on the box. Note that the way the code
* is written on the CD is /not/ uniform across all discs!
*/
static int cdrom_ioctl_get_mcn(struct cdrom_device_info *cdi,
void __user *argp)
{
struct cdrom_mcn mcn;
int ret;
cdinfo(CD_DO_IOCTL, "entering CDROM_GET_MCN\n");
if (!(cdi->ops->capability & CDC_MCN))
return -ENOSYS;
ret = cdi->ops->get_mcn(cdi, &mcn);
if (ret)
return ret;
if (copy_to_user(argp, &mcn, sizeof(mcn)))
return -EFAULT;
cdinfo(CD_DO_IOCTL, "CDROM_GET_MCN successful\n");
return 0;
}
static int cdrom_ioctl_drive_status(struct cdrom_device_info *cdi,
unsigned long arg)
{
cdinfo(CD_DO_IOCTL, "entering CDROM_DRIVE_STATUS\n");
if (!(cdi->ops->capability & CDC_DRIVE_STATUS))
return -ENOSYS;
if (!CDROM_CAN(CDC_SELECT_DISC) ||
(arg == CDSL_CURRENT || arg == CDSL_NONE))
return cdi->ops->drive_status(cdi, CDSL_CURRENT);
if (((int)arg >= cdi->capacity))
return -EINVAL;
return cdrom_slot_status(cdi, arg);
}
/*
* Ok, this is where problems start. The current interface for the
* CDROM_DISC_STATUS ioctl is flawed. It makes the false assumption that
* CDs are all CDS_DATA_1 or all CDS_AUDIO, etc. Unfortunately, while this
* is often the case, it is also very common for CDs to have some tracks
* with data, and some tracks with audio. Just because I feel like it,
* I declare the following to be the best way to cope. If the CD has ANY
* data tracks on it, it will be returned as a data CD. If it has any XA
* tracks, I will return it as that. Now I could simplify this interface
* by combining these returns with the above, but this more clearly
* demonstrates the problem with the current interface. Too bad this
* wasn't designed to use bitmasks... -Erik
*
* Well, now we have the option CDS_MIXED: a mixed-type CD.
* User level programmers might feel the ioctl is not very useful.
* ---david
*/
static int cdrom_ioctl_disc_status(struct cdrom_device_info *cdi)
{
tracktype tracks;
cdinfo(CD_DO_IOCTL, "entering CDROM_DISC_STATUS\n");
cdrom_count_tracks(cdi, &tracks);
if (tracks.error)
return tracks.error;
/* Policy mode on */
if (tracks.audio > 0) {
if (!tracks.data && !tracks.cdi && !tracks.xa)
return CDS_AUDIO;
else
return CDS_MIXED;
}
if (tracks.cdi > 0)
return CDS_XA_2_2;
if (tracks.xa > 0)
return CDS_XA_2_1;
if (tracks.data > 0)
return CDS_DATA_1;
/* Policy mode off */
cdinfo(CD_WARNING,"This disc doesn't have any tracks I recognize!\n");
return CDS_NO_INFO;
}
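/*
 * Illustrative userspace sketch of the policy above (not part of this
 * driver; the device path is an assumption).  CDROM_DISC_STATUS takes
 * no argument and reports the disc type in the return value.  Guarded
 * out so the file still compiles as-is.
 */
#if 0
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/cdrom.h>

int main(void)
{
	int fd = open("/dev/cdrom", O_RDONLY | O_NONBLOCK);
	int status;

	if (fd < 0)
		return 1;
	status = ioctl(fd, CDROM_DISC_STATUS, 0);
	switch (status) {
	case CDS_AUDIO:		puts("audio CD");		break;
	case CDS_MIXED:		puts("mixed audio/data CD");	break;
	case CDS_XA_2_1:
	case CDS_XA_2_2:	puts("XA data CD");		break;
	case CDS_DATA_1:	puts("data CD");		break;
	default:		puts("no info");		break;
	}
	close(fd);
	return 0;
}
#endif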
static int cdrom_ioctl_changer_nslots(struct cdrom_device_info *cdi)
{
cdinfo(CD_DO_IOCTL, "entering CDROM_CHANGER_NSLOTS\n");
return cdi->capacity;
}
static int cdrom_ioctl_get_subchnl(struct cdrom_device_info *cdi,
void __user *argp)
{
struct cdrom_subchnl q;
u8 requested, back;
int ret;
/* cdinfo(CD_DO_IOCTL,"entering CDROMSUBCHNL\n");*/
if (copy_from_user(&q, argp, sizeof(q)))
return -EFAULT;
requested = q.cdsc_format;
if (requested != CDROM_MSF && requested != CDROM_LBA)
return -EINVAL;
q.cdsc_format = CDROM_MSF;
ret = cdi->ops->audio_ioctl(cdi, CDROMSUBCHNL, &q);
if (ret)
return ret;
back = q.cdsc_format; /* local copy */
sanitize_format(&q.cdsc_absaddr, &back, requested);
sanitize_format(&q.cdsc_reladdr, &q.cdsc_format, requested);
if (copy_to_user(argp, &q, sizeof(q)))
return -EFAULT;
/* cdinfo(CD_DO_IOCTL, "CDROMSUBCHNL successful\n"); */
return 0;
}
static int cdrom_ioctl_read_tochdr(struct cdrom_device_info *cdi,
void __user *argp)
{
struct cdrom_tochdr header;
int ret;
/* cdinfo(CD_DO_IOCTL, "entering CDROMREADTOCHDR\n"); */
if (copy_from_user(&header, argp, sizeof(header)))
return -EFAULT;
ret = cdi->ops->audio_ioctl(cdi, CDROMREADTOCHDR, &header);
if (ret)
return ret;
if (copy_to_user(argp, &header, sizeof(header)))
return -EFAULT;
/* cdinfo(CD_DO_IOCTL, "CDROMREADTOCHDR successful\n"); */
return 0;
}
static int cdrom_ioctl_read_tocentry(struct cdrom_device_info *cdi,
void __user *argp)
{
struct cdrom_tocentry entry;
u8 requested_format;
int ret;
/* cdinfo(CD_DO_IOCTL, "entering CDROMREADTOCENTRY\n"); */
if (copy_from_user(&entry, argp, sizeof(entry)))
return -EFAULT;
requested_format = entry.cdte_format;
if (requested_format != CDROM_MSF && requested_format != CDROM_LBA)
return -EINVAL;
/* make interface to low-level uniform */
entry.cdte_format = CDROM_MSF;
ret = cdi->ops->audio_ioctl(cdi, CDROMREADTOCENTRY, &entry);
if (ret)
return ret;
sanitize_format(&entry.cdte_addr, &entry.cdte_format, requested_format);
if (copy_to_user(argp, &entry, sizeof(entry)))
return -EFAULT;
/* cdinfo(CD_DO_IOCTL, "CDROMREADTOCENTRY successful\n"); */
return 0;
}
static int cdrom_ioctl_play_msf(struct cdrom_device_info *cdi,
void __user *argp)
{
struct cdrom_msf msf;
cdinfo(CD_DO_IOCTL, "entering CDROMPLAYMSF\n");
if (!CDROM_CAN(CDC_PLAY_AUDIO))
return -ENOSYS;
if (copy_from_user(&msf, argp, sizeof(msf)))
return -EFAULT;
return cdi->ops->audio_ioctl(cdi, CDROMPLAYMSF, &msf);
}
static int cdrom_ioctl_play_trkind(struct cdrom_device_info *cdi,
void __user *argp)
{
struct cdrom_ti ti;
int ret;
cdinfo(CD_DO_IOCTL, "entering CDROMPLAYTRKIND\n");
if (!CDROM_CAN(CDC_PLAY_AUDIO))
return -ENOSYS;
if (copy_from_user(&ti, argp, sizeof(ti)))
return -EFAULT;
ret = check_for_audio_disc(cdi, cdi->ops);
if (ret)
return ret;
return cdi->ops->audio_ioctl(cdi, CDROMPLAYTRKIND, &ti);
}
static int cdrom_ioctl_volctrl(struct cdrom_device_info *cdi,
void __user *argp)
{
struct cdrom_volctrl volume;
cdinfo(CD_DO_IOCTL, "entering CDROMVOLCTRL\n");
if (!CDROM_CAN(CDC_PLAY_AUDIO))
return -ENOSYS;
if (copy_from_user(&volume, argp, sizeof(volume)))
return -EFAULT;
return cdi->ops->audio_ioctl(cdi, CDROMVOLCTRL, &volume);
}
static int cdrom_ioctl_volread(struct cdrom_device_info *cdi,
void __user *argp)
{
struct cdrom_volctrl volume;
int ret;
cdinfo(CD_DO_IOCTL, "entering CDROMVOLREAD\n");
if (!CDROM_CAN(CDC_PLAY_AUDIO))
return -ENOSYS;
ret = cdi->ops->audio_ioctl(cdi, CDROMVOLREAD, &volume);
if (ret)
return ret;
if (copy_to_user(argp, &volume, sizeof(volume)))
return -EFAULT;
return 0;
}
static int cdrom_ioctl_audioctl(struct cdrom_device_info *cdi,
unsigned int cmd)
{
int ret;
cdinfo(CD_DO_IOCTL, "doing audio ioctl (start/stop/pause/resume)\n");
if (!CDROM_CAN(CDC_PLAY_AUDIO))
return -ENOSYS;
ret = check_for_audio_disc(cdi, cdi->ops);
if (ret)
return ret;
return cdi->ops->audio_ioctl(cdi, cmd, NULL);
}
/*
* Just about every imaginable ioctl is supported in the Uniform layer
* these days.
* ATAPI / SCSI specific code now mainly resides in mmc_ioctl().
*/
int cdrom_ioctl(struct cdrom_device_info *cdi, struct block_device *bdev,
fmode_t mode, unsigned int cmd, unsigned long arg)
{
void __user *argp = (void __user *)arg;
int ret;
/*
* Try the generic SCSI command ioctls first.
*/
ret = scsi_cmd_blk_ioctl(bdev, mode, cmd, argp);
if (ret != -ENOTTY)
return ret;
switch (cmd) {
case CDROMMULTISESSION:
return cdrom_ioctl_multisession(cdi, argp);
case CDROMEJECT:
return cdrom_ioctl_eject(cdi);
case CDROMCLOSETRAY:
return cdrom_ioctl_closetray(cdi);
case CDROMEJECT_SW:
return cdrom_ioctl_eject_sw(cdi, arg);
case CDROM_MEDIA_CHANGED:
return cdrom_ioctl_media_changed(cdi, arg);
case CDROM_SET_OPTIONS:
return cdrom_ioctl_set_options(cdi, arg);
case CDROM_CLEAR_OPTIONS:
return cdrom_ioctl_clear_options(cdi, arg);
case CDROM_SELECT_SPEED:
return cdrom_ioctl_select_speed(cdi, arg);
case CDROM_SELECT_DISC:
return cdrom_ioctl_select_disc(cdi, arg);
case CDROMRESET:
return cdrom_ioctl_reset(cdi, bdev);
case CDROM_LOCKDOOR:
return cdrom_ioctl_lock_door(cdi, arg);
case CDROM_DEBUG:
return cdrom_ioctl_debug(cdi, arg);
case CDROM_GET_CAPABILITY:
return cdrom_ioctl_get_capability(cdi);
case CDROM_GET_MCN:
return cdrom_ioctl_get_mcn(cdi, argp);
case CDROM_DRIVE_STATUS:
return cdrom_ioctl_drive_status(cdi, arg);
case CDROM_DISC_STATUS:
return cdrom_ioctl_disc_status(cdi);
case CDROM_CHANGER_NSLOTS:
return cdrom_ioctl_changer_nslots(cdi);
}
/*
* Use the ioctls that are implemented through the generic_packet()
* interface. This may look a bit funny, but if -ENOTTY is
* returned, that particular ioctl is not implemented and we
* let it fall through to the device-specific ones.
*/
if (CDROM_CAN(CDC_GENERIC_PACKET)) {
ret = mmc_ioctl(cdi, cmd, arg);
if (ret != -ENOTTY)
return ret;
}
/*
* Note: most of the cdinfo() calls are commented out here,
* because they fill up the sys log when CD players poll
* the drive.
*/
switch (cmd) {
case CDROMSUBCHNL:
return cdrom_ioctl_get_subchnl(cdi, argp);
case CDROMREADTOCHDR:
return cdrom_ioctl_read_tochdr(cdi, argp);
case CDROMREADTOCENTRY:
return cdrom_ioctl_read_tocentry(cdi, argp);
case CDROMPLAYMSF:
return cdrom_ioctl_play_msf(cdi, argp);
case CDROMPLAYTRKIND:
return cdrom_ioctl_play_trkind(cdi, argp);
case CDROMVOLCTRL:
return cdrom_ioctl_volctrl(cdi, argp);
case CDROMVOLREAD:
return cdrom_ioctl_volread(cdi, argp);
case CDROMSTART:
case CDROMSTOP:
case CDROMPAUSE:
case CDROMRESUME:
return cdrom_ioctl_audioctl(cdi, cmd);
}
return -ENOSYS;
}
/*
* Required when we need to use READ_10 to issue other than 2048 block
* reads
*/
static int cdrom_switch_blocksize(struct cdrom_device_info *cdi, int size)
{
struct cdrom_device_ops *cdo = cdi->ops;
struct packet_command cgc;
struct modesel_head mh;
memset(&mh, 0, sizeof(mh));
mh.block_desc_length = 0x08;
mh.block_length_med = (size >> 8) & 0xff;
mh.block_length_lo = size & 0xff;
memset(&cgc, 0, sizeof(cgc));
cgc.cmd[0] = 0x15;
cgc.cmd[1] = 1 << 4;
cgc.cmd[4] = 12;
cgc.buflen = sizeof(mh);
cgc.buffer = (char *) &mh;
cgc.data_direction = CGC_DATA_WRITE;
return cdo->generic_packet(cdi, &cgc);
}
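/*
 * For reference, and assuming the conventional MODE SELECT(6) layout
 * (4-byte parameter header followed by one 8-byte block descriptor,
 * matching struct modesel_head): switching to size == 2340 (0x0924)
 * sends the 12 bytes 00 00 00 08  00 00 00 00  00 00 09 24.
 */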
static noinline int mmc_ioctl_cdrom_read_data(struct cdrom_device_info *cdi,
void __user *arg,
struct packet_command *cgc,
int cmd)
{
struct request_sense sense;
struct cdrom_msf msf;
int blocksize = 0, format = 0, lba;
int ret;
switch (cmd) {
case CDROMREADRAW:
blocksize = CD_FRAMESIZE_RAW;
break;
case CDROMREADMODE1:
blocksize = CD_FRAMESIZE;
format = 2;
break;
case CDROMREADMODE2:
blocksize = CD_FRAMESIZE_RAW0;
break;
}
IOCTL_IN(arg, struct cdrom_msf, msf);
lba = msf_to_lba(msf.cdmsf_min0, msf.cdmsf_sec0, msf.cdmsf_frame0);
/* FIXME: we need upper bound checking, too!! */
if (lba < 0)
return -EINVAL;
cgc->buffer = kzalloc(blocksize, GFP_KERNEL);
if (cgc->buffer == NULL)
return -ENOMEM;
memset(&sense, 0, sizeof(sense));
cgc->sense = &sense;
cgc->data_direction = CGC_DATA_READ;
ret = cdrom_read_block(cdi, cgc, lba, 1, format, blocksize);
if (ret && sense.sense_key == 0x05 &&
sense.asc == 0x20 &&
sense.ascq == 0x00) {
/*
* SCSI-II devices are not required to support
* READ_CD, so let's try switching block size
*/
/* FIXME: switch back again... */
ret = cdrom_switch_blocksize(cdi, blocksize);
if (ret)
goto out;
cgc->sense = NULL;
ret = cdrom_read_cd(cdi, cgc, lba, blocksize, 1);
ret |= cdrom_switch_blocksize(cdi, blocksize);
}
if (!ret && copy_to_user(arg, cgc->buffer, blocksize))
ret = -EFAULT;
out:
kfree(cgc->buffer);
return ret;
}
static noinline int mmc_ioctl_cdrom_read_audio(struct cdrom_device_info *cdi,
void __user *arg)
{
struct cdrom_read_audio ra;
int lba;
IOCTL_IN(arg, struct cdrom_read_audio, ra);
if (ra.addr_format == CDROM_MSF)
lba = msf_to_lba(ra.addr.msf.minute,
ra.addr.msf.second,
ra.addr.msf.frame);
else if (ra.addr_format == CDROM_LBA)
lba = ra.addr.lba;
else
return -EINVAL;
/* FIXME: we need upper bound checking, too!! */
if (lba < 0 || ra.nframes <= 0 || ra.nframes > CD_FRAMES)
return -EINVAL;
return cdrom_read_cdda(cdi, ra.buf, lba, ra.nframes);
}
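/*
 * Hypothetical userspace sketch for CDROMREADAUDIO (not part of this
 * driver; device path is an assumption): read one raw audio frame
 * starting at LBA 0 into a caller-supplied buffer.  Guarded out so
 * the file compiles as-is.
 */
#if 0
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/cdrom.h>

int read_first_audio_frame(unsigned char buf[CD_FRAMESIZE_RAW])
{
	struct cdrom_read_audio ra;
	int fd = open("/dev/cdrom", O_RDONLY | O_NONBLOCK);

	if (fd < 0)
		return -1;
	ra.addr_format = CDROM_LBA;	/* could also use CDROM_MSF */
	ra.addr.lba = 0;
	ra.nframes = 1;			/* at most CD_FRAMES per call */
	ra.buf = buf;			/* receives 2352 bytes per frame */
	if (ioctl(fd, CDROMREADAUDIO, &ra) < 0) {
		close(fd);
		return -1;
	}
	close(fd);
	return 0;
}
#endif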
static noinline int mmc_ioctl_cdrom_subchannel(struct cdrom_device_info *cdi,
void __user *arg)
{
int ret;
struct cdrom_subchnl q;
u_char requested, back;
IOCTL_IN(arg, struct cdrom_subchnl, q);
requested = q.cdsc_format;
if (!((requested == CDROM_MSF) ||
(requested == CDROM_LBA)))
return -EINVAL;
q.cdsc_format = CDROM_MSF;
ret = cdrom_read_subchannel(cdi, &q, 0);
if (ret)
return ret;
back = q.cdsc_format; /* local copy */
sanitize_format(&q.cdsc_absaddr, &back, requested);
sanitize_format(&q.cdsc_reladdr, &q.cdsc_format, requested);
IOCTL_OUT(arg, struct cdrom_subchnl, q);
/* cdinfo(CD_DO_IOCTL, "CDROMSUBCHNL successful\n"); */
return 0;
}
static noinline int mmc_ioctl_cdrom_play_msf(struct cdrom_device_info *cdi,
void __user *arg,
struct packet_command *cgc)
{
struct cdrom_device_ops *cdo = cdi->ops;
struct cdrom_msf msf;
cdinfo(CD_DO_IOCTL, "entering CDROMPLAYMSF\n");
IOCTL_IN(arg, struct cdrom_msf, msf);
cgc->cmd[0] = GPCMD_PLAY_AUDIO_MSF;
cgc->cmd[3] = msf.cdmsf_min0;
cgc->cmd[4] = msf.cdmsf_sec0;
cgc->cmd[5] = msf.cdmsf_frame0;
cgc->cmd[6] = msf.cdmsf_min1;
cgc->cmd[7] = msf.cdmsf_sec1;
cgc->cmd[8] = msf.cdmsf_frame1;
cgc->data_direction = CGC_DATA_NONE;
return cdo->generic_packet(cdi, cgc);
}
static noinline int mmc_ioctl_cdrom_play_blk(struct cdrom_device_info *cdi,
void __user *arg,
struct packet_command *cgc)
{
struct cdrom_device_ops *cdo = cdi->ops;
struct cdrom_blk blk;
cdinfo(CD_DO_IOCTL, "entering CDROMPLAYBLK\n");
IOCTL_IN(arg, struct cdrom_blk, blk);
cgc->cmd[0] = GPCMD_PLAY_AUDIO_10;
cgc->cmd[2] = (blk.from >> 24) & 0xff;
cgc->cmd[3] = (blk.from >> 16) & 0xff;
cgc->cmd[4] = (blk.from >> 8) & 0xff;
cgc->cmd[5] = blk.from & 0xff;
cgc->cmd[7] = (blk.len >> 8) & 0xff;
cgc->cmd[8] = blk.len & 0xff;
cgc->data_direction = CGC_DATA_NONE;
return cdo->generic_packet(cdi, cgc);
}
static noinline int mmc_ioctl_cdrom_volume(struct cdrom_device_info *cdi,
void __user *arg,
struct packet_command *cgc,
unsigned int cmd)
{
struct cdrom_volctrl volctrl;
unsigned char buffer[32];
char mask[sizeof(buffer)];
unsigned short offset;
int ret;
cdinfo(CD_DO_IOCTL, "entering CDROMVOLUME\n");
IOCTL_IN(arg, struct cdrom_volctrl, volctrl);
cgc->buffer = buffer;
cgc->buflen = 24;
ret = cdrom_mode_sense(cdi, cgc, GPMODE_AUDIO_CTL_PAGE, 0);
if (ret)
return ret;
/* originally the code depended on buffer[1] to determine
how much data is available for transfer. buffer[1] is
unfortunately ambiguous and the only reliable way seems
to be to simply skip over the block descriptor... */
offset = 8 + be16_to_cpu(*(__be16 *)(buffer + 6));
if (offset + 16 > sizeof(buffer))
return -E2BIG;
if (offset + 16 > cgc->buflen) {
cgc->buflen = offset + 16;
ret = cdrom_mode_sense(cdi, cgc,
GPMODE_AUDIO_CTL_PAGE, 0);
if (ret)
return ret;
}
/* sanity check */
if ((buffer[offset] & 0x3f) != GPMODE_AUDIO_CTL_PAGE ||
buffer[offset + 1] < 14)
return -EINVAL;
/* now we have the current volume settings. if it was only
a CDROMVOLREAD, return these values */
if (cmd == CDROMVOLREAD) {
volctrl.channel0 = buffer[offset+9];
volctrl.channel1 = buffer[offset+11];
volctrl.channel2 = buffer[offset+13];
volctrl.channel3 = buffer[offset+15];
IOCTL_OUT(arg, struct cdrom_volctrl, volctrl);
return 0;
}
/* get the volume mask */
cgc->buffer = mask;
ret = cdrom_mode_sense(cdi, cgc, GPMODE_AUDIO_CTL_PAGE, 1);
if (ret)
return ret;
buffer[offset + 9] = volctrl.channel0 & mask[offset + 9];
buffer[offset + 11] = volctrl.channel1 & mask[offset + 11];
buffer[offset + 13] = volctrl.channel2 & mask[offset + 13];
buffer[offset + 15] = volctrl.channel3 & mask[offset + 15];
/* set volume */
cgc->buffer = buffer + offset - 8;
memset(cgc->buffer, 0, 8);
return cdrom_mode_select(cdi, cgc);
}
static noinline int mmc_ioctl_cdrom_start_stop(struct cdrom_device_info *cdi,
struct packet_command *cgc,
int cmd)
{
struct cdrom_device_ops *cdo = cdi->ops;
cdinfo(CD_DO_IOCTL, "entering CDROMSTART/CDROMSTOP\n");
cgc->cmd[0] = GPCMD_START_STOP_UNIT;
cgc->cmd[1] = 1;
cgc->cmd[4] = (cmd == CDROMSTART) ? 1 : 0;
cgc->data_direction = CGC_DATA_NONE;
return cdo->generic_packet(cdi, cgc);
}
static noinline int mmc_ioctl_cdrom_pause_resume(struct cdrom_device_info *cdi,
struct packet_command *cgc,
int cmd)
{
struct cdrom_device_ops *cdo = cdi->ops;
cdinfo(CD_DO_IOCTL, "entering CDROMPAUSE/CDROMRESUME\n");
cgc->cmd[0] = GPCMD_PAUSE_RESUME;
cgc->cmd[8] = (cmd == CDROMRESUME) ? 1 : 0;
cgc->data_direction = CGC_DATA_NONE;
return cdo->generic_packet(cdi, cgc);
}
static noinline int mmc_ioctl_dvd_read_struct(struct cdrom_device_info *cdi,
void __user *arg,
struct packet_command *cgc)
{
int ret;
dvd_struct *s;
int size = sizeof(dvd_struct);
if (!CDROM_CAN(CDC_DVD))
return -ENOSYS;
s = kmalloc(size, GFP_KERNEL);
if (!s)
return -ENOMEM;
cdinfo(CD_DO_IOCTL, "entering DVD_READ_STRUCT\n");
if (copy_from_user(s, arg, size)) {
kfree(s);
return -EFAULT;
}
ret = dvd_read_struct(cdi, s, cgc);
if (ret)
goto out;
if (copy_to_user(arg, s, size))
ret = -EFAULT;
out:
kfree(s);
return ret;
}
static noinline int mmc_ioctl_dvd_auth(struct cdrom_device_info *cdi,
void __user *arg)
{
int ret;
dvd_authinfo ai;
if (!CDROM_CAN(CDC_DVD))
return -ENOSYS;
cdinfo(CD_DO_IOCTL, "entering DVD_AUTH\n");
IOCTL_IN(arg, dvd_authinfo, ai);
ret = dvd_do_auth(cdi, &ai);
if (ret)
return ret;
IOCTL_OUT(arg, dvd_authinfo, ai);
return 0;
}
static noinline int mmc_ioctl_cdrom_next_writable(struct cdrom_device_info *cdi,
void __user *arg)
{
int ret;
long next = 0;
cdinfo(CD_DO_IOCTL, "entering CDROM_NEXT_WRITABLE\n");
ret = cdrom_get_next_writable(cdi, &next);
if (ret)
return ret;
IOCTL_OUT(arg, long, next);
return 0;
}
static noinline int mmc_ioctl_cdrom_last_written(struct cdrom_device_info *cdi,
void __user *arg)
{
int ret;
long last = 0;
cdinfo(CD_DO_IOCTL, "entering CDROM_LAST_WRITTEN\n");
ret = cdrom_get_last_written(cdi, &last);
if (ret)
return ret;
IOCTL_OUT(arg, long, last);
return 0;
}
static int mmc_ioctl(struct cdrom_device_info *cdi, unsigned int cmd,
unsigned long arg)
{
struct packet_command cgc;
void __user *userptr = (void __user *)arg;
memset(&cgc, 0, sizeof(cgc));
/* build a unified command and queue it through
cdo->generic_packet() */
switch (cmd) {
case CDROMREADRAW:
case CDROMREADMODE1:
case CDROMREADMODE2:
return mmc_ioctl_cdrom_read_data(cdi, userptr, &cgc, cmd);
case CDROMREADAUDIO:
return mmc_ioctl_cdrom_read_audio(cdi, userptr);
case CDROMSUBCHNL:
return mmc_ioctl_cdrom_subchannel(cdi, userptr);
case CDROMPLAYMSF:
return mmc_ioctl_cdrom_play_msf(cdi, userptr, &cgc);
case CDROMPLAYBLK:
return mmc_ioctl_cdrom_play_blk(cdi, userptr, &cgc);
case CDROMVOLCTRL:
case CDROMVOLREAD:
return mmc_ioctl_cdrom_volume(cdi, userptr, &cgc, cmd);
case CDROMSTART:
case CDROMSTOP:
return mmc_ioctl_cdrom_start_stop(cdi, &cgc, cmd);
case CDROMPAUSE:
case CDROMRESUME:
return mmc_ioctl_cdrom_pause_resume(cdi, &cgc, cmd);
case DVD_READ_STRUCT:
return mmc_ioctl_dvd_read_struct(cdi, userptr, &cgc);
case DVD_AUTH:
return mmc_ioctl_dvd_auth(cdi, userptr);
case CDROM_NEXT_WRITABLE:
return mmc_ioctl_cdrom_next_writable(cdi, userptr);
case CDROM_LAST_WRITTEN:
return mmc_ioctl_cdrom_last_written(cdi, userptr);
}
return -ENOTTY;
}
static int cdrom_get_track_info(struct cdrom_device_info *cdi, __u16 track, __u8 type,
track_information *ti)
{
struct cdrom_device_ops *cdo = cdi->ops;
struct packet_command cgc;
int ret, buflen;
init_cdrom_command(&cgc, ti, 8, CGC_DATA_READ);
cgc.cmd[0] = GPCMD_READ_TRACK_RZONE_INFO;
cgc.cmd[1] = type & 3;
cgc.cmd[4] = (track & 0xff00) >> 8;
cgc.cmd[5] = track & 0xff;
cgc.cmd[8] = 8;
cgc.quiet = 1;
if ((ret = cdo->generic_packet(cdi, &cgc)))
return ret;
buflen = be16_to_cpu(ti->track_information_length) +
sizeof(ti->track_information_length);
if (buflen > sizeof(track_information))
buflen = sizeof(track_information);
cgc.cmd[8] = cgc.buflen = buflen;
if ((ret = cdo->generic_packet(cdi, &cgc)))
return ret;
/* return actual fill size */
return buflen;
}
/* requires CD R/RW */
static int cdrom_get_disc_info(struct cdrom_device_info *cdi, disc_information *di)
{
struct cdrom_device_ops *cdo = cdi->ops;
struct packet_command cgc;
int ret, buflen;
/* set up command and get the disc info */
init_cdrom_command(&cgc, di, sizeof(*di), CGC_DATA_READ);
cgc.cmd[0] = GPCMD_READ_DISC_INFO;
cgc.cmd[8] = cgc.buflen = 2;
cgc.quiet = 1;
if ((ret = cdo->generic_packet(cdi, &cgc)))
return ret;
/* not all drives have the same disc_info length, so requeue
* packet with the length the drive tells us it can supply
*/
buflen = be16_to_cpu(di->disc_information_length) +
sizeof(di->disc_information_length);
if (buflen > sizeof(disc_information))
buflen = sizeof(disc_information);
cgc.cmd[8] = cgc.buflen = buflen;
if ((ret = cdo->generic_packet(cdi, &cgc)))
return ret;
/* return actual fill size */
return buflen;
}
/* return the last written block on the CD-R media. this is for the udf
file system. */
int cdrom_get_last_written(struct cdrom_device_info *cdi, long *last_written)
{
struct cdrom_tocentry toc;
disc_information di;
track_information ti;
__u32 last_track;
int ret = -1, ti_size;
if (!CDROM_CAN(CDC_GENERIC_PACKET))
goto use_toc;
ret = cdrom_get_disc_info(cdi, &di);
if (ret < (int)(offsetof(typeof(di), last_track_lsb)
+ sizeof(di.last_track_lsb)))
goto use_toc;
/* if unit didn't return msb, it's zeroed by cdrom_get_disc_info */
last_track = (di.last_track_msb << 8) | di.last_track_lsb;
ti_size = cdrom_get_track_info(cdi, last_track, 1, &ti);
if (ti_size < (int)offsetof(typeof(ti), track_start))
goto use_toc;
/* if this track is blank, try the previous. */
if (ti.blank) {
if (last_track==1)
goto use_toc;
last_track--;
ti_size = cdrom_get_track_info(cdi, last_track, 1, &ti);
}
if (ti_size < (int)(offsetof(typeof(ti), track_size)
+ sizeof(ti.track_size)))
goto use_toc;
/* if last recorded field is valid, return it. */
if (ti.lra_v && ti_size >= (int)(offsetof(typeof(ti), last_rec_address)
+ sizeof(ti.last_rec_address))) {
*last_written = be32_to_cpu(ti.last_rec_address);
} else {
/* make it up instead */
*last_written = be32_to_cpu(ti.track_start) +
be32_to_cpu(ti.track_size);
if (ti.free_blocks)
*last_written -= (be32_to_cpu(ti.free_blocks) + 7);
}
return 0;
/* This is where we end up if the drive can't do
   GPCMD_READ_DISC_INFO or GPCMD_READ_TRACK_RZONE_INFO, if one of
   them fails, or if it doesn't return enough information. We then
   fall back to the TOC contents. */
use_toc:
toc.cdte_format = CDROM_MSF;
toc.cdte_track = CDROM_LEADOUT;
if ((ret = cdi->ops->audio_ioctl(cdi, CDROMREADTOCENTRY, &toc)))
return ret;
sanitize_format(&toc.cdte_addr, &toc.cdte_format, CDROM_LBA);
*last_written = toc.cdte_addr.lba;
return 0;
}
/* return the next writable block. also for udf file system. */
static int cdrom_get_next_writable(struct cdrom_device_info *cdi, long *next_writable)
{
disc_information di;
track_information ti;
__u16 last_track;
int ret, ti_size;
if (!CDROM_CAN(CDC_GENERIC_PACKET))
goto use_last_written;
ret = cdrom_get_disc_info(cdi, &di);
if (ret < 0 || ret < offsetof(typeof(di), last_track_lsb)
+ sizeof(di.last_track_lsb))
goto use_last_written;
/* if unit didn't return msb, it's zeroed by cdrom_get_disc_info */
last_track = (di.last_track_msb << 8) | di.last_track_lsb;
ti_size = cdrom_get_track_info(cdi, last_track, 1, &ti);
if (ti_size < 0 || ti_size < offsetof(typeof(ti), track_start))
goto use_last_written;
/* if this track is blank, try the previous. */
if (ti.blank) {
if (last_track == 1)
goto use_last_written;
last_track--;
ti_size = cdrom_get_track_info(cdi, last_track, 1, &ti);
if (ti_size < 0)
goto use_last_written;
}
/* if next recordable address field is valid, use it. */
if (ti.nwa_v && ti_size >= offsetof(typeof(ti), next_writable)
+ sizeof(ti.next_writable)) {
*next_writable = be32_to_cpu(ti.next_writable);
return 0;
}
use_last_written:
if ((ret = cdrom_get_last_written(cdi, next_writable))) {
*next_writable = 0;
return ret;
} else {
*next_writable += 7;
return 0;
}
}
EXPORT_SYMBOL(cdrom_get_last_written);
EXPORT_SYMBOL(register_cdrom);
EXPORT_SYMBOL(unregister_cdrom);
EXPORT_SYMBOL(cdrom_open);
EXPORT_SYMBOL(cdrom_release);
EXPORT_SYMBOL(cdrom_ioctl);
EXPORT_SYMBOL(cdrom_media_changed);
EXPORT_SYMBOL(cdrom_number_of_slots);
EXPORT_SYMBOL(cdrom_mode_select);
EXPORT_SYMBOL(cdrom_mode_sense);
EXPORT_SYMBOL(init_cdrom_command);
EXPORT_SYMBOL(cdrom_get_media_event);
#ifdef CONFIG_SYSCTL
#define CDROM_STR_SIZE 1000
static struct cdrom_sysctl_settings {
char info[CDROM_STR_SIZE]; /* general info */
int autoclose; /* close tray upon mount, etc */
int autoeject; /* eject on umount */
int debug; /* turn on debugging messages */
int lock; /* lock the door on device open */
int check; /* check media type */
} cdrom_sysctl_settings;
enum cdrom_print_option {
CTL_NAME,
CTL_SPEED,
CTL_SLOTS,
CTL_CAPABILITY
};
static int cdrom_print_info(const char *header, int val, char *info,
int *pos, enum cdrom_print_option option)
{
const int max_size = sizeof(cdrom_sysctl_settings.info);
struct cdrom_device_info *cdi;
int ret;
ret = scnprintf(info + *pos, max_size - *pos, "%s", header);
if (!ret)
return 1;
*pos += ret;
list_for_each_entry(cdi, &cdrom_list, list) {
switch (option) {
case CTL_NAME:
ret = scnprintf(info + *pos, max_size - *pos,
"\t%s", cdi->name);
break;
case CTL_SPEED:
ret = scnprintf(info + *pos, max_size - *pos,
"\t%d", cdi->speed);
break;
case CTL_SLOTS:
ret = scnprintf(info + *pos, max_size - *pos,
"\t%d", cdi->capacity);
break;
case CTL_CAPABILITY:
ret = scnprintf(info + *pos, max_size - *pos,
"\t%d", CDROM_CAN(val) != 0);
break;
default:
pr_info("invalid option%d\n", option);
return 1;
}
if (!ret)
return 1;
*pos += ret;
}
return 0;
}
static int cdrom_sysctl_info(ctl_table *ctl, int write,
void __user *buffer, size_t *lenp, loff_t *ppos)
{
int pos;
char *info = cdrom_sysctl_settings.info;
const int max_size = sizeof(cdrom_sysctl_settings.info);
if (!*lenp || (*ppos && !write)) {
*lenp = 0;
return 0;
}
mutex_lock(&cdrom_mutex);
pos = sprintf(info, "CD-ROM information, " VERSION "\n");
if (cdrom_print_info("\ndrive name:\t", 0, info, &pos, CTL_NAME))
goto done;
if (cdrom_print_info("\ndrive speed:\t", 0, info, &pos, CTL_SPEED))
goto done;
if (cdrom_print_info("\ndrive # of slots:", 0, info, &pos, CTL_SLOTS))
goto done;
if (cdrom_print_info("\nCan close tray:\t",
CDC_CLOSE_TRAY, info, &pos, CTL_CAPABILITY))
goto done;
if (cdrom_print_info("\nCan open tray:\t",
CDC_OPEN_TRAY, info, &pos, CTL_CAPABILITY))
goto done;
if (cdrom_print_info("\nCan lock tray:\t",
CDC_LOCK, info, &pos, CTL_CAPABILITY))
goto done;
if (cdrom_print_info("\nCan change speed:",
CDC_SELECT_SPEED, info, &pos, CTL_CAPABILITY))
goto done;
if (cdrom_print_info("\nCan select disk:",
CDC_SELECT_DISC, info, &pos, CTL_CAPABILITY))
goto done;
if (cdrom_print_info("\nCan read multisession:",
CDC_MULTI_SESSION, info, &pos, CTL_CAPABILITY))
goto done;
if (cdrom_print_info("\nCan read MCN:\t",
CDC_MCN, info, &pos, CTL_CAPABILITY))
goto done;
if (cdrom_print_info("\nReports media changed:",
CDC_MEDIA_CHANGED, info, &pos, CTL_CAPABILITY))
goto done;
if (cdrom_print_info("\nCan play audio:\t",
CDC_PLAY_AUDIO, info, &pos, CTL_CAPABILITY))
goto done;
if (cdrom_print_info("\nCan write CD-R:\t",
CDC_CD_R, info, &pos, CTL_CAPABILITY))
goto done;
if (cdrom_print_info("\nCan write CD-RW:",
CDC_CD_RW, info, &pos, CTL_CAPABILITY))
goto done;
if (cdrom_print_info("\nCan read DVD:\t",
CDC_DVD, info, &pos, CTL_CAPABILITY))
goto done;
if (cdrom_print_info("\nCan write DVD-R:",
CDC_DVD_R, info, &pos, CTL_CAPABILITY))
goto done;
if (cdrom_print_info("\nCan write DVD-RAM:",
CDC_DVD_RAM, info, &pos, CTL_CAPABILITY))
goto done;
if (cdrom_print_info("\nCan read MRW:\t",
CDC_MRW, info, &pos, CTL_CAPABILITY))
goto done;
if (cdrom_print_info("\nCan write MRW:\t",
CDC_MRW_W, info, &pos, CTL_CAPABILITY))
goto done;
if (cdrom_print_info("\nCan write RAM:\t",
CDC_RAM, info, &pos, CTL_CAPABILITY))
goto done;
if (!scnprintf(info + pos, max_size - pos, "\n\n"))
goto done;
doit:
mutex_unlock(&cdrom_mutex);
return proc_dostring(ctl, write, buffer, lenp, ppos);
done:
pr_info("info buffer too small\n");
goto doit;
}
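/*
 * The resulting /proc/sys/dev/cdrom/info starts with a
 * "CD-ROM information, " VERSION line and then holds one
 * tab-separated row per label above, with one column per registered
 * drive, e.g. (illustrative values):
 *
 *	drive name:		sr0
 *	drive speed:		24
 *	drive # of slots:	1
 *	Can close tray:		1
 *	...
 */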
/* Unfortunately, per-device settings are not implemented through
   procfs/sysctl yet. When they are, this will naturally disappear. For now
   just update all drives; later this will become the template on which
   newly registered drives will be based. */
static void cdrom_update_settings(void)
{
struct cdrom_device_info *cdi;
mutex_lock(&cdrom_mutex);
list_for_each_entry(cdi, &cdrom_list, list) {
if (autoclose && CDROM_CAN(CDC_CLOSE_TRAY))
cdi->options |= CDO_AUTO_CLOSE;
else if (!autoclose)
cdi->options &= ~CDO_AUTO_CLOSE;
if (autoeject && CDROM_CAN(CDC_OPEN_TRAY))
cdi->options |= CDO_AUTO_EJECT;
else if (!autoeject)
cdi->options &= ~CDO_AUTO_EJECT;
if (lockdoor && CDROM_CAN(CDC_LOCK))
cdi->options |= CDO_LOCK;
else if (!lockdoor)
cdi->options &= ~CDO_LOCK;
if (check_media_type)
cdi->options |= CDO_CHECK_TYPE;
else
cdi->options &= ~CDO_CHECK_TYPE;
}
mutex_unlock(&cdrom_mutex);
}
static int cdrom_sysctl_handler(ctl_table *ctl, int write,
void __user *buffer, size_t *lenp, loff_t *ppos)
{
int ret;
ret = proc_dointvec(ctl, write, buffer, lenp, ppos);
if (write) {
/* we only care for 1 or 0. */
autoclose = !!cdrom_sysctl_settings.autoclose;
autoeject = !!cdrom_sysctl_settings.autoeject;
debug = !!cdrom_sysctl_settings.debug;
lockdoor = !!cdrom_sysctl_settings.lock;
check_media_type = !!cdrom_sysctl_settings.check;
/* update the option flags according to the changes. We
   don't have per-device options through sysctl yet,
   but once we do, this will disappear. */
cdrom_update_settings();
}
return ret;
}
/* Place files in /proc/sys/dev/cdrom */
static ctl_table cdrom_table[] = {
{
.procname = "info",
.data = &cdrom_sysctl_settings.info,
.maxlen = CDROM_STR_SIZE,
.mode = 0444,
.proc_handler = cdrom_sysctl_info,
},
{
.procname = "autoclose",
.data = &cdrom_sysctl_settings.autoclose,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = cdrom_sysctl_handler,
},
{
.procname = "autoeject",
.data = &cdrom_sysctl_settings.autoeject,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = cdrom_sysctl_handler,
},
{
.procname = "debug",
.data = &cdrom_sysctl_settings.debug,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = cdrom_sysctl_handler,
},
{
.procname = "lock",
.data = &cdrom_sysctl_settings.lock,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = cdrom_sysctl_handler,
},
{
.procname = "check_media",
.data = &cdrom_sysctl_settings.check,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = cdrom_sysctl_handler
},
{ }
};
static ctl_table cdrom_cdrom_table[] = {
{
.procname = "cdrom",
.maxlen = 0,
.mode = 0555,
.child = cdrom_table,
},
{ }
};
/* Make sure that /proc/sys/dev is there */
static ctl_table cdrom_root_table[] = {
{
.procname = "dev",
.maxlen = 0,
.mode = 0555,
.child = cdrom_cdrom_table,
},
{ }
};
static struct ctl_table_header *cdrom_sysctl_header;
static void cdrom_sysctl_register(void)
{
static int initialized;
if (initialized == 1)
return;
cdrom_sysctl_header = register_sysctl_table(cdrom_root_table);
/* set the defaults */
cdrom_sysctl_settings.autoclose = autoclose;
cdrom_sysctl_settings.autoeject = autoeject;
cdrom_sysctl_settings.debug = debug;
cdrom_sysctl_settings.lock = lockdoor;
cdrom_sysctl_settings.check = check_media_type;
initialized = 1;
}
static void cdrom_sysctl_unregister(void)
{
if (cdrom_sysctl_header)
unregister_sysctl_table(cdrom_sysctl_header);
}
#else /* CONFIG_SYSCTL */
static void cdrom_sysctl_register(void)
{
}
static void cdrom_sysctl_unregister(void)
{
}
#endif /* CONFIG_SYSCTL */
static int __init cdrom_init(void)
{
cdrom_sysctl_register();
return 0;
}
static void __exit cdrom_exit(void)
{
pr_info("Uniform CD-ROM driver unloaded\n");
cdrom_sysctl_unregister();
}
module_init(cdrom_init);
module_exit(cdrom_exit);
MODULE_LICENSE("GPL");
| gpl-2.0 |
McBane87/Sony_Tablet_Z_LP.454_Kernel | drivers/tty/vt/vc_screen.c | 3495 | 14742 | /*
* Provide access to virtual console memory.
* /dev/vcs0: the screen as it is being viewed right now (possibly scrolled)
* /dev/vcsN: the screen of /dev/ttyN (1 <= N <= 63)
* [minor: N]
*
* /dev/vcsaN: idem, but including attributes, and prefixed with
* the 4 bytes lines,columns,x,y (as screendump used to give).
* Each attribute/character pair is in native endianness.
* [minor: N+128]
*
* This replaces screendump and part of selection, so that the system
* administrator can control access using file system permissions.
*
* aeb@cwi.nl - after Frieda's funeral - 950211
*
* machek@k332.feld.cvut.cz - modified not to send characters to wrong console
* - fixed some fatal off-by-one bugs (0-- no longer == -1 -> looping and looping and looping...)
* - making it shorter - scr_readw are macros which expand in PRETTY long code
*/
#include <linux/kernel.h>
#include <linux/major.h>
#include <linux/errno.h>
#include <linux/export.h>
#include <linux/tty.h>
#include <linux/interrupt.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/vt_kern.h>
#include <linux/selection.h>
#include <linux/kbd_kern.h>
#include <linux/console.h>
#include <linux/device.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/poll.h>
#include <linux/signal.h>
#include <linux/slab.h>
#include <linux/notifier.h>
#include <asm/uaccess.h>
#include <asm/byteorder.h>
#include <asm/unaligned.h>
#undef attr
#undef org
#undef addr
#define HEADER_SIZE 4
#define CON_BUF_SIZE (CONFIG_BASE_SMALL ? 256 : PAGE_SIZE)
struct vcs_poll_data {
struct notifier_block notifier;
unsigned int cons_num;
bool seen_last_update;
wait_queue_head_t waitq;
struct fasync_struct *fasync;
};
static int
vcs_notifier(struct notifier_block *nb, unsigned long code, void *_param)
{
struct vt_notifier_param *param = _param;
struct vc_data *vc = param->vc;
struct vcs_poll_data *poll =
container_of(nb, struct vcs_poll_data, notifier);
int currcons = poll->cons_num;
if (code != VT_UPDATE)
return NOTIFY_DONE;
if (currcons == 0)
currcons = fg_console;
else
currcons--;
if (currcons != vc->vc_num)
return NOTIFY_DONE;
poll->seen_last_update = false;
wake_up_interruptible(&poll->waitq);
kill_fasync(&poll->fasync, SIGIO, POLL_IN);
return NOTIFY_OK;
}
static void
vcs_poll_data_free(struct vcs_poll_data *poll)
{
unregister_vt_notifier(&poll->notifier);
kfree(poll);
}
static struct vcs_poll_data *
vcs_poll_data_get(struct file *file)
{
struct vcs_poll_data *poll = file->private_data;
if (poll)
return poll;
poll = kzalloc(sizeof(*poll), GFP_KERNEL);
if (!poll)
return NULL;
poll->cons_num = iminor(file->f_path.dentry->d_inode) & 127;
init_waitqueue_head(&poll->waitq);
poll->notifier.notifier_call = vcs_notifier;
if (register_vt_notifier(&poll->notifier) != 0) {
kfree(poll);
return NULL;
}
/*
* This code may be called either through ->poll() or ->fasync().
* If we have two threads using the same file descriptor, they could
* both enter this function, both notice that the structure hasn't
* been allocated yet, and go ahead and allocate it in parallel, but
* only one of the two may survive and be shared; otherwise we'd leak
* memory and leave a dangling notifier callback.
*/
spin_lock(&file->f_lock);
if (!file->private_data) {
file->private_data = poll;
} else {
/* someone else raced ahead of us */
vcs_poll_data_free(poll);
poll = file->private_data;
}
spin_unlock(&file->f_lock);
return poll;
}
/*
* Returns VC for inode.
* Must be called with console_lock.
*/
static struct vc_data*
vcs_vc(struct inode *inode, int *viewed)
{
unsigned int currcons = iminor(inode) & 127;
WARN_CONSOLE_UNLOCKED();
if (currcons == 0) {
currcons = fg_console;
if (viewed)
*viewed = 1;
} else {
currcons--;
if (viewed)
*viewed = 0;
}
return vc_cons[currcons].d;
}
/*
* Returns size for VC carried by inode.
* Must be called with console_lock.
*/
static int
vcs_size(struct inode *inode)
{
int size;
int minor = iminor(inode);
struct vc_data *vc;
WARN_CONSOLE_UNLOCKED();
vc = vcs_vc(inode, NULL);
if (!vc)
return -ENXIO;
size = vc->vc_rows * vc->vc_cols;
if (minor & 128)
size = 2*size + HEADER_SIZE;
return size;
}
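/*
 * Example: for an 80x25 console, /dev/vcsN is 80 * 25 = 2000 bytes,
 * while /dev/vcsaN (minor & 128 set) is 2 * 2000 + 4 = 4004 bytes:
 * one character/attribute pair per cell plus the 4-byte header.
 */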
static loff_t vcs_lseek(struct file *file, loff_t offset, int orig)
{
int size;
console_lock();
size = vcs_size(file->f_path.dentry->d_inode);
console_unlock();
if (size < 0)
return size;
switch (orig) {
default:
return -EINVAL;
case 2:
offset += size;
break;
case 1:
offset += file->f_pos;
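/* fall through */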
case 0:
break;
}
if (offset < 0 || offset > size) {
return -EINVAL;
}
file->f_pos = offset;
return file->f_pos;
}
static ssize_t
vcs_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
{
struct inode *inode = file->f_path.dentry->d_inode;
unsigned int currcons = iminor(inode);
struct vc_data *vc;
struct vcs_poll_data *poll;
long pos;
long attr, read;
int col, maxcol, viewed;
unsigned short *org = NULL;
ssize_t ret;
char *con_buf;
con_buf = (char *) __get_free_page(GFP_KERNEL);
if (!con_buf)
return -ENOMEM;
pos = *ppos;
/* Select the proper current console and verify
* sanity of the situation under the console lock.
*/
console_lock();
attr = (currcons & 128);
ret = -ENXIO;
vc = vcs_vc(inode, &viewed);
if (!vc)
goto unlock_out;
ret = -EINVAL;
if (pos < 0)
goto unlock_out;
poll = file->private_data;
if (count && poll)
poll->seen_last_update = true;
read = 0;
ret = 0;
while (count) {
char *con_buf0, *con_buf_start;
long this_round, size;
ssize_t orig_count;
long p = pos;
/* Check whether we are above size each round,
* as copy_to_user at the end of this loop
* could sleep.
*/
size = vcs_size(inode);
if (size < 0) {
if (read)
break;
ret = size;
goto unlock_out;
}
if (pos >= size)
break;
if (count > size - pos)
count = size - pos;
this_round = count;
if (this_round > CON_BUF_SIZE)
this_round = CON_BUF_SIZE;
/* Perform the whole read into the local con_buf.
* Then we can drop the console spinlock and safely
* attempt to move it to userspace.
*/
con_buf_start = con_buf0 = con_buf;
orig_count = this_round;
maxcol = vc->vc_cols;
if (!attr) {
org = screen_pos(vc, p, viewed);
col = p % maxcol;
p += maxcol - col;
while (this_round-- > 0) {
*con_buf0++ = (vcs_scr_readw(vc, org++) & 0xff);
if (++col == maxcol) {
org = screen_pos(vc, p, viewed);
col = 0;
p += maxcol;
}
}
} else {
if (p < HEADER_SIZE) {
size_t tmp_count;
con_buf0[0] = (char)vc->vc_rows;
con_buf0[1] = (char)vc->vc_cols;
getconsxy(vc, con_buf0 + 2);
con_buf_start += p;
this_round += p;
if (this_round > CON_BUF_SIZE) {
this_round = CON_BUF_SIZE;
orig_count = this_round - p;
}
tmp_count = HEADER_SIZE;
if (tmp_count > this_round)
tmp_count = this_round;
/* Advance state pointers and move on. */
this_round -= tmp_count;
p = HEADER_SIZE;
con_buf0 = con_buf + HEADER_SIZE;
/* If this_round >= 0, then p is even... */
} else if (p & 1) {
/* Skip the first byte for output if the start address is odd.
 * Update region sizes up/down depending on free
 * space in the buffer.
 */
con_buf_start++;
if (this_round < CON_BUF_SIZE)
this_round++;
else
orig_count--;
}
if (this_round > 0) {
unsigned short *tmp_buf = (unsigned short *)con_buf0;
p -= HEADER_SIZE;
p /= 2;
col = p % maxcol;
org = screen_pos(vc, p, viewed);
p += maxcol - col;
/* Buffer has even length, so we can always copy
* character + attribute. We do not copy last byte
* to userspace if this_round is odd.
*/
this_round = (this_round + 1) >> 1;
while (this_round) {
*tmp_buf++ = vcs_scr_readw(vc, org++);
this_round --;
if (++col == maxcol) {
org = screen_pos(vc, p, viewed);
col = 0;
p += maxcol;
}
}
}
}
/* Finally, release the console semaphore while we push
* all the data to userspace from our temporary buffer.
*
* AKPM: Even though it's a semaphore, we should drop it because
* the pagefault handling code may want to call printk().
*/
console_unlock();
ret = copy_to_user(buf, con_buf_start, orig_count);
console_lock();
if (ret) {
read += (orig_count - ret);
ret = -EFAULT;
break;
}
buf += orig_count;
pos += orig_count;
read += orig_count;
count -= orig_count;
}
*ppos += read;
if (read)
ret = read;
unlock_out:
console_unlock();
free_page((unsigned long) con_buf);
return ret;
}
static ssize_t
vcs_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos)
{
struct inode *inode = file->f_path.dentry->d_inode;
unsigned int currcons = iminor(inode);
struct vc_data *vc;
long pos;
long attr, size, written;
char *con_buf0;
int col, maxcol, viewed;
u16 *org0 = NULL, *org = NULL;
size_t ret;
char *con_buf;
con_buf = (char *) __get_free_page(GFP_KERNEL);
if (!con_buf)
return -ENOMEM;
pos = *ppos;
/* Select the proper current console and verify
* sanity of the situation under the console lock.
*/
console_lock();
attr = (currcons & 128);
ret = -ENXIO;
vc = vcs_vc(inode, &viewed);
if (!vc)
goto unlock_out;
size = vcs_size(inode);
ret = -EINVAL;
if (pos < 0 || pos > size)
goto unlock_out;
if (count > size - pos)
count = size - pos;
written = 0;
while (count) {
long this_round = count;
size_t orig_count;
long p;
if (this_round > CON_BUF_SIZE)
this_round = CON_BUF_SIZE;
/* Temporarily drop the console lock so that we can read
* in the write data from userspace safely.
*/
console_unlock();
ret = copy_from_user(con_buf, buf, this_round);
console_lock();
if (ret) {
this_round -= ret;
if (!this_round) {
/* Abort loop if no data were copied. Otherwise
* fail with -EFAULT.
*/
if (written)
break;
ret = -EFAULT;
goto unlock_out;
}
}
/* The vcs_size might have changed while we slept to grab
* the user buffer, so recheck.
* Return data written up to now on failure.
*/
size = vcs_size(inode);
if (size < 0) {
if (written)
break;
ret = size;
goto unlock_out;
}
if (pos >= size)
break;
if (this_round > size - pos)
this_round = size - pos;
/* OK, now actually push the write to the console
* under the lock using the local kernel buffer.
*/
con_buf0 = con_buf;
orig_count = this_round;
maxcol = vc->vc_cols;
p = pos;
if (!attr) {
org0 = org = screen_pos(vc, p, viewed);
col = p % maxcol;
p += maxcol - col;
while (this_round > 0) {
unsigned char c = *con_buf0++;
this_round--;
vcs_scr_writew(vc,
(vcs_scr_readw(vc, org) & 0xff00) | c, org);
org++;
if (++col == maxcol) {
org = screen_pos(vc, p, viewed);
col = 0;
p += maxcol;
}
}
} else {
if (p < HEADER_SIZE) {
char header[HEADER_SIZE];
getconsxy(vc, header + 2);
while (p < HEADER_SIZE && this_round > 0) {
this_round--;
header[p++] = *con_buf0++;
}
if (!viewed)
putconsxy(vc, header + 2);
}
p -= HEADER_SIZE;
col = (p/2) % maxcol;
if (this_round > 0) {
org0 = org = screen_pos(vc, p/2, viewed);
if ((p & 1) && this_round > 0) {
char c;
this_round--;
c = *con_buf0++;
#ifdef __BIG_ENDIAN
vcs_scr_writew(vc, c |
(vcs_scr_readw(vc, org) & 0xff00), org);
#else
vcs_scr_writew(vc, (c << 8) |
(vcs_scr_readw(vc, org) & 0xff), org);
#endif
org++;
p++;
if (++col == maxcol) {
org = screen_pos(vc, p/2, viewed);
col = 0;
}
}
p /= 2;
p += maxcol - col;
}
while (this_round > 1) {
unsigned short w;
w = get_unaligned(((unsigned short *)con_buf0));
vcs_scr_writew(vc, w, org++);
con_buf0 += 2;
this_round -= 2;
if (++col == maxcol) {
org = screen_pos(vc, p, viewed);
col = 0;
p += maxcol;
}
}
if (this_round > 0) {
unsigned char c;
c = *con_buf0++;
#ifdef __BIG_ENDIAN
vcs_scr_writew(vc, (vcs_scr_readw(vc, org) & 0xff) | (c << 8), org);
#else
vcs_scr_writew(vc, (vcs_scr_readw(vc, org) & 0xff00) | c, org);
#endif
}
}
count -= orig_count;
written += orig_count;
buf += orig_count;
pos += orig_count;
if (org0)
update_region(vc, (unsigned long)(org0), org - org0);
}
*ppos += written;
ret = written;
if (written)
vcs_scr_updated(vc);
unlock_out:
console_unlock();
free_page((unsigned long) con_buf);
return ret;
}
static unsigned int
vcs_poll(struct file *file, poll_table *wait)
{
struct vcs_poll_data *poll = vcs_poll_data_get(file);
int ret = DEFAULT_POLLMASK|POLLERR|POLLPRI;
if (poll) {
poll_wait(file, &poll->waitq, wait);
if (poll->seen_last_update)
ret = DEFAULT_POLLMASK;
}
return ret;
}
static int
vcs_fasync(int fd, struct file *file, int on)
{
struct vcs_poll_data *poll = file->private_data;
if (!poll) {
/* don't allocate anything if all we want is to disable fasync */
if (!on)
return 0;
poll = vcs_poll_data_get(file);
if (!poll)
return -ENOMEM;
}
return fasync_helper(fd, file, on, &poll->fasync);
}
static int
vcs_open(struct inode *inode, struct file *filp)
{
unsigned int currcons = iminor(inode) & 127;
int ret = 0;
console_lock();
if(currcons && !vc_cons_allocated(currcons-1))
ret = -ENXIO;
console_unlock();
return ret;
}
static int vcs_release(struct inode *inode, struct file *file)
{
struct vcs_poll_data *poll = file->private_data;
if (poll)
vcs_poll_data_free(poll);
return 0;
}
static const struct file_operations vcs_fops = {
.llseek = vcs_lseek,
.read = vcs_read,
.write = vcs_write,
.poll = vcs_poll,
.fasync = vcs_fasync,
.open = vcs_open,
.release = vcs_release,
};
static struct class *vc_class;
void vcs_make_sysfs(int index)
{
device_create(vc_class, NULL, MKDEV(VCS_MAJOR, index + 1), NULL,
"vcs%u", index + 1);
device_create(vc_class, NULL, MKDEV(VCS_MAJOR, index + 129), NULL,
"vcsa%u", index + 1);
}
void vcs_remove_sysfs(int index)
{
device_destroy(vc_class, MKDEV(VCS_MAJOR, index + 1));
device_destroy(vc_class, MKDEV(VCS_MAJOR, index + 129));
}
int __init vcs_init(void)
{
unsigned int i;
if (register_chrdev(VCS_MAJOR, "vcs", &vcs_fops))
panic("unable to get major %d for vcs device", VCS_MAJOR);
vc_class = class_create(THIS_MODULE, "vc");
device_create(vc_class, NULL, MKDEV(VCS_MAJOR, 0), NULL, "vcs");
device_create(vc_class, NULL, MKDEV(VCS_MAJOR, 128), NULL, "vcsa");
for (i = 0; i < MIN_NR_CONSOLES; i++)
vcs_make_sysfs(i);
return 0;
}
| gpl-2.0 |
SlimRoms/kernel_samsung_tuna | arch/sh/kernel/cpu/sh2a/fpu.c | 4007 | 13875 | /*
* Save/restore floating point context for signal handlers.
*
* Copyright (C) 1999, 2000 Kaz Kojima & Niibe Yutaka
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* FIXME! These routines can be optimized in the big-endian case.
*/
#include <linux/sched.h>
#include <linux/signal.h>
#include <asm/processor.h>
#include <asm/io.h>
#include <asm/fpu.h>
/* The PR (precision) bit in the FP Status Register must be clear when
* an frchg instruction is executed, otherwise the instruction is undefined.
* Executing frchg with PR set causes a trap on some SH4 implementations.
*/
#define FPSCR_RCHG 0x00000000
/*
* Save FPU registers onto task structure.
*/
void save_fpu(struct task_struct *tsk)
{
unsigned long dummy;
enable_fpu();
asm volatile("sts.l fpul, @-%0\n\t"
"sts.l fpscr, @-%0\n\t"
"fmov.s fr15, @-%0\n\t"
"fmov.s fr14, @-%0\n\t"
"fmov.s fr13, @-%0\n\t"
"fmov.s fr12, @-%0\n\t"
"fmov.s fr11, @-%0\n\t"
"fmov.s fr10, @-%0\n\t"
"fmov.s fr9, @-%0\n\t"
"fmov.s fr8, @-%0\n\t"
"fmov.s fr7, @-%0\n\t"
"fmov.s fr6, @-%0\n\t"
"fmov.s fr5, @-%0\n\t"
"fmov.s fr4, @-%0\n\t"
"fmov.s fr3, @-%0\n\t"
"fmov.s fr2, @-%0\n\t"
"fmov.s fr1, @-%0\n\t"
"fmov.s fr0, @-%0\n\t"
"lds %3, fpscr\n\t"
: "=r" (dummy)
: "0" ((char *)(&tsk->thread.xstate->hardfpu.status)),
"r" (FPSCR_RCHG),
"r" (FPSCR_INIT)
: "memory");
disable_fpu();
}
void restore_fpu(struct task_struct *tsk)
{
unsigned long dummy;
enable_fpu();
asm volatile("fmov.s @%0+, fr0\n\t"
"fmov.s @%0+, fr1\n\t"
"fmov.s @%0+, fr2\n\t"
"fmov.s @%0+, fr3\n\t"
"fmov.s @%0+, fr4\n\t"
"fmov.s @%0+, fr5\n\t"
"fmov.s @%0+, fr6\n\t"
"fmov.s @%0+, fr7\n\t"
"fmov.s @%0+, fr8\n\t"
"fmov.s @%0+, fr9\n\t"
"fmov.s @%0+, fr10\n\t"
"fmov.s @%0+, fr11\n\t"
"fmov.s @%0+, fr12\n\t"
"fmov.s @%0+, fr13\n\t"
"fmov.s @%0+, fr14\n\t"
"fmov.s @%0+, fr15\n\t"
"lds.l @%0+, fpscr\n\t"
"lds.l @%0+, fpul\n\t"
: "=r" (dummy)
: "0" (tsk->thread.xstate), "r" (FPSCR_RCHG)
: "memory");
disable_fpu();
}
/*
* Emulate arithmetic ops on denormalized numbers for some FPU insns.
*/
/* denormalized float * float */
static int denormal_mulf(int hx, int hy)
{
unsigned int ix, iy;
unsigned long long m, n;
int exp, w;
ix = hx & 0x7fffffff;
iy = hy & 0x7fffffff;
if (iy < 0x00800000 || ix == 0)
return ((hx ^ hy) & 0x80000000);
exp = (iy & 0x7f800000) >> 23;
ix &= 0x007fffff;
iy = (iy & 0x007fffff) | 0x00800000;
m = (unsigned long long)ix * iy;
n = m;
w = -1;
while (n) { n >>= 1; w++; }
/* FIXME: use guard bits */
exp += w - 126 - 46;
if (exp > 0)
ix = ((int) (m >> (w - 23)) & 0x007fffff) | (exp << 23);
else if (exp + 22 >= 0)
ix = (int) (m >> (w - 22 - exp)) & 0x007fffff;
else
ix = 0;
ix |= (hx ^ hy) & 0x80000000;
return ix;
}
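/*
 * Quick sanity check for denormal_mulf() (illustrative only, not
 * built with the kernel): the smallest single-precision denormal
 * 0x00000001 (2^-149) times 2.0f (0x40000000) must give 0x00000002.
 */
#if 0
#include <assert.h>

static void denormal_mulf_selftest(void)
{
	assert(denormal_mulf(0x00000001, 0x40000000) == 0x00000002);
}
#endif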
/* denormalized double * double */
static void mult64(unsigned long long x, unsigned long long y,
unsigned long long *highp, unsigned long long *lowp)
{
unsigned long long sub0, sub1, sub2, sub3;
unsigned long long high, low;
sub0 = (x >> 32) * (unsigned long) (y >> 32);
sub1 = (x & 0xffffffffLL) * (unsigned long) (y >> 32);
sub2 = (x >> 32) * (unsigned long) (y & 0xffffffffLL);
sub3 = (x & 0xffffffffLL) * (unsigned long) (y & 0xffffffffLL);
low = sub3;
high = 0LL;
sub3 += (sub1 << 32);
if (low > sub3)
high++;
low = sub3;
sub3 += (sub2 << 32);
if (low > sub3)
high++;
low = sub3;
high += (sub1 >> 32) + (sub2 >> 32);
high += sub0;
*lowp = low;
*highp = high;
}
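/*
 * Example: mult64(0xffffffffffffffffULL, 2, &h, &l) yields h == 1 and
 * l == 0xfffffffffffffffeULL, i.e. (2^64 - 1) * 2 == 2^65 - 2.
 */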
static inline long long rshift64(unsigned long long mh,
unsigned long long ml, int n)
{
if (n >= 64)
return mh >> (n - 64);
return (mh << (64 - n)) | (ml >> n);
}
static long long denormal_muld(long long hx, long long hy)
{
unsigned long long ix, iy;
unsigned long long mh, ml, nh, nl;
int exp, w;
ix = hx & 0x7fffffffffffffffLL;
iy = hy & 0x7fffffffffffffffLL;
if (iy < 0x0010000000000000LL || ix == 0)
return ((hx ^ hy) & 0x8000000000000000LL);
exp = (iy & 0x7ff0000000000000LL) >> 52;
ix &= 0x000fffffffffffffLL;
iy = (iy & 0x000fffffffffffffLL) | 0x0010000000000000LL;
mult64(ix, iy, &mh, &ml);
nh = mh;
nl = ml;
w = -1;
if (nh) {
while (nh) { nh >>= 1; w++;}
w += 64;
} else
while (nl) { nl >>= 1; w++;}
/* FIXME: use guard bits */
exp += w - 1022 - 52 * 2;
if (exp > 0)
ix = (rshift64(mh, ml, w - 52) & 0x000fffffffffffffLL)
| ((long long)exp << 52);
else if (exp + 51 >= 0)
ix = rshift64(mh, ml, w - 51 - exp) & 0x000fffffffffffffLL;
else
ix = 0;
ix |= (hx ^ hy) & 0x8000000000000000LL;
return ix;
}
/* ix - iy where iy: denormal and ix, iy >= 0 */
static int denormal_subf1(unsigned int ix, unsigned int iy)
{
int frac;
int exp;
if (ix < 0x00800000)
return ix - iy;
exp = (ix & 0x7f800000) >> 23;
if (exp - 1 > 31)
return ix;
iy >>= exp - 1;
if (iy == 0)
return ix;
frac = (ix & 0x007fffff) | 0x00800000;
frac -= iy;
while (frac < 0x00800000) {
if (--exp == 0)
return frac;
frac <<= 1;
}
return (exp << 23) | (frac & 0x007fffff);
}
/* ix + iy where iy: denormal and ix, iy >= 0 */
static int denormal_addf1(unsigned int ix, unsigned int iy)
{
int frac;
int exp;
if (ix < 0x00800000)
return ix + iy;
exp = (ix & 0x7f800000) >> 23;
if (exp - 1 > 31)
return ix;
iy >>= exp - 1;
if (iy == 0)
return ix;
frac = (ix & 0x007fffff) | 0x00800000;
frac += iy;
if (frac >= 0x01000000) {
frac >>= 1;
++exp;
}
return (exp << 23) | (frac & 0x007fffff);
}
static int denormal_addf(int hx, int hy)
{
unsigned int ix, iy;
int sign;
if ((hx ^ hy) & 0x80000000) {
sign = hx & 0x80000000;
ix = hx & 0x7fffffff;
iy = hy & 0x7fffffff;
if (iy < 0x00800000) {
ix = denormal_subf1(ix, iy);
if ((int) ix < 0) {
ix = -ix;
sign ^= 0x80000000;
}
} else {
ix = denormal_subf1(iy, ix);
sign ^= 0x80000000;
}
} else {
sign = hx & 0x80000000;
ix = hx & 0x7fffffff;
iy = hy & 0x7fffffff;
if (iy < 0x00800000)
ix = denormal_addf1(ix, iy);
else
ix = denormal_addf1(iy, ix);
}
return sign | ix;
}
/* ix - iy where iy: denormal and ix, iy >= 0 */
static long long denormal_subd1(unsigned long long ix, unsigned long long iy)
{
long long frac;
int exp;
if (ix < 0x0010000000000000LL)
return ix - iy;
exp = (ix & 0x7ff0000000000000LL) >> 52;
if (exp - 1 > 63)
return ix;
iy >>= exp - 1;
if (iy == 0)
return ix;
frac = (ix & 0x000fffffffffffffLL) | 0x0010000000000000LL;
frac -= iy;
while (frac < 0x0010000000000000LL) {
if (--exp == 0)
return frac;
frac <<= 1;
}
return ((long long)exp << 52) | (frac & 0x000fffffffffffffLL);
}
/* ix + iy where iy: denormal and ix, iy >= 0 */
static long long denormal_addd1(unsigned long long ix, unsigned long long iy)
{
long long frac;
long long exp;
if (ix < 0x0010000000000000LL)
return ix + iy;
exp = (ix & 0x7ff0000000000000LL) >> 52;
if (exp - 1 > 63)
return ix;
iy >>= exp - 1;
if (iy == 0)
return ix;
frac = (ix & 0x000fffffffffffffLL) | 0x0010000000000000LL;
frac += iy;
if (frac >= 0x0020000000000000LL) {
frac >>= 1;
++exp;
}
return (exp << 52) | (frac & 0x000fffffffffffffLL);
}
static long long denormal_addd(long long hx, long long hy)
{
unsigned long long ix, iy;
long long sign;
if ((hx ^ hy) & 0x8000000000000000LL) {
sign = hx & 0x8000000000000000LL;
ix = hx & 0x7fffffffffffffffLL;
iy = hy & 0x7fffffffffffffffLL;
if (iy < 0x0010000000000000LL) {
ix = denormal_subd1(ix, iy);
if ((int) ix < 0) {
ix = -ix;
sign ^= 0x8000000000000000LL;
}
} else {
ix = denormal_subd1(iy, ix);
sign ^= 0x8000000000000000LL;
}
} else {
sign = hx & 0x8000000000000000LL;
ix = hx & 0x7fffffffffffffffLL;
iy = hy & 0x7fffffffffffffffLL;
if (iy < 0x0010000000000000LL)
ix = denormal_addd1(ix, iy);
else
ix = denormal_addd1(iy, ix);
}
return sign | ix;
}
/**
* denormal_to_double - Given a denormalized float number,
*                      store it as a double float
*
* @fpu: Pointer to sh_fpu_hard structure
* @n: Index to FP register
*/
static void
denormal_to_double (struct sh_fpu_hard_struct *fpu, int n)
{
unsigned long du, dl;
unsigned long x = fpu->fpul;
int exp = 1023 - 126;
if (x != 0 && (x & 0x7f800000) == 0) {
du = (x & 0x80000000);
while ((x & 0x00800000) == 0) {
x <<= 1;
exp--;
}
x &= 0x007fffff;
du |= (exp << 20) | (x >> 3);
dl = x << 29;
fpu->fp_regs[n] = du;
fpu->fp_regs[n+1] = dl;
}
}
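/*
 * Example: fpul == 0x00000001 (the smallest single denormal, 2^-149)
 * becomes the normal double 0x36a0000000000000, stored as
 * fp_regs[n] == 0x36a00000 and fp_regs[n+1] == 0x00000000.
 */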
/**
* ieee_fpe_handler - Handle denormalized number exception
*
* @regs: Pointer to register structure
*
* Returns 1 when it's handled (should not cause exception).
*/
static int
ieee_fpe_handler (struct pt_regs *regs)
{
unsigned short insn = *(unsigned short *) regs->pc;
unsigned short finsn;
unsigned long nextpc;
int nib[4] = {
(insn >> 12) & 0xf,
(insn >> 8) & 0xf,
(insn >> 4) & 0xf,
insn & 0xf};
if (nib[0] == 0xb ||
(nib[0] == 0x4 && nib[2] == 0x0 && nib[3] == 0xb)) /* bsr & jsr */
regs->pr = regs->pc + 4;
if (nib[0] == 0xa || nib[0] == 0xb) { /* bra & bsr */
nextpc = regs->pc + 4 + ((short) ((insn & 0xfff) << 4) >> 3);
finsn = *(unsigned short *) (regs->pc + 2);
} else if (nib[0] == 0x8 && nib[1] == 0xd) { /* bt/s */
if (regs->sr & 1)
nextpc = regs->pc + 4 + ((char) (insn & 0xff) << 1);
else
nextpc = regs->pc + 4;
finsn = *(unsigned short *) (regs->pc + 2);
} else if (nib[0] == 0x8 && nib[1] == 0xf) { /* bf/s */
if (regs->sr & 1)
nextpc = regs->pc + 4;
else
nextpc = regs->pc + 4 + ((char) (insn & 0xff) << 1);
finsn = *(unsigned short *) (regs->pc + 2);
} else if (nib[0] == 0x4 && nib[3] == 0xb &&
(nib[2] == 0x0 || nib[2] == 0x2)) { /* jmp & jsr */
nextpc = regs->regs[nib[1]];
finsn = *(unsigned short *) (regs->pc + 2);
} else if (nib[0] == 0x0 && nib[3] == 0x3 &&
(nib[2] == 0x0 || nib[2] == 0x2)) { /* braf & bsrf */
nextpc = regs->pc + 4 + regs->regs[nib[1]];
finsn = *(unsigned short *) (regs->pc + 2);
} else if (insn == 0x000b) { /* rts */
nextpc = regs->pr;
finsn = *(unsigned short *) (regs->pc + 2);
} else {
nextpc = regs->pc + 2;
finsn = insn;
}
#define FPSCR_FPU_ERROR (1 << 17)
if ((finsn & 0xf1ff) == 0xf0ad) { /* fcnvsd */
struct task_struct *tsk = current;
if ((tsk->thread.xstate->hardfpu.fpscr & FPSCR_FPU_ERROR)) {
/* FPU error */
denormal_to_double (&tsk->thread.xstate->hardfpu,
(finsn >> 8) & 0xf);
} else
return 0;
regs->pc = nextpc;
return 1;
} else if ((finsn & 0xf00f) == 0xf002) { /* fmul */
struct task_struct *tsk = current;
int fpscr;
int n, m, prec;
unsigned int hx, hy;
n = (finsn >> 8) & 0xf;
m = (finsn >> 4) & 0xf;
hx = tsk->thread.xstate->hardfpu.fp_regs[n];
hy = tsk->thread.xstate->hardfpu.fp_regs[m];
fpscr = tsk->thread.xstate->hardfpu.fpscr;
prec = fpscr & (1 << 19);
if ((fpscr & FPSCR_FPU_ERROR)
&& (prec && ((hx & 0x7fffffff) < 0x00100000
|| (hy & 0x7fffffff) < 0x00100000))) {
long long llx, lly;
/* FPU error because of denormal */
llx = ((long long) hx << 32)
| tsk->thread.xstate->hardfpu.fp_regs[n+1];
lly = ((long long) hy << 32)
| tsk->thread.xstate->hardfpu.fp_regs[m+1];
if ((hx & 0x7fffffff) >= 0x00100000)
llx = denormal_muld(lly, llx);
else
llx = denormal_muld(llx, lly);
tsk->thread.xstate->hardfpu.fp_regs[n] = llx >> 32;
tsk->thread.xstate->hardfpu.fp_regs[n+1] = llx & 0xffffffff;
} else if ((fpscr & FPSCR_FPU_ERROR)
&& (!prec && ((hx & 0x7fffffff) < 0x00800000
|| (hy & 0x7fffffff) < 0x00800000))) {
/* FPU error because of denormal */
if ((hx & 0x7fffffff) >= 0x00800000)
hx = denormal_mulf(hy, hx);
else
hx = denormal_mulf(hx, hy);
tsk->thread.xstate->hardfpu.fp_regs[n] = hx;
} else
return 0;
regs->pc = nextpc;
return 1;
} else if ((finsn & 0xf00e) == 0xf000) { /* fadd, fsub */
struct task_struct *tsk = current;
int fpscr;
int n, m, prec;
unsigned int hx, hy;
n = (finsn >> 8) & 0xf;
m = (finsn >> 4) & 0xf;
hx = tsk->thread.xstate->hardfpu.fp_regs[n];
hy = tsk->thread.xstate->hardfpu.fp_regs[m];
fpscr = tsk->thread.xstate->hardfpu.fpscr;
prec = fpscr & (1 << 19);
if ((fpscr & FPSCR_FPU_ERROR)
&& (prec && ((hx & 0x7fffffff) < 0x00100000
|| (hy & 0x7fffffff) < 0x00100000))) {
long long llx, lly;
/* FPU error because of denormal */
llx = ((long long) hx << 32)
| tsk->thread.xstate->hardfpu.fp_regs[n+1];
lly = ((long long) hy << 32)
| tsk->thread.xstate->hardfpu.fp_regs[m+1];
if ((finsn & 0xf00f) == 0xf000)
llx = denormal_addd(llx, lly);
else
llx = denormal_addd(llx, lly ^ (1LL << 63));
tsk->thread.xstate->hardfpu.fp_regs[n] = llx >> 32;
tsk->thread.xstate->hardfpu.fp_regs[n+1] = llx & 0xffffffff;
} else if ((fpscr & FPSCR_FPU_ERROR)
&& (!prec && ((hx & 0x7fffffff) < 0x00800000
|| (hy & 0x7fffffff) < 0x00800000))) {
/* FPU error because of denormal */
if ((finsn & 0xf00f) == 0xf000)
hx = denormal_addf(hx, hy);
else
hx = denormal_addf(hx, hy ^ 0x80000000);
tsk->thread.xstate->hardfpu.fp_regs[n] = hx;
} else
return 0;
regs->pc = nextpc;
return 1;
}
return 0;
}
BUILD_TRAP_HANDLER(fpu_error)
{
struct task_struct *tsk = current;
TRAP_HANDLER_DECL;
__unlazy_fpu(tsk, regs);
if (ieee_fpe_handler(regs)) {
tsk->thread.xstate->hardfpu.fpscr &=
~(FPSCR_CAUSE_MASK | FPSCR_FLAG_MASK);
grab_fpu(regs);
restore_fpu(tsk);
task_thread_info(tsk)->status |= TS_USEDFPU;
return;
}
force_sig(SIGFPE, tsk);
}
| gpl-2.0 |
osmc/vero-linux | drivers/bluetooth/bcm203x.c | 4263 | 6913 | /*
*
* Broadcom Blutonium firmware driver
*
* Copyright (C) 2003 Maxim Krasnyansky <maxk@qualcomm.com>
* Copyright (C) 2003 Marcel Holtmann <marcel@holtmann.org>
*
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
*/
#include <linux/module.h>
#include <linux/atomic.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/device.h>
#include <linux/firmware.h>
#include <linux/usb.h>
#include <net/bluetooth/bluetooth.h>
#define VERSION "1.2"
static const struct usb_device_id bcm203x_table[] = {
/* Broadcom Blutonium (BCM2033) */
{ USB_DEVICE(0x0a5c, 0x2033) },
{ } /* Terminating entry */
};
MODULE_DEVICE_TABLE(usb, bcm203x_table);
#define BCM203X_ERROR 0
#define BCM203X_RESET 1
#define BCM203X_LOAD_MINIDRV 2
#define BCM203X_SELECT_MEMORY 3
#define BCM203X_CHECK_MEMORY 4
#define BCM203X_LOAD_FIRMWARE 5
#define BCM203X_CHECK_FIRMWARE 6
#define BCM203X_IN_EP 0x81
#define BCM203X_OUT_EP 0x02
struct bcm203x_data {
struct usb_device *udev;
unsigned long state;
struct work_struct work;
atomic_t shutdown;
struct urb *urb;
unsigned char *buffer;
unsigned char *fw_data;
unsigned int fw_size;
unsigned int fw_sent;
};
static void bcm203x_complete(struct urb *urb)
{
struct bcm203x_data *data = urb->context;
struct usb_device *udev = urb->dev;
int len;
BT_DBG("udev %p urb %p", udev, urb);
if (urb->status) {
BT_ERR("URB failed with status %d", urb->status);
data->state = BCM203X_ERROR;
return;
}
switch (data->state) {
case BCM203X_LOAD_MINIDRV:
memcpy(data->buffer, "#", 1);
usb_fill_bulk_urb(urb, udev, usb_sndbulkpipe(udev, BCM203X_OUT_EP),
data->buffer, 1, bcm203x_complete, data);
data->state = BCM203X_SELECT_MEMORY;
/* use workqueue to have a small delay */
schedule_work(&data->work);
break;
case BCM203X_SELECT_MEMORY:
usb_fill_int_urb(urb, udev, usb_rcvintpipe(udev, BCM203X_IN_EP),
data->buffer, 32, bcm203x_complete, data, 1);
data->state = BCM203X_CHECK_MEMORY;
if (usb_submit_urb(data->urb, GFP_ATOMIC) < 0)
BT_ERR("Can't submit URB");
break;
case BCM203X_CHECK_MEMORY:
if (data->buffer[0] != '#') {
BT_ERR("Memory select failed");
data->state = BCM203X_ERROR;
break;
}
data->state = BCM203X_LOAD_FIRMWARE;
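		/* fall through: submit the first firmware chunk right away */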
case BCM203X_LOAD_FIRMWARE:
if (data->fw_sent == data->fw_size) {
usb_fill_int_urb(urb, udev, usb_rcvintpipe(udev, BCM203X_IN_EP),
data->buffer, 32, bcm203x_complete, data, 1);
data->state = BCM203X_CHECK_FIRMWARE;
} else {
len = min_t(uint, data->fw_size - data->fw_sent, 4096);
usb_fill_bulk_urb(urb, udev, usb_sndbulkpipe(udev, BCM203X_OUT_EP),
data->fw_data + data->fw_sent, len, bcm203x_complete, data);
data->fw_sent += len;
}
if (usb_submit_urb(data->urb, GFP_ATOMIC) < 0)
BT_ERR("Can't submit URB");
break;
case BCM203X_CHECK_FIRMWARE:
if (data->buffer[0] != '.') {
BT_ERR("Firmware loading failed");
data->state = BCM203X_ERROR;
break;
}
data->state = BCM203X_RESET;
break;
}
}
static void bcm203x_work(struct work_struct *work)
{
struct bcm203x_data *data =
container_of(work, struct bcm203x_data, work);
if (atomic_read(&data->shutdown))
return;
if (usb_submit_urb(data->urb, GFP_KERNEL) < 0)
BT_ERR("Can't submit URB");
}
static int bcm203x_probe(struct usb_interface *intf, const struct usb_device_id *id)
{
const struct firmware *firmware;
struct usb_device *udev = interface_to_usbdev(intf);
struct bcm203x_data *data;
int size;
BT_DBG("intf %p id %p", intf, id);
if (intf->cur_altsetting->desc.bInterfaceNumber != 0)
return -ENODEV;
data = devm_kzalloc(&intf->dev, sizeof(*data), GFP_KERNEL);
if (!data) {
BT_ERR("Can't allocate memory for data structure");
return -ENOMEM;
}
data->udev = udev;
data->state = BCM203X_LOAD_MINIDRV;
data->urb = usb_alloc_urb(0, GFP_KERNEL);
if (!data->urb) {
BT_ERR("Can't allocate URB");
return -ENOMEM;
}
if (request_firmware(&firmware, "BCM2033-MD.hex", &udev->dev) < 0) {
BT_ERR("Mini driver request failed");
usb_free_urb(data->urb);
return -EIO;
}
BT_DBG("minidrv data %p size %zu", firmware->data, firmware->size);
size = max_t(uint, firmware->size, 4096);
data->buffer = kmalloc(size, GFP_KERNEL);
if (!data->buffer) {
BT_ERR("Can't allocate memory for mini driver");
release_firmware(firmware);
usb_free_urb(data->urb);
return -ENOMEM;
}
memcpy(data->buffer, firmware->data, firmware->size);
usb_fill_bulk_urb(data->urb, udev, usb_sndbulkpipe(udev, BCM203X_OUT_EP),
data->buffer, firmware->size, bcm203x_complete, data);
release_firmware(firmware);
if (request_firmware(&firmware, "BCM2033-FW.bin", &udev->dev) < 0) {
BT_ERR("Firmware request failed");
usb_free_urb(data->urb);
kfree(data->buffer);
return -EIO;
}
BT_DBG("firmware data %p size %zu", firmware->data, firmware->size);
data->fw_data = kmemdup(firmware->data, firmware->size, GFP_KERNEL);
if (!data->fw_data) {
BT_ERR("Can't allocate memory for firmware image");
release_firmware(firmware);
usb_free_urb(data->urb);
kfree(data->buffer);
return -ENOMEM;
}
data->fw_size = firmware->size;
data->fw_sent = 0;
release_firmware(firmware);
INIT_WORK(&data->work, bcm203x_work);
usb_set_intfdata(intf, data);
/* use workqueue to have a small delay */
schedule_work(&data->work);
return 0;
}
static void bcm203x_disconnect(struct usb_interface *intf)
{
struct bcm203x_data *data = usb_get_intfdata(intf);
BT_DBG("intf %p", intf);
atomic_inc(&data->shutdown);
cancel_work_sync(&data->work);
usb_kill_urb(data->urb);
usb_set_intfdata(intf, NULL);
usb_free_urb(data->urb);
kfree(data->fw_data);
kfree(data->buffer);
}
static struct usb_driver bcm203x_driver = {
.name = "bcm203x",
.probe = bcm203x_probe,
.disconnect = bcm203x_disconnect,
.id_table = bcm203x_table,
.disable_hub_initiated_lpm = 1,
};
module_usb_driver(bcm203x_driver);
MODULE_AUTHOR("Marcel Holtmann <marcel@holtmann.org>");
MODULE_DESCRIPTION("Broadcom Blutonium firmware driver ver " VERSION);
MODULE_VERSION(VERSION);
MODULE_LICENSE("GPL");
MODULE_FIRMWARE("BCM2033-MD.hex");
MODULE_FIRMWARE("BCM2033-FW.bin");
| gpl-2.0 |
myfluxi/android_kernel_lge_hammerhead | arch/arm/mach-s3c24xx/s3c2412.c | 4775 | 6038 | /* linux/arch/arm/mach-s3c2412/s3c2412.c
*
* Copyright (c) 2006 Simtec Electronics
* Ben Dooks <ben@simtec.co.uk>
*
* http://armlinux.simtec.co.uk/.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/timer.h>
#include <linux/init.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/syscore_ops.h>
#include <linux/serial_core.h>
#include <linux/platform_device.h>
#include <linux/io.h>
#include <asm/mach/arch.h>
#include <asm/mach/map.h>
#include <asm/mach/irq.h>
#include <mach/hardware.h>
#include <asm/proc-fns.h>
#include <asm/irq.h>
#include <asm/system_misc.h>
#include <plat/cpu-freq.h>
#include <mach/regs-clock.h>
#include <plat/regs-serial.h>
#include <mach/regs-power.h>
#include <mach/regs-gpio.h>
#include <mach/regs-gpioj.h>
#include <mach/regs-dsc.h>
#include <plat/regs-spi.h>
#include <mach/regs-s3c2412.h>
#include <plat/s3c2412.h>
#include <plat/cpu.h>
#include <plat/devs.h>
#include <plat/clock.h>
#include <plat/pm.h>
#include <plat/pll.h>
#include <plat/nand-core.h>
#ifndef CONFIG_CPU_S3C2412_ONLY
void __iomem *s3c24xx_va_gpio2 = S3C24XX_VA_GPIO;
static inline void s3c2412_init_gpio2(void)
{
s3c24xx_va_gpio2 = S3C24XX_VA_GPIO + 0x10;
}
#else
#define s3c2412_init_gpio2() do { } while(0)
#endif
/* Initial IO mappings */
static struct map_desc s3c2412_iodesc[] __initdata = {
IODESC_ENT(CLKPWR),
IODESC_ENT(TIMER),
IODESC_ENT(WATCHDOG),
{
.virtual = (unsigned long)S3C2412_VA_SSMC,
.pfn = __phys_to_pfn(S3C2412_PA_SSMC),
.length = SZ_1M,
.type = MT_DEVICE,
},
{
.virtual = (unsigned long)S3C2412_VA_EBI,
.pfn = __phys_to_pfn(S3C2412_PA_EBI),
.length = SZ_1M,
.type = MT_DEVICE,
},
};
/* uart registration process */
void __init s3c2412_init_uarts(struct s3c2410_uartcfg *cfg, int no)
{
s3c24xx_init_uartdevs("s3c2412-uart", s3c2410_uart_resources, cfg, no);
/* rename devices that are s3c2412/s3c2413 specific */
s3c_device_sdi.name = "s3c2412-sdi";
s3c_device_lcd.name = "s3c2412-lcd";
s3c_nand_setname("s3c2412-nand");
/* alter IRQ of SDI controller */
s3c_device_sdi.resource[1].start = IRQ_S3C2412_SDI;
s3c_device_sdi.resource[1].end = IRQ_S3C2412_SDI;
/* spi channel related changes, s3c2412/13 specific */
s3c_device_spi0.name = "s3c2412-spi";
s3c_device_spi0.resource[0].end = S3C24XX_PA_SPI + 0x24;
s3c_device_spi1.name = "s3c2412-spi";
s3c_device_spi1.resource[0].start = S3C24XX_PA_SPI + S3C2412_SPI1;
s3c_device_spi1.resource[0].end = S3C24XX_PA_SPI + S3C2412_SPI1 + 0x24;
}
/* s3c2412_idle
*
 * use the standard idle call: ensure idle mode is set
 * in the power config, then issue the idle co-processor
 * instruction
*/
static void s3c2412_idle(void)
{
unsigned long tmp;
/* ensure our idle mode is to go to idle */
tmp = __raw_readl(S3C2412_PWRCFG);
tmp &= ~S3C2412_PWRCFG_STANDBYWFI_MASK;
tmp |= S3C2412_PWRCFG_STANDBYWFI_IDLE;
__raw_writel(tmp, S3C2412_PWRCFG);
cpu_do_idle();
}
void s3c2412_restart(char mode, const char *cmd)
{
if (mode == 's')
soft_restart(0);
/* errata "Watch-dog/Software Reset Problem" specifies that
* this reset must be done with the SYSCLK sourced from
* EXTCLK instead of FOUT to avoid a glitch in the reset
* mechanism.
*
* See the watchdog section of the S3C2412 manual for more
* information on this fix.
*/
__raw_writel(0x00, S3C2412_CLKSRC);
__raw_writel(S3C2412_SWRST_RESET, S3C2412_SWRST);
mdelay(1);
}
/* s3c2412_map_io
*
* register the standard cpu IO areas, and any passed in from the
* machine specific initialisation.
*/
void __init s3c2412_map_io(void)
{
/* move base of IO */
s3c2412_init_gpio2();
/* set our idle function */
arm_pm_idle = s3c2412_idle;
/* register our io-tables */
iotable_init(s3c2412_iodesc, ARRAY_SIZE(s3c2412_iodesc));
}
void __init_or_cpufreq s3c2412_setup_clocks(void)
{
struct clk *xtal_clk;
unsigned long tmp;
unsigned long xtal;
unsigned long fclk;
unsigned long hclk;
unsigned long pclk;
xtal_clk = clk_get(NULL, "xtal");
xtal = clk_get_rate(xtal_clk);
clk_put(xtal_clk);
/* now we've got our machine bits initialised, work out what
* clocks we've got */
fclk = s3c24xx_get_pll(__raw_readl(S3C2410_MPLLCON), xtal * 2);
clk_mpll.rate = fclk;
tmp = __raw_readl(S3C2410_CLKDIVN);
/* work out clock scalings */
hclk = fclk / ((tmp & S3C2412_CLKDIVN_HDIVN_MASK) + 1);
hclk /= ((tmp & S3C2412_CLKDIVN_ARMDIVN) ? 2 : 1);
pclk = hclk / ((tmp & S3C2412_CLKDIVN_PDIVN) ? 2 : 1);
/* print brief summary of clocks, etc */
printk("S3C2412: core %ld.%03ld MHz, memory %ld.%03ld MHz, peripheral %ld.%03ld MHz\n",
print_mhz(fclk), print_mhz(hclk), print_mhz(pclk));
s3c24xx_setup_clocks(fclk, hclk, pclk);
}
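/*
 * Illustrative worked example, not part of the kernel source, with
 * assumed register values: if the PLL yields fclk = 200 MHz, the HDIVN
 * field is 1, and both the ARMDIVN and PDIVN bits are set, then
 *
 *	hclk = 200 MHz / (1 + 1) = 100 MHz, halved again to 50 MHz
 *	pclk = 50 MHz / 2 = 25 MHz
 */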
void __init s3c2412_init_clocks(int xtal)
{
/* initialise the clocks here, to allow other things like the
* console to use them
*/
s3c24xx_register_baseclocks(xtal);
s3c2412_setup_clocks();
s3c2412_baseclk_add();
}
/* We need to register the subsystem before we actually register the device,
 * and we also need to ensure that it has been initialised before any of the
 * drivers even try to use it (even if not on an s3c2412-based system),
 * as a driver which may support both the 2410 and the 2440 may try to use it.
*/
struct bus_type s3c2412_subsys = {
.name = "s3c2412-core",
.dev_name = "s3c2412-core",
};
static int __init s3c2412_core_init(void)
{
return subsys_system_register(&s3c2412_subsys, NULL);
}
core_initcall(s3c2412_core_init);
static struct device s3c2412_dev = {
.bus = &s3c2412_subsys,
};
int __init s3c2412_init(void)
{
printk("S3C2412: Initialising architecture\n");
#ifdef CONFIG_PM
register_syscore_ops(&s3c2412_pm_syscore_ops);
#endif
register_syscore_ops(&s3c24xx_irq_syscore_ops);
return device_register(&s3c2412_dev);
}
| gpl-2.0 |
htc-mirror/ville-ics-crc-3.0.8-ca24d1e | drivers/block/drbd/drbd_actlog.c | 7847 | 34765 | /*
drbd_actlog.c
This file is part of DRBD by Philipp Reisner and Lars Ellenberg.
Copyright (C) 2003-2008, LINBIT Information Technologies GmbH.
Copyright (C) 2003-2008, Philipp Reisner <philipp.reisner@linbit.com>.
Copyright (C) 2003-2008, Lars Ellenberg <lars.ellenberg@linbit.com>.
drbd is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2, or (at your option)
any later version.
drbd is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with drbd; see the file COPYING. If not, write to
the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/slab.h>
#include <linux/drbd.h>
#include "drbd_int.h"
#include "drbd_wrappers.h"
/* We maintain a trivial checksum in our on disk activity log.
* With that we can ensure correct operation even when the storage
* device might do a partial (last) sector write while losing power.
*/
struct __packed al_transaction {
u32 magic;
u32 tr_number;
struct __packed {
u32 pos;
u32 extent; } updates[1 + AL_EXTENTS_PT];
u32 xor_sum;
};
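/*
 * Illustrative sketch, not part of the DRBD source: the transaction is
 * protected by a plain XOR over every extent number it carries (see
 * w_al_write_transaction() below).  XOR is its own inverse, so a reader
 * just recomputes the sum and compares, roughly:
 */
static int al_xor_sum_ok(const struct al_transaction *b)
{
	u32 xor_sum = 0;
	int i;

	for (i = 0; i < 1 + AL_EXTENTS_PT; i++)
		xor_sum ^= be32_to_cpu(b->updates[i].extent);
	return xor_sum == be32_to_cpu(b->xor_sum);	/* 1 if intact */
}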
struct update_odbm_work {
struct drbd_work w;
unsigned int enr;
};
struct update_al_work {
struct drbd_work w;
struct lc_element *al_ext;
struct completion event;
unsigned int enr;
/* if old_enr != LC_FREE, write corresponding bitmap sector, too */
unsigned int old_enr;
};
struct drbd_atodb_wait {
atomic_t count;
struct completion io_done;
struct drbd_conf *mdev;
int error;
};
int w_al_write_transaction(struct drbd_conf *, struct drbd_work *, int);
static int _drbd_md_sync_page_io(struct drbd_conf *mdev,
struct drbd_backing_dev *bdev,
struct page *page, sector_t sector,
int rw, int size)
{
struct bio *bio;
struct drbd_md_io md_io;
int ok;
md_io.mdev = mdev;
init_completion(&md_io.event);
md_io.error = 0;
if ((rw & WRITE) && !test_bit(MD_NO_FUA, &mdev->flags))
rw |= REQ_FUA | REQ_FLUSH;
rw |= REQ_SYNC;
bio = bio_alloc(GFP_NOIO, 1);
bio->bi_bdev = bdev->md_bdev;
bio->bi_sector = sector;
ok = (bio_add_page(bio, page, size, 0) == size);
if (!ok)
goto out;
bio->bi_private = &md_io;
bio->bi_end_io = drbd_md_io_complete;
bio->bi_rw = rw;
if (drbd_insert_fault(mdev, (rw & WRITE) ? DRBD_FAULT_MD_WR : DRBD_FAULT_MD_RD))
bio_endio(bio, -EIO);
else
submit_bio(rw, bio);
wait_for_completion(&md_io.event);
ok = bio_flagged(bio, BIO_UPTODATE) && md_io.error == 0;
out:
bio_put(bio);
return ok;
}
int drbd_md_sync_page_io(struct drbd_conf *mdev, struct drbd_backing_dev *bdev,
sector_t sector, int rw)
{
int logical_block_size, mask, ok;
int offset = 0;
struct page *iop = mdev->md_io_page;
D_ASSERT(mutex_is_locked(&mdev->md_io_mutex));
BUG_ON(!bdev->md_bdev);
logical_block_size = bdev_logical_block_size(bdev->md_bdev);
if (logical_block_size == 0)
logical_block_size = MD_SECTOR_SIZE;
/* in case logical_block_size != 512 [ s390 only? ] */
if (logical_block_size != MD_SECTOR_SIZE) {
mask = (logical_block_size / MD_SECTOR_SIZE) - 1;
D_ASSERT(mask == 1 || mask == 3 || mask == 7);
D_ASSERT(logical_block_size == (mask+1) * MD_SECTOR_SIZE);
offset = sector & mask;
sector = sector & ~mask;
iop = mdev->md_io_tmpp;
if (rw & WRITE) {
/* these are GFP_KERNEL pages, pre-allocated
* on device initialization */
void *p = page_address(mdev->md_io_page);
void *hp = page_address(mdev->md_io_tmpp);
ok = _drbd_md_sync_page_io(mdev, bdev, iop, sector,
READ, logical_block_size);
if (unlikely(!ok)) {
dev_err(DEV, "drbd_md_sync_page_io(,%llus,"
"READ [logical_block_size!=512]) failed!\n",
(unsigned long long)sector);
return 0;
}
memcpy(hp + offset*MD_SECTOR_SIZE, p, MD_SECTOR_SIZE);
}
}
if (sector < drbd_md_first_sector(bdev) ||
sector > drbd_md_last_sector(bdev))
dev_alert(DEV, "%s [%d]:%s(,%llus,%s) out of range md access!\n",
current->comm, current->pid, __func__,
(unsigned long long)sector, (rw & WRITE) ? "WRITE" : "READ");
ok = _drbd_md_sync_page_io(mdev, bdev, iop, sector, rw, logical_block_size);
if (unlikely(!ok)) {
dev_err(DEV, "drbd_md_sync_page_io(,%llus,%s) failed!\n",
(unsigned long long)sector, (rw & WRITE) ? "WRITE" : "READ");
return 0;
}
if (logical_block_size != MD_SECTOR_SIZE && !(rw & WRITE)) {
void *p = page_address(mdev->md_io_page);
void *hp = page_address(mdev->md_io_tmpp);
memcpy(p, hp + offset*MD_SECTOR_SIZE, MD_SECTOR_SIZE);
}
return ok;
}
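/*
 * Illustrative sketch, not part of the DRBD source: when the logical
 * block size exceeds 512 bytes, a single 512-byte meta-data sector can
 * only be written by read-modify-write of the enclosing block, which is
 * what the function above does.  The alignment arithmetic in isolation,
 * assuming block_size is a power-of-two multiple of 512:
 */
static void rmw_coordinates(sector_t sector, int block_size,
			    sector_t *aligned, int *offset)
{
	int mask = (block_size / 512) - 1;	/* e.g. 4096 -> mask 7 */

	*offset = sector & mask;		/* 512B slot inside the block */
	*aligned = sector & ~(sector_t)mask;	/* first sector of the block */
}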
static struct lc_element *_al_get(struct drbd_conf *mdev, unsigned int enr)
{
struct lc_element *al_ext;
struct lc_element *tmp;
unsigned long al_flags = 0;
int wake;
spin_lock_irq(&mdev->al_lock);
tmp = lc_find(mdev->resync, enr/AL_EXT_PER_BM_SECT);
if (unlikely(tmp != NULL)) {
struct bm_extent *bm_ext = lc_entry(tmp, struct bm_extent, lce);
if (test_bit(BME_NO_WRITES, &bm_ext->flags)) {
wake = !test_and_set_bit(BME_PRIORITY, &bm_ext->flags);
spin_unlock_irq(&mdev->al_lock);
if (wake)
wake_up(&mdev->al_wait);
return NULL;
}
}
al_ext = lc_get(mdev->act_log, enr);
al_flags = mdev->act_log->flags;
spin_unlock_irq(&mdev->al_lock);
/*
if (!al_ext) {
if (al_flags & LC_STARVING)
dev_warn(DEV, "Have to wait for LRU element (AL too small?)\n");
if (al_flags & LC_DIRTY)
dev_warn(DEV, "Ongoing AL update (AL device too slow?)\n");
}
*/
return al_ext;
}
void drbd_al_begin_io(struct drbd_conf *mdev, sector_t sector)
{
unsigned int enr = (sector >> (AL_EXTENT_SHIFT-9));
struct lc_element *al_ext;
struct update_al_work al_work;
D_ASSERT(atomic_read(&mdev->local_cnt) > 0);
wait_event(mdev->al_wait, (al_ext = _al_get(mdev, enr)));
if (al_ext->lc_number != enr) {
/* drbd_al_write_transaction(mdev,al_ext,enr);
* recurses into generic_make_request(), which
* disallows recursion, bios being serialized on the
* current->bio_tail list now.
* we have to delegate updates to the activity log
* to the worker thread. */
init_completion(&al_work.event);
al_work.al_ext = al_ext;
al_work.enr = enr;
al_work.old_enr = al_ext->lc_number;
al_work.w.cb = w_al_write_transaction;
drbd_queue_work_front(&mdev->data.work, &al_work.w);
wait_for_completion(&al_work.event);
mdev->al_writ_cnt++;
spin_lock_irq(&mdev->al_lock);
lc_changed(mdev->act_log, al_ext);
spin_unlock_irq(&mdev->al_lock);
wake_up(&mdev->al_wait);
}
}
void drbd_al_complete_io(struct drbd_conf *mdev, sector_t sector)
{
unsigned int enr = (sector >> (AL_EXTENT_SHIFT-9));
struct lc_element *extent;
unsigned long flags;
spin_lock_irqsave(&mdev->al_lock, flags);
extent = lc_find(mdev->act_log, enr);
if (!extent) {
spin_unlock_irqrestore(&mdev->al_lock, flags);
dev_err(DEV, "al_complete_io() called on inactive extent %u\n", enr);
return;
}
if (lc_put(mdev->act_log, extent) == 0)
wake_up(&mdev->al_wait);
spin_unlock_irqrestore(&mdev->al_lock, flags);
}
#if (PAGE_SHIFT + 3) < (AL_EXTENT_SHIFT - BM_BLOCK_SHIFT)
/* Currently BM_BLOCK_SHIFT, BM_EXT_SHIFT and AL_EXTENT_SHIFT
* are still coupled, or assume too much about their relation.
* Code below will not work if this is violated.
* Will be cleaned up with some followup patch.
*/
# error FIXME
#endif
static unsigned int al_extent_to_bm_page(unsigned int al_enr)
{
return al_enr >>
/* bit to page */
((PAGE_SHIFT + 3) -
/* al extent number to bit */
(AL_EXTENT_SHIFT - BM_BLOCK_SHIFT));
}
static unsigned int rs_extent_to_bm_page(unsigned int rs_enr)
{
return rs_enr >>
/* bit to page */
((PAGE_SHIFT + 3) -
/* al extent number to bit */
(BM_EXT_SHIFT - BM_BLOCK_SHIFT));
}
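/*
 * Illustrative worked example, not part of the DRBD source: a bitmap
 * page holds PAGE_SIZE * 8 = 2^(PAGE_SHIFT + 3) bits and an AL extent
 * covers 2^(AL_EXTENT_SHIFT - BM_BLOCK_SHIFT) bits, so the conversion
 * above is one right shift.  Assuming 4 KiB pages, 4 MB AL extents and
 * 4 KiB bitmap blocks:
 *
 *	page = al_enr >> ((12 + 3) - (22 - 12)) = al_enr >> 5
 *
 * i.e. 32 AL extents are tracked per bitmap page.
 */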
int
w_al_write_transaction(struct drbd_conf *mdev, struct drbd_work *w, int unused)
{
struct update_al_work *aw = container_of(w, struct update_al_work, w);
struct lc_element *updated = aw->al_ext;
const unsigned int new_enr = aw->enr;
const unsigned int evicted = aw->old_enr;
struct al_transaction *buffer;
sector_t sector;
int i, n, mx;
unsigned int extent_nr;
u32 xor_sum = 0;
if (!get_ldev(mdev)) {
dev_err(DEV,
"disk is %s, cannot start al transaction (-%d +%d)\n",
drbd_disk_str(mdev->state.disk), evicted, new_enr);
complete(&((struct update_al_work *)w)->event);
return 1;
}
/* do we have to do a bitmap write, first?
* TODO reduce maximum latency:
* submit both bios, then wait for both,
* instead of doing two synchronous sector writes.
* For now, we must not write the transaction,
* if we cannot write out the bitmap of the evicted extent. */
if (mdev->state.conn < C_CONNECTED && evicted != LC_FREE)
drbd_bm_write_page(mdev, al_extent_to_bm_page(evicted));
/* The bitmap write may have failed, causing a state change. */
if (mdev->state.disk < D_INCONSISTENT) {
dev_err(DEV,
"disk is %s, cannot write al transaction (-%d +%d)\n",
drbd_disk_str(mdev->state.disk), evicted, new_enr);
complete(&((struct update_al_work *)w)->event);
put_ldev(mdev);
return 1;
}
mutex_lock(&mdev->md_io_mutex); /* protects md_io_buffer, al_tr_cycle, ... */
buffer = (struct al_transaction *)page_address(mdev->md_io_page);
buffer->magic = __constant_cpu_to_be32(DRBD_MAGIC);
buffer->tr_number = cpu_to_be32(mdev->al_tr_number);
n = lc_index_of(mdev->act_log, updated);
buffer->updates[0].pos = cpu_to_be32(n);
buffer->updates[0].extent = cpu_to_be32(new_enr);
xor_sum ^= new_enr;
mx = min_t(int, AL_EXTENTS_PT,
mdev->act_log->nr_elements - mdev->al_tr_cycle);
for (i = 0; i < mx; i++) {
unsigned idx = mdev->al_tr_cycle + i;
extent_nr = lc_element_by_index(mdev->act_log, idx)->lc_number;
buffer->updates[i+1].pos = cpu_to_be32(idx);
buffer->updates[i+1].extent = cpu_to_be32(extent_nr);
xor_sum ^= extent_nr;
}
for (; i < AL_EXTENTS_PT; i++) {
buffer->updates[i+1].pos = __constant_cpu_to_be32(-1);
buffer->updates[i+1].extent = __constant_cpu_to_be32(LC_FREE);
xor_sum ^= LC_FREE;
}
mdev->al_tr_cycle += AL_EXTENTS_PT;
if (mdev->al_tr_cycle >= mdev->act_log->nr_elements)
mdev->al_tr_cycle = 0;
buffer->xor_sum = cpu_to_be32(xor_sum);
sector = mdev->ldev->md.md_offset
+ mdev->ldev->md.al_offset + mdev->al_tr_pos;
if (!drbd_md_sync_page_io(mdev, mdev->ldev, sector, WRITE))
drbd_chk_io_error(mdev, 1, true);
if (++mdev->al_tr_pos >
div_ceil(mdev->act_log->nr_elements, AL_EXTENTS_PT))
mdev->al_tr_pos = 0;
D_ASSERT(mdev->al_tr_pos < MD_AL_MAX_SIZE);
mdev->al_tr_number++;
mutex_unlock(&mdev->md_io_mutex);
complete(&((struct update_al_work *)w)->event);
put_ldev(mdev);
return 1;
}
/**
* drbd_al_read_tr() - Read a single transaction from the on disk activity log
* @mdev: DRBD device.
 * @bdev:	Block device to read from.
* @b: pointer to an al_transaction.
* @index: On disk slot of the transaction to read.
*
* Returns -1 on IO error, 0 on checksum error and 1 upon success.
*/
static int drbd_al_read_tr(struct drbd_conf *mdev,
struct drbd_backing_dev *bdev,
struct al_transaction *b,
int index)
{
sector_t sector;
int rv, i;
u32 xor_sum = 0;
sector = bdev->md.md_offset + bdev->md.al_offset + index;
/* Don't process errors normally,
 * as this is done before the disk is attached! */
if (!drbd_md_sync_page_io(mdev, bdev, sector, READ))
return -1;
rv = (be32_to_cpu(b->magic) == DRBD_MAGIC);
for (i = 0; i < AL_EXTENTS_PT + 1; i++)
xor_sum ^= be32_to_cpu(b->updates[i].extent);
rv &= (xor_sum == be32_to_cpu(b->xor_sum));
return rv;
}
/**
* drbd_al_read_log() - Restores the activity log from its on disk representation.
* @mdev: DRBD device.
 * @bdev:	Block device to read from.
*
* Returns 1 on success, returns 0 when reading the log failed due to IO errors.
*/
int drbd_al_read_log(struct drbd_conf *mdev, struct drbd_backing_dev *bdev)
{
struct al_transaction *buffer;
int i;
int rv;
int mx;
int active_extents = 0;
int transactions = 0;
int found_valid = 0;
int from = 0;
int to = 0;
u32 from_tnr = 0;
u32 to_tnr = 0;
u32 cnr;
mx = div_ceil(mdev->act_log->nr_elements, AL_EXTENTS_PT);
/* lock out all other meta data io for now,
* and make sure the page is mapped.
*/
mutex_lock(&mdev->md_io_mutex);
buffer = page_address(mdev->md_io_page);
/* Find the valid transaction in the log */
for (i = 0; i <= mx; i++) {
rv = drbd_al_read_tr(mdev, bdev, buffer, i);
if (rv == 0)
continue;
if (rv == -1) {
mutex_unlock(&mdev->md_io_mutex);
return 0;
}
cnr = be32_to_cpu(buffer->tr_number);
if (++found_valid == 1) {
from = i;
to = i;
from_tnr = cnr;
to_tnr = cnr;
continue;
}
if ((int)cnr - (int)from_tnr < 0) {
D_ASSERT(from_tnr - cnr + i - from == mx+1);
from = i;
from_tnr = cnr;
}
if ((int)cnr - (int)to_tnr > 0) {
D_ASSERT(cnr - to_tnr == i - to);
to = i;
to_tnr = cnr;
}
}
if (!found_valid) {
dev_warn(DEV, "No usable activity log found.\n");
mutex_unlock(&mdev->md_io_mutex);
return 1;
}
/* Read the valid transactions.
* dev_info(DEV, "Reading from %d to %d.\n",from,to); */
i = from;
while (1) {
int j, pos;
unsigned int extent_nr;
unsigned int trn;
rv = drbd_al_read_tr(mdev, bdev, buffer, i);
ERR_IF(rv == 0) goto cancel;
if (rv == -1) {
mutex_unlock(&mdev->md_io_mutex);
return 0;
}
trn = be32_to_cpu(buffer->tr_number);
spin_lock_irq(&mdev->al_lock);
/* This loop runs backwards because in the cyclic
elements there might be an old version of the
updated element (in slot 0). So the element in slot 0
can overwrite old versions. */
for (j = AL_EXTENTS_PT; j >= 0; j--) {
pos = be32_to_cpu(buffer->updates[j].pos);
extent_nr = be32_to_cpu(buffer->updates[j].extent);
if (extent_nr == LC_FREE)
continue;
lc_set(mdev->act_log, extent_nr, pos);
active_extents++;
}
spin_unlock_irq(&mdev->al_lock);
transactions++;
cancel:
if (i == to)
break;
i++;
if (i > mx)
i = 0;
}
mdev->al_tr_number = to_tnr+1;
mdev->al_tr_pos = to;
if (++mdev->al_tr_pos >
div_ceil(mdev->act_log->nr_elements, AL_EXTENTS_PT))
mdev->al_tr_pos = 0;
/* ok, we are done with it */
mutex_unlock(&mdev->md_io_mutex);
dev_info(DEV, "Found %d transactions (%d active extents) in activity log.\n",
transactions, active_extents);
return 1;
}
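/*
 * Illustrative sketch, not part of the DRBD source: transaction numbers
 * live on a wrapping u32 counter, so the loop above decides "older" and
 * "newer" with signed-difference (serial number) arithmetic.  A common
 * wrap-safe formulation of the same test:
 */
static int tnr_after(u32 a, u32 b)
{
	return (int)(a - b) > 0;	/* 1 if a is newer than b */
}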
/**
 * drbd_al_apply_to_bm() - Sets the bitmap to dirty (1) where covered by active AL extents
* @mdev: DRBD device.
*/
void drbd_al_apply_to_bm(struct drbd_conf *mdev)
{
unsigned int enr;
unsigned long add = 0;
char ppb[10];
int i, tmp;
wait_event(mdev->al_wait, lc_try_lock(mdev->act_log));
for (i = 0; i < mdev->act_log->nr_elements; i++) {
enr = lc_element_by_index(mdev->act_log, i)->lc_number;
if (enr == LC_FREE)
continue;
tmp = drbd_bm_ALe_set_all(mdev, enr);
dynamic_dev_dbg(DEV, "AL: set %d bits in extent %u\n", tmp, enr);
add += tmp;
}
lc_unlock(mdev->act_log);
wake_up(&mdev->al_wait);
dev_info(DEV, "Marked additional %s as out-of-sync based on AL.\n",
ppsize(ppb, Bit2KB(add)));
}
static int _try_lc_del(struct drbd_conf *mdev, struct lc_element *al_ext)
{
int rv;
spin_lock_irq(&mdev->al_lock);
rv = (al_ext->refcnt == 0);
if (likely(rv))
lc_del(mdev->act_log, al_ext);
spin_unlock_irq(&mdev->al_lock);
return rv;
}
/**
 * drbd_al_shrink() - Removes all active extents from the activity log
* @mdev: DRBD device.
*
 * Removes all active extents from the activity log, waiting until
 * the reference count of each entry has dropped to 0 first, of course.
*
* You need to lock mdev->act_log with lc_try_lock() / lc_unlock()
*/
void drbd_al_shrink(struct drbd_conf *mdev)
{
struct lc_element *al_ext;
int i;
D_ASSERT(test_bit(__LC_DIRTY, &mdev->act_log->flags));
for (i = 0; i < mdev->act_log->nr_elements; i++) {
al_ext = lc_element_by_index(mdev->act_log, i);
if (al_ext->lc_number == LC_FREE)
continue;
wait_event(mdev->al_wait, _try_lc_del(mdev, al_ext));
}
wake_up(&mdev->al_wait);
}
static int w_update_odbm(struct drbd_conf *mdev, struct drbd_work *w, int unused)
{
struct update_odbm_work *udw = container_of(w, struct update_odbm_work, w);
if (!get_ldev(mdev)) {
if (__ratelimit(&drbd_ratelimit_state))
dev_warn(DEV, "Can not update on disk bitmap, local IO disabled.\n");
kfree(udw);
return 1;
}
drbd_bm_write_page(mdev, rs_extent_to_bm_page(udw->enr));
put_ldev(mdev);
kfree(udw);
if (drbd_bm_total_weight(mdev) <= mdev->rs_failed) {
switch (mdev->state.conn) {
case C_SYNC_SOURCE: case C_SYNC_TARGET:
case C_PAUSED_SYNC_S: case C_PAUSED_SYNC_T:
drbd_resync_finished(mdev);
default:
/* nothing to do */
break;
}
}
drbd_bcast_sync_progress(mdev);
return 1;
}
/* ATTENTION. The AL's extents are 4MB each, while the extents in the
* resync LRU-cache are 16MB each.
* The caller of this function has to hold an get_ldev() reference.
*
* TODO will be obsoleted once we have a caching lru of the on disk bitmap
*/
static void drbd_try_clear_on_disk_bm(struct drbd_conf *mdev, sector_t sector,
int count, int success)
{
struct lc_element *e;
struct update_odbm_work *udw;
unsigned int enr;
D_ASSERT(atomic_read(&mdev->local_cnt));
/* I simply assume that a sector/size pair never crosses
* a 16 MB extent border. (Currently this is true...) */
enr = BM_SECT_TO_EXT(sector);
e = lc_get(mdev->resync, enr);
if (e) {
struct bm_extent *ext = lc_entry(e, struct bm_extent, lce);
if (ext->lce.lc_number == enr) {
if (success)
ext->rs_left -= count;
else
ext->rs_failed += count;
if (ext->rs_left < ext->rs_failed) {
dev_err(DEV, "BAD! sector=%llus enr=%u rs_left=%d "
"rs_failed=%d count=%d\n",
(unsigned long long)sector,
ext->lce.lc_number, ext->rs_left,
ext->rs_failed, count);
dump_stack();
lc_put(mdev->resync, &ext->lce);
drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
return;
}
} else {
/* Normally this element should be in the cache,
* since drbd_rs_begin_io() pulled it already in.
*
* But maybe an application write finished, and we set
* something outside the resync lru_cache in sync.
*/
int rs_left = drbd_bm_e_weight(mdev, enr);
if (ext->flags != 0) {
dev_warn(DEV, "changing resync lce: %d[%u;%02lx]"
" -> %d[%u;00]\n",
ext->lce.lc_number, ext->rs_left,
ext->flags, enr, rs_left);
ext->flags = 0;
}
if (ext->rs_failed) {
dev_warn(DEV, "Kicking resync_lru element enr=%u "
"out with rs_failed=%d\n",
ext->lce.lc_number, ext->rs_failed);
}
ext->rs_left = rs_left;
ext->rs_failed = success ? 0 : count;
lc_changed(mdev->resync, &ext->lce);
}
lc_put(mdev->resync, &ext->lce);
/* no race, we are within the al_lock! */
if (ext->rs_left == ext->rs_failed) {
ext->rs_failed = 0;
udw = kmalloc(sizeof(*udw), GFP_ATOMIC);
if (udw) {
udw->enr = ext->lce.lc_number;
udw->w.cb = w_update_odbm;
drbd_queue_work_front(&mdev->data.work, &udw->w);
} else {
dev_warn(DEV, "Could not kmalloc an udw\n");
}
}
} else {
dev_err(DEV, "lc_get() failed! locked=%d/%d flags=%lu\n",
mdev->resync_locked,
mdev->resync->nr_elements,
mdev->resync->flags);
}
}
void drbd_advance_rs_marks(struct drbd_conf *mdev, unsigned long still_to_go)
{
unsigned long now = jiffies;
unsigned long last = mdev->rs_mark_time[mdev->rs_last_mark];
int next = (mdev->rs_last_mark + 1) % DRBD_SYNC_MARKS;
if (time_after_eq(now, last + DRBD_SYNC_MARK_STEP)) {
if (mdev->rs_mark_left[mdev->rs_last_mark] != still_to_go &&
mdev->state.conn != C_PAUSED_SYNC_T &&
mdev->state.conn != C_PAUSED_SYNC_S) {
mdev->rs_mark_time[next] = now;
mdev->rs_mark_left[next] = still_to_go;
mdev->rs_last_mark = next;
}
}
}
/* clear the bits corresponding to the piece of storage in question:
 * size bytes of data starting from sector. Only clear the bits of the
 * affected one or more _aligned_ BM_BLOCK_SIZE blocks.
*
* called by worker on C_SYNC_TARGET and receiver on SyncSource.
*
*/
void __drbd_set_in_sync(struct drbd_conf *mdev, sector_t sector, int size,
const char *file, const unsigned int line)
{
/* Is called from worker and receiver context _only_ */
unsigned long sbnr, ebnr, lbnr;
unsigned long count = 0;
sector_t esector, nr_sectors;
int wake_up = 0;
unsigned long flags;
if (size <= 0 || (size & 0x1ff) != 0 || size > DRBD_MAX_BIO_SIZE) {
dev_err(DEV, "drbd_set_in_sync: sector=%llus size=%d nonsense!\n",
(unsigned long long)sector, size);
return;
}
nr_sectors = drbd_get_capacity(mdev->this_bdev);
esector = sector + (size >> 9) - 1;
ERR_IF(sector >= nr_sectors) return;
ERR_IF(esector >= nr_sectors) esector = (nr_sectors-1);
lbnr = BM_SECT_TO_BIT(nr_sectors-1);
/* we clear it (in sync).
* round up start sector, round down end sector. we make sure we only
* clear full, aligned, BM_BLOCK_SIZE (4K) blocks */
if (unlikely(esector < BM_SECT_PER_BIT-1))
return;
if (unlikely(esector == (nr_sectors-1)))
ebnr = lbnr;
else
ebnr = BM_SECT_TO_BIT(esector - (BM_SECT_PER_BIT-1));
sbnr = BM_SECT_TO_BIT(sector + BM_SECT_PER_BIT-1);
if (sbnr > ebnr)
return;
/*
* ok, (capacity & 7) != 0 sometimes, but who cares...
* we count rs_{total,left} in bits, not sectors.
*/
count = drbd_bm_clear_bits(mdev, sbnr, ebnr);
if (count && get_ldev(mdev)) {
drbd_advance_rs_marks(mdev, drbd_bm_total_weight(mdev));
spin_lock_irqsave(&mdev->al_lock, flags);
drbd_try_clear_on_disk_bm(mdev, sector, count, true);
spin_unlock_irqrestore(&mdev->al_lock, flags);
/* just wake_up unconditionally now, after the various lc_changed(),
 * lc_put() calls in drbd_try_clear_on_disk_bm(). */
wake_up = 1;
put_ldev(mdev);
}
if (wake_up)
wake_up(&mdev->al_wait);
}
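/*
 * Illustrative sketch, not part of the DRBD source: clearing may only
 * touch bitmap bits whose blocks are fully covered, hence the round-up
 * of the start and round-down of the end above.  The rounding on its
 * own, with BM_SECT_PER_BIT sectors (8 for 4 KiB blocks) per bit:
 */
static void fully_covered_bits(sector_t start, sector_t end, /* inclusive */
			       unsigned long *first, unsigned long *last)
{
	/* caller must guarantee end >= BM_SECT_PER_BIT - 1 */
	*first = (start + BM_SECT_PER_BIT - 1) / BM_SECT_PER_BIT; /* up */
	*last = (end - (BM_SECT_PER_BIT - 1)) / BM_SECT_PER_BIT;  /* down */
	/* if *first > *last, no block is fully covered */
}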
/*
* this is intended to set one request worth of data out of sync.
* affects at least 1 bit,
* and at most 1+DRBD_MAX_BIO_SIZE/BM_BLOCK_SIZE bits.
*
* called by tl_clear and drbd_send_dblock (==drbd_make_request).
* so this can be _any_ process.
*/
int __drbd_set_out_of_sync(struct drbd_conf *mdev, sector_t sector, int size,
const char *file, const unsigned int line)
{
unsigned long sbnr, ebnr, lbnr, flags;
sector_t esector, nr_sectors;
unsigned int enr, count = 0;
struct lc_element *e;
if (size <= 0 || (size & 0x1ff) != 0 || size > DRBD_MAX_BIO_SIZE) {
dev_err(DEV, "sector: %llus, size: %d\n",
(unsigned long long)sector, size);
return 0;
}
if (!get_ldev(mdev))
return 0; /* no disk, no metadata, no bitmap to set bits in */
nr_sectors = drbd_get_capacity(mdev->this_bdev);
esector = sector + (size >> 9) - 1;
ERR_IF(sector >= nr_sectors)
goto out;
ERR_IF(esector >= nr_sectors)
esector = (nr_sectors-1);
lbnr = BM_SECT_TO_BIT(nr_sectors-1);
/* we set it out of sync,
* we do not need to round anything here */
sbnr = BM_SECT_TO_BIT(sector);
ebnr = BM_SECT_TO_BIT(esector);
/* ok, (capacity & 7) != 0 sometimes, but who cares...
* we count rs_{total,left} in bits, not sectors. */
spin_lock_irqsave(&mdev->al_lock, flags);
count = drbd_bm_set_bits(mdev, sbnr, ebnr);
enr = BM_SECT_TO_EXT(sector);
e = lc_find(mdev->resync, enr);
if (e)
lc_entry(e, struct bm_extent, lce)->rs_left += count;
spin_unlock_irqrestore(&mdev->al_lock, flags);
out:
put_ldev(mdev);
return count;
}
static
struct bm_extent *_bme_get(struct drbd_conf *mdev, unsigned int enr)
{
struct lc_element *e;
struct bm_extent *bm_ext;
int wakeup = 0;
unsigned long rs_flags;
spin_lock_irq(&mdev->al_lock);
if (mdev->resync_locked > mdev->resync->nr_elements/2) {
spin_unlock_irq(&mdev->al_lock);
return NULL;
}
e = lc_get(mdev->resync, enr);
bm_ext = e ? lc_entry(e, struct bm_extent, lce) : NULL;
if (bm_ext) {
if (bm_ext->lce.lc_number != enr) {
bm_ext->rs_left = drbd_bm_e_weight(mdev, enr);
bm_ext->rs_failed = 0;
lc_changed(mdev->resync, &bm_ext->lce);
wakeup = 1;
}
if (bm_ext->lce.refcnt == 1)
mdev->resync_locked++;
set_bit(BME_NO_WRITES, &bm_ext->flags);
}
rs_flags = mdev->resync->flags;
spin_unlock_irq(&mdev->al_lock);
if (wakeup)
wake_up(&mdev->al_wait);
if (!bm_ext) {
if (rs_flags & LC_STARVING)
dev_warn(DEV, "Have to wait for element"
" (resync LRU too small?)\n");
BUG_ON(rs_flags & LC_DIRTY);
}
return bm_ext;
}
static int _is_in_al(struct drbd_conf *mdev, unsigned int enr)
{
struct lc_element *al_ext;
int rv = 0;
spin_lock_irq(&mdev->al_lock);
if (unlikely(enr == mdev->act_log->new_number))
rv = 1;
else {
al_ext = lc_find(mdev->act_log, enr);
if (al_ext) {
if (al_ext->refcnt)
rv = 1;
}
}
spin_unlock_irq(&mdev->al_lock);
/*
if (unlikely(rv)) {
dev_info(DEV, "Delaying sync read until app's write is done\n");
}
*/
return rv;
}
/**
* drbd_rs_begin_io() - Gets an extent in the resync LRU cache and sets it to BME_LOCKED
* @mdev: DRBD device.
* @sector: The sector number.
*
 * This function sleeps on al_wait. Returns 0 on success, -EINTR if interrupted.
*/
int drbd_rs_begin_io(struct drbd_conf *mdev, sector_t sector)
{
unsigned int enr = BM_SECT_TO_EXT(sector);
struct bm_extent *bm_ext;
int i, sig;
int sa = 200; /* Step aside 200 times, then grab the extent and let app-IO wait.
200 times -> 20 seconds. */
retry:
sig = wait_event_interruptible(mdev->al_wait,
(bm_ext = _bme_get(mdev, enr)));
if (sig)
return -EINTR;
if (test_bit(BME_LOCKED, &bm_ext->flags))
return 0;
for (i = 0; i < AL_EXT_PER_BM_SECT; i++) {
sig = wait_event_interruptible(mdev->al_wait,
!_is_in_al(mdev, enr * AL_EXT_PER_BM_SECT + i) ||
test_bit(BME_PRIORITY, &bm_ext->flags));
if (sig || (test_bit(BME_PRIORITY, &bm_ext->flags) && sa)) {
spin_lock_irq(&mdev->al_lock);
if (lc_put(mdev->resync, &bm_ext->lce) == 0) {
bm_ext->flags = 0; /* clears BME_NO_WRITES and eventually BME_PRIORITY */
mdev->resync_locked--;
wake_up(&mdev->al_wait);
}
spin_unlock_irq(&mdev->al_lock);
if (sig)
return -EINTR;
if (schedule_timeout_interruptible(HZ/10))
return -EINTR;
if (sa && --sa == 0)
dev_warn(DEV,"drbd_rs_begin_io() stepped aside for 20sec."
"Resync stalled?\n");
goto retry;
}
}
set_bit(BME_LOCKED, &bm_ext->flags);
return 0;
}
/**
* drbd_try_rs_begin_io() - Gets an extent in the resync LRU cache, does not sleep
* @mdev: DRBD device.
* @sector: The sector number.
*
* Gets an extent in the resync LRU cache, sets it to BME_NO_WRITES, then
* tries to set it to BME_LOCKED. Returns 0 upon success, and -EAGAIN
* if there is still application IO going on in this area.
*/
int drbd_try_rs_begin_io(struct drbd_conf *mdev, sector_t sector)
{
unsigned int enr = BM_SECT_TO_EXT(sector);
const unsigned int al_enr = enr*AL_EXT_PER_BM_SECT;
struct lc_element *e;
struct bm_extent *bm_ext;
int i;
spin_lock_irq(&mdev->al_lock);
if (mdev->resync_wenr != LC_FREE && mdev->resync_wenr != enr) {
/* in case you have very heavy scattered io, it may
	 * stall the syncer for an undefined time if we give up the refcount
* when we try again and requeue.
*
* if we don't give up the refcount, but the next time
* we are scheduled this extent has been "synced" by new
* application writes, we'd miss the lc_put on the
* extent we keep the refcount on.
* so we remembered which extent we had to try again, and
* if the next requested one is something else, we do
* the lc_put here...
* we also have to wake_up
*/
e = lc_find(mdev->resync, mdev->resync_wenr);
bm_ext = e ? lc_entry(e, struct bm_extent, lce) : NULL;
if (bm_ext) {
D_ASSERT(!test_bit(BME_LOCKED, &bm_ext->flags));
D_ASSERT(test_bit(BME_NO_WRITES, &bm_ext->flags));
clear_bit(BME_NO_WRITES, &bm_ext->flags);
mdev->resync_wenr = LC_FREE;
if (lc_put(mdev->resync, &bm_ext->lce) == 0)
mdev->resync_locked--;
wake_up(&mdev->al_wait);
} else {
dev_alert(DEV, "LOGIC BUG\n");
}
}
/* TRY. */
e = lc_try_get(mdev->resync, enr);
bm_ext = e ? lc_entry(e, struct bm_extent, lce) : NULL;
if (bm_ext) {
if (test_bit(BME_LOCKED, &bm_ext->flags))
goto proceed;
if (!test_and_set_bit(BME_NO_WRITES, &bm_ext->flags)) {
mdev->resync_locked++;
} else {
/* we did set the BME_NO_WRITES,
* but then could not set BME_LOCKED,
* so we tried again.
* drop the extra reference. */
bm_ext->lce.refcnt--;
D_ASSERT(bm_ext->lce.refcnt > 0);
}
goto check_al;
} else {
/* do we rather want to try later? */
if (mdev->resync_locked > mdev->resync->nr_elements-3)
goto try_again;
/* Do or do not. There is no try. -- Yoda */
e = lc_get(mdev->resync, enr);
bm_ext = e ? lc_entry(e, struct bm_extent, lce) : NULL;
if (!bm_ext) {
const unsigned long rs_flags = mdev->resync->flags;
if (rs_flags & LC_STARVING)
dev_warn(DEV, "Have to wait for element"
" (resync LRU too small?)\n");
BUG_ON(rs_flags & LC_DIRTY);
goto try_again;
}
if (bm_ext->lce.lc_number != enr) {
bm_ext->rs_left = drbd_bm_e_weight(mdev, enr);
bm_ext->rs_failed = 0;
lc_changed(mdev->resync, &bm_ext->lce);
wake_up(&mdev->al_wait);
D_ASSERT(test_bit(BME_LOCKED, &bm_ext->flags) == 0);
}
set_bit(BME_NO_WRITES, &bm_ext->flags);
D_ASSERT(bm_ext->lce.refcnt == 1);
mdev->resync_locked++;
goto check_al;
}
check_al:
for (i = 0; i < AL_EXT_PER_BM_SECT; i++) {
if (unlikely(al_enr+i == mdev->act_log->new_number))
goto try_again;
if (lc_is_used(mdev->act_log, al_enr+i))
goto try_again;
}
set_bit(BME_LOCKED, &bm_ext->flags);
proceed:
mdev->resync_wenr = LC_FREE;
spin_unlock_irq(&mdev->al_lock);
return 0;
try_again:
if (bm_ext)
mdev->resync_wenr = enr;
spin_unlock_irq(&mdev->al_lock);
return -EAGAIN;
}
void drbd_rs_complete_io(struct drbd_conf *mdev, sector_t sector)
{
unsigned int enr = BM_SECT_TO_EXT(sector);
struct lc_element *e;
struct bm_extent *bm_ext;
unsigned long flags;
spin_lock_irqsave(&mdev->al_lock, flags);
e = lc_find(mdev->resync, enr);
bm_ext = e ? lc_entry(e, struct bm_extent, lce) : NULL;
if (!bm_ext) {
spin_unlock_irqrestore(&mdev->al_lock, flags);
if (__ratelimit(&drbd_ratelimit_state))
dev_err(DEV, "drbd_rs_complete_io() called, but extent not found\n");
return;
}
if (bm_ext->lce.refcnt == 0) {
spin_unlock_irqrestore(&mdev->al_lock, flags);
dev_err(DEV, "drbd_rs_complete_io(,%llu [=%u]) called, "
"but refcnt is 0!?\n",
(unsigned long long)sector, enr);
return;
}
if (lc_put(mdev->resync, &bm_ext->lce) == 0) {
bm_ext->flags = 0; /* clear BME_LOCKED, BME_NO_WRITES and BME_PRIORITY */
mdev->resync_locked--;
wake_up(&mdev->al_wait);
}
spin_unlock_irqrestore(&mdev->al_lock, flags);
}
/**
* drbd_rs_cancel_all() - Removes all extents from the resync LRU (even BME_LOCKED)
* @mdev: DRBD device.
*/
void drbd_rs_cancel_all(struct drbd_conf *mdev)
{
spin_lock_irq(&mdev->al_lock);
if (get_ldev_if_state(mdev, D_FAILED)) { /* Makes sure ->resync is there. */
lc_reset(mdev->resync);
put_ldev(mdev);
}
mdev->resync_locked = 0;
mdev->resync_wenr = LC_FREE;
spin_unlock_irq(&mdev->al_lock);
wake_up(&mdev->al_wait);
}
/**
* drbd_rs_del_all() - Gracefully remove all extents from the resync LRU
* @mdev: DRBD device.
*
* Returns 0 upon success, -EAGAIN if at least one reference count was
* not zero.
*/
int drbd_rs_del_all(struct drbd_conf *mdev)
{
struct lc_element *e;
struct bm_extent *bm_ext;
int i;
spin_lock_irq(&mdev->al_lock);
if (get_ldev_if_state(mdev, D_FAILED)) {
/* ok, ->resync is there. */
for (i = 0; i < mdev->resync->nr_elements; i++) {
e = lc_element_by_index(mdev->resync, i);
bm_ext = lc_entry(e, struct bm_extent, lce);
if (bm_ext->lce.lc_number == LC_FREE)
continue;
if (bm_ext->lce.lc_number == mdev->resync_wenr) {
dev_info(DEV, "dropping %u in drbd_rs_del_all, apparently"
" got 'synced' by application io\n",
mdev->resync_wenr);
D_ASSERT(!test_bit(BME_LOCKED, &bm_ext->flags));
D_ASSERT(test_bit(BME_NO_WRITES, &bm_ext->flags));
clear_bit(BME_NO_WRITES, &bm_ext->flags);
mdev->resync_wenr = LC_FREE;
lc_put(mdev->resync, &bm_ext->lce);
}
if (bm_ext->lce.refcnt != 0) {
dev_info(DEV, "Retrying drbd_rs_del_all() later. "
"refcnt=%d\n", bm_ext->lce.refcnt);
put_ldev(mdev);
spin_unlock_irq(&mdev->al_lock);
return -EAGAIN;
}
D_ASSERT(!test_bit(BME_LOCKED, &bm_ext->flags));
D_ASSERT(!test_bit(BME_NO_WRITES, &bm_ext->flags));
lc_del(mdev->resync, &bm_ext->lce);
}
D_ASSERT(mdev->resync->used == 0);
put_ldev(mdev);
}
spin_unlock_irq(&mdev->al_lock);
return 0;
}
/**
* drbd_rs_failed_io() - Record information on a failure to resync the specified blocks
* @mdev: DRBD device.
* @sector: The sector number.
 * @size:	Size of failed IO operation, in bytes.
*/
void drbd_rs_failed_io(struct drbd_conf *mdev, sector_t sector, int size)
{
/* Is called from worker and receiver context _only_ */
unsigned long sbnr, ebnr, lbnr;
unsigned long count;
sector_t esector, nr_sectors;
int wake_up = 0;
if (size <= 0 || (size & 0x1ff) != 0 || size > DRBD_MAX_BIO_SIZE) {
dev_err(DEV, "drbd_rs_failed_io: sector=%llus size=%d nonsense!\n",
(unsigned long long)sector, size);
return;
}
nr_sectors = drbd_get_capacity(mdev->this_bdev);
esector = sector + (size >> 9) - 1;
ERR_IF(sector >= nr_sectors) return;
ERR_IF(esector >= nr_sectors) esector = (nr_sectors-1);
lbnr = BM_SECT_TO_BIT(nr_sectors-1);
/*
* round up start sector, round down end sector. we make sure we only
* handle full, aligned, BM_BLOCK_SIZE (4K) blocks */
if (unlikely(esector < BM_SECT_PER_BIT-1))
return;
if (unlikely(esector == (nr_sectors-1)))
ebnr = lbnr;
else
ebnr = BM_SECT_TO_BIT(esector - (BM_SECT_PER_BIT-1));
sbnr = BM_SECT_TO_BIT(sector + BM_SECT_PER_BIT-1);
if (sbnr > ebnr)
return;
/*
* ok, (capacity & 7) != 0 sometimes, but who cares...
* we count rs_{total,left} in bits, not sectors.
*/
spin_lock_irq(&mdev->al_lock);
count = drbd_bm_count_bits(mdev, sbnr, ebnr);
if (count) {
mdev->rs_failed += count;
if (get_ldev(mdev)) {
drbd_try_clear_on_disk_bm(mdev, sector, count, false);
put_ldev(mdev);
}
/* just wake_up unconditionally now, after the various lc_changed(),
 * lc_put() calls in drbd_try_clear_on_disk_bm(). */
wake_up = 1;
}
spin_unlock_irq(&mdev->al_lock);
if (wake_up)
wake_up(&mdev->al_wait);
}
| gpl-2.0 |
omega-roms/G900F_Omega_Kernel_KK_4.4.2 | drivers/infiniband/hw/mthca/mthca_catas.c | 9639 | 5347 | /*
* Copyright (c) 2005 Cisco Systems. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include <linux/jiffies.h>
#include <linux/module.h>
#include <linux/timer.h>
#include <linux/workqueue.h>
#include "mthca_dev.h"
enum {
MTHCA_CATAS_POLL_INTERVAL = 5 * HZ,
MTHCA_CATAS_TYPE_INTERNAL = 0,
MTHCA_CATAS_TYPE_UPLINK = 3,
MTHCA_CATAS_TYPE_DDR = 4,
MTHCA_CATAS_TYPE_PARITY = 5,
};
static DEFINE_SPINLOCK(catas_lock);
static LIST_HEAD(catas_list);
static struct workqueue_struct *catas_wq;
static struct work_struct catas_work;
static int catas_reset_disable;
module_param_named(catas_reset_disable, catas_reset_disable, int, 0644);
MODULE_PARM_DESC(catas_reset_disable, "disable reset on catastrophic event if nonzero");
static void catas_reset(struct work_struct *work)
{
struct mthca_dev *dev, *tmpdev;
LIST_HEAD(tlist);
int ret;
mutex_lock(&mthca_device_mutex);
spin_lock_irq(&catas_lock);
list_splice_init(&catas_list, &tlist);
spin_unlock_irq(&catas_lock);
list_for_each_entry_safe(dev, tmpdev, &tlist, catas_err.list) {
struct pci_dev *pdev = dev->pdev;
ret = __mthca_restart_one(dev->pdev);
/* 'dev' is no longer valid */
if (ret)
printk(KERN_ERR "mthca %s: Reset failed (%d)\n",
pci_name(pdev), ret);
else {
struct mthca_dev *d = pci_get_drvdata(pdev);
mthca_dbg(d, "Reset succeeded\n");
}
}
mutex_unlock(&mthca_device_mutex);
}
static void handle_catas(struct mthca_dev *dev)
{
struct ib_event event;
unsigned long flags;
const char *type;
int i;
event.device = &dev->ib_dev;
event.event = IB_EVENT_DEVICE_FATAL;
event.element.port_num = 0;
dev->active = false;
ib_dispatch_event(&event);
switch (swab32(readl(dev->catas_err.map)) >> 24) {
case MTHCA_CATAS_TYPE_INTERNAL:
type = "internal error";
break;
case MTHCA_CATAS_TYPE_UPLINK:
type = "uplink bus error";
break;
case MTHCA_CATAS_TYPE_DDR:
type = "DDR data error";
break;
case MTHCA_CATAS_TYPE_PARITY:
type = "internal parity error";
break;
default:
type = "unknown error";
break;
}
mthca_err(dev, "Catastrophic error detected: %s\n", type);
for (i = 0; i < dev->catas_err.size; ++i)
mthca_err(dev, " buf[%02x]: %08x\n",
i, swab32(readl(dev->catas_err.map + i)));
if (catas_reset_disable)
return;
spin_lock_irqsave(&catas_lock, flags);
list_add(&dev->catas_err.list, &catas_list);
queue_work(catas_wq, &catas_work);
spin_unlock_irqrestore(&catas_lock, flags);
}
static void poll_catas(unsigned long dev_ptr)
{
struct mthca_dev *dev = (struct mthca_dev *) dev_ptr;
int i;
for (i = 0; i < dev->catas_err.size; ++i)
if (readl(dev->catas_err.map + i)) {
handle_catas(dev);
return;
}
mod_timer(&dev->catas_err.timer,
round_jiffies(jiffies + MTHCA_CATAS_POLL_INTERVAL));
}
void mthca_start_catas_poll(struct mthca_dev *dev)
{
phys_addr_t addr;
init_timer(&dev->catas_err.timer);
dev->catas_err.map = NULL;
addr = pci_resource_start(dev->pdev, 0) +
((pci_resource_len(dev->pdev, 0) - 1) &
dev->catas_err.addr);
dev->catas_err.map = ioremap(addr, dev->catas_err.size * 4);
if (!dev->catas_err.map) {
mthca_warn(dev, "couldn't map catastrophic error region "
"at 0x%llx/0x%x\n", (unsigned long long) addr,
dev->catas_err.size * 4);
return;
}
dev->catas_err.timer.data = (unsigned long) dev;
dev->catas_err.timer.function = poll_catas;
dev->catas_err.timer.expires = jiffies + MTHCA_CATAS_POLL_INTERVAL;
INIT_LIST_HEAD(&dev->catas_err.list);
add_timer(&dev->catas_err.timer);
}
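/*
 * Illustrative note, not part of the driver: catas_err.addr is an
 * offset that may carry bits beyond BAR 0, so the code above masks it
 * into the BAR before adding the BAR's bus address:
 *
 *	addr = bar_start + (catas_err.addr & (bar_len - 1))
 *
 * which relies on PCI BAR lengths being powers of two.
 */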
void mthca_stop_catas_poll(struct mthca_dev *dev)
{
del_timer_sync(&dev->catas_err.timer);
if (dev->catas_err.map)
iounmap(dev->catas_err.map);
spin_lock_irq(&catas_lock);
list_del(&dev->catas_err.list);
spin_unlock_irq(&catas_lock);
}
int __init mthca_catas_init(void)
{
INIT_WORK(&catas_work, catas_reset);
catas_wq = create_singlethread_workqueue("mthca_catas");
if (!catas_wq)
return -ENOMEM;
return 0;
}
void mthca_catas_cleanup(void)
{
destroy_workqueue(catas_wq);
}
| gpl-2.0 |
chentz78/chentz-N4-Kernel | arch/arm/boot/compressed/ofw-shark.c | 12199 | 5289 | /*
* linux/arch/arm/boot/compressed/ofw-shark.c
*
* by Alexander Schulz
*
* This file is used to get some basic information
* about the memory layout of the shark we are running
 * on. Memory is usually divided into blocks of 8 MB.
 * The bootargs are also copied from OpenFirmware.
*/
#include <linux/kernel.h>
#include <linux/types.h>
#include <asm/setup.h>
#include <asm/page.h>
asmlinkage void
create_params (unsigned long *buffer)
{
/* Is there a better address? Also change in mach-shark/core.c */
struct tag *tag = (struct tag *) 0x08003000;
int j,i,m,k,nr_banks,size;
unsigned char *c;
k = 0;
/* Head of the taglist */
tag->hdr.tag = ATAG_CORE;
tag->hdr.size = tag_size(tag_core);
tag->u.core.flags = 1;
tag->u.core.pagesize = PAGE_SIZE;
tag->u.core.rootdev = 0;
/* Build up one tagged block for each memory region */
size=0;
nr_banks=(unsigned int) buffer[0];
for (j=0;j<nr_banks;j++){
/* search the lowest address and put it into the next entry */
/* not a fast sort algorithm, but there are at most 8 entries */
/* and this is used only once anyway */
m=0xffffffff;
for (i=0;i<(unsigned int) buffer[0];i++){
if (buffer[2*i+1]<m) {
m=buffer[2*i+1];
k=i;
}
}
tag = tag_next(tag);
tag->hdr.tag = ATAG_MEM;
tag->hdr.size = tag_size(tag_mem32);
tag->u.mem.size = buffer[2*k+2];
tag->u.mem.start = buffer[2*k+1];
size += buffer[2*k+2];
buffer[2*k+1]=0xffffffff; /* mark as copied */
}
/* The command line */
tag = tag_next(tag);
tag->hdr.tag = ATAG_CMDLINE;
c=(unsigned char *)(&buffer[34]);
j=0;
while (*c) tag->u.cmdline.cmdline[j++]=*c++;
tag->u.cmdline.cmdline[j]=0;
tag->hdr.size = (j + 7 + sizeof(struct tag_header)) >> 2;
/* Hardware revision */
tag = tag_next(tag);
tag->hdr.tag = ATAG_REVISION;
tag->hdr.size = tag_size(tag_revision);
tag->u.revision.rev = ((unsigned char) buffer[33])-'0';
/* End of the taglist */
tag = tag_next(tag);
tag->hdr.tag = 0;
tag->hdr.size = 0;
}
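/*
 * Illustrative sketch, not part of the kernel source: the kernel proper
 * later consumes this list by walking tag_next() until the zero
 * terminator written above, roughly:
 */
static unsigned long total_tagged_mem(struct tag *t)
{
	unsigned long size = 0;

	for (; t->hdr.tag; t = tag_next(t))	/* ATAG_NONE (0) ends it */
		if (t->hdr.tag == ATAG_MEM)
			size += t->u.mem.size;
	return size;
}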
typedef int (*ofw_handle_t)(void *);
/* Everything below is called with a wrong MMU setting.
* This means: no string constants, no initialization of
* arrays, no global variables! This is ugly but I didn't
* want to write this in assembler :-)
*/
int
of_decode_int(const unsigned char *p)
{
unsigned int i = *p++ << 8;
i = (i + *p++) << 8;
i = (i + *p++) << 8;
return (i + *p);
}
int
OF_finddevice(ofw_handle_t openfirmware, char *name)
{
unsigned int args[8];
char service[12];
service[0]='f';
service[1]='i';
service[2]='n';
service[3]='d';
service[4]='d';
service[5]='e';
service[6]='v';
service[7]='i';
service[8]='c';
service[9]='e';
service[10]='\0';
args[0]=(unsigned int)service;
args[1]=1;
args[2]=1;
args[3]=(unsigned int)name;
if (openfirmware(args) == -1)
return -1;
return args[4];
}
int
OF_getproplen(ofw_handle_t openfirmware, int handle, char *prop)
{
unsigned int args[8];
char service[12];
service[0]='g';
service[1]='e';
service[2]='t';
service[3]='p';
service[4]='r';
service[5]='o';
service[6]='p';
service[7]='l';
service[8]='e';
service[9]='n';
service[10]='\0';
args[0] = (unsigned int)service;
args[1] = 2;
args[2] = 1;
args[3] = (unsigned int)handle;
args[4] = (unsigned int)prop;
if (openfirmware(args) == -1)
return -1;
return args[5];
}
int
OF_getprop(ofw_handle_t openfirmware, int handle, char *prop, void *buf, unsigned int buflen)
{
unsigned int args[8];
char service[8];
service[0]='g';
service[1]='e';
service[2]='t';
service[3]='p';
service[4]='r';
service[5]='o';
service[6]='p';
service[7]='\0';
args[0] = (unsigned int)service;
args[1] = 4;
args[2] = 1;
args[3] = (unsigned int)handle;
args[4] = (unsigned int)prop;
args[5] = (unsigned int)buf;
args[6] = buflen;
if (openfirmware(args) == -1)
return -1;
return args[7];
}
asmlinkage void ofw_init(ofw_handle_t o, int *nomr, int *pointer)
{
int phandle,i,mem_len,buffer[32];
char temp[15];
temp[0]='/';
temp[1]='m';
temp[2]='e';
temp[3]='m';
temp[4]='o';
temp[5]='r';
temp[6]='y';
temp[7]='\0';
phandle=OF_finddevice(o,temp);
temp[0]='r';
temp[1]='e';
temp[2]='g';
temp[3]='\0';
mem_len = OF_getproplen(o,phandle, temp);
OF_getprop(o,phandle, temp, buffer, mem_len);
*nomr=mem_len >> 3;
for (i=0; i<=mem_len/4; i++) pointer[i]=of_decode_int((const unsigned char *)&buffer[i]);
temp[0]='/';
temp[1]='c';
temp[2]='h';
temp[3]='o';
temp[4]='s';
temp[5]='e';
temp[6]='n';
temp[7]='\0';
phandle=OF_finddevice(o,temp);
temp[0]='b';
temp[1]='o';
temp[2]='o';
temp[3]='t';
temp[4]='a';
temp[5]='r';
temp[6]='g';
temp[7]='s';
temp[8]='\0';
mem_len = OF_getproplen(o,phandle, temp);
OF_getprop(o,phandle, temp, buffer, mem_len);
if (mem_len > 128) mem_len=128;
for (i=0; i<=mem_len/4; i++) pointer[i+33]=buffer[i];
pointer[i+33]=0;
temp[0]='/';
temp[1]='\0';
phandle=OF_finddevice(o,temp);
temp[0]='b';
temp[1]='a';
temp[2]='n';
temp[3]='n';
temp[4]='e';
temp[5]='r';
temp[6]='-';
temp[7]='n';
temp[8]='a';
temp[9]='m';
temp[10]='e';
temp[11]='\0';
mem_len = OF_getproplen(o,phandle, temp);
OF_getprop(o,phandle, temp, buffer, mem_len);
* ((unsigned char *) &pointer[32]) = ((unsigned char *) buffer)[mem_len-2];
}
| gpl-2.0 |
SM-G920P/G920PVPU3BOI1 | arch/powerpc/math-emu/fmsub.c | 13735 | 1123 | #include <linux/types.h>
#include <linux/errno.h>
#include <asm/uaccess.h>
#include <asm/sfp-machine.h>
#include <math-emu/soft-fp.h>
#include <math-emu/double.h>
int
fmsub(void *frD, void *frA, void *frB, void *frC)
{
FP_DECL_D(R);
FP_DECL_D(A);
FP_DECL_D(B);
FP_DECL_D(C);
FP_DECL_D(T);
FP_DECL_EX;
#ifdef DEBUG
printk("%s: %p %p %p %p\n", __func__, frD, frA, frB, frC);
#endif
FP_UNPACK_DP(A, frA);
FP_UNPACK_DP(B, frB);
FP_UNPACK_DP(C, frC);
#ifdef DEBUG
printk("A: %ld %lu %lu %ld (%ld)\n", A_s, A_f1, A_f0, A_e, A_c);
printk("B: %ld %lu %lu %ld (%ld)\n", B_s, B_f1, B_f0, B_e, B_c);
printk("C: %ld %lu %lu %ld (%ld)\n", C_s, C_f1, C_f0, C_e, C_c);
#endif
if ((A_c == FP_CLS_INF && C_c == FP_CLS_ZERO) ||
(A_c == FP_CLS_ZERO && C_c == FP_CLS_INF))
FP_SET_EXCEPTION(EFLAG_VXIMZ);
FP_MUL_D(T, A, C);
if (B_c != FP_CLS_NAN)
B_s ^= 1;
if (T_s != B_s && T_c == FP_CLS_INF && B_c == FP_CLS_INF)
FP_SET_EXCEPTION(EFLAG_VXISI);
FP_ADD_D(R, T, B);
#ifdef DEBUG
printk("D: %ld %lu %lu %ld (%ld)\n", R_s, R_f1, R_f0, R_e, R_c);
#endif
__FP_PACK_D(frD, R);
return FP_CUR_EXCEPTIONS;
}
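/*
 * Illustrative sketch, not part of the kernel source: ignoring the NaN
 * and infinity special cases, the emulation above computes the PowerPC
 * fmsub semantics frD = (frA * frC) - frB with a single final rounding.
 * A user-space reference using the C99 fma(), which also rounds once:
 */
#include <math.h>

static double fmsub_ref(double a, double b, double c)
{
	return fma(a, c, -b);	/* (a * c) - b, rounded once */
}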
| gpl-2.0 |
shminer/android_kernel_flounder | sound/drivers/opl4/opl4_mixer.c | 15015 | 2867 | /*
* OPL4 mixer functions
* Copyright (c) 2003 by Clemens Ladisch <clemens@ladisch.de>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include "opl4_local.h"
#include <sound/control.h>
static int snd_opl4_ctl_info(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo)
{
uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
uinfo->count = 2;
uinfo->value.integer.min = 0;
uinfo->value.integer.max = 7;
return 0;
}
static int snd_opl4_ctl_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol)
{
struct snd_opl4 *opl4 = snd_kcontrol_chip(kcontrol);
unsigned long flags;
u8 reg = kcontrol->private_value;
u8 value;
spin_lock_irqsave(&opl4->reg_lock, flags);
value = snd_opl4_read(opl4, reg);
spin_unlock_irqrestore(&opl4->reg_lock, flags);
ucontrol->value.integer.value[0] = 7 - (value & 7);
ucontrol->value.integer.value[1] = 7 - ((value >> 3) & 7);
return 0;
}
static int snd_opl4_ctl_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol)
{
struct snd_opl4 *opl4 = snd_kcontrol_chip(kcontrol);
unsigned long flags;
u8 reg = kcontrol->private_value;
u8 value, old_value;
value = (7 - (ucontrol->value.integer.value[0] & 7)) |
((7 - (ucontrol->value.integer.value[1] & 7)) << 3);
spin_lock_irqsave(&opl4->reg_lock, flags);
old_value = snd_opl4_read(opl4, reg);
snd_opl4_write(opl4, reg, value);
spin_unlock_irqrestore(&opl4->reg_lock, flags);
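/* ALSA convention: a put callback returns 1 if the control value
 * changed (so the core sends a notification), 0 otherwise. */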
return value != old_value;
}
static struct snd_kcontrol_new snd_opl4_controls[] = {
{
.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
.name = "FM Playback Volume",
.info = snd_opl4_ctl_info,
.get = snd_opl4_ctl_get,
.put = snd_opl4_ctl_put,
.private_value = OPL4_REG_MIX_CONTROL_FM
},
{
.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
.name = "Wavetable Playback Volume",
.info = snd_opl4_ctl_info,
.get = snd_opl4_ctl_get,
.put = snd_opl4_ctl_put,
.private_value = OPL4_REG_MIX_CONTROL_PCM
}
};
int snd_opl4_create_mixer(struct snd_opl4 *opl4)
{
struct snd_card *card = opl4->card;
int i, err;
strcat(card->mixername, ",OPL4");
for (i = 0; i < 2; ++i) {
err = snd_ctl_add(card, snd_ctl_new1(&snd_opl4_controls[i], opl4));
if (err < 0)
return err;
}
return 0;
}
| gpl-2.0 |
binkybear/AK-OnePone | arch/arm/mach-msm/oppo/boot_mode.c | 168 | 1921 | /* Copyright (c) 2011-2013, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#include <asm/setup.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/boot_mode.h>
static int ftm_mode = MSM_BOOT_MODE__NORMAL;
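/* Parse the "oppo_ftm_mode=" kernel command-line option. Note that
 * strncmp() with length 5 compares only the first five characters, so
 * these are effectively prefix matches ("facto", "ftmwi", "ftmrf",
 * "ftmre"); this appears intentional but is looser than the literal
 * strings suggest. */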
static int __init board_mfg_mode_init(char *str)
{
if (!strncmp(str, "factory2", 5))
ftm_mode = MSM_BOOT_MODE__FACTORY;
else if (!strncmp(str, "ftmwifi", 5))
ftm_mode = MSM_BOOT_MODE__WLAN;
else if (!strncmp(str, "ftmrf", 5))
ftm_mode = MSM_BOOT_MODE__RF;
else if (!strncmp(str, "ftmrecovery", 5))
ftm_mode = MSM_BOOT_MODE__RECOVERY;
pr_debug("%s: ftm_mode=%d\n", __func__, ftm_mode);
return 1;
}
__setup("oppo_ftm_mode=", board_mfg_mode_init);
int get_boot_mode(void)
{
return ftm_mode;
}
EXPORT_SYMBOL(get_boot_mode);
static char boot_mode[16];
static int __init boot_mode_init(char *str)
{
strlcpy(boot_mode, str, sizeof(boot_mode)); /* bound the copy: cmdline values can exceed 16 bytes */
pr_debug("%s: parse boot_mode is %s\n", __func__, boot_mode);
return 1;
}
__setup("androidboot.mode=", boot_mode_init);
char *get_boot_mode_str(void)
{
return boot_mode;
}
EXPORT_SYMBOL(get_boot_mode_str);
static char pwron_event[16];
static int __init start_reason_init(char *str)
{
strlcpy(pwron_event, str, sizeof(pwron_event)); /* bounded, as for boot_mode above */
pr_debug("%s: parse poweron reason %s\n", __func__, pwron_event);
return 1;
}
__setup("androidboot.startupmode=", start_reason_init);
char *get_start_reason(void)
{
return pwron_event;
}
EXPORT_SYMBOL(get_start_reason);
| gpl-2.0 |
Radium-Devices/Radium_jflte | drivers/net/wireless/ipsecdrvtl/ag.c | 168 | 21856 | /*
'des.c' Obfuscated by COBF (Version 1.06 2006-01-07 by BB) at Wed Dec 18 14:11:10 2013
*/
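/* Editor's note (hedged): despite the obfuscated identifiers, the large
 * constant tables below are recognisably DES material -- bb2331 matches
 * the PC-1 key-schedule permutation, bb1811 looks like the key-schedule
 * (PC-2 style) lookup tables, and bb1836 matches the combined
 * S-box/P-permutation (SP) tables used by fast DES implementations.
 * The bb829 context chains three single-DES key schedules, with the
 * middle one built reversed, i.e. EDE triple-DES. */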
#include"cobf.h"
#ifdef _WIN32
#if defined( UNDER_CE) && defined( bb344) || ! defined( bb338)
#define bb340 1
#define bb336 1
#else
#define bb351 bb357
#define bb330 1
#define bb352 1
#endif
#define bb353 1
#include"uncobf.h"
#include<ndis.h>
#include"cobf.h"
#ifdef UNDER_CE
#include"uncobf.h"
#include<ndiswan.h>
#include"cobf.h"
#endif
#include"uncobf.h"
#include<stdio.h>
#include<basetsd.h>
#include"cobf.h"
bba bbs bbl bbf, *bb1;bba bbs bbe bbq, *bb94;bba bb135 bb124, *bb337;
bba bbs bbl bb39, *bb72;bba bbs bb135 bbk, *bb59;bba bbe bbu, *bb133;
bba bbh bbf*bb89;
#ifdef bb308
bba bbd bb60, *bb122;
#endif
#else
#include"uncobf.h"
#include<linux/module.h>
#include<linux/ctype.h>
#include<linux/time.h>
#include<linux/slab.h>
#include"cobf.h"
#ifndef bb116
#define bb116
#ifdef _WIN32
#include"uncobf.h"
#include<wtypes.h>
#include"cobf.h"
#else
#ifdef bb113
#include"uncobf.h"
#include<linux/types.h>
#include"cobf.h"
#else
#include"uncobf.h"
#include<stddef.h>
#include<sys/types.h>
#include"cobf.h"
#endif
#endif
#ifdef _WIN32
bba bb111 bb255;
#else
bba bbe bbu, *bb133, *bb279;
#define bb201 1
#define bb202 0
bba bb271 bb228, *bb217, *bb230;bba bbe bb237, *bb250, *bb286;bba bbs
bbq, *bb94, *bb288;bba bb6 bb223, *bb284;bba bbs bb6 bb227, *bb258;
bba bb6 bb117, *bb240;bba bbs bb6 bb63, *bb241;bba bb63 bb257, *bb229
;bba bb63 bb276, *bb291;bba bb117 bb111, *bb249;bba bb289 bb262;bba
bb209 bb124;bba bb270 bb82;bba bb115 bb114;bba bb115 bb274;
#ifdef bb226
bba bb236 bb39, *bb72;bba bb254 bbk, *bb59;bba bb252 bbd, *bb29;bba
bb269 bb56, *bb119;
#else
bba bb264 bb39, *bb72;bba bb256 bbk, *bb59;bba bb278 bbd, *bb29;bba
bb207 bb56, *bb119;
#endif
bba bb39 bbf, *bb1, *bb224;bba bbk bb244, *bb214, *bb221;bba bbk bb275
, *bb210, *bb247;bba bbd bb60, *bb122, *bb205;bba bb82 bb37, *bb266, *
bb242;bba bbd bb235, *bb211, *bb222;bba bb114 bb251, *bb268, *bb232;
bba bb56 bb225, *bb280, *bb273;
#define bb141 bbb
bba bbb*bb212, *bb77;bba bbh bbb*bb231;bba bbl bb208;bba bbl*bb233;
bba bbh bbl*bb83;
#if defined( bb113)
bba bbe bb112;
#endif
bba bb112 bb19;bba bb19*bb234;bba bbh bb19*bb188;
#if defined( bb283) || defined( bb238)
bba bb19 bb36;bba bb19 bb120;
#else
bba bbl bb36;bba bbs bbl bb120;
#endif
bba bbh bb36*bb261;bba bb36*bb267;bba bb60 bb265, *bb216;bba bbb*
bb107;bba bb107*bb239;
#define bb215( bb35) bbi bb35##__ { bbe bb219; }; bba bbi bb35##__ * \
bb35
bba bbi{bb37 bb190,bb246,bb243,bb245;}bb272, *bb281, *bb260;bba bbi{
bb37 bb8,bb193;}bb292, *bb263, *bb277;bba bbi{bb37 bb218,bb248;}bb220
, *bb213, *bb259;
#endif
bba bbh bbf*bb89;
#endif
bba bbf bb101;
#define IN
#define OUT
#ifdef _DEBUG
#define bb145( bbc) bb32( bbc)
#else
#define bb145( bbc) ( bbb)( bbc)
#endif
bba bbe bb161, *bb173;
#define bb287 0
#define bb312 1
#define bb296 2
#define bb323 3
#define bb343 4
bba bbe bb349;bba bbb*bb121;
#endif
#ifdef _WIN32
#ifndef UNDER_CE
#define bb31 bb341
#define bb43 bb346
bba bbs bb6 bb31;bba bb6 bb43;
#endif
#else
#endif
#ifdef _WIN32
bbb*bb128(bb31 bb47);bbb bb108(bbb* );bbb*bb137(bb31 bb159,bb31 bb47);
#else
#define bb128( bbc) bb147(1, bbc, bb140)
#define bb108( bbc) bb331( bbc)
#define bb137( bbc, bbn) bb147( bbc, bbn, bb140)
#endif
#ifdef _WIN32
#define bb32( bbc) bb339( bbc)
#else
#ifdef _DEBUG
bbe bb144(bbh bbl*bb95,bbh bbl*bb25,bbs bb285);
#define bb32( bbc) ( bbb)(( bbc) || ( bb144(# bbc, __FILE__, __LINE__ \
)))
#else
#define bb32( bbc) (( bbb)0)
#endif
#endif
bb43 bb302(bb43*bb324);
#ifndef _WIN32
bbe bb328(bbh bbl*bbg);bbe bb321(bbh bbl*bb20,...);
#endif
#ifdef _WIN32
bba bb342 bb96;
#define bb139( bbc) bb354( bbc)
#define bb143( bbc) bb329( bbc)
#define bb134( bbc) bb348( bbc)
#define bb132( bbc) bb332( bbc)
#else
bba bb334 bb96;
#define bb139( bbc) ( bbb)( * bbc = bb356( bbc))
#define bb143( bbc) (( bbb)0)
#define bb134( bbc) bb333( bbc)
#define bb132( bbc) bb358( bbc)
#endif
#ifdef __cplusplus
bbr"\x43"{
#endif
bba bbi{bbf bb417[8 *16 ];}bb932;bbb bb1750(bb932*bbj,bbh bbb*bb71);bbb
bb1931(bb932*bbj,bbh bbb*bb71);bbb bb1282(bb932*bbj,bbb*bb14,bbh bbb*
bb5);bba bbi{bb932 bb2033,bb2034,bb2032;}bb829;bbb bb2102(bb829*bbj,
bbh bbb*bb1303);bbb bb2136(bb829*bbj,bbh bbb*bb1303);bbb bb2043(bb829
*bbj,bbb*bb14,bbh bbb*bb5);bbb bb2178(bb829*bbj,bbb*bb14,bbh bbb*bb5
);
#ifdef __cplusplus
}
#endif
bb41 bbh bb39 bb2331[]={57 ,49 ,41 ,33 ,25 ,17 ,9 ,1 ,58 ,50 ,42 ,34 ,26 ,18 ,10 ,2 ,
59 ,51 ,43 ,35 ,27 ,19 ,11 ,3 ,60 ,52 ,44 ,36 ,63 ,55 ,47 ,39 ,31 ,23 ,15 ,7 ,62 ,54 ,46 ,38
,30 ,22 ,14 ,6 ,61 ,53 ,45 ,37 ,29 ,21 ,13 ,5 ,28 ,20 ,12 ,4 };bb41 bbh bbd bb1811[8 ]
[64 ]={{0x000000L ,0x004000L ,0x040000L ,0x044000L ,0x000100L ,0x004100L ,
0x040100L ,0x044100L ,0x020000L ,0x024000L ,0x060000L ,0x064000L ,0x020100L
,0x024100L ,0x060100L ,0x064100L ,0x000001L ,0x004001L ,0x040001L ,
0x044001L ,0x000101L ,0x004101L ,0x040101L ,0x044101L ,0x020001L ,0x024001L
,0x060001L ,0x064001L ,0x020101L ,0x024101L ,0x060101L ,0x064101L ,
0x080000L ,0x084000L ,0x0C0000L ,0x0C4000L ,0x080100L ,0x084100L ,0x0C0100L
,0x0C4100L ,0x0A0000L ,0x0A4000L ,0x0E0000L ,0x0E4000L ,0x0A0100L ,
0x0A4100L ,0x0E0100L ,0x0E4100L ,0x080001L ,0x084001L ,0x0C0001L ,0x0C4001L
,0x080101L ,0x084101L ,0x0C0101L ,0x0C4101L ,0x0A0001L ,0x0A4001L ,
0x0E0001L ,0x0E4001L ,0x0A0101L ,0x0A4101L ,0x0E0101L ,0x0E4101L },{
0x000000L ,0x000002L ,0x000200L ,0x000202L ,0x200000L ,0x200002L ,0x200200L
,0x200202L ,0x001000L ,0x001002L ,0x001200L ,0x001202L ,0x201000L ,
0x201002L ,0x201200L ,0x201202L ,0x000040L ,0x000042L ,0x000240L ,0x000242L
,0x200040L ,0x200042L ,0x200240L ,0x200242L ,0x001040L ,0x001042L ,
0x001240L ,0x001242L ,0x201040L ,0x201042L ,0x201240L ,0x201242L ,0x000010L
,0x000012L ,0x000210L ,0x000212L ,0x200010L ,0x200012L ,0x200210L ,
0x200212L ,0x001010L ,0x001012L ,0x001210L ,0x001212L ,0x201010L ,0x201012L
,0x201210L ,0x201212L ,0x000050L ,0x000052L ,0x000250L ,0x000252L ,
0x200050L ,0x200052L ,0x200250L ,0x200252L ,0x001050L ,0x001052L ,0x001250L
,0x001252L ,0x201050L ,0x201052L ,0x201250L ,0x201252L ,},{0x000000L ,
0x000004L ,0x000400L ,0x000404L ,0x400000L ,0x400004L ,0x400400L ,0x400404L
,0x000020L ,0x000024L ,0x000420L ,0x000424L ,0x400020L ,0x400024L ,
0x400420L ,0x400424L ,0x008000L ,0x008004L ,0x008400L ,0x008404L ,0x408000L
,0x408004L ,0x408400L ,0x408404L ,0x008020L ,0x008024L ,0x008420L ,
0x008424L ,0x408020L ,0x408024L ,0x408420L ,0x408424L ,0x800000L ,0x800004L
,0x800400L ,0x800404L ,0xC00000L ,0xC00004L ,0xC00400L ,0xC00404L ,
0x800020L ,0x800024L ,0x800420L ,0x800424L ,0xC00020L ,0xC00024L ,0xC00420L
,0xC00424L ,0x808000L ,0x808004L ,0x808400L ,0x808404L ,0xC08000L ,
0xC08004L ,0xC08400L ,0xC08404L ,0x808020L ,0x808024L ,0x808420L ,0x808424L
,0xC08020L ,0xC08024L ,0xC08420L ,0xC08424L ,},{0x000000L ,0x010000L ,
0x000008L ,0x010008L ,0x000080L ,0x010080L ,0x000088L ,0x010088L ,0x100000L
,0x110000L ,0x100008L ,0x110008L ,0x100080L ,0x110080L ,0x100088L ,
0x110088L ,0x000800L ,0x010800L ,0x000808L ,0x010808L ,0x000880L ,0x010880L
,0x000888L ,0x010888L ,0x100800L ,0x110800L ,0x100808L ,0x110808L ,
0x100880L ,0x110880L ,0x100888L ,0x110888L ,0x002000L ,0x012000L ,0x002008L
,0x012008L ,0x002080L ,0x012080L ,0x002088L ,0x012088L ,0x102000L ,
0x112000L ,0x102008L ,0x112008L ,0x102080L ,0x112080L ,0x102088L ,0x112088L
,0x002800L ,0x012800L ,0x002808L ,0x012808L ,0x002880L ,0x012880L ,
0x002888L ,0x012888L ,0x102800L ,0x112800L ,0x102808L ,0x112808L ,0x102880L
,0x112880L ,0x102888L ,0x112888L ,},{0x000000L ,0x000080L ,0x002000L ,
0x002080L ,0x000001L ,0x000081L ,0x002001L ,0x002081L ,0x200000L ,0x200080L
,0x202000L ,0x202080L ,0x200001L ,0x200081L ,0x202001L ,0x202081L ,
0x020000L ,0x020080L ,0x022000L ,0x022080L ,0x020001L ,0x020081L ,0x022001L
,0x022081L ,0x220000L ,0x220080L ,0x222000L ,0x222080L ,0x220001L ,
0x220081L ,0x222001L ,0x222081L ,0x000002L ,0x000082L ,0x002002L ,0x002082L
,0x000003L ,0x000083L ,0x002003L ,0x002083L ,0x200002L ,0x200082L ,
0x202002L ,0x202082L ,0x200003L ,0x200083L ,0x202003L ,0x202083L ,0x020002L
,0x020082L ,0x022002L ,0x022082L ,0x020003L ,0x020083L ,0x022003L ,
0x022083L ,0x220002L ,0x220082L ,0x222002L ,0x222082L ,0x220003L ,0x220083L
,0x222003L ,0x222083L ,},{0x000000L ,0x000010L ,0x800000L ,0x800010L ,
0x010000L ,0x010010L ,0x810000L ,0x810010L ,0x000200L ,0x000210L ,0x800200L
,0x800210L ,0x010200L ,0x010210L ,0x810200L ,0x810210L ,0x100000L ,
0x100010L ,0x900000L ,0x900010L ,0x110000L ,0x110010L ,0x910000L ,0x910010L
,0x100200L ,0x100210L ,0x900200L ,0x900210L ,0x110200L ,0x110210L ,
0x910200L ,0x910210L ,0x000004L ,0x000014L ,0x800004L ,0x800014L ,0x010004L
,0x010014L ,0x810004L ,0x810014L ,0x000204L ,0x000214L ,0x800204L ,
0x800214L ,0x010204L ,0x010214L ,0x810204L ,0x810214L ,0x100004L ,0x100014L
,0x900004L ,0x900014L ,0x110004L ,0x110014L ,0x910004L ,0x910014L ,
0x100204L ,0x100214L ,0x900204L ,0x900214L ,0x110204L ,0x110214L ,0x910204L
,0x910214L ,},{0x000000L ,0x000400L ,0x001000L ,0x001400L ,0x080000L ,
0x080400L ,0x081000L ,0x081400L ,0x000020L ,0x000420L ,0x001020L ,0x001420L
,0x080020L ,0x080420L ,0x081020L ,0x081420L ,0x004000L ,0x004400L ,
0x005000L ,0x005400L ,0x084000L ,0x084400L ,0x085000L ,0x085400L ,0x004020L
,0x004420L ,0x005020L ,0x005420L ,0x084020L ,0x084420L ,0x085020L ,
0x085420L ,0x000800L ,0x000C00L ,0x001800L ,0x001C00L ,0x080800L ,0x080C00L
,0x081800L ,0x081C00L ,0x000820L ,0x000C20L ,0x001820L ,0x001C20L ,
0x080820L ,0x080C20L ,0x081820L ,0x081C20L ,0x004800L ,0x004C00L ,0x005800L
,0x005C00L ,0x084800L ,0x084C00L ,0x085800L ,0x085C00L ,0x004820L ,
0x004C20L ,0x005820L ,0x005C20L ,0x084820L ,0x084C20L ,0x085820L ,0x085C20L
,},{0x000000L ,0x000100L ,0x040000L ,0x040100L ,0x000040L ,0x000140L ,
0x040040L ,0x040140L ,0x400000L ,0x400100L ,0x440000L ,0x440100L ,0x400040L
,0x400140L ,0x440040L ,0x440140L ,0x008000L ,0x008100L ,0x048000L ,
0x048100L ,0x008040L ,0x008140L ,0x048040L ,0x048140L ,0x408000L ,0x408100L
,0x448000L ,0x448100L ,0x408040L ,0x408140L ,0x448040L ,0x448140L ,
0x000008L ,0x000108L ,0x040008L ,0x040108L ,0x000048L ,0x000148L ,0x040048L
,0x040148L ,0x400008L ,0x400108L ,0x440008L ,0x440108L ,0x400048L ,
0x400148L ,0x440048L ,0x440148L ,0x008008L ,0x008108L ,0x048008L ,0x048108L
,0x008048L ,0x008148L ,0x048048L ,0x048148L ,0x408008L ,0x408108L ,
0x448008L ,0x448108L ,0x408048L ,0x408148L ,0x448048L ,0x448148L ,}};bb41
bbh bbd bb1836[8 ][64 ]={{0x00808200 ,0x00000000 ,0x00008000 ,0x00808202 ,
0x00808002 ,0x00008202 ,0x00000002 ,0x00008000 ,0x00000200 ,0x00808200 ,
0x00808202 ,0x00000200 ,0x00800202 ,0x00808002 ,0x00800000 ,0x00000002 ,
0x00000202 ,0x00800200 ,0x00800200 ,0x00008200 ,0x00008200 ,0x00808000 ,
0x00808000 ,0x00800202 ,0x00008002 ,0x00800002 ,0x00800002 ,0x00008002 ,
0x00000000 ,0x00000202 ,0x00008202 ,0x00800000 ,0x00008000 ,0x00808202 ,
0x00000002 ,0x00808000 ,0x00808200 ,0x00800000 ,0x00800000 ,0x00000200 ,
0x00808002 ,0x00008000 ,0x00008200 ,0x00800002 ,0x00000200 ,0x00000002 ,
0x00800202 ,0x00008202 ,0x00808202 ,0x00008002 ,0x00808000 ,0x00800202 ,
0x00800002 ,0x00000202 ,0x00008202 ,0x00808200 ,0x00000202 ,0x00800200 ,
0x00800200 ,0x00000000 ,0x00008002 ,0x00008200 ,0x00000000 ,0x00808002 },{
0x40084010 ,0x40004000 ,0x00004000 ,0x00084010 ,0x00080000 ,0x00000010 ,
0x40080010 ,0x40004010 ,0x40000010 ,0x40084010 ,0x40084000 ,0x40000000 ,
0x40004000 ,0x00080000 ,0x00000010 ,0x40080010 ,0x00084000 ,0x00080010 ,
0x40004010 ,0x00000000 ,0x40000000 ,0x00004000 ,0x00084010 ,0x40080000 ,
0x00080010 ,0x40000010 ,0x00000000 ,0x00084000 ,0x00004010 ,0x40084000 ,
0x40080000 ,0x00004010 ,0x00000000 ,0x00084010 ,0x40080010 ,0x00080000 ,
0x40004010 ,0x40080000 ,0x40084000 ,0x00004000 ,0x40080000 ,0x40004000 ,
0x00000010 ,0x40084010 ,0x00084010 ,0x00000010 ,0x00004000 ,0x40000000 ,
0x00004010 ,0x40084000 ,0x00080000 ,0x40000010 ,0x00080010 ,0x40004010 ,
0x40000010 ,0x00080010 ,0x00084000 ,0x00000000 ,0x40004000 ,0x00004010 ,
0x40000000 ,0x40080010 ,0x40084010 ,0x00084000 },{0x00000104 ,0x04010100 ,
0x00000000 ,0x04010004 ,0x04000100 ,0x00000000 ,0x00010104 ,0x04000100 ,
0x00010004 ,0x04000004 ,0x04000004 ,0x00010000 ,0x04010104 ,0x00010004 ,
0x04010000 ,0x00000104 ,0x04000000 ,0x00000004 ,0x04010100 ,0x00000100 ,
0x00010100 ,0x04010000 ,0x04010004 ,0x00010104 ,0x04000104 ,0x00010100 ,
0x00010000 ,0x04000104 ,0x00000004 ,0x04010104 ,0x00000100 ,0x04000000 ,
0x04010100 ,0x04000000 ,0x00010004 ,0x00000104 ,0x00010000 ,0x04010100 ,
0x04000100 ,0x00000000 ,0x00000100 ,0x00010004 ,0x04010104 ,0x04000100 ,
0x04000004 ,0x00000100 ,0x00000000 ,0x04010004 ,0x04000104 ,0x00010000 ,
0x04000000 ,0x04010104 ,0x00000004 ,0x00010104 ,0x00010100 ,0x04000004 ,
0x04010000 ,0x04000104 ,0x00000104 ,0x04010000 ,0x00010104 ,0x00000004 ,
0x04010004 ,0x00010100 },{0x80401000 ,0x80001040 ,0x80001040 ,0x00000040 ,
0x00401040 ,0x80400040 ,0x80400000 ,0x80001000 ,0x00000000 ,0x00401000 ,
0x00401000 ,0x80401040 ,0x80000040 ,0x00000000 ,0x00400040 ,0x80400000 ,
0x80000000 ,0x00001000 ,0x00400000 ,0x80401000 ,0x00000040 ,0x00400000 ,
0x80001000 ,0x00001040 ,0x80400040 ,0x80000000 ,0x00001040 ,0x00400040 ,
0x00001000 ,0x00401040 ,0x80401040 ,0x80000040 ,0x00400040 ,0x80400000 ,
0x00401000 ,0x80401040 ,0x80000040 ,0x00000000 ,0x00000000 ,0x00401000 ,
0x00001040 ,0x00400040 ,0x80400040 ,0x80000000 ,0x80401000 ,0x80001040 ,
0x80001040 ,0x00000040 ,0x80401040 ,0x80000040 ,0x80000000 ,0x00001000 ,
0x80400000 ,0x80001000 ,0x00401040 ,0x80400040 ,0x80001000 ,0x00001040 ,
0x00400000 ,0x80401000 ,0x00000040 ,0x00400000 ,0x00001000 ,0x00401040 },{
0x00000080 ,0x01040080 ,0x01040000 ,0x21000080 ,0x00040000 ,0x00000080 ,
0x20000000 ,0x01040000 ,0x20040080 ,0x00040000 ,0x01000080 ,0x20040080 ,
0x21000080 ,0x21040000 ,0x00040080 ,0x20000000 ,0x01000000 ,0x20040000 ,
0x20040000 ,0x00000000 ,0x20000080 ,0x21040080 ,0x21040080 ,0x01000080 ,
0x21040000 ,0x20000080 ,0x00000000 ,0x21000000 ,0x01040080 ,0x01000000 ,
0x21000000 ,0x00040080 ,0x00040000 ,0x21000080 ,0x00000080 ,0x01000000 ,
0x20000000 ,0x01040000 ,0x21000080 ,0x20040080 ,0x01000080 ,0x20000000 ,
0x21040000 ,0x01040080 ,0x20040080 ,0x00000080 ,0x01000000 ,0x21040000 ,
0x21040080 ,0x00040080 ,0x21000000 ,0x21040080 ,0x01040000 ,0x00000000 ,
0x20040000 ,0x21000000 ,0x00040080 ,0x01000080 ,0x20000080 ,0x00040000 ,
0x00000000 ,0x20040000 ,0x01040080 ,0x20000080 },{0x10000008 ,0x10200000 ,
0x00002000 ,0x10202008 ,0x10200000 ,0x00000008 ,0x10202008 ,0x00200000 ,
0x10002000 ,0x00202008 ,0x00200000 ,0x10000008 ,0x00200008 ,0x10002000 ,
0x10000000 ,0x00002008 ,0x00000000 ,0x00200008 ,0x10002008 ,0x00002000 ,
0x00202000 ,0x10002008 ,0x00000008 ,0x10200008 ,0x10200008 ,0x00000000 ,
0x00202008 ,0x10202000 ,0x00002008 ,0x00202000 ,0x10202000 ,0x10000000 ,
0x10002000 ,0x00000008 ,0x10200008 ,0x00202000 ,0x10202008 ,0x00200000 ,
0x00002008 ,0x10000008 ,0x00200000 ,0x10002000 ,0x10000000 ,0x00002008 ,
0x10000008 ,0x10202008 ,0x00202000 ,0x10200000 ,0x00202008 ,0x10202000 ,
0x00000000 ,0x10200008 ,0x00000008 ,0x00002000 ,0x10200000 ,0x00202008 ,
0x00002000 ,0x00200008 ,0x10002008 ,0x00000000 ,0x10202000 ,0x10000000 ,
0x00200008 ,0x10002008 },{0x00100000 ,0x02100001 ,0x02000401 ,0x00000000 ,
0x00000400 ,0x02000401 ,0x00100401 ,0x02100400 ,0x02100401 ,0x00100000 ,
0x00000000 ,0x02000001 ,0x00000001 ,0x02000000 ,0x02100001 ,0x00000401 ,
0x02000400 ,0x00100401 ,0x00100001 ,0x02000400 ,0x02000001 ,0x02100000 ,
0x02100400 ,0x00100001 ,0x02100000 ,0x00000400 ,0x00000401 ,0x02100401 ,
0x00100400 ,0x00000001 ,0x02000000 ,0x00100400 ,0x02000000 ,0x00100400 ,
0x00100000 ,0x02000401 ,0x02000401 ,0x02100001 ,0x02100001 ,0x00000001 ,
0x00100001 ,0x02000000 ,0x02000400 ,0x00100000 ,0x02100400 ,0x00000401 ,
0x00100401 ,0x02100400 ,0x00000401 ,0x02000001 ,0x02100401 ,0x02100000 ,
0x00100400 ,0x00000000 ,0x00000001 ,0x02100401 ,0x00000000 ,0x00100401 ,
0x02100000 ,0x00000400 ,0x02000001 ,0x02000400 ,0x00000400 ,0x00100001 },{
0x08000820 ,0x00000800 ,0x00020000 ,0x08020820 ,0x08000000 ,0x08000820 ,
0x00000020 ,0x08000000 ,0x00020020 ,0x08020000 ,0x08020820 ,0x00020800 ,
0x08020800 ,0x00020820 ,0x00000800 ,0x00000020 ,0x08020000 ,0x08000020 ,
0x08000800 ,0x00000820 ,0x00020800 ,0x00020020 ,0x08020020 ,0x08020800 ,
0x00000820 ,0x00000000 ,0x00000000 ,0x08020020 ,0x08000020 ,0x08000800 ,
0x00020820 ,0x00020000 ,0x00020820 ,0x00020000 ,0x08020800 ,0x00000800 ,
0x00000020 ,0x08020020 ,0x00000800 ,0x00020820 ,0x08000800 ,0x00000020 ,
0x08000020 ,0x08020000 ,0x08020020 ,0x08000000 ,0x00020000 ,0x08000820 ,
0x00000000 ,0x08020820 ,0x00020020 ,0x08000020 ,0x08020000 ,0x08000800 ,
0x08000820 ,0x00000000 ,0x08020820 ,0x00020800 ,0x00020800 ,0x00000820 ,
0x00000820 ,0x00020020 ,0x08000000 ,0x08020800 }};bb41 bb1780 bbf bb2551(
bbh bbb*bb2300,bbq bb2064){bb1 bb74=(bb1)bb2300;bbq bb2281=bb2064/8 ;
bbq bb2215=bb2064%8 ;bbf bb2496=bb74[bb2281];bbf bb2560=bb2496>>(8 -(
bb2215+1 ))&0x01 ;bb2 bb2560;}bb41 bb1780 bbb bb2537(bbb*bb2300,bbe
bb2064,bbf bb171){bb1 bb74=(bb1)bb2300;bbe bb2281=bb2064/8 ;bbe bb2215
=bb2064%8 ;bb74[bb2281]|=bb171<<(8 -(bb2215+1 ));}bb41 bbb bb2567(bbh bbb
*bb74,bbh bb39*bb989,bbq bb2564,bbb*bb1550,bbq bb47){bbq bbz;bb998(
bb1550,0 ,bb47);bb90(bbz=0 ;bbz<bb2564;bbz++){bbf bb171=bb2551(bb74,(
bb39)bb989[bbz]-1 );bb2537(bb1550,bbz,bb171);}}bb41 bbd bb2487(bbh bbb
*bb2528,bbd bb27){bb29 bb2346=(bb29)bb2528;bbd bbw[2 ],bb515;bb27=(
bb27>>24 )|((bb27&0xff0000 )>>8 )|((bb27&0xff00 )<<8 )|(bb27<<24 );bbw[0 ]=(
(bb27)>>(9 )|(bb27)<<(32 -9 ))&0x00fc0000 |((bb27)>>(11 )|(bb27)<<(32 -11 ))&
0x0003f000 |((bb27)>>(13 )|(bb27)<<(32 -13 ))&0x00000fc0 |((bb27)>>(15 )|(
bb27)<<(32 -15 ))&0x0000003f ;bbw[1 ]=((bb27)>>(25 )|(bb27)<<(32 -25 ))&
0x00fc0000 |((bb27)>>(27 )|(bb27)<<(32 -27 ))&0x0003f000 |((bb27)>>(29 )|(
bb27)<<(32 -29 ))&0x00000fc0 |((bb27)>>(31 )|(bb27)<<(32 -31 ))&0x0000003f ;
bb27= *bb2346;bbw[0 ]^=bb27;bb27= * (bb2346+1 );bbw[1 ]^=bb27;bb515=
bb1836[0 ][bbw[0 ]>>18 &0x3f ]|bb1836[1 ][bbw[0 ]>>12 &0x3f ]|bb1836[2 ][bbw[0
]>>6 &0x3f ]|bb1836[3 ][bbw[0 ]&0x3f ]|bb1836[4 ][bbw[1 ]>>18 &0x3f ]|bb1836[5
][bbw[1 ]>>12 &0x3f ]|bb1836[6 ][bbw[1 ]>>6 &0x3f ]|bb1836[7 ][bbw[1 ]&0x3f ];
bb2(bb515>>24 )|((bb515&0xff0000 )>>8 )|((bb515&0xff00 )<<8 )|(bb515<<24 );
}bbb bb1750(bb932*bbj,bbh bbb*bb71){bbf bb1630[7 ];bbd bb335,bb362;
bb41 bbe bb2456[16 ]={0 ,0 ,1 ,1 ,1 ,1 ,1 ,1 ,0 ,1 ,1 ,1 ,1 ,1 ,1 ,0 };bb29 bb2285;bbe
bbz;bb32(bbj);bb2567(bb71,bb2331,bb12(bb2331),bb1630,bb12(bb1630));
bb335=bb1630[0 ]<<20 |bb1630[1 ]<<12 |bb1630[2 ]<<4 |bb1630[3 ]>>4 ;bb362=(
bb1630[3 ]&0xf )<<24 |bb1630[4 ]<<16 |bb1630[5 ]<<8 |bb1630[6 ];bb2285=(bb29)(
bbj->bb417);bb90(bbz=0 ;bbz<16 ;bbz++){bbd bbo,bbt;bbm(bb2456[bbz]){
bb335=((bb335)<<(2 )|(bb335)>>(28 -2 ))&0x0fffffff ;bb362=((bb362)<<(2 )|(
bb362)>>(28 -2 ))&0x0fffffff ;}bb54{bb335=((bb335)<<(1 )|(bb335)>>(28 -1 ))&
0x0fffffff ;bb362=((bb362)<<(1 )|(bb362)>>(28 -1 ))&0x0fffffff ;}bbo=
bb1811[0 ][bb335>>22 &0x3f ]|bb1811[1 ][(bb335>>16 &0x30 )|(bb335>>15 &0xf )]
|bb1811[2 ][(bb335>>9 &0x3c )|(bb335>>8 &0x3 )]|bb1811[3 ][(bb335>>2 &0x20 )|
(bb335>>1 &0x18 )|(bb335&0x7 )];bbt=bb1811[4 ][bb362>>22 &0x3f ]|bb1811[5 ][
(bb362>>15 &0x30 )|(bb362>>14 &0x0f )]|bb1811[6 ][bb362>>7 &0x3f ]|bb1811[7 ]
[(bb362>>1 &0x3c )|(bb362&0x3 )]; *bb2285++=bbo; *bb2285++=bbt;}}bbb
bb1931(bb932*bbj,bbh bbb*bb71){bbf bb2426[8 *16 ];bb1 bb417=bbj->bb417;
bbq bbz;bb1750(bbj,bb71);bb81(bb2426,bb417,8 *16 );bb417+=120 ;bb90(bbz=
0 ;bbz<16 ;bbz++){bb81(bb417,bb2426+bbz*8 ,8 );bb417-=8 ;}}bbb bb1282(
bb932*bbj,bbb*bb14,bbh bbb*bb5){bb29 bb417;bbd bb27,bb197,bbz;bb32(
bbj&&bb14&&bb5);bb197=(((bb1)bb5)[3 ]|((bb1)bb5)[2 ]<<8 |((bb1)bb5)[1 ]<<
16 |((bb1)bb5)[0 ]<<24 );bb27=(((bb1)bb5+4 )[3 ]|((bb1)bb5+4 )[2 ]<<8 |((bb1)bb5
+4 )[1 ]<<16 |((bb1)bb5+4 )[0 ]<<24 );{bbd bb45;((bb45)=((((bb27)>>(4 ))^(
bb197))&(0x0f0f0f0fL )),(bb197)^=(bb45),(bb27)^=((bb45)<<(4 )));((bb45)=
((((bb197)>>(16 ))^(bb27))&(0x0000ffffL )),(bb27)^=(bb45),(bb197)^=((
bb45)<<(16 )));((bb45)=((((bb27)>>(2 ))^(bb197))&(0x33333333L )),(bb197)^=
(bb45),(bb27)^=((bb45)<<(2 )));((bb45)=((((bb197)>>(8 ))^(bb27))&(
0x00ff00ffL )),(bb27)^=(bb45),(bb197)^=((bb45)<<(8 )));((bb45)=((((bb27
)>>(1 ))^(bb197))&(0x55555555L )),(bb197)^=(bb45),(bb27)^=((bb45)<<(1 )));
((bb45)=((((bb197)>>(4 ))^(bb27))&(0x0f0f0f0fL )),(bb27)^=(bb45),(bb197
)^=((bb45)<<(4 )));((bb45)=((((bb27)>>(4 ))^(bb197))&(0x0f0f0f0fL )),(
bb197)^=(bb45),(bb27)^=((bb45)<<(4 )));}bb417=(bb29)&bbj->bb417;bb90(
bbz=0 ;bbz<16 ;bbz++){bbd bb2404,bb2384;bb2404=bb27;bb2384=bb197^bb2487
(bb417,bb27);bb197=bb2404;bb27=bb2384;bb417+=2 ;}{bbd bb45;((bb45)=(((
(bb197)>>(4 ))^(bb27))&(0x0f0f0f0fL )),(bb27)^=(bb45),(bb197)^=((bb45)<<
(4 )));((bb45)=((((bb27)>>(4 ))^(bb197))&(0x0f0f0f0fL )),(bb197)^=(bb45),
(bb27)^=((bb45)<<(4 )));((bb45)=((((bb197)>>(1 ))^(bb27))&(0x55555555L )),
(bb27)^=(bb45),(bb197)^=((bb45)<<(1 )));((bb45)=((((bb27)>>(8 ))^(bb197
))&(0x00ff00ffL )),(bb197)^=(bb45),(bb27)^=((bb45)<<(8 )));((bb45)=((((
bb197)>>(2 ))^(bb27))&(0x33333333L )),(bb27)^=(bb45),(bb197)^=((bb45)<<
(2 )));((bb45)=((((bb27)>>(16 ))^(bb197))&(0x0000ffffL )),(bb197)^=(bb45
),(bb27)^=((bb45)<<(16 )));((bb45)=((((bb197)>>(4 ))^(bb27))&(
0x0f0f0f0fL )),(bb27)^=(bb45),(bb197)^=((bb45)<<(4 )));}((bb29)bb14)[0 ]
=(((bb1)&bb27)[3 ]|((bb1)&bb27)[2 ]<<8 |((bb1)&bb27)[1 ]<<16 |((bb1)&bb27)[
0 ]<<24 );((bb29)bb14)[1 ]=(((bb1)&bb197)[3 ]|((bb1)&bb197)[2 ]<<8 |((bb1)&
bb197)[1 ]<<16 |((bb1)&bb197)[0 ]<<24 );}bbb bb2102(bb829*bbj,bbh bbb*
bb1303){bb1 bb71=(bb1)bb1303;bb1750(&bbj->bb2033,bb71);bb1931(&bbj->
bb2034,bb71+8 );bb1750(&bbj->bb2032,bb71+16 );}bbb bb2136(bb829*bbj,bbh
bbb*bb1303){bb1 bb71=(bb1)bb1303;bb1931(&bbj->bb2033,bb71);bb1750(&
bbj->bb2034,bb71+8 );bb1931(&bbj->bb2032,bb71+16 );}bbb bb2043(bb829*
bbj,bbb*bb14,bbh bbb*bb5){bb1282(&bbj->bb2033,bb14,bb5);bb1282(&bbj->
bb2034,bb14,bb14);bb1282(&bbj->bb2032,bb14,bb14);}bbb bb2178(bb829*
bbj,bbb*bb14,bbh bbb*bb5){bb1282(&bbj->bb2032,bb14,bb5);bb1282(&bbj->
bb2034,bb14,bb14);bb1282(&bbj->bb2033,bb14,bb14);}
| gpl-2.0 |
TheStrix/caf_msm8916 | arch/um/os-Linux/aio.c | 2216 | 9299 | /*
* Copyright (C) 2004 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
* Licensed under the GPL
*/
#include <unistd.h>
#include <sched.h>
#include <signal.h>
#include <errno.h>
#include <sys/time.h>
#include <asm/unistd.h>
#include <aio.h>
#include <init.h>
#include <kern_util.h>
#include <os.h>
struct aio_thread_req {
enum aio_type type;
int io_fd;
unsigned long long offset;
char *buf;
int len;
struct aio_context *aio;
};
#if defined(HAVE_AIO_ABI)
#include <linux/aio_abi.h>
/*
* If we have the headers, we are going to build with AIO enabled.
* If we don't have aio in libc, we define the necessary stubs here.
*/
#if !defined(HAVE_AIO_LIBC)
static long io_setup(int n, aio_context_t *ctxp)
{
return syscall(__NR_io_setup, n, ctxp);
}
static long io_submit(aio_context_t ctx, long nr, struct iocb **iocbpp)
{
return syscall(__NR_io_submit, ctx, nr, iocbpp);
}
static long io_getevents(aio_context_t ctx_id, long min_nr, long nr,
struct io_event *events, struct timespec *timeout)
{
return syscall(__NR_io_getevents, ctx_id, min_nr, nr, events, timeout);
}
#endif
/*
* The AIO_MMAP cases force the mmapped page into memory here
* rather than in whatever place first touches the data. I used
* to do this by touching the page, but that's delicate because
* gcc is prone to optimizing that away. So, what's done here
* is we read from the descriptor from which the page was
* mapped. The caller is required to pass an offset which is
* inside the page that was mapped. Thus, when the read
* returns, we know that the page is in the page cache, and
* that it now backs the mmapped area.
*/
static int do_aio(aio_context_t ctx, enum aio_type type, int fd, char *buf,
int len, unsigned long long offset, struct aio_context *aio)
{
struct iocb *iocbp = & ((struct iocb) {
.aio_data = (unsigned long) aio,
.aio_fildes = fd,
.aio_buf = (unsigned long) buf,
.aio_nbytes = len,
.aio_offset = offset
});
char c;
switch (type) {
case AIO_READ:
iocbp->aio_lio_opcode = IOCB_CMD_PREAD;
break;
case AIO_WRITE:
iocbp->aio_lio_opcode = IOCB_CMD_PWRITE;
break;
case AIO_MMAP:
iocbp->aio_lio_opcode = IOCB_CMD_PREAD;
iocbp->aio_buf = (unsigned long) &c;
iocbp->aio_nbytes = sizeof(c);
break;
default:
printk(UM_KERN_ERR "Bogus op in do_aio - %d\n", type);
return -EINVAL;
}
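/* io_submit() returns the number of iocbs accepted; exactly one is
 * submitted here, so > 0 means success and anything else maps to
 * -errno. */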
return (io_submit(ctx, 1, &iocbp) > 0) ? 0 : -errno;
}
/* Initialized in an initcall and unchanged thereafter */
static aio_context_t ctx = 0;
static int aio_thread(void *arg)
{
struct aio_thread_reply reply;
struct io_event event;
int err, n, reply_fd;
signal(SIGWINCH, SIG_IGN);
while (1) {
n = io_getevents(ctx, 1, 1, &event, NULL);
if (n < 0) {
if (errno == EINTR)
continue;
printk(UM_KERN_ERR "aio_thread - io_getevents failed, "
"errno = %d\n", errno);
}
else {
reply = ((struct aio_thread_reply)
{ .data = (void *) (long) event.data,
.err = event.res });
reply_fd = ((struct aio_context *) reply.data)->reply_fd;
err = write(reply_fd, &reply, sizeof(reply));
if (err != sizeof(reply))
printk(UM_KERN_ERR "aio_thread - write failed, "
"fd = %d, err = %d\n", reply_fd, errno);
}
}
return 0;
}
#endif
static int do_not_aio(struct aio_thread_req *req)
{
char c;
unsigned long long actual;
int n;
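/* Synchronous fallback used when 2.6-style AIO is unavailable: seek to
 * the requested offset, then perform the matching blocking read or
 * write on the same descriptor. */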
actual = lseek64(req->io_fd, req->offset, SEEK_SET);
if (actual != req->offset)
return -errno;
switch (req->type) {
case AIO_READ:
n = read(req->io_fd, req->buf, req->len);
break;
case AIO_WRITE:
n = write(req->io_fd, req->buf, req->len);
break;
case AIO_MMAP:
n = read(req->io_fd, &c, sizeof(c));
break;
default:
printk(UM_KERN_ERR "do_not_aio - bad request type : %d\n",
req->type);
return -EINVAL;
}
if (n < 0)
return -errno;
return 0;
}
/* These are initialized in initcalls and not changed */
static int aio_req_fd_r = -1;
static int aio_req_fd_w = -1;
static int aio_pid = -1;
static unsigned long aio_stack;
static int not_aio_thread(void *arg)
{
struct aio_thread_req req;
struct aio_thread_reply reply;
int err;
signal(SIGWINCH, SIG_IGN);
while (1) {
err = read(aio_req_fd_r, &req, sizeof(req));
if (err != sizeof(req)) {
if (err < 0)
printk(UM_KERN_ERR "not_aio_thread - "
"read failed, fd = %d, err = %d\n",
aio_req_fd_r,
errno);
else {
printk(UM_KERN_ERR "not_aio_thread - short "
"read, fd = %d, length = %d\n",
aio_req_fd_r, err);
}
continue;
}
err = do_not_aio(&req);
reply = ((struct aio_thread_reply) { .data = req.aio,
.err = err });
err = write(req.aio->reply_fd, &reply, sizeof(reply));
if (err != sizeof(reply))
printk(UM_KERN_ERR "not_aio_thread - write failed, "
"fd = %d, err = %d\n", req.aio->reply_fd, errno);
}
return 0;
}
static int init_aio_24(void)
{
int fds[2], err;
err = os_pipe(fds, 1, 1);
if (err)
goto out;
aio_req_fd_w = fds[0];
aio_req_fd_r = fds[1];
err = os_set_fd_block(aio_req_fd_w, 0);
if (err)
goto out_close_pipe;
err = run_helper_thread(not_aio_thread, NULL,
CLONE_FILES | CLONE_VM, &aio_stack);
if (err < 0)
goto out_close_pipe;
aio_pid = err;
goto out;
out_close_pipe:
close(fds[0]);
close(fds[1]);
aio_req_fd_w = -1;
aio_req_fd_r = -1;
out:
#ifndef HAVE_AIO_ABI
printk(UM_KERN_INFO "/usr/include/linux/aio_abi.h not present during "
"build\n");
#endif
printk(UM_KERN_INFO "2.6 host AIO support not used - falling back to "
"I/O thread\n");
return 0;
}
#ifdef HAVE_AIO_ABI
#define DEFAULT_24_AIO 0
static int init_aio_26(void)
{
int err;
if (io_setup(256, &ctx)) {
err = -errno;
printk(UM_KERN_ERR "aio_thread failed to initialize context, "
"err = %d\n", errno);
return err;
}
err = run_helper_thread(aio_thread, NULL,
CLONE_FILES | CLONE_VM, &aio_stack);
if (err < 0)
return err;
aio_pid = err;
printk(UM_KERN_INFO "Using 2.6 host AIO\n");
return 0;
}
static int submit_aio_26(enum aio_type type, int io_fd, char *buf, int len,
unsigned long long offset, struct aio_context *aio)
{
struct aio_thread_reply reply;
int err;
err = do_aio(ctx, type, io_fd, buf, len, offset, aio);
if (err) {
reply = ((struct aio_thread_reply) { .data = aio,
.err = err });
err = write(aio->reply_fd, &reply, sizeof(reply));
if (err != sizeof(reply)) {
err = -errno;
printk(UM_KERN_ERR "submit_aio_26 - write failed, "
"fd = %d, err = %d\n", aio->reply_fd, -err);
}
else err = 0;
}
return err;
}
#else
#define DEFAULT_24_AIO 1
static int init_aio_26(void)
{
return -ENOSYS;
}
static int submit_aio_26(enum aio_type type, int io_fd, char *buf, int len,
unsigned long long offset, struct aio_context *aio)
{
return -ENOSYS;
}
#endif
/* Initialized in an initcall and unchanged thereafter */
static int aio_24 = DEFAULT_24_AIO;
static int __init set_aio_24(char *name, int *add)
{
aio_24 = 1;
return 0;
}
__uml_setup("aio=2.4", set_aio_24,
"aio=2.4\n"
" This is used to force UML to use 2.4-style AIO even when 2.6 AIO is\n"
" available. 2.4 AIO is a single thread that handles one request at a\n"
" time, synchronously. 2.6 AIO is a thread which uses the 2.6 AIO \n"
" interface to handle an arbitrary number of pending requests. 2.6 AIO \n"
" is not available in tt mode, on 2.4 hosts, or when UML is built with\n"
" /usr/include/linux/aio_abi.h not available. Many distributions don't\n"
" include aio_abi.h, so you will need to copy it from a kernel tree to\n"
" your /usr/include/linux in order to build an AIO-capable UML\n\n"
);
static int init_aio(void)
{
int err;
if (!aio_24) {
err = init_aio_26();
if (err && (errno == ENOSYS)) {
printk(UM_KERN_INFO "2.6 AIO not supported on the "
"host - reverting to 2.4 AIO\n");
aio_24 = 1;
}
else return err;
}
if (aio_24)
return init_aio_24();
return 0;
}
/*
* The reason for the __initcall/__uml_exitcall asymmetry is that init_aio
* needs to be called when the kernel is running because it calls run_helper,
* which needs get_free_page. exit_aio is a __uml_exitcall because the generic
* kernel does not run __exitcalls on shutdown, and can't because many of them
* break when called outside of module unloading.
*/
__initcall(init_aio);
static void exit_aio(void)
{
if (aio_pid != -1) {
os_kill_process(aio_pid, 1);
free_stack(aio_stack, 0);
}
}
__uml_exitcall(exit_aio);
static int submit_aio_24(enum aio_type type, int io_fd, char *buf, int len,
unsigned long long offset, struct aio_context *aio)
{
struct aio_thread_req req = { .type = type,
.io_fd = io_fd,
.offset = offset,
.buf = buf,
.len = len,
.aio = aio,
};
int err;
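/* The request is funnelled through a pipe to the I/O helper thread;
 * sizeof(req) is far below PIPE_BUF, so the write is atomic and
 * requests from different callers cannot interleave. */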
err = write(aio_req_fd_w, &req, sizeof(req));
if (err == sizeof(req))
err = 0;
else err = -errno;
return err;
}
int submit_aio(enum aio_type type, int io_fd, char *buf, int len,
unsigned long long offset, int reply_fd,
struct aio_context *aio)
{
aio->reply_fd = reply_fd;
if (aio_24)
return submit_aio_24(type, io_fd, buf, len, offset, aio);
else
return submit_aio_26(type, io_fd, buf, len, offset, aio);
}
| gpl-2.0 |
byterom/android_kernel_lge_g3 | arch/arm/mach-msm/qdsp6v2/audio_qcelp.c | 2216 | 4767 | /* qcelp(v13k) audio output device
*
* Copyright (C) 2008 Google, Inc.
* Copyright (C) 2008 HTC Corporation
* Copyright (c) 2011-2013, The Linux Foundation. All rights reserved.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
* may be copied, distributed, and modified under those terms.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#include "audio_utils_aio.h"
#define FRAME_SIZE_DEC_QCELP ((32) + sizeof(struct dec_meta_in))
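/* Each decoder input buffer carries up to 32 bytes of QCELP (V13K)
 * bitstream plus the in-band struct dec_meta_in header. */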
#ifdef CONFIG_DEBUG_FS
static const struct file_operations audio_qcelp_debug_fops = {
.read = audio_aio_debug_read,
.open = audio_aio_debug_open,
};
#endif
static long audio_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
struct q6audio_aio *audio = file->private_data;
int rc = 0;
switch (cmd) {
case AUDIO_START: {
pr_debug("%s[%p]: AUDIO_START session_id[%d]\n", __func__,
audio, audio->ac->session);
if (audio->feedback == NON_TUNNEL_MODE) {
/* Configure PCM output block */
rc = q6asm_enc_cfg_blk_pcm(audio->ac,
audio->pcm_cfg.sample_rate,
audio->pcm_cfg.channel_count);
if (rc < 0) {
pr_err("pcm output block config failed\n");
break;
}
}
rc = audio_aio_enable(audio);
audio->eos_rsp = 0;
audio->eos_flag = 0;
if (!rc) {
audio->enabled = 1;
} else {
audio->enabled = 0;
pr_err("Audio Start procedure failed rc=%d\n", rc);
break;
}
pr_debug("%s: AUDIO_START sessionid[%d]enable[%d]\n", __func__,
audio->ac->session,
audio->enabled);
if (audio->stopped == 1)
audio->stopped = 0;
break;
}
default:
pr_debug("%s[%p]: Calling utils ioctl\n", __func__, audio);
rc = audio->codec_ioctl(file, cmd, arg);
}
return rc;
}
static int audio_open(struct inode *inode, struct file *file)
{
struct q6audio_aio *audio = NULL;
int rc = 0;
#ifdef CONFIG_DEBUG_FS
/* 4 bytes represents decoder number, 1 byte for terminate string */
char name[sizeof "msm_qcelp_" + 5];
#endif
audio = kzalloc(sizeof(struct q6audio_aio), GFP_KERNEL);
if (audio == NULL) {
pr_err("Could not allocate memory for aac decode driver\n");
return -ENOMEM;
}
/* Settings will be re-config at AUDIO_SET_CONFIG,
* but at least we need to have initial config
*/
audio->str_cfg.buffer_size = FRAME_SIZE_DEC_QCELP;
audio->str_cfg.buffer_count = FRAME_NUM;
audio->pcm_cfg.buffer_size = PCM_BUFSZ_MIN;
audio->pcm_cfg.buffer_count = PCM_BUF_COUNT;
audio->pcm_cfg.sample_rate = 8000;
audio->pcm_cfg.channel_count = 1;
audio->ac = q6asm_audio_client_alloc((app_cb) q6_audio_cb,
(void *)audio);
if (!audio->ac) {
pr_err("Could not allocate memory for audio client\n");
kfree(audio);
return -ENOMEM;
}
/* open in T/NT mode */
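/* Hedged note: non-tunnel (NT) mode opens the session read/write so the
 * application feeds V13K bitstream in and reads decoded linear PCM back;
 * tunnel (T) mode is write-only, with the DSP rendering the decoded
 * audio directly to the output path. */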
if ((file->f_mode & FMODE_WRITE) && (file->f_mode & FMODE_READ)) {
rc = q6asm_open_read_write(audio->ac, FORMAT_LINEAR_PCM,
FORMAT_V13K);
if (rc < 0) {
pr_err("NT mode Open failed rc=%d\n", rc);
rc = -ENODEV;
goto fail;
}
audio->feedback = NON_TUNNEL_MODE;
audio->buf_cfg.frames_per_buf = 0x01;
audio->buf_cfg.meta_info_enable = 0x01;
} else if ((file->f_mode & FMODE_WRITE) &&
!(file->f_mode & FMODE_READ)) {
rc = q6asm_open_write(audio->ac, FORMAT_V13K);
if (rc < 0) {
pr_err("T mode Open failed rc=%d\n", rc);
rc = -ENODEV;
goto fail;
}
audio->feedback = TUNNEL_MODE;
audio->buf_cfg.meta_info_enable = 0x00;
} else {
pr_err("Not supported mode\n");
rc = -EACCES;
goto fail;
}
rc = audio_aio_open(audio, file);
if (rc < 0) {
pr_err("audio_aio_open rc=%d\n", rc);
goto fail;
}
#ifdef CONFIG_DEBUG_FS
snprintf(name, sizeof name, "msm_qcelp_%04x", audio->ac->session);
audio->dentry = debugfs_create_file(name, S_IFREG | S_IRUGO,
NULL, (void *)audio,
&audio_qcelp_debug_fops);
if (IS_ERR(audio->dentry))
pr_debug("debugfs_create_file failed\n");
#endif
pr_info("%s:dec success mode[%d]session[%d]\n", __func__,
audio->feedback,
audio->ac->session);
return 0;
fail:
q6asm_audio_client_free(audio->ac);
kfree(audio);
return rc;
}
static const struct file_operations audio_qcelp_fops = {
.owner = THIS_MODULE,
.open = audio_open,
.release = audio_aio_release,
.unlocked_ioctl = audio_ioctl,
.fsync = audio_aio_fsync,
};
struct miscdevice audio_qcelp_misc = {
.minor = MISC_DYNAMIC_MINOR,
.name = "msm_qcelp",
.fops = &audio_qcelp_fops,
};
static int __init audio_qcelp_init(void)
{
return misc_register(&audio_qcelp_misc);
}
device_initcall(audio_qcelp_init);
| gpl-2.0 |
chirayudesai/laughing-cyril | drivers/video/msm/mdp_hw_init.c | 2216 | 24930 | /* Copyright (c) 2008-2009, 2012 The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#include "mdp.h"
/* mdp primary csc limit vector */
uint32 mdp_plv[] = { 0x10, 0xeb, 0x10, 0xf0 };
/* Color Coefficient matrix for YUV -> RGB */
struct mdp_ccs mdp_ccs_yuv2rgb = {
MDP_CCS_YUV2RGB,
{
0x254,
0x000,
0x331,
0x254,
0xff38,
0xfe61,
0x254,
0x409,
0x000,
},
{
#ifdef CONFIG_FB_MSM_MDP31
0x1f0,
0x180,
0x180
#else
0x10,
0x80,
0x80
#endif
}
};
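/* Editor's note (hedged): the coefficients above appear to be the
 * BT.601 YCbCr -> RGB matrix in Q9 fixed point (x512): 0x254 = 596 ~
 * 1.164*512, 0x331 = 817 ~ 1.596*512, 0x409 = 1033 ~ 2.017*512, with
 * 0xff38 and 0xfe61 as negative entries in two's complement. */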
/* Color Coefficient matrix for RGB -> YUV */
struct mdp_ccs mdp_ccs_rgb2yuv = {
MDP_CCS_RGB2YUV,
{
0x83,
0x102,
0x32,
0xffb5,
0xff6c,
0xe1,
0xe1,
0xff45,
0xffdc,
},
#ifdef CONFIG_FB_MSM_MDP31
{
0x10,
0x80,
0x80
}
#endif
};
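/* Load the MDP lookup tables: the register writes below fill two
 * 256-entry LUTs (0x40800-0x40bfc and 0x40c00-0x40ffc) with packed
 * 0xRRGGBB grey ramps, presumably gamma-correction curves for the
 * display pipeline. */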
static void mdp_load_lut_param(void)
{
outpdw(MDP_BASE + 0x40800, 0x0);
outpdw(MDP_BASE + 0x40804, 0x151515);
outpdw(MDP_BASE + 0x40808, 0x1d1d1d);
outpdw(MDP_BASE + 0x4080c, 0x232323);
outpdw(MDP_BASE + 0x40810, 0x272727);
outpdw(MDP_BASE + 0x40814, 0x2b2b2b);
outpdw(MDP_BASE + 0x40818, 0x2f2f2f);
outpdw(MDP_BASE + 0x4081c, 0x333333);
outpdw(MDP_BASE + 0x40820, 0x363636);
outpdw(MDP_BASE + 0x40824, 0x393939);
outpdw(MDP_BASE + 0x40828, 0x3b3b3b);
outpdw(MDP_BASE + 0x4082c, 0x3e3e3e);
outpdw(MDP_BASE + 0x40830, 0x404040);
outpdw(MDP_BASE + 0x40834, 0x434343);
outpdw(MDP_BASE + 0x40838, 0x454545);
outpdw(MDP_BASE + 0x4083c, 0x474747);
outpdw(MDP_BASE + 0x40840, 0x494949);
outpdw(MDP_BASE + 0x40844, 0x4b4b4b);
outpdw(MDP_BASE + 0x40848, 0x4d4d4d);
outpdw(MDP_BASE + 0x4084c, 0x4f4f4f);
outpdw(MDP_BASE + 0x40850, 0x515151);
outpdw(MDP_BASE + 0x40854, 0x535353);
outpdw(MDP_BASE + 0x40858, 0x555555);
outpdw(MDP_BASE + 0x4085c, 0x565656);
outpdw(MDP_BASE + 0x40860, 0x585858);
outpdw(MDP_BASE + 0x40864, 0x5a5a5a);
outpdw(MDP_BASE + 0x40868, 0x5b5b5b);
outpdw(MDP_BASE + 0x4086c, 0x5d5d5d);
outpdw(MDP_BASE + 0x40870, 0x5e5e5e);
outpdw(MDP_BASE + 0x40874, 0x606060);
outpdw(MDP_BASE + 0x40878, 0x616161);
outpdw(MDP_BASE + 0x4087c, 0x636363);
outpdw(MDP_BASE + 0x40880, 0x646464);
outpdw(MDP_BASE + 0x40884, 0x666666);
outpdw(MDP_BASE + 0x40888, 0x676767);
outpdw(MDP_BASE + 0x4088c, 0x686868);
outpdw(MDP_BASE + 0x40890, 0x6a6a6a);
outpdw(MDP_BASE + 0x40894, 0x6b6b6b);
outpdw(MDP_BASE + 0x40898, 0x6c6c6c);
outpdw(MDP_BASE + 0x4089c, 0x6e6e6e);
outpdw(MDP_BASE + 0x408a0, 0x6f6f6f);
outpdw(MDP_BASE + 0x408a4, 0x707070);
outpdw(MDP_BASE + 0x408a8, 0x717171);
outpdw(MDP_BASE + 0x408ac, 0x727272);
outpdw(MDP_BASE + 0x408b0, 0x747474);
outpdw(MDP_BASE + 0x408b4, 0x757575);
outpdw(MDP_BASE + 0x408b8, 0x767676);
outpdw(MDP_BASE + 0x408bc, 0x777777);
outpdw(MDP_BASE + 0x408c0, 0x787878);
outpdw(MDP_BASE + 0x408c4, 0x797979);
outpdw(MDP_BASE + 0x408c8, 0x7a7a7a);
outpdw(MDP_BASE + 0x408cc, 0x7c7c7c);
outpdw(MDP_BASE + 0x408d0, 0x7d7d7d);
outpdw(MDP_BASE + 0x408d4, 0x7e7e7e);
outpdw(MDP_BASE + 0x408d8, 0x7f7f7f);
outpdw(MDP_BASE + 0x408dc, 0x808080);
outpdw(MDP_BASE + 0x408e0, 0x818181);
outpdw(MDP_BASE + 0x408e4, 0x828282);
outpdw(MDP_BASE + 0x408e8, 0x838383);
outpdw(MDP_BASE + 0x408ec, 0x848484);
outpdw(MDP_BASE + 0x408f0, 0x858585);
outpdw(MDP_BASE + 0x408f4, 0x868686);
outpdw(MDP_BASE + 0x408f8, 0x878787);
outpdw(MDP_BASE + 0x408fc, 0x888888);
outpdw(MDP_BASE + 0x40900, 0x898989);
outpdw(MDP_BASE + 0x40904, 0x8a8a8a);
outpdw(MDP_BASE + 0x40908, 0x8b8b8b);
outpdw(MDP_BASE + 0x4090c, 0x8c8c8c);
outpdw(MDP_BASE + 0x40910, 0x8d8d8d);
outpdw(MDP_BASE + 0x40914, 0x8e8e8e);
outpdw(MDP_BASE + 0x40918, 0x8f8f8f);
outpdw(MDP_BASE + 0x4091c, 0x8f8f8f);
outpdw(MDP_BASE + 0x40920, 0x909090);
outpdw(MDP_BASE + 0x40924, 0x919191);
outpdw(MDP_BASE + 0x40928, 0x929292);
outpdw(MDP_BASE + 0x4092c, 0x939393);
outpdw(MDP_BASE + 0x40930, 0x949494);
outpdw(MDP_BASE + 0x40934, 0x959595);
outpdw(MDP_BASE + 0x40938, 0x969696);
outpdw(MDP_BASE + 0x4093c, 0x969696);
outpdw(MDP_BASE + 0x40940, 0x979797);
outpdw(MDP_BASE + 0x40944, 0x989898);
outpdw(MDP_BASE + 0x40948, 0x999999);
outpdw(MDP_BASE + 0x4094c, 0x9a9a9a);
outpdw(MDP_BASE + 0x40950, 0x9b9b9b);
outpdw(MDP_BASE + 0x40954, 0x9c9c9c);
outpdw(MDP_BASE + 0x40958, 0x9c9c9c);
outpdw(MDP_BASE + 0x4095c, 0x9d9d9d);
outpdw(MDP_BASE + 0x40960, 0x9e9e9e);
outpdw(MDP_BASE + 0x40964, 0x9f9f9f);
outpdw(MDP_BASE + 0x40968, 0xa0a0a0);
outpdw(MDP_BASE + 0x4096c, 0xa0a0a0);
outpdw(MDP_BASE + 0x40970, 0xa1a1a1);
outpdw(MDP_BASE + 0x40974, 0xa2a2a2);
outpdw(MDP_BASE + 0x40978, 0xa3a3a3);
outpdw(MDP_BASE + 0x4097c, 0xa4a4a4);
outpdw(MDP_BASE + 0x40980, 0xa4a4a4);
outpdw(MDP_BASE + 0x40984, 0xa5a5a5);
outpdw(MDP_BASE + 0x40988, 0xa6a6a6);
outpdw(MDP_BASE + 0x4098c, 0xa7a7a7);
outpdw(MDP_BASE + 0x40990, 0xa7a7a7);
outpdw(MDP_BASE + 0x40994, 0xa8a8a8);
outpdw(MDP_BASE + 0x40998, 0xa9a9a9);
outpdw(MDP_BASE + 0x4099c, 0xaaaaaa);
outpdw(MDP_BASE + 0x409a0, 0xaaaaaa);
outpdw(MDP_BASE + 0x409a4, 0xababab);
outpdw(MDP_BASE + 0x409a8, 0xacacac);
outpdw(MDP_BASE + 0x409ac, 0xadadad);
outpdw(MDP_BASE + 0x409b0, 0xadadad);
outpdw(MDP_BASE + 0x409b4, 0xaeaeae);
outpdw(MDP_BASE + 0x409b8, 0xafafaf);
outpdw(MDP_BASE + 0x409bc, 0xafafaf);
outpdw(MDP_BASE + 0x409c0, 0xb0b0b0);
outpdw(MDP_BASE + 0x409c4, 0xb1b1b1);
outpdw(MDP_BASE + 0x409c8, 0xb2b2b2);
outpdw(MDP_BASE + 0x409cc, 0xb2b2b2);
outpdw(MDP_BASE + 0x409d0, 0xb3b3b3);
outpdw(MDP_BASE + 0x409d4, 0xb4b4b4);
outpdw(MDP_BASE + 0x409d8, 0xb4b4b4);
outpdw(MDP_BASE + 0x409dc, 0xb5b5b5);
outpdw(MDP_BASE + 0x409e0, 0xb6b6b6);
outpdw(MDP_BASE + 0x409e4, 0xb6b6b6);
outpdw(MDP_BASE + 0x409e8, 0xb7b7b7);
outpdw(MDP_BASE + 0x409ec, 0xb8b8b8);
outpdw(MDP_BASE + 0x409f0, 0xb8b8b8);
outpdw(MDP_BASE + 0x409f4, 0xb9b9b9);
outpdw(MDP_BASE + 0x409f8, 0xbababa);
outpdw(MDP_BASE + 0x409fc, 0xbababa);
outpdw(MDP_BASE + 0x40a00, 0xbbbbbb);
outpdw(MDP_BASE + 0x40a04, 0xbcbcbc);
outpdw(MDP_BASE + 0x40a08, 0xbcbcbc);
outpdw(MDP_BASE + 0x40a0c, 0xbdbdbd);
outpdw(MDP_BASE + 0x40a10, 0xbebebe);
outpdw(MDP_BASE + 0x40a14, 0xbebebe);
outpdw(MDP_BASE + 0x40a18, 0xbfbfbf);
outpdw(MDP_BASE + 0x40a1c, 0xc0c0c0);
outpdw(MDP_BASE + 0x40a20, 0xc0c0c0);
outpdw(MDP_BASE + 0x40a24, 0xc1c1c1);
outpdw(MDP_BASE + 0x40a28, 0xc1c1c1);
outpdw(MDP_BASE + 0x40a2c, 0xc2c2c2);
outpdw(MDP_BASE + 0x40a30, 0xc3c3c3);
outpdw(MDP_BASE + 0x40a34, 0xc3c3c3);
outpdw(MDP_BASE + 0x40a38, 0xc4c4c4);
outpdw(MDP_BASE + 0x40a3c, 0xc5c5c5);
outpdw(MDP_BASE + 0x40a40, 0xc5c5c5);
outpdw(MDP_BASE + 0x40a44, 0xc6c6c6);
outpdw(MDP_BASE + 0x40a48, 0xc6c6c6);
outpdw(MDP_BASE + 0x40a4c, 0xc7c7c7);
outpdw(MDP_BASE + 0x40a50, 0xc8c8c8);
outpdw(MDP_BASE + 0x40a54, 0xc8c8c8);
outpdw(MDP_BASE + 0x40a58, 0xc9c9c9);
outpdw(MDP_BASE + 0x40a5c, 0xc9c9c9);
outpdw(MDP_BASE + 0x40a60, 0xcacaca);
outpdw(MDP_BASE + 0x40a64, 0xcbcbcb);
outpdw(MDP_BASE + 0x40a68, 0xcbcbcb);
outpdw(MDP_BASE + 0x40a6c, 0xcccccc);
outpdw(MDP_BASE + 0x40a70, 0xcccccc);
outpdw(MDP_BASE + 0x40a74, 0xcdcdcd);
outpdw(MDP_BASE + 0x40a78, 0xcecece);
outpdw(MDP_BASE + 0x40a7c, 0xcecece);
outpdw(MDP_BASE + 0x40a80, 0xcfcfcf);
outpdw(MDP_BASE + 0x40a84, 0xcfcfcf);
outpdw(MDP_BASE + 0x40a88, 0xd0d0d0);
outpdw(MDP_BASE + 0x40a8c, 0xd0d0d0);
outpdw(MDP_BASE + 0x40a90, 0xd1d1d1);
outpdw(MDP_BASE + 0x40a94, 0xd2d2d2);
outpdw(MDP_BASE + 0x40a98, 0xd2d2d2);
outpdw(MDP_BASE + 0x40a9c, 0xd3d3d3);
outpdw(MDP_BASE + 0x40aa0, 0xd3d3d3);
outpdw(MDP_BASE + 0x40aa4, 0xd4d4d4);
outpdw(MDP_BASE + 0x40aa8, 0xd4d4d4);
outpdw(MDP_BASE + 0x40aac, 0xd5d5d5);
outpdw(MDP_BASE + 0x40ab0, 0xd6d6d6);
outpdw(MDP_BASE + 0x40ab4, 0xd6d6d6);
outpdw(MDP_BASE + 0x40ab8, 0xd7d7d7);
outpdw(MDP_BASE + 0x40abc, 0xd7d7d7);
outpdw(MDP_BASE + 0x40ac0, 0xd8d8d8);
outpdw(MDP_BASE + 0x40ac4, 0xd8d8d8);
outpdw(MDP_BASE + 0x40ac8, 0xd9d9d9);
outpdw(MDP_BASE + 0x40acc, 0xd9d9d9);
outpdw(MDP_BASE + 0x40ad0, 0xdadada);
outpdw(MDP_BASE + 0x40ad4, 0xdbdbdb);
outpdw(MDP_BASE + 0x40ad8, 0xdbdbdb);
outpdw(MDP_BASE + 0x40adc, 0xdcdcdc);
outpdw(MDP_BASE + 0x40ae0, 0xdcdcdc);
outpdw(MDP_BASE + 0x40ae4, 0xdddddd);
outpdw(MDP_BASE + 0x40ae8, 0xdddddd);
outpdw(MDP_BASE + 0x40aec, 0xdedede);
outpdw(MDP_BASE + 0x40af0, 0xdedede);
outpdw(MDP_BASE + 0x40af4, 0xdfdfdf);
outpdw(MDP_BASE + 0x40af8, 0xdfdfdf);
outpdw(MDP_BASE + 0x40afc, 0xe0e0e0);
outpdw(MDP_BASE + 0x40b00, 0xe0e0e0);
outpdw(MDP_BASE + 0x40b04, 0xe1e1e1);
outpdw(MDP_BASE + 0x40b08, 0xe1e1e1);
outpdw(MDP_BASE + 0x40b0c, 0xe2e2e2);
outpdw(MDP_BASE + 0x40b10, 0xe3e3e3);
outpdw(MDP_BASE + 0x40b14, 0xe3e3e3);
outpdw(MDP_BASE + 0x40b18, 0xe4e4e4);
outpdw(MDP_BASE + 0x40b1c, 0xe4e4e4);
outpdw(MDP_BASE + 0x40b20, 0xe5e5e5);
outpdw(MDP_BASE + 0x40b24, 0xe5e5e5);
outpdw(MDP_BASE + 0x40b28, 0xe6e6e6);
outpdw(MDP_BASE + 0x40b2c, 0xe6e6e6);
outpdw(MDP_BASE + 0x40b30, 0xe7e7e7);
outpdw(MDP_BASE + 0x40b34, 0xe7e7e7);
outpdw(MDP_BASE + 0x40b38, 0xe8e8e8);
outpdw(MDP_BASE + 0x40b3c, 0xe8e8e8);
outpdw(MDP_BASE + 0x40b40, 0xe9e9e9);
outpdw(MDP_BASE + 0x40b44, 0xe9e9e9);
outpdw(MDP_BASE + 0x40b48, 0xeaeaea);
outpdw(MDP_BASE + 0x40b4c, 0xeaeaea);
outpdw(MDP_BASE + 0x40b50, 0xebebeb);
outpdw(MDP_BASE + 0x40b54, 0xebebeb);
outpdw(MDP_BASE + 0x40b58, 0xececec);
outpdw(MDP_BASE + 0x40b5c, 0xececec);
outpdw(MDP_BASE + 0x40b60, 0xededed);
outpdw(MDP_BASE + 0x40b64, 0xededed);
outpdw(MDP_BASE + 0x40b68, 0xeeeeee);
outpdw(MDP_BASE + 0x40b6c, 0xeeeeee);
outpdw(MDP_BASE + 0x40b70, 0xefefef);
outpdw(MDP_BASE + 0x40b74, 0xefefef);
outpdw(MDP_BASE + 0x40b78, 0xf0f0f0);
outpdw(MDP_BASE + 0x40b7c, 0xf0f0f0);
outpdw(MDP_BASE + 0x40b80, 0xf1f1f1);
outpdw(MDP_BASE + 0x40b84, 0xf1f1f1);
outpdw(MDP_BASE + 0x40b88, 0xf2f2f2);
outpdw(MDP_BASE + 0x40b8c, 0xf2f2f2);
outpdw(MDP_BASE + 0x40b90, 0xf2f2f2);
outpdw(MDP_BASE + 0x40b94, 0xf3f3f3);
outpdw(MDP_BASE + 0x40b98, 0xf3f3f3);
outpdw(MDP_BASE + 0x40b9c, 0xf4f4f4);
outpdw(MDP_BASE + 0x40ba0, 0xf4f4f4);
outpdw(MDP_BASE + 0x40ba4, 0xf5f5f5);
outpdw(MDP_BASE + 0x40ba8, 0xf5f5f5);
outpdw(MDP_BASE + 0x40bac, 0xf6f6f6);
outpdw(MDP_BASE + 0x40bb0, 0xf6f6f6);
outpdw(MDP_BASE + 0x40bb4, 0xf7f7f7);
outpdw(MDP_BASE + 0x40bb8, 0xf7f7f7);
outpdw(MDP_BASE + 0x40bbc, 0xf8f8f8);
outpdw(MDP_BASE + 0x40bc0, 0xf8f8f8);
outpdw(MDP_BASE + 0x40bc4, 0xf9f9f9);
outpdw(MDP_BASE + 0x40bc8, 0xf9f9f9);
outpdw(MDP_BASE + 0x40bcc, 0xfafafa);
outpdw(MDP_BASE + 0x40bd0, 0xfafafa);
outpdw(MDP_BASE + 0x40bd4, 0xfafafa);
outpdw(MDP_BASE + 0x40bd8, 0xfbfbfb);
outpdw(MDP_BASE + 0x40bdc, 0xfbfbfb);
outpdw(MDP_BASE + 0x40be0, 0xfcfcfc);
outpdw(MDP_BASE + 0x40be4, 0xfcfcfc);
outpdw(MDP_BASE + 0x40be8, 0xfdfdfd);
outpdw(MDP_BASE + 0x40bec, 0xfdfdfd);
outpdw(MDP_BASE + 0x40bf0, 0xfefefe);
outpdw(MDP_BASE + 0x40bf4, 0xfefefe);
outpdw(MDP_BASE + 0x40bf8, 0xffffff);
outpdw(MDP_BASE + 0x40bfc, 0xffffff);
outpdw(MDP_BASE + 0x40c00, 0x0);
outpdw(MDP_BASE + 0x40c04, 0x0);
outpdw(MDP_BASE + 0x40c08, 0x0);
outpdw(MDP_BASE + 0x40c0c, 0x0);
outpdw(MDP_BASE + 0x40c10, 0x0);
outpdw(MDP_BASE + 0x40c14, 0x0);
outpdw(MDP_BASE + 0x40c18, 0x0);
outpdw(MDP_BASE + 0x40c1c, 0x0);
outpdw(MDP_BASE + 0x40c20, 0x0);
outpdw(MDP_BASE + 0x40c24, 0x0);
outpdw(MDP_BASE + 0x40c28, 0x0);
outpdw(MDP_BASE + 0x40c2c, 0x0);
outpdw(MDP_BASE + 0x40c30, 0x0);
outpdw(MDP_BASE + 0x40c34, 0x0);
outpdw(MDP_BASE + 0x40c38, 0x0);
outpdw(MDP_BASE + 0x40c3c, 0x0);
outpdw(MDP_BASE + 0x40c40, 0x10101);
outpdw(MDP_BASE + 0x40c44, 0x10101);
outpdw(MDP_BASE + 0x40c48, 0x10101);
outpdw(MDP_BASE + 0x40c4c, 0x10101);
outpdw(MDP_BASE + 0x40c50, 0x10101);
outpdw(MDP_BASE + 0x40c54, 0x10101);
outpdw(MDP_BASE + 0x40c58, 0x10101);
outpdw(MDP_BASE + 0x40c5c, 0x10101);
outpdw(MDP_BASE + 0x40c60, 0x10101);
outpdw(MDP_BASE + 0x40c64, 0x10101);
outpdw(MDP_BASE + 0x40c68, 0x20202);
outpdw(MDP_BASE + 0x40c6c, 0x20202);
outpdw(MDP_BASE + 0x40c70, 0x20202);
outpdw(MDP_BASE + 0x40c74, 0x20202);
outpdw(MDP_BASE + 0x40c78, 0x20202);
outpdw(MDP_BASE + 0x40c7c, 0x20202);
outpdw(MDP_BASE + 0x40c80, 0x30303);
outpdw(MDP_BASE + 0x40c84, 0x30303);
outpdw(MDP_BASE + 0x40c88, 0x30303);
outpdw(MDP_BASE + 0x40c8c, 0x30303);
outpdw(MDP_BASE + 0x40c90, 0x30303);
outpdw(MDP_BASE + 0x40c94, 0x40404);
outpdw(MDP_BASE + 0x40c98, 0x40404);
outpdw(MDP_BASE + 0x40c9c, 0x40404);
outpdw(MDP_BASE + 0x40ca0, 0x40404);
outpdw(MDP_BASE + 0x40ca4, 0x40404);
outpdw(MDP_BASE + 0x40ca8, 0x50505);
outpdw(MDP_BASE + 0x40cac, 0x50505);
outpdw(MDP_BASE + 0x40cb0, 0x50505);
outpdw(MDP_BASE + 0x40cb4, 0x50505);
outpdw(MDP_BASE + 0x40cb8, 0x60606);
outpdw(MDP_BASE + 0x40cbc, 0x60606);
outpdw(MDP_BASE + 0x40cc0, 0x60606);
outpdw(MDP_BASE + 0x40cc4, 0x70707);
outpdw(MDP_BASE + 0x40cc8, 0x70707);
outpdw(MDP_BASE + 0x40ccc, 0x70707);
outpdw(MDP_BASE + 0x40cd0, 0x70707);
outpdw(MDP_BASE + 0x40cd4, 0x80808);
outpdw(MDP_BASE + 0x40cd8, 0x80808);
outpdw(MDP_BASE + 0x40cdc, 0x80808);
outpdw(MDP_BASE + 0x40ce0, 0x90909);
outpdw(MDP_BASE + 0x40ce4, 0x90909);
outpdw(MDP_BASE + 0x40ce8, 0xa0a0a);
outpdw(MDP_BASE + 0x40cec, 0xa0a0a);
outpdw(MDP_BASE + 0x40cf0, 0xa0a0a);
outpdw(MDP_BASE + 0x40cf4, 0xb0b0b);
outpdw(MDP_BASE + 0x40cf8, 0xb0b0b);
outpdw(MDP_BASE + 0x40cfc, 0xb0b0b);
outpdw(MDP_BASE + 0x40d00, 0xc0c0c);
outpdw(MDP_BASE + 0x40d04, 0xc0c0c);
outpdw(MDP_BASE + 0x40d08, 0xd0d0d);
outpdw(MDP_BASE + 0x40d0c, 0xd0d0d);
outpdw(MDP_BASE + 0x40d10, 0xe0e0e);
outpdw(MDP_BASE + 0x40d14, 0xe0e0e);
outpdw(MDP_BASE + 0x40d18, 0xe0e0e);
outpdw(MDP_BASE + 0x40d1c, 0xf0f0f);
outpdw(MDP_BASE + 0x40d20, 0xf0f0f);
outpdw(MDP_BASE + 0x40d24, 0x101010);
outpdw(MDP_BASE + 0x40d28, 0x101010);
outpdw(MDP_BASE + 0x40d2c, 0x111111);
outpdw(MDP_BASE + 0x40d30, 0x111111);
outpdw(MDP_BASE + 0x40d34, 0x121212);
outpdw(MDP_BASE + 0x40d38, 0x121212);
outpdw(MDP_BASE + 0x40d3c, 0x131313);
outpdw(MDP_BASE + 0x40d40, 0x131313);
outpdw(MDP_BASE + 0x40d44, 0x141414);
outpdw(MDP_BASE + 0x40d48, 0x151515);
outpdw(MDP_BASE + 0x40d4c, 0x151515);
outpdw(MDP_BASE + 0x40d50, 0x161616);
outpdw(MDP_BASE + 0x40d54, 0x161616);
outpdw(MDP_BASE + 0x40d58, 0x171717);
outpdw(MDP_BASE + 0x40d5c, 0x171717);
outpdw(MDP_BASE + 0x40d60, 0x181818);
outpdw(MDP_BASE + 0x40d64, 0x191919);
outpdw(MDP_BASE + 0x40d68, 0x191919);
outpdw(MDP_BASE + 0x40d6c, 0x1a1a1a);
outpdw(MDP_BASE + 0x40d70, 0x1b1b1b);
outpdw(MDP_BASE + 0x40d74, 0x1b1b1b);
outpdw(MDP_BASE + 0x40d78, 0x1c1c1c);
outpdw(MDP_BASE + 0x40d7c, 0x1c1c1c);
outpdw(MDP_BASE + 0x40d80, 0x1d1d1d);
outpdw(MDP_BASE + 0x40d84, 0x1e1e1e);
outpdw(MDP_BASE + 0x40d88, 0x1f1f1f);
outpdw(MDP_BASE + 0x40d8c, 0x1f1f1f);
outpdw(MDP_BASE + 0x40d90, 0x202020);
outpdw(MDP_BASE + 0x40d94, 0x212121);
outpdw(MDP_BASE + 0x40d98, 0x212121);
outpdw(MDP_BASE + 0x40d9c, 0x222222);
outpdw(MDP_BASE + 0x40da0, 0x232323);
outpdw(MDP_BASE + 0x40da4, 0x242424);
outpdw(MDP_BASE + 0x40da8, 0x242424);
outpdw(MDP_BASE + 0x40dac, 0x252525);
outpdw(MDP_BASE + 0x40db0, 0x262626);
outpdw(MDP_BASE + 0x40db4, 0x272727);
outpdw(MDP_BASE + 0x40db8, 0x272727);
outpdw(MDP_BASE + 0x40dbc, 0x282828);
outpdw(MDP_BASE + 0x40dc0, 0x292929);
outpdw(MDP_BASE + 0x40dc4, 0x2a2a2a);
outpdw(MDP_BASE + 0x40dc8, 0x2b2b2b);
outpdw(MDP_BASE + 0x40dcc, 0x2c2c2c);
outpdw(MDP_BASE + 0x40dd0, 0x2c2c2c);
outpdw(MDP_BASE + 0x40dd4, 0x2d2d2d);
outpdw(MDP_BASE + 0x40dd8, 0x2e2e2e);
outpdw(MDP_BASE + 0x40ddc, 0x2f2f2f);
outpdw(MDP_BASE + 0x40de0, 0x303030);
outpdw(MDP_BASE + 0x40de4, 0x313131);
outpdw(MDP_BASE + 0x40de8, 0x323232);
outpdw(MDP_BASE + 0x40dec, 0x333333);
outpdw(MDP_BASE + 0x40df0, 0x333333);
outpdw(MDP_BASE + 0x40df4, 0x343434);
outpdw(MDP_BASE + 0x40df8, 0x353535);
outpdw(MDP_BASE + 0x40dfc, 0x363636);
outpdw(MDP_BASE + 0x40e00, 0x373737);
outpdw(MDP_BASE + 0x40e04, 0x383838);
outpdw(MDP_BASE + 0x40e08, 0x393939);
outpdw(MDP_BASE + 0x40e0c, 0x3a3a3a);
outpdw(MDP_BASE + 0x40e10, 0x3b3b3b);
outpdw(MDP_BASE + 0x40e14, 0x3c3c3c);
outpdw(MDP_BASE + 0x40e18, 0x3d3d3d);
outpdw(MDP_BASE + 0x40e1c, 0x3e3e3e);
outpdw(MDP_BASE + 0x40e20, 0x3f3f3f);
outpdw(MDP_BASE + 0x40e24, 0x404040);
outpdw(MDP_BASE + 0x40e28, 0x414141);
outpdw(MDP_BASE + 0x40e2c, 0x424242);
outpdw(MDP_BASE + 0x40e30, 0x434343);
outpdw(MDP_BASE + 0x40e34, 0x444444);
outpdw(MDP_BASE + 0x40e38, 0x464646);
outpdw(MDP_BASE + 0x40e3c, 0x474747);
outpdw(MDP_BASE + 0x40e40, 0x484848);
outpdw(MDP_BASE + 0x40e44, 0x494949);
outpdw(MDP_BASE + 0x40e48, 0x4a4a4a);
outpdw(MDP_BASE + 0x40e4c, 0x4b4b4b);
outpdw(MDP_BASE + 0x40e50, 0x4c4c4c);
outpdw(MDP_BASE + 0x40e54, 0x4d4d4d);
outpdw(MDP_BASE + 0x40e58, 0x4f4f4f);
outpdw(MDP_BASE + 0x40e5c, 0x505050);
outpdw(MDP_BASE + 0x40e60, 0x515151);
outpdw(MDP_BASE + 0x40e64, 0x525252);
outpdw(MDP_BASE + 0x40e68, 0x535353);
outpdw(MDP_BASE + 0x40e6c, 0x545454);
outpdw(MDP_BASE + 0x40e70, 0x565656);
outpdw(MDP_BASE + 0x40e74, 0x575757);
outpdw(MDP_BASE + 0x40e78, 0x585858);
outpdw(MDP_BASE + 0x40e7c, 0x595959);
outpdw(MDP_BASE + 0x40e80, 0x5b5b5b);
outpdw(MDP_BASE + 0x40e84, 0x5c5c5c);
outpdw(MDP_BASE + 0x40e88, 0x5d5d5d);
outpdw(MDP_BASE + 0x40e8c, 0x5e5e5e);
outpdw(MDP_BASE + 0x40e90, 0x606060);
outpdw(MDP_BASE + 0x40e94, 0x616161);
outpdw(MDP_BASE + 0x40e98, 0x626262);
outpdw(MDP_BASE + 0x40e9c, 0x646464);
outpdw(MDP_BASE + 0x40ea0, 0x656565);
outpdw(MDP_BASE + 0x40ea4, 0x666666);
outpdw(MDP_BASE + 0x40ea8, 0x686868);
outpdw(MDP_BASE + 0x40eac, 0x696969);
outpdw(MDP_BASE + 0x40eb0, 0x6a6a6a);
outpdw(MDP_BASE + 0x40eb4, 0x6c6c6c);
outpdw(MDP_BASE + 0x40eb8, 0x6d6d6d);
outpdw(MDP_BASE + 0x40ebc, 0x6f6f6f);
outpdw(MDP_BASE + 0x40ec0, 0x707070);
outpdw(MDP_BASE + 0x40ec4, 0x717171);
outpdw(MDP_BASE + 0x40ec8, 0x737373);
outpdw(MDP_BASE + 0x40ecc, 0x747474);
outpdw(MDP_BASE + 0x40ed0, 0x767676);
outpdw(MDP_BASE + 0x40ed4, 0x777777);
outpdw(MDP_BASE + 0x40ed8, 0x797979);
outpdw(MDP_BASE + 0x40edc, 0x7a7a7a);
outpdw(MDP_BASE + 0x40ee0, 0x7c7c7c);
outpdw(MDP_BASE + 0x40ee4, 0x7d7d7d);
outpdw(MDP_BASE + 0x40ee8, 0x7f7f7f);
outpdw(MDP_BASE + 0x40eec, 0x808080);
outpdw(MDP_BASE + 0x40ef0, 0x828282);
outpdw(MDP_BASE + 0x40ef4, 0x838383);
outpdw(MDP_BASE + 0x40ef8, 0x858585);
outpdw(MDP_BASE + 0x40efc, 0x868686);
outpdw(MDP_BASE + 0x40f00, 0x888888);
outpdw(MDP_BASE + 0x40f04, 0x898989);
outpdw(MDP_BASE + 0x40f08, 0x8b8b8b);
outpdw(MDP_BASE + 0x40f0c, 0x8d8d8d);
outpdw(MDP_BASE + 0x40f10, 0x8e8e8e);
outpdw(MDP_BASE + 0x40f14, 0x909090);
outpdw(MDP_BASE + 0x40f18, 0x919191);
outpdw(MDP_BASE + 0x40f1c, 0x939393);
outpdw(MDP_BASE + 0x40f20, 0x959595);
outpdw(MDP_BASE + 0x40f24, 0x969696);
outpdw(MDP_BASE + 0x40f28, 0x989898);
outpdw(MDP_BASE + 0x40f2c, 0x9a9a9a);
outpdw(MDP_BASE + 0x40f30, 0x9b9b9b);
outpdw(MDP_BASE + 0x40f34, 0x9d9d9d);
outpdw(MDP_BASE + 0x40f38, 0x9f9f9f);
outpdw(MDP_BASE + 0x40f3c, 0xa1a1a1);
outpdw(MDP_BASE + 0x40f40, 0xa2a2a2);
outpdw(MDP_BASE + 0x40f44, 0xa4a4a4);
outpdw(MDP_BASE + 0x40f48, 0xa6a6a6);
outpdw(MDP_BASE + 0x40f4c, 0xa7a7a7);
outpdw(MDP_BASE + 0x40f50, 0xa9a9a9);
outpdw(MDP_BASE + 0x40f54, 0xababab);
outpdw(MDP_BASE + 0x40f58, 0xadadad);
outpdw(MDP_BASE + 0x40f5c, 0xafafaf);
outpdw(MDP_BASE + 0x40f60, 0xb0b0b0);
outpdw(MDP_BASE + 0x40f64, 0xb2b2b2);
outpdw(MDP_BASE + 0x40f68, 0xb4b4b4);
outpdw(MDP_BASE + 0x40f6c, 0xb6b6b6);
outpdw(MDP_BASE + 0x40f70, 0xb8b8b8);
outpdw(MDP_BASE + 0x40f74, 0xbababa);
outpdw(MDP_BASE + 0x40f78, 0xbbbbbb);
outpdw(MDP_BASE + 0x40f7c, 0xbdbdbd);
outpdw(MDP_BASE + 0x40f80, 0xbfbfbf);
outpdw(MDP_BASE + 0x40f84, 0xc1c1c1);
outpdw(MDP_BASE + 0x40f88, 0xc3c3c3);
outpdw(MDP_BASE + 0x40f8c, 0xc5c5c5);
outpdw(MDP_BASE + 0x40f90, 0xc7c7c7);
outpdw(MDP_BASE + 0x40f94, 0xc9c9c9);
outpdw(MDP_BASE + 0x40f98, 0xcbcbcb);
outpdw(MDP_BASE + 0x40f9c, 0xcdcdcd);
outpdw(MDP_BASE + 0x40fa0, 0xcfcfcf);
outpdw(MDP_BASE + 0x40fa4, 0xd1d1d1);
outpdw(MDP_BASE + 0x40fa8, 0xd3d3d3);
outpdw(MDP_BASE + 0x40fac, 0xd5d5d5);
outpdw(MDP_BASE + 0x40fb0, 0xd7d7d7);
outpdw(MDP_BASE + 0x40fb4, 0xd9d9d9);
outpdw(MDP_BASE + 0x40fb8, 0xdbdbdb);
outpdw(MDP_BASE + 0x40fbc, 0xdddddd);
outpdw(MDP_BASE + 0x40fc0, 0xdfdfdf);
outpdw(MDP_BASE + 0x40fc4, 0xe1e1e1);
outpdw(MDP_BASE + 0x40fc8, 0xe3e3e3);
outpdw(MDP_BASE + 0x40fcc, 0xe5e5e5);
outpdw(MDP_BASE + 0x40fd0, 0xe7e7e7);
outpdw(MDP_BASE + 0x40fd4, 0xe9e9e9);
outpdw(MDP_BASE + 0x40fd8, 0xebebeb);
outpdw(MDP_BASE + 0x40fdc, 0xeeeeee);
outpdw(MDP_BASE + 0x40fe0, 0xf0f0f0);
outpdw(MDP_BASE + 0x40fe4, 0xf2f2f2);
outpdw(MDP_BASE + 0x40fe8, 0xf4f4f4);
outpdw(MDP_BASE + 0x40fec, 0xf6f6f6);
outpdw(MDP_BASE + 0x40ff0, 0xf8f8f8);
outpdw(MDP_BASE + 0x40ff4, 0xfbfbfb);
outpdw(MDP_BASE + 0x40ff8, 0xfdfdfd);
outpdw(MDP_BASE + 0x40ffc, 0xffffff);
}
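#if 0
/*
 * Illustrative sketch (not part of the original driver): the unrolled
 * outpdw() writes above appear to fill a 256-entry gamma LUT, one packed
 * 0xRRGGBB word per entry with identical R/G/B components, ending in
 * 0xffffff at the last slot. Given a precomputed curve, the same load
 * can be expressed as a loop; "lut_base" and "gamma_curve" are
 * hypothetical names, not symbols from this file.
 */
static void mdp_load_lut_sketch(u32 lut_base, const u8 gamma_curve[256])
{
int i;
for (i = 0; i < 256; i++) {
u32 v = gamma_curve[i];
/* pack the same 8-bit value into R, G and B */
outpdw(lut_base + i * 4, (v << 16) | (v << 8) | v);
}
}
#endif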
#define IRQ_EN_1__MDP_IRQ___M 0x00000800
void mdp_hw_init(int splash)
{
int i;
/* MDP cmd block enable */
mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
/* debug interface write access */
outpdw(MDP_BASE + 0x60, 1);
outp32(MDP_INTR_ENABLE, MDP_ANY_INTR_MASK);
outp32(MDP_EBI2_PORTMAP_MODE, 0x3);
outpdw(MDP_CMD_DEBUG_ACCESS_BASE + 0x01f8, 0x0);
outpdw(MDP_CMD_DEBUG_ACCESS_BASE + 0x01fc, 0x0);
outpdw(MDP_BASE + 0x60, 0x1);
mdp_load_lut_param();
/*
* clear up unused fg/main registers
*/
/* comp.plane 2&3 ystride */
MDP_OUTP(MDP_CMD_DEBUG_ACCESS_BASE + 0x0120, 0x0);
/* unpacked pattern */
MDP_OUTP(MDP_CMD_DEBUG_ACCESS_BASE + 0x012c, 0x0);
/* unpacked pattern */
MDP_OUTP(MDP_CMD_DEBUG_ACCESS_BASE + 0x0130, 0x0);
/* unpacked pattern */
MDP_OUTP(MDP_CMD_DEBUG_ACCESS_BASE + 0x0134, 0x0);
MDP_OUTP(MDP_CMD_DEBUG_ACCESS_BASE + 0x0158, 0x0);
MDP_OUTP(MDP_CMD_DEBUG_ACCESS_BASE + 0x15c, 0x0);
MDP_OUTP(MDP_CMD_DEBUG_ACCESS_BASE + 0x0160, 0x0);
MDP_OUTP(MDP_CMD_DEBUG_ACCESS_BASE + 0x0170, 0x0);
MDP_OUTP(MDP_CMD_DEBUG_ACCESS_BASE + 0x0174, 0x0);
MDP_OUTP(MDP_CMD_DEBUG_ACCESS_BASE + 0x017c, 0x0);
/* comp.plane 2 */
MDP_OUTP(MDP_CMD_DEBUG_ACCESS_BASE + 0x0114, 0x0);
/* comp.plane 3 */
MDP_OUTP(MDP_CMD_DEBUG_ACCESS_BASE + 0x0118, 0x0);
/* clear up unused bg registers */
MDP_OUTP(MDP_CMD_DEBUG_ACCESS_BASE + 0x01c8, 0);
MDP_OUTP(MDP_CMD_DEBUG_ACCESS_BASE + 0x01d0, 0);
MDP_OUTP(MDP_CMD_DEBUG_ACCESS_BASE + 0x01dc, 0);
MDP_OUTP(MDP_CMD_DEBUG_ACCESS_BASE + 0x01e0, 0);
MDP_OUTP(MDP_CMD_DEBUG_ACCESS_BASE + 0x01e4, 0);
#ifndef CONFIG_FB_MSM_MDP22
if (!splash)
MDP_OUTP(MDP_BASE + 0xE0000, 0);
MDP_OUTP(MDP_BASE + 0x100, 0xffffffff);
MDP_OUTP(MDP_BASE + 0x90070, 0);
#endif
/*
* limit vector
* pre gets applied before color matrix conversion
* post is after ccs
*/
writel(mdp_plv[0], MDP_CSC_PRE_LV1n(0));
writel(mdp_plv[1], MDP_CSC_PRE_LV1n(1));
writel(mdp_plv[2], MDP_CSC_PRE_LV1n(2));
writel(mdp_plv[3], MDP_CSC_PRE_LV1n(3));
#ifdef CONFIG_FB_MSM_MDP31
writel(mdp_plv[2], MDP_CSC_PRE_LV1n(4));
writel(mdp_plv[3], MDP_CSC_PRE_LV1n(5));
writel(0, MDP_CSC_POST_LV1n(0));
writel(0xff, MDP_CSC_POST_LV1n(1));
writel(0, MDP_CSC_POST_LV1n(2));
writel(0xff, MDP_CSC_POST_LV1n(3));
writel(0, MDP_CSC_POST_LV1n(4));
writel(0xff, MDP_CSC_POST_LV1n(5));
writel(0, MDP_CSC_PRE_LV2n(0));
writel(0xff, MDP_CSC_PRE_LV2n(1));
writel(0, MDP_CSC_PRE_LV2n(2));
writel(0xff, MDP_CSC_PRE_LV2n(3));
writel(0, MDP_CSC_PRE_LV2n(4));
writel(0xff, MDP_CSC_PRE_LV2n(5));
writel(mdp_plv[0], MDP_CSC_POST_LV2n(0));
writel(mdp_plv[1], MDP_CSC_POST_LV2n(1));
writel(mdp_plv[2], MDP_CSC_POST_LV2n(2));
writel(mdp_plv[3], MDP_CSC_POST_LV2n(3));
writel(mdp_plv[2], MDP_CSC_POST_LV2n(4));
writel(mdp_plv[3], MDP_CSC_POST_LV2n(5));
#endif
/* primary forward matrix */
for (i = 0; i < MDP_CCS_SIZE; i++)
writel(mdp_ccs_rgb2yuv.ccs[i], MDP_CSC_PFMVn(i));
#ifdef CONFIG_FB_MSM_MDP31
for (i = 0; i < MDP_BV_SIZE; i++)
writel(mdp_ccs_rgb2yuv.bv[i], MDP_CSC_POST_BV2n(i));
writel(0, MDP_CSC_PRE_BV2n(0));
writel(0, MDP_CSC_PRE_BV2n(1));
writel(0, MDP_CSC_PRE_BV2n(2));
#endif
/* primary reverse matrix */
for (i = 0; i < MDP_CCS_SIZE; i++)
writel(mdp_ccs_yuv2rgb.ccs[i], MDP_CSC_PRMVn(i));
for (i = 0; i < MDP_BV_SIZE; i++)
writel(mdp_ccs_yuv2rgb.bv[i], MDP_CSC_PRE_BV1n(i));
#ifdef CONFIG_FB_MSM_MDP31
writel(0, MDP_CSC_POST_BV1n(0));
writel(0, MDP_CSC_POST_BV1n(1));
writel(0, MDP_CSC_POST_BV1n(2));
outpdw(MDP_BASE + 0x30010, 0x03e0);
outpdw(MDP_BASE + 0x30014, 0x0360);
outpdw(MDP_BASE + 0x30018, 0x0120);
outpdw(MDP_BASE + 0x3001c, 0x0140);
#endif
mdp_init_scale_table();
#ifndef CONFIG_FB_MSM_MDP31
MDP_OUTP(MDP_CMD_DEBUG_ACCESS_BASE + 0x0104,
((16 << 6) << 16) | (16) << 6);
#endif
/* MDP cmd block disable */
mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
}
| gpl-2.0 |
SlimRoms/kernel_motorola_ghost | arch/arm/mach-msm/qdsp6v2/audio_wmapro.c | 2216 | 7544 | /* wmapro audio output device
*
* Copyright (C) 2008 Google, Inc.
* Copyright (C) 2008 HTC Corporation
* Copyright (c) 2009-2013, The Linux Foundation. All rights reserved.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
* may be copied, distributed, and modified under those terms.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#include <linux/types.h>
#include <linux/msm_audio_wmapro.h>
#include "audio_utils_aio.h"
#ifdef CONFIG_DEBUG_FS
static const struct file_operations audio_wmapro_debug_fops = {
.read = audio_aio_debug_read,
.open = audio_aio_debug_open,
};
#endif
static long audio_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
struct q6audio_aio *audio = file->private_data;
int rc = 0;
switch (cmd) {
case AUDIO_START: {
struct asm_wmapro_cfg wmapro_cfg;
struct msm_audio_wmapro_config *wmapro_config;
pr_debug("%s: AUDIO_START session_id[%d]\n", __func__,
audio->ac->session);
if (audio->feedback == NON_TUNNEL_MODE) {
/* Configure PCM output block */
rc = q6asm_enc_cfg_blk_pcm(audio->ac,
audio->pcm_cfg.sample_rate,
audio->pcm_cfg.channel_count);
if (rc < 0) {
pr_err("pcm output block config failed\n");
break;
}
}
wmapro_config = (struct msm_audio_wmapro_config *)
audio->codec_cfg;
if ((wmapro_config->formattag == 0x162) ||
(wmapro_config->formattag == 0x163) ||
(wmapro_config->formattag == 0x166) ||
(wmapro_config->formattag == 0x167)) {
wmapro_cfg.format_tag = wmapro_config->formattag;
} else {
pr_err("%s:AUDIO_START failed: formattag = %d\n",
__func__, wmapro_config->formattag);
rc = -EINVAL;
break;
}
if ((wmapro_config->numchannels == 1) ||
(wmapro_config->numchannels == 2)) {
wmapro_cfg.ch_cfg = wmapro_config->numchannels;
} else {
pr_err("%s:AUDIO_START failed: channels = %d\n",
__func__, wmapro_config->numchannels);
rc = -EINVAL;
break;
}
if ((wmapro_config->samplingrate <= 48000) &&
(wmapro_config->samplingrate > 0)) {
wmapro_cfg.sample_rate =
wmapro_config->samplingrate;
} else {
pr_err("%s:AUDIO_START failed: sample_rate = %d\n",
__func__, wmapro_config->samplingrate);
rc = -EINVAL;
break;
}
wmapro_cfg.avg_bytes_per_sec =
wmapro_config->avgbytespersecond;
if ((wmapro_config->asfpacketlength <= 13376) &&
(wmapro_config->asfpacketlength > 0)) {
wmapro_cfg.block_align =
wmapro_config->asfpacketlength;
} else {
pr_err("%s:AUDIO_START failed: block_align = %d\n",
__func__, wmapro_config->asfpacketlength);
rc = -EINVAL;
break;
}
if ((wmapro_config->validbitspersample == 16) ||
(wmapro_config->validbitspersample == 24)) {
wmapro_cfg.valid_bits_per_sample =
wmapro_config->validbitspersample;
} else {
pr_err("%s:AUDIO_START failed: bitspersample = %d\n",
__func__,
wmapro_config->validbitspersample);
rc = -EINVAL;
break;
}
if ((wmapro_config->channelmask == 4) ||
(wmapro_config->channelmask == 3)) {
wmapro_cfg.ch_mask = wmapro_config->channelmask;
} else {
pr_err("%s:AUDIO_START failed: channel_mask = %d\n",
__func__, wmapro_config->channelmask);
rc = -EINVAL;
break;
}
wmapro_cfg.encode_opt = wmapro_config->encodeopt;
wmapro_cfg.adv_encode_opt =
wmapro_config->advancedencodeopt;
wmapro_cfg.adv_encode_opt2 =
wmapro_config->advancedencodeopt2;
/* Configure Media format block */
rc = q6asm_media_format_block_wmapro(audio->ac, &wmapro_cfg);
if (rc < 0) {
pr_err("cmd media format block failed\n");
break;
}
rc = audio_aio_enable(audio);
audio->eos_rsp = 0;
audio->eos_flag = 0;
if (!rc) {
audio->enabled = 1;
} else {
audio->enabled = 0;
pr_err("Audio Start procedure failed rc=%d\n", rc);
break;
}
pr_debug("AUDIO_START success enable[%d]\n", audio->enabled);
if (audio->stopped == 1)
audio->stopped = 0;
break;
}
case AUDIO_GET_WMAPRO_CONFIG: {
if (copy_to_user((void *)arg, audio->codec_cfg,
sizeof(struct msm_audio_wmapro_config))) {
rc = -EFAULT;
}
break;
}
case AUDIO_SET_WMAPRO_CONFIG: {
if (copy_from_user(audio->codec_cfg, (void *)arg,
sizeof(struct msm_audio_wmapro_config))) {
rc = -EFAULT;
break;
}
break;
}
default:
pr_debug("%s[%p]: Calling utils ioctl\n", __func__, audio);
rc = audio->codec_ioctl(file, cmd, arg);
if (rc)
pr_err("Failed in utils_ioctl: %d\n", rc);
}
return rc;
}
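#if 0
/*
 * Illustrative userspace sketch (not part of this driver): the usual
 * sequence against the handler above is AUDIO_SET_WMAPRO_CONFIG followed
 * by AUDIO_START. The device node matches the misc device registered
 * below; the config values are made-up examples, error handling is
 * minimal, and the relevant uapi headers are assumed to be included.
 */
int wmapro_start_sketch(void)
{
struct msm_audio_wmapro_config cfg = {
.formattag = 0x162, /* one of the accepted WMA Pro tags */
.numchannels = 2,
.samplingrate = 48000,
.validbitspersample = 16,
.channelmask = 3,
.asfpacketlength = 4096,
};
int fd = open("/dev/msm_wmapro", O_WRONLY); /* write-only = tunnel mode */
if (fd < 0)
return -1;
if (ioctl(fd, AUDIO_SET_WMAPRO_CONFIG, &cfg) < 0 ||
ioctl(fd, AUDIO_START) < 0) {
close(fd);
return -1;
}
/* ... write() compressed WMA Pro frames here ... */
close(fd);
return 0;
}
#endif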
static int audio_open(struct inode *inode, struct file *file)
{
struct q6audio_aio *audio = NULL;
int rc = 0;
#ifdef CONFIG_DEBUG_FS
/* 4 bytes represent the decoder number, 1 byte for the terminating NUL */
char name[sizeof "msm_wmapro_" + 5];
#endif
audio = kzalloc(sizeof(struct q6audio_aio), GFP_KERNEL);
if (audio == NULL) {
pr_err("Could not allocate memory for wma decode driver\n");
return -ENOMEM;
}
audio->codec_cfg = kzalloc(sizeof(struct msm_audio_wmapro_config),
GFP_KERNEL);
if (audio->codec_cfg == NULL) {
pr_err("%s: Could not allocate memory for wmapro"
"config\n", __func__);
kfree(audio);
return -ENOMEM;
}
audio->pcm_cfg.buffer_size = PCM_BUFSZ_MIN;
audio->ac = q6asm_audio_client_alloc((app_cb) q6_audio_cb,
(void *)audio);
if (!audio->ac) {
pr_err("Could not allocate memory for audio client\n");
kfree(audio->codec_cfg);
kfree(audio);
return -ENOMEM;
}
/* open in T/NT mode */
if ((file->f_mode & FMODE_WRITE) && (file->f_mode & FMODE_READ)) {
rc = q6asm_open_read_write(audio->ac, FORMAT_LINEAR_PCM,
FORMAT_WMA_V10PRO);
if (rc < 0) {
pr_err("NT mode Open failed rc=%d\n", rc);
rc = -ENODEV;
goto fail;
}
audio->feedback = NON_TUNNEL_MODE;
/* open WMA decoder; expected frames per buffer is always 1 */
audio->buf_cfg.frames_per_buf = 0x01;
audio->buf_cfg.meta_info_enable = 0x01;
} else if ((file->f_mode & FMODE_WRITE) &&
!(file->f_mode & FMODE_READ)) {
rc = q6asm_open_write(audio->ac, FORMAT_WMA_V10PRO);
if (rc < 0) {
pr_err("T mode Open failed rc=%d\n", rc);
rc = -ENODEV;
goto fail;
}
audio->feedback = TUNNEL_MODE;
audio->buf_cfg.meta_info_enable = 0x00;
} else {
pr_err("Not supported mode\n");
rc = -EACCES;
goto fail;
}
rc = audio_aio_open(audio, file);
if (rc < 0) {
pr_err("audio_aio_open rc=%d\n", rc);
goto fail;
}
#ifdef CONFIG_DEBUG_FS
snprintf(name, sizeof name, "msm_wmapro_%04x", audio->ac->session);
audio->dentry = debugfs_create_file(name, S_IFREG | S_IRUGO,
NULL, (void *)audio,
&audio_wmapro_debug_fops);
if (IS_ERR(audio->dentry))
pr_debug("debugfs_create_file failed\n");
#endif
pr_info("%s:wmapro decoder open success, session_id = %d\n", __func__,
audio->ac->session);
return rc;
fail:
q6asm_audio_client_free(audio->ac);
kfree(audio->codec_cfg);
kfree(audio);
return rc;
}
static const struct file_operations audio_wmapro_fops = {
.owner = THIS_MODULE,
.open = audio_open,
.release = audio_aio_release,
.unlocked_ioctl = audio_ioctl,
.fsync = audio_aio_fsync,
};
struct miscdevice audio_wmapro_misc = {
.minor = MISC_DYNAMIC_MINOR,
.name = "msm_wmapro",
.fops = &audio_wmapro_fops,
};
static int __init audio_wmapro_init(void)
{
return misc_register(&audio_wmapro_misc);
}
device_initcall(audio_wmapro_init);
| gpl-2.0 |
AntaresOne/AntaresCore-Kernel-G4 | drivers/video/nvidia/nvidia.c | 2472 | 39735 | /*
* linux/drivers/video/nvidia/nvidia.c - nVidia fb driver
*
* Copyright 2004 Antonino Daplas <adaplas@pol.net>
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file COPYING in the main directory of this archive
* for more details.
*
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/fb.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/console.h>
#include <linux/backlight.h>
#ifdef CONFIG_MTRR
#include <asm/mtrr.h>
#endif
#ifdef CONFIG_PPC_OF
#include <asm/prom.h>
#include <asm/pci-bridge.h>
#endif
#ifdef CONFIG_BOOTX_TEXT
#include <asm/btext.h>
#endif
#include "nv_local.h"
#include "nv_type.h"
#include "nv_proto.h"
#include "nv_dma.h"
#ifdef CONFIG_FB_NVIDIA_DEBUG
#define NVTRACE printk
#else
#define NVTRACE if (0) printk
#endif
#define NVTRACE_ENTER(...) NVTRACE("%s START\n", __func__)
#define NVTRACE_LEAVE(...) NVTRACE("%s END\n", __func__)
#ifdef CONFIG_FB_NVIDIA_DEBUG
#define assert(expr) \
if (!(expr)) { \
printk( "Assertion failed! %s,%s,%s,line=%d\n",\
#expr,__FILE__,__func__,__LINE__); \
BUG(); \
}
#else
#define assert(expr)
#endif
#define PFX "nvidiafb: "
/* HW cursor parameters */
#define MAX_CURS 32
static struct pci_device_id nvidiafb_pci_tbl[] = {
{PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
PCI_BASE_CLASS_DISPLAY << 16, 0xff0000, 0},
{ 0, }
};
MODULE_DEVICE_TABLE(pci, nvidiafb_pci_tbl);
/* command line data, set in nvidiafb_setup() */
static int flatpanel = -1; /* Autodetect later */
static int fpdither = -1;
static int forceCRTC = -1;
static int hwcur = 0;
static int noaccel = 0;
static int noscale = 0;
static int paneltweak = 0;
static int vram = 0;
static int bpp = 8;
static int reverse_i2c;
#ifdef CONFIG_MTRR
static bool nomtrr = false;
#endif
#ifdef CONFIG_PMAC_BACKLIGHT
static int backlight = 1;
#else
static int backlight = 0;
#endif
static char *mode_option = NULL;
static struct fb_fix_screeninfo nvidiafb_fix = {
.type = FB_TYPE_PACKED_PIXELS,
.xpanstep = 8,
.ypanstep = 1,
};
static struct fb_var_screeninfo nvidiafb_default_var = {
.xres = 640,
.yres = 480,
.xres_virtual = 640,
.yres_virtual = 480,
.bits_per_pixel = 8,
.red = {0, 8, 0},
.green = {0, 8, 0},
.blue = {0, 8, 0},
.transp = {0, 0, 0},
.activate = FB_ACTIVATE_NOW,
.height = -1,
.width = -1,
.pixclock = 39721,
.left_margin = 40,
.right_margin = 24,
.upper_margin = 32,
.lower_margin = 11,
.hsync_len = 96,
.vsync_len = 2,
.vmode = FB_VMODE_NONINTERLACED
};
static void nvidiafb_load_cursor_image(struct nvidia_par *par, u8 * data8,
u16 bg, u16 fg, u32 w, u32 h)
{
u32 *data = (u32 *) data8;
int i, j, k = 0;
u32 b, tmp;
w = (w + 1) & ~1;
for (i = 0; i < h; i++) {
b = *data++;
reverse_order(&b);
for (j = 0; j < w / 2; j++) {
tmp = 0;
#if defined (__BIG_ENDIAN)
tmp = (b & (1 << 31)) ? fg << 16 : bg << 16;
b <<= 1;
tmp |= (b & (1 << 31)) ? fg : bg;
b <<= 1;
#else
tmp = (b & 1) ? fg : bg;
b >>= 1;
tmp |= (b & 1) ? fg << 16 : bg << 16;
b >>= 1;
#endif
NV_WR32(&par->CURSOR[k++], 0, tmp);
}
k += (MAX_CURS - w) / 2;
}
}
static void nvidia_write_clut(struct nvidia_par *par,
u8 regnum, u8 red, u8 green, u8 blue)
{
NVWriteDacMask(par, 0xff);
NVWriteDacWriteAddr(par, regnum);
NVWriteDacData(par, red);
NVWriteDacData(par, green);
NVWriteDacData(par, blue);
}
static void nvidia_read_clut(struct nvidia_par *par,
u8 regnum, u8 * red, u8 * green, u8 * blue)
{
NVWriteDacMask(par, 0xff);
NVWriteDacReadAddr(par, regnum);
*red = NVReadDacData(par);
*green = NVReadDacData(par);
*blue = NVReadDacData(par);
}
static int nvidia_panel_tweak(struct nvidia_par *par,
struct _riva_hw_state *state)
{
int tweak = 0;
if (par->paneltweak) {
tweak = par->paneltweak;
} else {
/* begin flat panel hacks */
/* This is unfortunate, but some chips need this register
tweaked or else you get artifacts where adjacent pixels are
swapped. There are no hard rules for what to set here so all
we can do is experiment and apply hacks. */
if(((par->Chipset & 0xffff) == 0x0328) && (state->bpp == 32)) {
/* At least one NV34 laptop needs this workaround. */
tweak = -1;
}
if((par->Chipset & 0xfff0) == 0x0310) {
tweak = 1;
}
/* end flat panel hacks */
}
return tweak;
}
static void nvidia_screen_off(struct nvidia_par *par, int on)
{
unsigned char tmp;
if (on) {
/*
* Turn off screen and disable sequencer.
*/
tmp = NVReadSeq(par, 0x01);
NVWriteSeq(par, 0x00, 0x01); /* Synchronous Reset */
NVWriteSeq(par, 0x01, tmp | 0x20); /* disable the display */
} else {
/*
* Reenable sequencer, then turn on screen.
*/
tmp = NVReadSeq(par, 0x01);
NVWriteSeq(par, 0x01, tmp & ~0x20); /* reenable display */
NVWriteSeq(par, 0x00, 0x03); /* End Reset */
}
}
static void nvidia_save_vga(struct nvidia_par *par,
struct _riva_hw_state *state)
{
int i;
NVTRACE_ENTER();
NVLockUnlock(par, 0);
NVUnloadStateExt(par, state);
state->misc_output = NVReadMiscOut(par);
for (i = 0; i < NUM_CRT_REGS; i++)
state->crtc[i] = NVReadCrtc(par, i);
for (i = 0; i < NUM_ATC_REGS; i++)
state->attr[i] = NVReadAttr(par, i);
for (i = 0; i < NUM_GRC_REGS; i++)
state->gra[i] = NVReadGr(par, i);
for (i = 0; i < NUM_SEQ_REGS; i++)
state->seq[i] = NVReadSeq(par, i);
NVTRACE_LEAVE();
}
#undef DUMP_REG
static void nvidia_write_regs(struct nvidia_par *par,
struct _riva_hw_state *state)
{
int i;
NVTRACE_ENTER();
NVLoadStateExt(par, state);
NVWriteMiscOut(par, state->misc_output);
for (i = 1; i < NUM_SEQ_REGS; i++) {
#ifdef DUMP_REG
printk(" SEQ[%02x] = %08x\n", i, state->seq[i]);
#endif
NVWriteSeq(par, i, state->seq[i]);
}
/* Ensure CRTC registers 0-7 are unlocked by clearing bit 7 of CRTC[17] */
NVWriteCrtc(par, 0x11, state->crtc[0x11] & ~0x80);
for (i = 0; i < NUM_CRT_REGS; i++) {
switch (i) {
case 0x19:
case 0x20 ... 0x40:
break;
default:
#ifdef DUMP_REG
printk("CRTC[%02x] = %08x\n", i, state->crtc[i]);
#endif
NVWriteCrtc(par, i, state->crtc[i]);
}
}
for (i = 0; i < NUM_GRC_REGS; i++) {
#ifdef DUMP_REG
printk(" GRA[%02x] = %08x\n", i, state->gra[i]);
#endif
NVWriteGr(par, i, state->gra[i]);
}
for (i = 0; i < NUM_ATC_REGS; i++) {
#ifdef DUMP_REG
printk("ATTR[%02x] = %08x\n", i, state->attr[i]);
#endif
NVWriteAttr(par, i, state->attr[i]);
}
NVTRACE_LEAVE();
}
static int nvidia_calc_regs(struct fb_info *info)
{
struct nvidia_par *par = info->par;
struct _riva_hw_state *state = &par->ModeReg;
int i, depth = fb_get_color_depth(&info->var, &info->fix);
int h_display = info->var.xres / 8 - 1;
int h_start = (info->var.xres + info->var.right_margin) / 8 - 1;
int h_end = (info->var.xres + info->var.right_margin +
info->var.hsync_len) / 8 - 1;
int h_total = (info->var.xres + info->var.right_margin +
info->var.hsync_len + info->var.left_margin) / 8 - 5;
int h_blank_s = h_display;
int h_blank_e = h_total + 4;
int v_display = info->var.yres - 1;
int v_start = info->var.yres + info->var.lower_margin - 1;
int v_end = (info->var.yres + info->var.lower_margin +
info->var.vsync_len) - 1;
int v_total = (info->var.yres + info->var.lower_margin +
info->var.vsync_len + info->var.upper_margin) - 2;
int v_blank_s = v_display;
int v_blank_e = v_total + 1;
/*
* Set all CRTC values.
*/
if (info->var.vmode & FB_VMODE_INTERLACED)
v_total |= 1;
if (par->FlatPanel == 1) {
v_start = v_total - 3;
v_end = v_total - 2;
v_blank_s = v_start;
h_start = h_total - 5;
h_end = h_total - 2;
h_blank_e = h_total + 4;
}
state->crtc[0x0] = Set8Bits(h_total);
state->crtc[0x1] = Set8Bits(h_display);
state->crtc[0x2] = Set8Bits(h_blank_s);
state->crtc[0x3] = SetBitField(h_blank_e, 4: 0, 4:0)
| SetBit(7);
state->crtc[0x4] = Set8Bits(h_start);
state->crtc[0x5] = SetBitField(h_blank_e, 5: 5, 7:7)
| SetBitField(h_end, 4: 0, 4:0);
state->crtc[0x6] = SetBitField(v_total, 7: 0, 7:0);
state->crtc[0x7] = SetBitField(v_total, 8: 8, 0:0)
| SetBitField(v_display, 8: 8, 1:1)
| SetBitField(v_start, 8: 8, 2:2)
| SetBitField(v_blank_s, 8: 8, 3:3)
| SetBit(4)
| SetBitField(v_total, 9: 9, 5:5)
| SetBitField(v_display, 9: 9, 6:6)
| SetBitField(v_start, 9: 9, 7:7);
state->crtc[0x9] = SetBitField(v_blank_s, 9: 9, 5:5)
| SetBit(6)
| ((info->var.vmode & FB_VMODE_DOUBLE) ? 0x80 : 0x00);
state->crtc[0x10] = Set8Bits(v_start);
state->crtc[0x11] = SetBitField(v_end, 3: 0, 3:0) | SetBit(5);
state->crtc[0x12] = Set8Bits(v_display);
state->crtc[0x13] = ((info->var.xres_virtual / 8) *
(info->var.bits_per_pixel / 8));
state->crtc[0x15] = Set8Bits(v_blank_s);
state->crtc[0x16] = Set8Bits(v_blank_e);
state->attr[0x10] = 0x01;
if (par->Television)
state->attr[0x11] = 0x00;
state->screen = SetBitField(h_blank_e, 6: 6, 4:4)
| SetBitField(v_blank_s, 10: 10, 3:3)
| SetBitField(v_start, 10: 10, 2:2)
| SetBitField(v_display, 10: 10, 1:1)
| SetBitField(v_total, 10: 10, 0:0);
state->horiz = SetBitField(h_total, 8: 8, 0:0)
| SetBitField(h_display, 8: 8, 1:1)
| SetBitField(h_blank_s, 8: 8, 2:2)
| SetBitField(h_start, 8: 8, 3:3);
state->extra = SetBitField(v_total, 11: 11, 0:0)
| SetBitField(v_display, 11: 11, 2:2)
| SetBitField(v_start, 11: 11, 4:4)
| SetBitField(v_blank_s, 11: 11, 6:6);
if (info->var.vmode & FB_VMODE_INTERLACED) {
h_total = (h_total >> 1) & ~1;
state->interlace = Set8Bits(h_total);
state->horiz |= SetBitField(h_total, 8: 8, 4:4);
} else {
state->interlace = 0xff; /* interlace off */
}
/*
* Calculate the extended registers.
*/
if (depth < 24)
i = depth;
else
i = 32;
if (par->Architecture >= NV_ARCH_10)
par->CURSOR = (volatile u32 __iomem *)(info->screen_base +
par->CursorStart);
if (info->var.sync & FB_SYNC_HOR_HIGH_ACT)
state->misc_output &= ~0x40;
else
state->misc_output |= 0x40;
if (info->var.sync & FB_SYNC_VERT_HIGH_ACT)
state->misc_output &= ~0x80;
else
state->misc_output |= 0x80;
NVCalcStateExt(par, state, i, info->var.xres_virtual,
info->var.xres, info->var.yres_virtual,
1000000000 / info->var.pixclock, info->var.vmode);
state->scale = NV_RD32(par->PRAMDAC, 0x00000848) & 0xfff000ff;
if (par->FlatPanel == 1) {
state->pixel |= (1 << 7);
if (!par->fpScaler || (par->fpWidth <= info->var.xres)
|| (par->fpHeight <= info->var.yres)) {
state->scale |= (1 << 8);
}
if (!par->crtcSync_read) {
state->crtcSync = NV_RD32(par->PRAMDAC, 0x0828);
par->crtcSync_read = 1;
}
par->PanelTweak = nvidia_panel_tweak(par, state);
}
state->vpll = state->pll;
state->vpll2 = state->pll;
state->vpllB = state->pllB;
state->vpll2B = state->pllB;
VGA_WR08(par->PCIO, 0x03D4, 0x1C);
state->fifo = VGA_RD08(par->PCIO, 0x03D5) & ~(1<<5);
if (par->CRTCnumber) {
state->head = NV_RD32(par->PCRTC0, 0x00000860) & ~0x00001000;
state->head2 = NV_RD32(par->PCRTC0, 0x00002860) | 0x00001000;
state->crtcOwner = 3;
state->pllsel |= 0x20000800;
state->vpll = NV_RD32(par->PRAMDAC0, 0x00000508);
if (par->twoStagePLL)
state->vpllB = NV_RD32(par->PRAMDAC0, 0x00000578);
} else if (par->twoHeads) {
state->head = NV_RD32(par->PCRTC0, 0x00000860) | 0x00001000;
state->head2 = NV_RD32(par->PCRTC0, 0x00002860) & ~0x00001000;
state->crtcOwner = 0;
state->vpll2 = NV_RD32(par->PRAMDAC0, 0x0520);
if (par->twoStagePLL)
state->vpll2B = NV_RD32(par->PRAMDAC0, 0x057C);
}
state->cursorConfig = 0x00000100;
if (info->var.vmode & FB_VMODE_DOUBLE)
state->cursorConfig |= (1 << 4);
if (par->alphaCursor) {
if ((par->Chipset & 0x0ff0) != 0x0110)
state->cursorConfig |= 0x04011000;
else
state->cursorConfig |= 0x14011000;
state->general |= (1 << 29);
} else
state->cursorConfig |= 0x02000000;
if (par->twoHeads) {
if ((par->Chipset & 0x0ff0) == 0x0110) {
state->dither = NV_RD32(par->PRAMDAC, 0x0528) &
~0x00010000;
if (par->FPDither)
state->dither |= 0x00010000;
} else {
state->dither = NV_RD32(par->PRAMDAC, 0x083C) & ~1;
if (par->FPDither)
state->dither |= 1;
}
}
state->timingH = 0;
state->timingV = 0;
state->displayV = info->var.xres;
return 0;
}
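/*
 * Worked example (illustrative, derived from the formulas above): for
 * the driver's 640x480 default var (right_margin=24, hsync_len=96,
 * left_margin=40, lower_margin=11, vsync_len=2, upper_margin=32) the
 * character-clock timings come out as
 *
 *   h_display = 640/8 - 1            = 79
 *   h_start   = (640+24)/8 - 1       = 82
 *   h_end     = (640+24+96)/8 - 1    = 94
 *   h_total   = (640+24+96+40)/8 - 5 = 95
 *   v_display = 480 - 1              = 479
 *   v_start   = 480+11 - 1           = 490
 *   v_end     = 480+11+2 - 1         = 492
 *   v_total   = 480+11+2+32 - 2      = 523
 *
 * with the 9th/10th/11th bits of each value scattered across crtc[7],
 * crtc[9], "screen", "horiz" and "extra" by the SetBitField() calls.
 */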
static void nvidia_init_vga(struct fb_info *info)
{
struct nvidia_par *par = info->par;
struct _riva_hw_state *state = &par->ModeReg;
int i;
for (i = 0; i < 0x10; i++)
state->attr[i] = i;
state->attr[0x10] = 0x41;
state->attr[0x11] = 0xff;
state->attr[0x12] = 0x0f;
state->attr[0x13] = 0x00;
state->attr[0x14] = 0x00;
memset(state->crtc, 0x00, NUM_CRT_REGS);
state->crtc[0x0a] = 0x20;
state->crtc[0x17] = 0xe3;
state->crtc[0x18] = 0xff;
state->crtc[0x28] = 0x40;
memset(state->gra, 0x00, NUM_GRC_REGS);
state->gra[0x05] = 0x40;
state->gra[0x06] = 0x05;
state->gra[0x07] = 0x0f;
state->gra[0x08] = 0xff;
state->seq[0x00] = 0x03;
state->seq[0x01] = 0x01;
state->seq[0x02] = 0x0f;
state->seq[0x03] = 0x00;
state->seq[0x04] = 0x0e;
state->misc_output = 0xeb;
}
static int nvidiafb_cursor(struct fb_info *info, struct fb_cursor *cursor)
{
struct nvidia_par *par = info->par;
u8 data[MAX_CURS * MAX_CURS / 8];
int i, set = cursor->set;
u16 fg, bg;
if (cursor->image.width > MAX_CURS || cursor->image.height > MAX_CURS)
return -ENXIO;
NVShowHideCursor(par, 0);
if (par->cursor_reset) {
set = FB_CUR_SETALL;
par->cursor_reset = 0;
}
if (set & FB_CUR_SETSIZE)
memset_io(par->CURSOR, 0, MAX_CURS * MAX_CURS * 2);
if (set & FB_CUR_SETPOS) {
u32 xx, yy, temp;
yy = cursor->image.dy - info->var.yoffset;
xx = cursor->image.dx - info->var.xoffset;
temp = xx & 0xFFFF;
temp |= yy << 16;
NV_WR32(par->PRAMDAC, 0x0000300, temp);
}
if (set & (FB_CUR_SETSHAPE | FB_CUR_SETCMAP | FB_CUR_SETIMAGE)) {
u32 bg_idx = cursor->image.bg_color;
u32 fg_idx = cursor->image.fg_color;
u32 s_pitch = (cursor->image.width + 7) >> 3;
u32 d_pitch = MAX_CURS / 8;
u8 *dat = (u8 *) cursor->image.data;
u8 *msk = (u8 *) cursor->mask;
u8 *src;
src = kmalloc(s_pitch * cursor->image.height, GFP_ATOMIC);
if (src) {
switch (cursor->rop) {
case ROP_XOR:
for (i = 0; i < s_pitch * cursor->image.height; i++)
src[i] = dat[i] ^ msk[i];
break;
case ROP_COPY:
default:
for (i = 0; i < s_pitch * cursor->image.height; i++)
src[i] = dat[i] & msk[i];
break;
}
fb_pad_aligned_buffer(data, d_pitch, src, s_pitch,
cursor->image.height);
bg = ((info->cmap.red[bg_idx] & 0xf8) << 7) |
((info->cmap.green[bg_idx] & 0xf8) << 2) |
((info->cmap.blue[bg_idx] & 0xf8) >> 3) | 1 << 15;
fg = ((info->cmap.red[fg_idx] & 0xf8) << 7) |
((info->cmap.green[fg_idx] & 0xf8) << 2) |
((info->cmap.blue[fg_idx] & 0xf8) >> 3) | 1 << 15;
NVLockUnlock(par, 0);
nvidiafb_load_cursor_image(par, data, bg, fg,
cursor->image.width,
cursor->image.height);
kfree(src);
}
}
if (cursor->enable)
NVShowHideCursor(par, 1);
return 0;
}
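/*
 * Note (illustrative): the bg/fg packing above builds 1:5:5:5 cursor
 * colors -- bit 15 set marks the pixel opaque, and five bits of each
 * cmap component land at bits 10..14 (red), 5..9 (green) and 0..4
 * (blue), matching the two-pixels-per-word layout that
 * nvidiafb_load_cursor_image() writes out.
 */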
static int nvidiafb_set_par(struct fb_info *info)
{
struct nvidia_par *par = info->par;
NVTRACE_ENTER();
NVLockUnlock(par, 1);
if (!par->FlatPanel || !par->twoHeads)
par->FPDither = 0;
if (par->FPDither < 0) {
if ((par->Chipset & 0x0ff0) == 0x0110)
par->FPDither = !!(NV_RD32(par->PRAMDAC, 0x0528)
& 0x00010000);
else
par->FPDither = !!(NV_RD32(par->PRAMDAC, 0x083C) & 1);
printk(KERN_INFO PFX "Flat panel dithering %s\n",
par->FPDither ? "enabled" : "disabled");
}
info->fix.visual = (info->var.bits_per_pixel == 8) ?
FB_VISUAL_PSEUDOCOLOR : FB_VISUAL_DIRECTCOLOR;
nvidia_init_vga(info);
nvidia_calc_regs(info);
NVLockUnlock(par, 0);
if (par->twoHeads) {
VGA_WR08(par->PCIO, 0x03D4, 0x44);
VGA_WR08(par->PCIO, 0x03D5, par->ModeReg.crtcOwner);
NVLockUnlock(par, 0);
}
nvidia_screen_off(par, 1);
nvidia_write_regs(par, &par->ModeReg);
NVSetStartAddress(par, 0);
#if defined (__BIG_ENDIAN)
/* turn on LFB swapping */
{
unsigned char tmp;
VGA_WR08(par->PCIO, 0x3d4, 0x46);
tmp = VGA_RD08(par->PCIO, 0x3d5);
tmp |= (1 << 7);
VGA_WR08(par->PCIO, 0x3d5, tmp);
}
#endif
info->fix.line_length = (info->var.xres_virtual *
info->var.bits_per_pixel) >> 3;
if (info->var.accel_flags) {
info->fbops->fb_imageblit = nvidiafb_imageblit;
info->fbops->fb_fillrect = nvidiafb_fillrect;
info->fbops->fb_copyarea = nvidiafb_copyarea;
info->fbops->fb_sync = nvidiafb_sync;
info->pixmap.scan_align = 4;
info->flags &= ~FBINFO_HWACCEL_DISABLED;
info->flags |= FBINFO_READS_FAST;
NVResetGraphics(info);
} else {
info->fbops->fb_imageblit = cfb_imageblit;
info->fbops->fb_fillrect = cfb_fillrect;
info->fbops->fb_copyarea = cfb_copyarea;
info->fbops->fb_sync = NULL;
info->pixmap.scan_align = 1;
info->flags |= FBINFO_HWACCEL_DISABLED;
info->flags &= ~FBINFO_READS_FAST;
}
par->cursor_reset = 1;
nvidia_screen_off(par, 0);
#ifdef CONFIG_BOOTX_TEXT
/* Update debug text engine */
btext_update_display(info->fix.smem_start,
info->var.xres, info->var.yres,
info->var.bits_per_pixel, info->fix.line_length);
#endif
NVLockUnlock(par, 0);
NVTRACE_LEAVE();
return 0;
}
static int nvidiafb_setcolreg(unsigned regno, unsigned red, unsigned green,
unsigned blue, unsigned transp,
struct fb_info *info)
{
struct nvidia_par *par = info->par;
int i;
NVTRACE_ENTER();
if (regno >= (1 << info->var.green.length))
return -EINVAL;
if (info->var.grayscale) {
/* gray = 0.30*R + 0.59*G + 0.11*B */
red = green = blue = (red * 77 + green * 151 + blue * 28) >> 8;
}
if (regno < 16 && info->fix.visual == FB_VISUAL_DIRECTCOLOR) {
((u32 *) info->pseudo_palette)[regno] =
(regno << info->var.red.offset) |
(regno << info->var.green.offset) |
(regno << info->var.blue.offset);
}
switch (info->var.bits_per_pixel) {
case 8:
/* "transparent" stuff is completely ignored. */
nvidia_write_clut(par, regno, red >> 8, green >> 8, blue >> 8);
break;
case 16:
if (info->var.green.length == 5) {
for (i = 0; i < 8; i++) {
nvidia_write_clut(par, regno * 8 + i, red >> 8,
green >> 8, blue >> 8);
}
} else {
u8 r, g, b;
if (regno < 32) {
for (i = 0; i < 8; i++) {
nvidia_write_clut(par, regno * 8 + i,
red >> 8, green >> 8,
blue >> 8);
}
}
nvidia_read_clut(par, regno * 4, &r, &g, &b);
for (i = 0; i < 4; i++)
nvidia_write_clut(par, regno * 4 + i, r,
green >> 8, b);
}
break;
case 32:
nvidia_write_clut(par, regno, red >> 8, green >> 8, blue >> 8);
break;
default:
/* do nothing */
break;
}
NVTRACE_LEAVE();
return 0;
}
static int nvidiafb_check_var(struct fb_var_screeninfo *var,
struct fb_info *info)
{
struct nvidia_par *par = info->par;
int memlen, vramlen, mode_valid = 0;
int pitch, err = 0;
NVTRACE_ENTER();
var->transp.offset = 0;
var->transp.length = 0;
var->xres &= ~7;
if (var->bits_per_pixel <= 8)
var->bits_per_pixel = 8;
else if (var->bits_per_pixel <= 16)
var->bits_per_pixel = 16;
else
var->bits_per_pixel = 32;
switch (var->bits_per_pixel) {
case 8:
var->red.offset = 0;
var->red.length = 8;
var->green.offset = 0;
var->green.length = 8;
var->blue.offset = 0;
var->blue.length = 8;
var->transp.offset = 0;
var->transp.length = 0;
break;
case 16:
var->green.length = (var->green.length < 6) ? 5 : 6;
var->red.length = 5;
var->blue.length = 5;
var->transp.length = 6 - var->green.length;
var->blue.offset = 0;
var->green.offset = 5;
var->red.offset = 5 + var->green.length;
var->transp.offset = (5 + var->red.offset) & 15;
break;
case 32: /* RGBA 8888 */
var->red.offset = 16;
var->red.length = 8;
var->green.offset = 8;
var->green.length = 8;
var->blue.offset = 0;
var->blue.length = 8;
var->transp.length = 8;
var->transp.offset = 24;
break;
}
var->red.msb_right = 0;
var->green.msb_right = 0;
var->blue.msb_right = 0;
var->transp.msb_right = 0;
if (!info->monspecs.hfmax || !info->monspecs.vfmax ||
!info->monspecs.dclkmax || !fb_validate_mode(var, info))
mode_valid = 1;
/* calculate modeline if supported by monitor */
if (!mode_valid && info->monspecs.gtf) {
if (!fb_get_mode(FB_MAXTIMINGS, 0, var, info))
mode_valid = 1;
}
if (!mode_valid) {
const struct fb_videomode *mode;
mode = fb_find_best_mode(var, &info->modelist);
if (mode) {
fb_videomode_to_var(var, mode);
mode_valid = 1;
}
}
if (!mode_valid && info->monspecs.modedb_len)
return -EINVAL;
/*
* If we're on a flat panel, check if the mode is outside of the
* panel dimensions. If so, cap it and try for the next best mode
* before bailing out.
*/
if (par->fpWidth && par->fpHeight && (par->fpWidth < var->xres ||
par->fpHeight < var->yres)) {
const struct fb_videomode *mode;
var->xres = par->fpWidth;
var->yres = par->fpHeight;
mode = fb_find_best_mode(var, &info->modelist);
if (!mode) {
printk(KERN_ERR PFX "mode out of range of flat "
"panel dimensions\n");
return -EINVAL;
}
fb_videomode_to_var(var, mode);
}
if (var->yres_virtual < var->yres)
var->yres_virtual = var->yres;
if (var->xres_virtual < var->xres)
var->xres_virtual = var->xres;
var->xres_virtual = (var->xres_virtual + 63) & ~63;
vramlen = info->screen_size;
pitch = ((var->xres_virtual * var->bits_per_pixel) + 7) / 8;
memlen = pitch * var->yres_virtual;
if (memlen > vramlen) {
var->yres_virtual = vramlen / pitch;
if (var->yres_virtual < var->yres) {
var->yres_virtual = var->yres;
var->xres_virtual = vramlen / var->yres_virtual;
var->xres_virtual /= var->bits_per_pixel / 8;
var->xres_virtual &= ~63;
pitch = (var->xres_virtual *
var->bits_per_pixel + 7) / 8;
memlen = pitch * var->yres;
if (var->xres_virtual < var->xres) {
printk("nvidiafb: required video memory, "
"%d bytes, for %dx%d-%d (virtual) "
"is out of range\n",
memlen, var->xres_virtual,
var->yres_virtual, var->bits_per_pixel);
err = -ENOMEM;
}
}
}
if (var->accel_flags) {
if (var->yres_virtual > 0x7fff)
var->yres_virtual = 0x7fff;
if (var->xres_virtual > 0x7fff)
var->xres_virtual = 0x7fff;
}
var->xres_virtual &= ~63;
NVTRACE_LEAVE();
return err;
}
static int nvidiafb_pan_display(struct fb_var_screeninfo *var,
struct fb_info *info)
{
struct nvidia_par *par = info->par;
u32 total;
total = var->yoffset * info->fix.line_length + var->xoffset;
NVSetStartAddress(par, total);
return 0;
}
static int nvidiafb_blank(int blank, struct fb_info *info)
{
struct nvidia_par *par = info->par;
unsigned char tmp, vesa;
tmp = NVReadSeq(par, 0x01) & ~0x20; /* screen on/off */
vesa = NVReadCrtc(par, 0x1a) & ~0xc0; /* sync on/off */
NVTRACE_ENTER();
if (blank)
tmp |= 0x20;
switch (blank) {
case FB_BLANK_UNBLANK:
case FB_BLANK_NORMAL:
break;
case FB_BLANK_VSYNC_SUSPEND:
vesa |= 0x80;
break;
case FB_BLANK_HSYNC_SUSPEND:
vesa |= 0x40;
break;
case FB_BLANK_POWERDOWN:
vesa |= 0xc0;
break;
}
NVWriteSeq(par, 0x01, tmp);
NVWriteCrtc(par, 0x1a, vesa);
NVTRACE_LEAVE();
return 0;
}
/*
* Because the VGA registers are not mapped linearly in its MMIO space,
* restrict VGA register saving and restore to x86 only, where legacy VGA IO
* access is legal. Consequently, we must also check if the device is the
* primary display.
*/
#ifdef CONFIG_X86
static void save_vga_x86(struct nvidia_par *par)
{
struct resource *res= &par->pci_dev->resource[PCI_ROM_RESOURCE];
if (res && res->flags & IORESOURCE_ROM_SHADOW) {
memset(&par->vgastate, 0, sizeof(par->vgastate));
par->vgastate.flags = VGA_SAVE_MODE | VGA_SAVE_FONTS |
VGA_SAVE_CMAP;
save_vga(&par->vgastate);
}
}
static void restore_vga_x86(struct nvidia_par *par)
{
struct resource *res= &par->pci_dev->resource[PCI_ROM_RESOURCE];
if (res && res->flags & IORESOURCE_ROM_SHADOW)
restore_vga(&par->vgastate);
}
#else
#define save_vga_x86(x) do {} while (0)
#define restore_vga_x86(x) do {} while (0)
#endif /* X86 */
static int nvidiafb_open(struct fb_info *info, int user)
{
struct nvidia_par *par = info->par;
if (!par->open_count) {
save_vga_x86(par);
nvidia_save_vga(par, &par->initial_state);
}
par->open_count++;
return 0;
}
static int nvidiafb_release(struct fb_info *info, int user)
{
struct nvidia_par *par = info->par;
int err = 0;
if (!par->open_count) {
err = -EINVAL;
goto done;
}
if (par->open_count == 1) {
nvidia_write_regs(par, &par->initial_state);
restore_vga_x86(par);
}
par->open_count--;
done:
return err;
}
static struct fb_ops nvidia_fb_ops = {
.owner = THIS_MODULE,
.fb_open = nvidiafb_open,
.fb_release = nvidiafb_release,
.fb_check_var = nvidiafb_check_var,
.fb_set_par = nvidiafb_set_par,
.fb_setcolreg = nvidiafb_setcolreg,
.fb_pan_display = nvidiafb_pan_display,
.fb_blank = nvidiafb_blank,
.fb_fillrect = nvidiafb_fillrect,
.fb_copyarea = nvidiafb_copyarea,
.fb_imageblit = nvidiafb_imageblit,
.fb_cursor = nvidiafb_cursor,
.fb_sync = nvidiafb_sync,
};
#ifdef CONFIG_PM
static int nvidiafb_suspend(struct pci_dev *dev, pm_message_t mesg)
{
struct fb_info *info = pci_get_drvdata(dev);
struct nvidia_par *par = info->par;
if (mesg.event == PM_EVENT_PRETHAW)
mesg.event = PM_EVENT_FREEZE;
console_lock();
par->pm_state = mesg.event;
if (mesg.event & PM_EVENT_SLEEP) {
fb_set_suspend(info, 1);
nvidiafb_blank(FB_BLANK_POWERDOWN, info);
nvidia_write_regs(par, &par->SavedReg);
pci_save_state(dev);
pci_disable_device(dev);
pci_set_power_state(dev, pci_choose_state(dev, mesg));
}
dev->dev.power.power_state = mesg;
console_unlock();
return 0;
}
static int nvidiafb_resume(struct pci_dev *dev)
{
struct fb_info *info = pci_get_drvdata(dev);
struct nvidia_par *par = info->par;
console_lock();
pci_set_power_state(dev, PCI_D0);
if (par->pm_state != PM_EVENT_FREEZE) {
pci_restore_state(dev);
if (pci_enable_device(dev))
goto fail;
pci_set_master(dev);
}
par->pm_state = PM_EVENT_ON;
nvidiafb_set_par(info);
fb_set_suspend (info, 0);
nvidiafb_blank(FB_BLANK_UNBLANK, info);
fail:
console_unlock();
return 0;
}
#else
#define nvidiafb_suspend NULL
#define nvidiafb_resume NULL
#endif
static int nvidia_set_fbinfo(struct fb_info *info)
{
struct fb_monspecs *specs = &info->monspecs;
struct fb_videomode modedb;
struct nvidia_par *par = info->par;
int lpitch;
NVTRACE_ENTER();
info->flags = FBINFO_DEFAULT
| FBINFO_HWACCEL_IMAGEBLIT
| FBINFO_HWACCEL_FILLRECT
| FBINFO_HWACCEL_COPYAREA
| FBINFO_HWACCEL_YPAN;
fb_videomode_to_modelist(info->monspecs.modedb,
info->monspecs.modedb_len, &info->modelist);
fb_var_to_videomode(&modedb, &nvidiafb_default_var);
switch (bpp) {
case 0 ... 8:
bpp = 8;
break;
case 9 ... 16:
bpp = 16;
break;
default:
bpp = 32;
break;
}
if (specs->modedb != NULL) {
const struct fb_videomode *mode;
mode = fb_find_best_display(specs, &info->modelist);
fb_videomode_to_var(&nvidiafb_default_var, mode);
nvidiafb_default_var.bits_per_pixel = bpp;
} else if (par->fpWidth && par->fpHeight) {
char buf[16];
memset(buf, 0, 16);
snprintf(buf, 15, "%dx%dMR", par->fpWidth, par->fpHeight);
fb_find_mode(&nvidiafb_default_var, info, buf, specs->modedb,
specs->modedb_len, &modedb, bpp);
}
if (mode_option)
fb_find_mode(&nvidiafb_default_var, info, mode_option,
specs->modedb, specs->modedb_len, &modedb, bpp);
info->var = nvidiafb_default_var;
info->fix.visual = (info->var.bits_per_pixel == 8) ?
FB_VISUAL_PSEUDOCOLOR : FB_VISUAL_DIRECTCOLOR;
info->pseudo_palette = par->pseudo_palette;
fb_alloc_cmap(&info->cmap, 256, 0);
fb_destroy_modedb(info->monspecs.modedb);
info->monspecs.modedb = NULL;
/* maximize virtual vertical length */
lpitch = info->var.xres_virtual *
((info->var.bits_per_pixel + 7) >> 3);
info->var.yres_virtual = info->screen_size / lpitch;
info->pixmap.scan_align = 4;
info->pixmap.buf_align = 4;
info->pixmap.access_align = 32;
info->pixmap.size = 8 * 1024;
info->pixmap.flags = FB_PIXMAP_SYSTEM;
if (!hwcur)
info->fbops->fb_cursor = NULL;
info->var.accel_flags = (!noaccel);
switch (par->Architecture) {
case NV_ARCH_04:
info->fix.accel = FB_ACCEL_NV4;
break;
case NV_ARCH_10:
info->fix.accel = FB_ACCEL_NV_10;
break;
case NV_ARCH_20:
info->fix.accel = FB_ACCEL_NV_20;
break;
case NV_ARCH_30:
info->fix.accel = FB_ACCEL_NV_30;
break;
case NV_ARCH_40:
info->fix.accel = FB_ACCEL_NV_40;
break;
}
NVTRACE_LEAVE();
return nvidiafb_check_var(&info->var, info);
}
static u32 nvidia_get_chipset(struct fb_info *info)
{
struct nvidia_par *par = info->par;
u32 id = (par->pci_dev->vendor << 16) | par->pci_dev->device;
printk(KERN_INFO PFX "Device ID: %x \n", id);
if ((id & 0xfff0) == 0x00f0 ||
(id & 0xfff0) == 0x02e0) {
/* pci-e */
id = NV_RD32(par->REGS, 0x1800);
if ((id & 0x0000ffff) == 0x000010DE)
id = 0x10DE0000 | (id >> 16);
else if ((id & 0xffff0000) == 0xDE100000) /* wrong endian */
id = 0x10DE0000 | ((id << 8) & 0x0000ff00) |
((id >> 8) & 0x000000ff);
printk(KERN_INFO PFX "Subsystem ID: %x \n", id);
}
return id;
}
static u32 nvidia_get_arch(struct fb_info *info)
{
struct nvidia_par *par = info->par;
u32 arch = 0;
switch (par->Chipset & 0x0ff0) {
case 0x0100: /* GeForce 256 */
case 0x0110: /* GeForce2 MX */
case 0x0150: /* GeForce2 */
case 0x0170: /* GeForce4 MX */
case 0x0180: /* GeForce4 MX (8x AGP) */
case 0x01A0: /* nForce */
case 0x01F0: /* nForce2 */
arch = NV_ARCH_10;
break;
case 0x0200: /* GeForce3 */
case 0x0250: /* GeForce4 Ti */
case 0x0280: /* GeForce4 Ti (8x AGP) */
arch = NV_ARCH_20;
break;
case 0x0300: /* GeForceFX 5800 */
case 0x0310: /* GeForceFX 5600 */
case 0x0320: /* GeForceFX 5200 */
case 0x0330: /* GeForceFX 5900 */
case 0x0340: /* GeForceFX 5700 */
arch = NV_ARCH_30;
break;
case 0x0040: /* GeForce 6800 */
case 0x00C0: /* GeForce 6800 */
case 0x0120: /* GeForce 6800 */
case 0x0140: /* GeForce 6600 */
case 0x0160: /* GeForce 6200 */
case 0x01D0: /* GeForce 7200, 7300, 7400 */
case 0x0090: /* GeForce 7800 */
case 0x0210: /* GeForce 6800 */
case 0x0220: /* GeForce 6200 */
case 0x0240: /* GeForce 6100 */
case 0x0290: /* GeForce 7900 */
case 0x0390: /* GeForce 7600 */
case 0x03D0:
arch = NV_ARCH_40;
break;
case 0x0020: /* TNT, TNT2 */
arch = NV_ARCH_04;
break;
default: /* unknown architecture */
break;
}
return arch;
}
static int nvidiafb_probe(struct pci_dev *pd, const struct pci_device_id *ent)
{
struct nvidia_par *par;
struct fb_info *info;
unsigned short cmd;
NVTRACE_ENTER();
assert(pd != NULL);
info = framebuffer_alloc(sizeof(struct nvidia_par), &pd->dev);
if (!info)
goto err_out;
par = info->par;
par->pci_dev = pd;
info->pixmap.addr = kzalloc(8 * 1024, GFP_KERNEL);
if (info->pixmap.addr == NULL)
goto err_out_kfree;
if (pci_enable_device(pd)) {
printk(KERN_ERR PFX "cannot enable PCI device\n");
goto err_out_enable;
}
if (pci_request_regions(pd, "nvidiafb")) {
printk(KERN_ERR PFX "cannot request PCI regions\n");
goto err_out_enable;
}
par->FlatPanel = flatpanel;
if (flatpanel == 1)
printk(KERN_INFO PFX "flatpanel support enabled\n");
par->FPDither = fpdither;
par->CRTCnumber = forceCRTC;
par->FpScale = (!noscale);
par->paneltweak = paneltweak;
par->reverse_i2c = reverse_i2c;
/* enable IO and mem if not already done */
pci_read_config_word(pd, PCI_COMMAND, &cmd);
cmd |= (PCI_COMMAND_IO | PCI_COMMAND_MEMORY);
pci_write_config_word(pd, PCI_COMMAND, cmd);
nvidiafb_fix.mmio_start = pci_resource_start(pd, 0);
nvidiafb_fix.smem_start = pci_resource_start(pd, 1);
nvidiafb_fix.mmio_len = pci_resource_len(pd, 0);
par->REGS = ioremap(nvidiafb_fix.mmio_start, nvidiafb_fix.mmio_len);
if (!par->REGS) {
printk(KERN_ERR PFX "cannot ioremap MMIO base\n");
goto err_out_free_base0;
}
par->Chipset = nvidia_get_chipset(info);
par->Architecture = nvidia_get_arch(info);
if (par->Architecture == 0) {
printk(KERN_ERR PFX "unknown NV_ARCH\n");
goto err_out_arch;
}
sprintf(nvidiafb_fix.id, "NV%x", (pd->device & 0x0ff0) >> 4);
if (NVCommonSetup(info))
goto err_out_arch;
par->FbAddress = nvidiafb_fix.smem_start;
par->FbMapSize = par->RamAmountKBytes * 1024;
if (vram && vram * 1024 * 1024 < par->FbMapSize)
par->FbMapSize = vram * 1024 * 1024;
/* Limit amount of vram to 64 MB */
if (par->FbMapSize > 64 * 1024 * 1024)
par->FbMapSize = 64 * 1024 * 1024;
if(par->Architecture >= NV_ARCH_40)
par->FbUsableSize = par->FbMapSize - (560 * 1024);
else
par->FbUsableSize = par->FbMapSize - (128 * 1024);
par->ScratchBufferSize = (par->Architecture < NV_ARCH_10) ? 8 * 1024 :
16 * 1024;
par->ScratchBufferStart = par->FbUsableSize - par->ScratchBufferSize;
par->CursorStart = par->FbUsableSize + (32 * 1024);
info->screen_base = ioremap(nvidiafb_fix.smem_start, par->FbMapSize);
info->screen_size = par->FbUsableSize;
nvidiafb_fix.smem_len = par->RamAmountKBytes * 1024;
if (!info->screen_base) {
printk(KERN_ERR PFX "cannot ioremap FB base\n");
goto err_out_free_base1;
}
par->FbStart = info->screen_base;
#ifdef CONFIG_MTRR
if (!nomtrr) {
par->mtrr.vram = mtrr_add(nvidiafb_fix.smem_start,
par->RamAmountKBytes * 1024,
MTRR_TYPE_WRCOMB, 1);
if (par->mtrr.vram < 0) {
printk(KERN_ERR PFX "unable to setup MTRR\n");
} else {
par->mtrr.vram_valid = 1;
/* let there be speed */
printk(KERN_INFO PFX "MTRR set to ON\n");
}
}
#endif /* CONFIG_MTRR */
info->fbops = &nvidia_fb_ops;
info->fix = nvidiafb_fix;
if (nvidia_set_fbinfo(info) < 0) {
printk(KERN_ERR PFX "error setting initial video mode\n");
goto err_out_iounmap_fb;
}
nvidia_save_vga(par, &par->SavedReg);
pci_set_drvdata(pd, info);
if (backlight)
nvidia_bl_init(par);
if (register_framebuffer(info) < 0) {
printk(KERN_ERR PFX "error registering nVidia framebuffer\n");
goto err_out_iounmap_fb;
}
printk(KERN_INFO PFX
"PCI nVidia %s framebuffer (%dMB @ 0x%lX)\n",
info->fix.id,
par->FbMapSize / (1024 * 1024), info->fix.smem_start);
NVTRACE_LEAVE();
return 0;
err_out_iounmap_fb:
iounmap(info->screen_base);
err_out_free_base1:
fb_destroy_modedb(info->monspecs.modedb);
nvidia_delete_i2c_busses(par);
err_out_arch:
iounmap(par->REGS);
err_out_free_base0:
pci_release_regions(pd);
err_out_enable:
kfree(info->pixmap.addr);
err_out_kfree:
framebuffer_release(info);
err_out:
return -ENODEV;
}
static void nvidiafb_remove(struct pci_dev *pd)
{
struct fb_info *info = pci_get_drvdata(pd);
struct nvidia_par *par = info->par;
NVTRACE_ENTER();
unregister_framebuffer(info);
nvidia_bl_exit(par);
#ifdef CONFIG_MTRR
if (par->mtrr.vram_valid)
mtrr_del(par->mtrr.vram, info->fix.smem_start,
info->fix.smem_len);
#endif /* CONFIG_MTRR */
iounmap(info->screen_base);
fb_destroy_modedb(info->monspecs.modedb);
nvidia_delete_i2c_busses(par);
iounmap(par->REGS);
pci_release_regions(pd);
kfree(info->pixmap.addr);
framebuffer_release(info);
pci_set_drvdata(pd, NULL);
NVTRACE_LEAVE();
}
/* ------------------------------------------------------------------------- *
*
* initialization
*
* ------------------------------------------------------------------------- */
#ifndef MODULE
static int nvidiafb_setup(char *options)
{
char *this_opt;
NVTRACE_ENTER();
if (!options || !*options)
return 0;
while ((this_opt = strsep(&options, ",")) != NULL) {
if (!strncmp(this_opt, "forceCRTC", 9)) {
char *p;
p = this_opt + 9;
if (!*p || !*(++p))
continue;
forceCRTC = *p - '0';
if (forceCRTC < 0 || forceCRTC > 1)
forceCRTC = -1;
} else if (!strncmp(this_opt, "flatpanel", 9)) {
flatpanel = 1;
} else if (!strncmp(this_opt, "hwcur", 5)) {
hwcur = 1;
} else if (!strncmp(this_opt, "noaccel", 6)) {
noaccel = 1;
} else if (!strncmp(this_opt, "noscale", 7)) {
noscale = 1;
} else if (!strncmp(this_opt, "reverse_i2c", 11)) {
reverse_i2c = 1;
} else if (!strncmp(this_opt, "paneltweak:", 11)) {
paneltweak = simple_strtoul(this_opt+11, NULL, 0);
} else if (!strncmp(this_opt, "vram:", 5)) {
vram = simple_strtoul(this_opt+5, NULL, 0);
} else if (!strncmp(this_opt, "backlight:", 10)) {
backlight = simple_strtoul(this_opt+10, NULL, 0);
#ifdef CONFIG_MTRR
} else if (!strncmp(this_opt, "nomtrr", 6)) {
nomtrr = true;
#endif
} else if (!strncmp(this_opt, "fpdither:", 9)) {
fpdither = simple_strtol(this_opt+9, NULL, 0);
} else if (!strncmp(this_opt, "bpp:", 4)) {
bpp = simple_strtoul(this_opt+4, NULL, 0);
} else
mode_option = this_opt;
}
NVTRACE_LEAVE();
return 0;
}
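/*
 * Example (illustrative): with the parser above, a kernel command line
 * such as
 *
 *   video=nvidiafb:hwcur,noaccel,bpp:32,vram:64,1024x768-32@60
 *
 * enables the hardware cursor, disables acceleration, selects 32 bpp,
 * remaps 64 MiB of vram, and leaves "1024x768-32@60" as mode_option.
 */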
#endif /* !MODULE */
static struct pci_driver nvidiafb_driver = {
.name = "nvidiafb",
.id_table = nvidiafb_pci_tbl,
.probe = nvidiafb_probe,
.suspend = nvidiafb_suspend,
.resume = nvidiafb_resume,
.remove = nvidiafb_remove,
};
/* ------------------------------------------------------------------------- *
*
* modularization
*
* ------------------------------------------------------------------------- */
static int nvidiafb_init(void)
{
#ifndef MODULE
char *option = NULL;
if (fb_get_options("nvidiafb", &option))
return -ENODEV;
nvidiafb_setup(option);
#endif
return pci_register_driver(&nvidiafb_driver);
}
module_init(nvidiafb_init);
static void __exit nvidiafb_exit(void)
{
pci_unregister_driver(&nvidiafb_driver);
}
module_exit(nvidiafb_exit);
module_param(flatpanel, int, 0);
MODULE_PARM_DESC(flatpanel,
"Enables experimental flat panel support for some chipsets. "
"(0=disabled, 1=enabled, -1=autodetect) (default=-1)");
module_param(fpdither, int, 0);
MODULE_PARM_DESC(fpdither,
"Enables dithering of flat panel for 6 bits panels. "
"(0=disabled, 1=enabled, -1=autodetect) (default=-1)");
module_param(hwcur, int, 0);
MODULE_PARM_DESC(hwcur,
"Enables hardware cursor implementation. (0 or 1=enabled) "
"(default=0)");
module_param(noaccel, int, 0);
MODULE_PARM_DESC(noaccel,
"Disables hardware acceleration. (0 or 1=disable) "
"(default=0)");
module_param(noscale, int, 0);
MODULE_PARM_DESC(noscale,
"Disables screen scaleing. (0 or 1=disable) "
"(default=0, do scaling)");
module_param(paneltweak, int, 0);
MODULE_PARM_DESC(paneltweak,
"Tweak display settings for flatpanels. "
"(default=0, no tweaks)");
module_param(forceCRTC, int, 0);
MODULE_PARM_DESC(forceCRTC,
"Forces usage of a particular CRTC in case autodetection "
"fails. (0 or 1) (default=autodetect)");
module_param(vram, int, 0);
MODULE_PARM_DESC(vram,
"amount of framebuffer memory to remap in MiB"
"(default=0 - remap entire memory)");
module_param(mode_option, charp, 0);
MODULE_PARM_DESC(mode_option, "Specify initial video mode");
module_param(bpp, int, 0);
MODULE_PARM_DESC(bpp, "pixel width in bits "
"(default=8)");
module_param(reverse_i2c, int, 0);
MODULE_PARM_DESC(reverse_i2c, "reverse port assignment of the i2c bus");
#ifdef CONFIG_MTRR
module_param(nomtrr, bool, false);
MODULE_PARM_DESC(nomtrr, "Disables MTRR support (0 or 1=disabled) "
"(default=0)");
#endif
MODULE_AUTHOR("Antonino Daplas");
MODULE_DESCRIPTION("Framebuffer driver for nVidia graphics chipset");
MODULE_LICENSE("GPL");
| gpl-2.0 |
TEAM-RAZOR-DEVICES/kernel_lge_g3 | sound/soc/blackfin/bfin-eval-adav80x.c | 5032 | 4078 | /*
* Machine driver for EVAL-ADAV801 and EVAL-ADAV803 on Analog Devices bfin
* evaluation boards.
*
* Copyright 2011 Analog Devices Inc.
* Author: Lars-Peter Clausen <lars@metafoo.de>
*
* Licensed under the GPL-2 or later.
*/
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/module.h>
#include <sound/core.h>
#include <sound/pcm.h>
#include <sound/soc.h>
#include "../codecs/adav80x.h"
static const struct snd_soc_dapm_widget bfin_eval_adav80x_dapm_widgets[] = {
SND_SOC_DAPM_LINE("Line Out", NULL),
SND_SOC_DAPM_LINE("Line In", NULL),
};
static const struct snd_soc_dapm_route bfin_eval_adav80x_dapm_routes[] = {
{ "Line Out", NULL, "VOUTL" },
{ "Line Out", NULL, "VOUTR" },
{ "VINL", NULL, "Line In" },
{ "VINR", NULL, "Line In" },
};
static int bfin_eval_adav80x_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
struct snd_soc_dai *codec_dai = rtd->codec_dai;
int ret;
ret = snd_soc_dai_set_pll(codec_dai, ADAV80X_PLL1, ADAV80X_PLL_SRC_XTAL,
27000000, params_rate(params) * 256);
if (ret)
return ret;
ret = snd_soc_dai_set_sysclk(codec_dai, ADAV80X_CLK_PLL1,
params_rate(params) * 256, SND_SOC_CLOCK_IN);
return ret;
}
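/*
 * Clock math (illustrative): with a 48 kHz stream, the calls above ask
 * PLL1 to derive 48000 * 256 = 12.288 MHz from the board's 27 MHz
 * crystal, then feed that same 256*fs clock back in as the codec
 * sysclk.
 */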
static int bfin_eval_adav80x_codec_init(struct snd_soc_pcm_runtime *rtd)
{
struct snd_soc_dai *codec_dai = rtd->codec_dai;
snd_soc_dai_set_sysclk(codec_dai, ADAV80X_CLK_SYSCLK1, 0,
SND_SOC_CLOCK_OUT);
snd_soc_dai_set_sysclk(codec_dai, ADAV80X_CLK_SYSCLK2, 0,
SND_SOC_CLOCK_OUT);
snd_soc_dai_set_sysclk(codec_dai, ADAV80X_CLK_SYSCLK3, 0,
SND_SOC_CLOCK_OUT);
snd_soc_dai_set_sysclk(codec_dai, ADAV80X_CLK_XTAL, 27000000, 0);
return 0;
}
static struct snd_soc_ops bfin_eval_adav80x_ops = {
.hw_params = bfin_eval_adav80x_hw_params,
};
static struct snd_soc_dai_link bfin_eval_adav80x_dais[] = {
{
.name = "adav80x",
.stream_name = "ADAV80x HiFi",
.cpu_dai_name = "bfin-i2s.0",
.codec_dai_name = "adav80x-hifi",
.platform_name = "bfin-i2s-pcm-audio",
.init = bfin_eval_adav80x_codec_init,
.ops = &bfin_eval_adav80x_ops,
.dai_fmt = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF |
SND_SOC_DAIFMT_CBM_CFM,
},
};
static struct snd_soc_card bfin_eval_adav80x = {
.name = "bfin-eval-adav80x",
.owner = THIS_MODULE,
.dai_link = bfin_eval_adav80x_dais,
.num_links = ARRAY_SIZE(bfin_eval_adav80x_dais),
.dapm_widgets = bfin_eval_adav80x_dapm_widgets,
.num_dapm_widgets = ARRAY_SIZE(bfin_eval_adav80x_dapm_widgets),
.dapm_routes = bfin_eval_adav80x_dapm_routes,
.num_dapm_routes = ARRAY_SIZE(bfin_eval_adav80x_dapm_routes),
};
enum bfin_eval_adav80x_type {
BFIN_EVAL_ADAV801,
BFIN_EVAL_ADAV803,
};
static int bfin_eval_adav80x_probe(struct platform_device *pdev)
{
struct snd_soc_card *card = &bfin_eval_adav80x;
const char *codec_name;
switch (platform_get_device_id(pdev)->driver_data) {
case BFIN_EVAL_ADAV801:
codec_name = "spi0.1";
break;
case BFIN_EVAL_ADAV803:
codec_name = "adav803.0-0034";
break;
default:
return -EINVAL;
}
bfin_eval_adav80x_dais[0].codec_name = codec_name;
card->dev = &pdev->dev;
return snd_soc_register_card(&bfin_eval_adav80x);
}
static int __devexit bfin_eval_adav80x_remove(struct platform_device *pdev)
{
struct snd_soc_card *card = platform_get_drvdata(pdev);
snd_soc_unregister_card(card);
return 0;
}
static const struct platform_device_id bfin_eval_adav80x_ids[] = {
{ "bfin-eval-adav801", BFIN_EVAL_ADAV801 },
{ "bfin-eval-adav803", BFIN_EVAL_ADAV803 },
{ },
};
MODULE_DEVICE_TABLE(platform, bfin_eval_adav80x_ids);
static struct platform_driver bfin_eval_adav80x_driver = {
.driver = {
.name = "bfin-eval-adav80x",
.owner = THIS_MODULE,
.pm = &snd_soc_pm_ops,
},
.probe = bfin_eval_adav80x_probe,
.remove = __devexit_p(bfin_eval_adav80x_remove),
.id_table = bfin_eval_adav80x_ids,
};
module_platform_driver(bfin_eval_adav80x_driver);
MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>");
MODULE_DESCRIPTION("ALSA SoC bfin adav80x driver");
MODULE_LICENSE("GPL");
| gpl-2.0 |
omega-roms/N9005_Omega_Kernel_LL | drivers/leds/leds-hp6xx.c | 7848 | 2283 | /*
* LED Triggers Core
* For the HP Jornada 620/660/680/690 handhelds
*
* Copyright 2008 Kristoffer Ericson <kristoffer.ericson@gmail.com>
* this driver is based on leds-spitz.c by Richard Purdie.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/leds.h>
#include <asm/hd64461.h>
#include <mach/hp6xx.h>
static void hp6xxled_green_set(struct led_classdev *led_cdev,
enum led_brightness value)
{
u8 v8;
v8 = inb(PKDR);
if (value)
outb(v8 & (~PKDR_LED_GREEN), PKDR);
else
outb(v8 | PKDR_LED_GREEN, PKDR);
}
static void hp6xxled_red_set(struct led_classdev *led_cdev,
enum led_brightness value)
{
u16 v16;
v16 = inw(HD64461_GPBDR);
if (value)
outw(v16 & (~HD64461_GPBDR_LED_RED), HD64461_GPBDR);
else
outw(v16 | HD64461_GPBDR_LED_RED, HD64461_GPBDR);
}
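/*
 * Note (illustrative): both handlers treat the LED bits as active-low
 * enables -- a nonzero brightness clears the LED bit in the port data
 * register, and zero sets it again.
 */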
static struct led_classdev hp6xx_red_led = {
.name = "hp6xx:red",
.default_trigger = "hp6xx-charge",
.brightness_set = hp6xxled_red_set,
.flags = LED_CORE_SUSPENDRESUME,
};
static struct led_classdev hp6xx_green_led = {
.name = "hp6xx:green",
.default_trigger = "ide-disk",
.brightness_set = hp6xxled_green_set,
.flags = LED_CORE_SUSPENDRESUME,
};
static int hp6xxled_probe(struct platform_device *pdev)
{
int ret;
ret = led_classdev_register(&pdev->dev, &hp6xx_red_led);
if (ret < 0)
return ret;
ret = led_classdev_register(&pdev->dev, &hp6xx_green_led);
if (ret < 0)
led_classdev_unregister(&hp6xx_red_led);
return ret;
}
static int hp6xxled_remove(struct platform_device *pdev)
{
led_classdev_unregister(&hp6xx_red_led);
led_classdev_unregister(&hp6xx_green_led);
return 0;
}
static struct platform_driver hp6xxled_driver = {
.probe = hp6xxled_probe,
.remove = hp6xxled_remove,
.driver = {
.name = "hp6xx-led",
.owner = THIS_MODULE,
},
};
module_platform_driver(hp6xxled_driver);
MODULE_AUTHOR("Kristoffer Ericson <kristoffer.ericson@gmail.com>");
MODULE_DESCRIPTION("HP Jornada 6xx LED driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:hp6xx-led");
| gpl-2.0 |
vic3t3chn0/Samsung_Wave_Kernel_SD_NAND | drivers/net/wan/hdlc_raw.c | 13480 | 2617 | /*
* Generic HDLC support routines for Linux
* HDLC support
*
* Copyright (C) 1999 - 2006 Krzysztof Halasa <khc@pm.waw.pl>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License
* as published by the Free Software Foundation.
*/
#include <linux/errno.h>
#include <linux/hdlc.h>
#include <linux/if_arp.h>
#include <linux/inetdevice.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pkt_sched.h>
#include <linux/poll.h>
#include <linux/rtnetlink.h>
#include <linux/skbuff.h>
static int raw_ioctl(struct net_device *dev, struct ifreq *ifr);
static __be16 raw_type_trans(struct sk_buff *skb, struct net_device *dev)
{
return cpu_to_be16(ETH_P_IP);
}
static struct hdlc_proto proto = {
.type_trans = raw_type_trans,
.ioctl = raw_ioctl,
.module = THIS_MODULE,
};
static int raw_ioctl(struct net_device *dev, struct ifreq *ifr)
{
raw_hdlc_proto __user *raw_s = ifr->ifr_settings.ifs_ifsu.raw_hdlc;
const size_t size = sizeof(raw_hdlc_proto);
raw_hdlc_proto new_settings;
hdlc_device *hdlc = dev_to_hdlc(dev);
int result;
switch (ifr->ifr_settings.type) {
case IF_GET_PROTO:
if (dev_to_hdlc(dev)->proto != &proto)
return -EINVAL;
ifr->ifr_settings.type = IF_PROTO_HDLC;
if (ifr->ifr_settings.size < size) {
ifr->ifr_settings.size = size; /* data size wanted */
return -ENOBUFS;
}
if (copy_to_user(raw_s, hdlc->state, size))
return -EFAULT;
return 0;
case IF_PROTO_HDLC:
if (!capable(CAP_NET_ADMIN))
return -EPERM;
if (dev->flags & IFF_UP)
return -EBUSY;
if (copy_from_user(&new_settings, raw_s, size))
return -EFAULT;
if (new_settings.encoding == ENCODING_DEFAULT)
new_settings.encoding = ENCODING_NRZ;
if (new_settings.parity == PARITY_DEFAULT)
new_settings.parity = PARITY_CRC16_PR1_CCITT;
result = hdlc->attach(dev, new_settings.encoding,
new_settings.parity);
if (result)
return result;
result = attach_hdlc_protocol(dev, &proto,
sizeof(raw_hdlc_proto));
if (result)
return result;
memcpy(hdlc->state, &new_settings, size);
dev->type = ARPHRD_RAWHDLC;
netif_dormant_off(dev);
return 0;
}
return -EINVAL;
}
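/*
* Illustrative userspace sketch (not part of the original file): how a
* configuration tool might attach this protocol through the ioctl above.
* It is kept under "#if 0" because it is userspace code; the names come
* from linux/if.h, linux/hdlc/ioctl.h and linux/sockios.h, while "hdlc0"
* and "fd" are placeholders.
*/
#if 0
struct ifreq ifr;
raw_hdlc_proto raw = {
	.encoding = ENCODING_NRZ,
	.parity = PARITY_CRC16_PR1_CCITT,
};
memset(&ifr, 0, sizeof(ifr));
strncpy(ifr.ifr_name, "hdlc0", IFNAMSIZ);
ifr.ifr_settings.type = IF_PROTO_HDLC;
ifr.ifr_settings.size = sizeof(raw);
ifr.ifr_settings.ifs_ifsu.raw_hdlc = &raw;
ioctl(fd, SIOCWANDEV, &ifr); /* fd: e.g. an AF_INET datagram socket */
#endif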
static int __init mod_init(void)
{
register_hdlc_protocol(&proto);
return 0;
}
static void __exit mod_exit(void)
{
unregister_hdlc_protocol(&proto);
}
module_init(mod_init);
module_exit(mod_exit);
MODULE_AUTHOR("Krzysztof Halasa <khc@pm.waw.pl>");
MODULE_DESCRIPTION("Raw HDLC protocol support for generic HDLC");
MODULE_LICENSE("GPL v2");
| gpl-2.0 |
ROM-Jeremy/android_kernel_lge_msm8226-old | drivers/net/wan/hdlc_raw.c | 13480 | 2617 | /*
* Generic HDLC support routines for Linux
* HDLC support
*
* Copyright (C) 1999 - 2006 Krzysztof Halasa <khc@pm.waw.pl>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License
* as published by the Free Software Foundation.
*/
#include <linux/errno.h>
#include <linux/hdlc.h>
#include <linux/if_arp.h>
#include <linux/inetdevice.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pkt_sched.h>
#include <linux/poll.h>
#include <linux/rtnetlink.h>
#include <linux/skbuff.h>
static int raw_ioctl(struct net_device *dev, struct ifreq *ifr);
static __be16 raw_type_trans(struct sk_buff *skb, struct net_device *dev)
{
return cpu_to_be16(ETH_P_IP);
}
static struct hdlc_proto proto = {
.type_trans = raw_type_trans,
.ioctl = raw_ioctl,
.module = THIS_MODULE,
};
static int raw_ioctl(struct net_device *dev, struct ifreq *ifr)
{
raw_hdlc_proto __user *raw_s = ifr->ifr_settings.ifs_ifsu.raw_hdlc;
const size_t size = sizeof(raw_hdlc_proto);
raw_hdlc_proto new_settings;
hdlc_device *hdlc = dev_to_hdlc(dev);
int result;
switch (ifr->ifr_settings.type) {
case IF_GET_PROTO:
if (dev_to_hdlc(dev)->proto != &proto)
return -EINVAL;
ifr->ifr_settings.type = IF_PROTO_HDLC;
if (ifr->ifr_settings.size < size) {
ifr->ifr_settings.size = size; /* data size wanted */
return -ENOBUFS;
}
if (copy_to_user(raw_s, hdlc->state, size))
return -EFAULT;
return 0;
case IF_PROTO_HDLC:
if (!capable(CAP_NET_ADMIN))
return -EPERM;
if (dev->flags & IFF_UP)
return -EBUSY;
if (copy_from_user(&new_settings, raw_s, size))
return -EFAULT;
if (new_settings.encoding == ENCODING_DEFAULT)
new_settings.encoding = ENCODING_NRZ;
if (new_settings.parity == PARITY_DEFAULT)
new_settings.parity = PARITY_CRC16_PR1_CCITT;
result = hdlc->attach(dev, new_settings.encoding,
new_settings.parity);
if (result)
return result;
result = attach_hdlc_protocol(dev, &proto,
sizeof(raw_hdlc_proto));
if (result)
return result;
memcpy(hdlc->state, &new_settings, size);
dev->type = ARPHRD_RAWHDLC;
netif_dormant_off(dev);
return 0;
}
return -EINVAL;
}
static int __init mod_init(void)
{
register_hdlc_protocol(&proto);
return 0;
}
static void __exit mod_exit(void)
{
unregister_hdlc_protocol(&proto);
}
module_init(mod_init);
module_exit(mod_exit);
MODULE_AUTHOR("Krzysztof Halasa <khc@pm.waw.pl>");
MODULE_DESCRIPTION("Raw HDLC protocol support for generic HDLC");
MODULE_LICENSE("GPL v2");
| gpl-2.0 |
CyanogenMod/android_kernel_huawei_angler | arch/powerpc/math-emu/fdiv.c | 13736 | 1157 | #include <linux/types.h>
#include <linux/errno.h>
#include <asm/uaccess.h>
#include <asm/sfp-machine.h>
#include <math-emu/soft-fp.h>
#include <math-emu/double.h>
int
fdiv(void *frD, void *frA, void *frB)
{
FP_DECL_D(A);
FP_DECL_D(B);
FP_DECL_D(R);
FP_DECL_EX;
#ifdef DEBUG
printk("%s: %p %p %p\n", __func__, frD, frA, frB);
#endif
FP_UNPACK_DP(A, frA);
FP_UNPACK_DP(B, frB);
#ifdef DEBUG
printk("A: %ld %lu %lu %ld (%ld)\n", A_s, A_f1, A_f0, A_e, A_c);
printk("B: %ld %lu %lu %ld (%ld)\n", B_s, B_f1, B_f0, B_e, B_c);
#endif
if (A_c == FP_CLS_ZERO && B_c == FP_CLS_ZERO) {
FP_SET_EXCEPTION(EFLAG_VXZDZ);
#ifdef DEBUG
printk("%s: FPSCR_VXZDZ raised\n", __func__);
#endif
}
if (A_c == FP_CLS_INF && B_c == FP_CLS_INF) {
FP_SET_EXCEPTION(EFLAG_VXIDI);
#ifdef DEBUG
printk("%s: FPSCR_VXIDI raised\n", __func__);
#endif
}
if (B_c == FP_CLS_ZERO && A_c != FP_CLS_ZERO) {
FP_SET_EXCEPTION(EFLAG_DIVZERO);
if (__FPU_TRAP_P(EFLAG_DIVZERO))
return FP_CUR_EXCEPTIONS;
}
FP_DIV_D(R, A, B);
#ifdef DEBUG
printk("D: %ld %lu %lu %ld (%ld)\n", R_s, R_f1, R_f0, R_e, R_c);
#endif
__FP_PACK_D(frD, R);
return FP_CUR_EXCEPTIONS;
}
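/*
* Summary (illustrative, not from the original file) of the special cases
* handled above, matching the PowerPC FPSCR semantics:
*   0 / 0           -> EFLAG_VXZDZ (invalid: zero divided by zero)
*   inf / inf       -> EFLAG_VXIDI (invalid: infinity divided by infinity)
*   x / 0 (x != 0)  -> EFLAG_DIVZERO; if that trap is enabled, the
*                      function returns before FP_DIV_D runs and frD is
*                      left unmodified.
*/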
| gpl-2.0 |
rjmccabe3701/LinuxViewPageTables | arch/arm/mach-orion5x/db88f5281-setup.c | 169 | 9972 | /*
* arch/arm/mach-orion5x/db88f5281-setup.c
*
* Marvell Orion-2 Development Board Setup
*
* Maintainer: Tzachi Perelstein <tzachi@marvell.com>
*
* This file is licensed under the terms of the GNU General Public
* License version 2. This program is licensed "as is" without any
* warranty of any kind, whether express or implied.
*/
#include <linux/gpio.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/pci.h>
#include <linux/irq.h>
#include <linux/mtd/physmap.h>
#include <linux/mtd/nand.h>
#include <linux/timer.h>
#include <linux/mv643xx_eth.h>
#include <linux/i2c.h>
#include <asm/mach-types.h>
#include <asm/mach/arch.h>
#include <asm/mach/pci.h>
#include <mach/orion5x.h>
#include <linux/platform_data/mtd-orion_nand.h>
#include "common.h"
#include "mpp.h"
/*****************************************************************************
* DB-88F5281 on board devices
****************************************************************************/
/*
* 512K NOR flash Device bus boot chip select
*/
#define DB88F5281_NOR_BOOT_BASE 0xf4000000
#define DB88F5281_NOR_BOOT_SIZE SZ_512K
/*
* 7-Segment on Device bus chip select 0
*/
#define DB88F5281_7SEG_BASE 0xfa000000
#define DB88F5281_7SEG_SIZE SZ_1K
/*
* 32M NOR flash on Device bus chip select 1
*/
#define DB88F5281_NOR_BASE 0xfc000000
#define DB88F5281_NOR_SIZE SZ_32M
/*
* 32M NAND flash on Device bus chip select 2
*/
#define DB88F5281_NAND_BASE 0xfa800000
#define DB88F5281_NAND_SIZE SZ_1K
/*
* PCI
*/
#define DB88F5281_PCI_SLOT0_OFFS 7
#define DB88F5281_PCI_SLOT0_IRQ_PIN 12
#define DB88F5281_PCI_SLOT1_SLOT2_IRQ_PIN 13
/*****************************************************************************
* 512K NOR Flash on Device bus Boot CS
****************************************************************************/
static struct physmap_flash_data db88f5281_boot_flash_data = {
.width = 1, /* 8 bit bus width */
};
static struct resource db88f5281_boot_flash_resource = {
.flags = IORESOURCE_MEM,
.start = DB88F5281_NOR_BOOT_BASE,
.end = DB88F5281_NOR_BOOT_BASE + DB88F5281_NOR_BOOT_SIZE - 1,
};
static struct platform_device db88f5281_boot_flash = {
.name = "physmap-flash",
.id = 0,
.dev = {
.platform_data = &db88f5281_boot_flash_data,
},
.num_resources = 1,
.resource = &db88f5281_boot_flash_resource,
};
/*****************************************************************************
* 32M NOR Flash on Device bus CS1
****************************************************************************/
static struct physmap_flash_data db88f5281_nor_flash_data = {
.width = 4, /* 32 bit bus width */
};
static struct resource db88f5281_nor_flash_resource = {
.flags = IORESOURCE_MEM,
.start = DB88F5281_NOR_BASE,
.end = DB88F5281_NOR_BASE + DB88F5281_NOR_SIZE - 1,
};
static struct platform_device db88f5281_nor_flash = {
.name = "physmap-flash",
.id = 1,
.dev = {
.platform_data = &db88f5281_nor_flash_data,
},
.num_resources = 1,
.resource = &db88f5281_nor_flash_resource,
};
/*****************************************************************************
* 32M NAND Flash on Device bus CS2
****************************************************************************/
static struct mtd_partition db88f5281_nand_parts[] = {
{
.name = "kernel",
.offset = 0,
.size = SZ_2M,
}, {
.name = "root",
.offset = SZ_2M,
.size = (SZ_16M - SZ_2M),
}, {
.name = "user",
.offset = SZ_16M,
.size = SZ_8M,
}, {
.name = "recovery",
.offset = (SZ_16M + SZ_8M),
.size = SZ_8M,
},
};
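/*
* Layout check (illustrative): the partitions above tile the whole 32M
* device with no gaps or overlaps: kernel 0-2M, root 2M-16M, user
* 16M-24M and recovery 24M-32M, i.e. 2M + 14M + 8M + 8M == SZ_32M.
*/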
static struct resource db88f5281_nand_resource = {
.flags = IORESOURCE_MEM,
.start = DB88F5281_NAND_BASE,
.end = DB88F5281_NAND_BASE + DB88F5281_NAND_SIZE - 1,
};
static struct orion_nand_data db88f5281_nand_data = {
.parts = db88f5281_nand_parts,
.nr_parts = ARRAY_SIZE(db88f5281_nand_parts),
.cle = 0,
.ale = 1,
.width = 8,
};
static struct platform_device db88f5281_nand_flash = {
.name = "orion_nand",
.id = -1,
.dev = {
.platform_data = &db88f5281_nand_data,
},
.resource = &db88f5281_nand_resource,
.num_resources = 1,
};
/*****************************************************************************
* 7-Segment on Device bus CS0
* Dummy counter every 2 sec
****************************************************************************/
static void __iomem *db88f5281_7seg;
static struct timer_list db88f5281_timer;
static void db88f5281_7seg_event(unsigned long data)
{
static int count = 0;
writel(0, db88f5281_7seg + (count << 4));
count = (count + 1) & 7;
mod_timer(&db88f5281_timer, jiffies + 2 * HZ);
}
static int __init db88f5281_7seg_init(void)
{
if (machine_is_db88f5281()) {
db88f5281_7seg = ioremap(DB88F5281_7SEG_BASE,
DB88F5281_7SEG_SIZE);
if (!db88f5281_7seg) {
printk(KERN_ERR "Failed to ioremap db88f5281_7seg\n");
return -EIO;
}
setup_timer(&db88f5281_timer, db88f5281_7seg_event, 0);
mod_timer(&db88f5281_timer, jiffies + 2 * HZ);
}
return 0;
}
__initcall(db88f5281_7seg_init);
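/*
* Note (illustrative, not from the original file): the timer callback
* above re-arms itself, so once db88f5281_7seg_init() has primed it the
* dummy counter keeps stepping through the eight 7-segment register
* offsets every 2 seconds indefinitely.
*/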
/*****************************************************************************
* PCI
****************************************************************************/
void __init db88f5281_pci_preinit(void)
{
int pin;
/*
* Configure PCI GPIO IRQ pins
*/
pin = DB88F5281_PCI_SLOT0_IRQ_PIN;
if (gpio_request(pin, "PCI Int1") == 0) {
if (gpio_direction_input(pin) == 0) {
irq_set_irq_type(gpio_to_irq(pin), IRQ_TYPE_LEVEL_LOW);
} else {
printk(KERN_ERR "db88f5281_pci_preinit failed to "
"set_irq_type pin %d\n", pin);
gpio_free(pin);
}
} else {
printk(KERN_ERR "db88f5281_pci_preinit failed to gpio_request %d\n", pin);
}
pin = DB88F5281_PCI_SLOT1_SLOT2_IRQ_PIN;
if (gpio_request(pin, "PCI Int2") == 0) {
if (gpio_direction_input(pin) == 0) {
irq_set_irq_type(gpio_to_irq(pin), IRQ_TYPE_LEVEL_LOW);
} else {
printk(KERN_ERR "db88f5281_pci_preinit failed "
"to set_irq_type pin %d\n", pin);
gpio_free(pin);
}
} else {
printk(KERN_ERR "db88f5281_pci_preinit failed to gpio_request %d\n", pin);
}
}
static int __init db88f5281_pci_map_irq(const struct pci_dev *dev, u8 slot,
u8 pin)
{
int irq;
/*
* Check for devices with hard-wired IRQs.
*/
irq = orion5x_pci_map_irq(dev, slot, pin);
if (irq != -1)
return irq;
/*
* PCI IRQs are connected via GPIOs.
*/
switch (slot - DB88F5281_PCI_SLOT0_OFFS) {
case 0:
return gpio_to_irq(DB88F5281_PCI_SLOT0_IRQ_PIN);
case 1:
case 2:
return gpio_to_irq(DB88F5281_PCI_SLOT1_SLOT2_IRQ_PIN);
default:
return -1;
}
}
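/*
* Worked example (illustrative): with DB88F5281_PCI_SLOT0_OFFS == 7, a
* device in physical slot 8 gives slot - 7 == 1, so its interrupt is
* routed to the GPIO pin shared by slots 1 and 2 (pin 13 above), while
* slot 7 itself maps to the dedicated slot 0 pin (pin 12).
*/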
static struct hw_pci db88f5281_pci __initdata = {
.nr_controllers = 2,
.preinit = db88f5281_pci_preinit,
.setup = orion5x_pci_sys_setup,
.scan = orion5x_pci_sys_scan_bus,
.map_irq = db88f5281_pci_map_irq,
};
static int __init db88f5281_pci_init(void)
{
if (machine_is_db88f5281())
pci_common_init(&db88f5281_pci);
return 0;
}
subsys_initcall(db88f5281_pci_init);
/*****************************************************************************
* Ethernet
****************************************************************************/
static struct mv643xx_eth_platform_data db88f5281_eth_data = {
.phy_addr = MV643XX_ETH_PHY_ADDR(8),
};
/*****************************************************************************
* RTC DS1339 on I2C bus
****************************************************************************/
static struct i2c_board_info __initdata db88f5281_i2c_rtc = {
I2C_BOARD_INFO("ds1339", 0x68),
};
/*****************************************************************************
* General Setup
****************************************************************************/
static unsigned int db88f5281_mpp_modes[] __initdata = {
MPP0_GPIO, /* USB Over Current */
MPP1_GPIO, /* USB Vbat input */
MPP2_PCI_ARB, /* PCI_REQn[2] */
MPP3_PCI_ARB, /* PCI_GNTn[2] */
MPP4_PCI_ARB, /* PCI_REQn[3] */
MPP5_PCI_ARB, /* PCI_GNTn[3] */
MPP6_GPIO, /* JP0, CON17.2 */
MPP7_GPIO, /* JP1, CON17.1 */
MPP8_GPIO, /* JP2, CON11.2 */
MPP9_GPIO, /* JP3, CON11.3 */
MPP10_GPIO, /* RTC int */
MPP11_GPIO, /* Baud Rate Generator */
MPP12_GPIO, /* PCI int 1 */
MPP13_GPIO, /* PCI int 2 */
MPP14_NAND, /* NAND_REn[2] */
MPP15_NAND, /* NAND_WEn[2] */
MPP16_UART, /* UART1_RX */
MPP17_UART, /* UART1_TX */
MPP18_UART, /* UART1_CTSn */
MPP19_UART, /* UART1_RTSn */
0,
};
static void __init db88f5281_init(void)
{
/*
* Basic Orion setup. Need to be called early.
*/
orion5x_init();
orion5x_mpp_conf(db88f5281_mpp_modes);
writel(0, MPP_DEV_CTRL); /* DEV_D[31:16] */
/*
* Configure peripherals.
*/
orion5x_ehci0_init();
orion5x_eth_init(&db88f5281_eth_data);
orion5x_i2c_init();
orion5x_uart0_init();
orion5x_uart1_init();
mvebu_mbus_add_window_by_id(ORION_MBUS_DEVBUS_BOOT_TARGET,
ORION_MBUS_DEVBUS_BOOT_ATTR,
DB88F5281_NOR_BOOT_BASE,
DB88F5281_NOR_BOOT_SIZE);
platform_device_register(&db88f5281_boot_flash);
mvebu_mbus_add_window_by_id(ORION_MBUS_DEVBUS_TARGET(0),
ORION_MBUS_DEVBUS_ATTR(0),
DB88F5281_7SEG_BASE,
DB88F5281_7SEG_SIZE);
mvebu_mbus_add_window_by_id(ORION_MBUS_DEVBUS_TARGET(1),
ORION_MBUS_DEVBUS_ATTR(1),
DB88F5281_NOR_BASE,
DB88F5281_NOR_SIZE);
platform_device_register(&db88f5281_nor_flash);
mvebu_mbus_add_window_by_id(ORION_MBUS_DEVBUS_TARGET(2),
ORION_MBUS_DEVBUS_ATTR(2),
DB88F5281_NAND_BASE,
DB88F5281_NAND_SIZE);
platform_device_register(&db88f5281_nand_flash);
i2c_register_board_info(0, &db88f5281_i2c_rtc, 1);
}
MACHINE_START(DB88F5281, "Marvell Orion-2 Development Board")
/* Maintainer: Tzachi Perelstein <tzachi@marvell.com> */
.atag_offset = 0x100,
.init_machine = db88f5281_init,
.map_io = orion5x_map_io,
.init_early = orion5x_init_early,
.init_irq = orion5x_init_irq,
.init_time = orion5x_timer_init,
.restart = orion5x_restart,
MACHINE_END
| gpl-2.0 |
SlimRoms/kernel_oneplus_msm8974 | arch/arm/mach-pxa/cpufreq-pxa2xx.c | 169 | 14834 | /*
* linux/arch/arm/mach-pxa/cpufreq-pxa2xx.c
*
* Copyright (C) 2002,2003 Intrinsyc Software
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
* History:
* 31-Jul-2002 : Initial version [FB]
* 29-Jan-2003 : added PXA255 support [FB]
* 20-Apr-2003 : ported to v2.5 (Dustin McIntire, Sensoria Corp.)
*
* Note:
* This driver may change the memory bus clock rate, but will not do any
* platform specific access timing changes... for example if you have flash
* memory connected to CS0, you will need to register a platform specific
* notifier which will adjust the memory access strobes to maintain a
* minimum strobe width.
*
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/cpufreq.h>
#include <linux/err.h>
#include <linux/regulator/consumer.h>
#include <linux/io.h>
#include <mach/pxa2xx-regs.h>
#include <mach/smemc.h>
#ifdef DEBUG
static unsigned int freq_debug;
module_param(freq_debug, uint, 0);
MODULE_PARM_DESC(freq_debug, "Set the debug messages to on=1/off=0");
#else
#define freq_debug 0
#endif
static struct regulator *vcc_core;
static unsigned int pxa27x_maxfreq;
module_param(pxa27x_maxfreq, uint, 0);
MODULE_PARM_DESC(pxa27x_maxfreq, "Set the pxa27x maxfreq in MHz"
"(typically 624=>pxa270, 416=>pxa271, 520=>pxa272)");
typedef struct {
unsigned int khz;
unsigned int membus;
unsigned int cccr;
unsigned int div2;
unsigned int cclkcfg;
int vmin;
int vmax;
} pxa_freqs_t;
/* Define the refresh period in mSec for the SDRAM and the number of rows */
#define SDRAM_TREF 64 /* standard 64ms SDRAM */
static unsigned int sdram_rows;
#define CCLKCFG_TURBO 0x1
#define CCLKCFG_FCS 0x2
#define CCLKCFG_HALFTURBO 0x4
#define CCLKCFG_FASTBUS 0x8
#define MDREFR_DB2_MASK (MDREFR_K2DB2 | MDREFR_K1DB2)
#define MDREFR_DRI_MASK 0xFFF
#define MDCNFG_DRAC2(mdcnfg) (((mdcnfg) >> 21) & 0x3)
#define MDCNFG_DRAC0(mdcnfg) (((mdcnfg) >> 5) & 0x3)
/*
* PXA255 definitions
*/
/* Use the run mode frequencies for the CPUFREQ_POLICY_PERFORMANCE policy */
#define CCLKCFG CCLKCFG_TURBO | CCLKCFG_FCS
static pxa_freqs_t pxa255_run_freqs[] =
{
/* CPU MEMBUS CCCR DIV2 CCLKCFG run turbo PXbus SDRAM */
{ 99500, 99500, 0x121, 1, CCLKCFG, -1, -1}, /* 99, 99, 50, 50 */
{132700, 132700, 0x123, 1, CCLKCFG, -1, -1}, /* 133, 133, 66, 66 */
{199100, 99500, 0x141, 0, CCLKCFG, -1, -1}, /* 199, 199, 99, 99 */
{265400, 132700, 0x143, 1, CCLKCFG, -1, -1}, /* 265, 265, 133, 66 */
{331800, 165900, 0x145, 1, CCLKCFG, -1, -1}, /* 331, 331, 166, 83 */
{398100, 99500, 0x161, 0, CCLKCFG, -1, -1}, /* 398, 398, 196, 99 */
};
/* Use the turbo mode frequencies for the CPUFREQ_POLICY_POWERSAVE policy */
static pxa_freqs_t pxa255_turbo_freqs[] =
{
/* CPU MEMBUS CCCR DIV2 CCLKCFG run turbo PXbus SDRAM */
{ 99500, 99500, 0x121, 1, CCLKCFG, -1, -1}, /* 99, 99, 50, 50 */
{199100, 99500, 0x221, 0, CCLKCFG, -1, -1}, /* 99, 199, 50, 99 */
{298500, 99500, 0x321, 0, CCLKCFG, -1, -1}, /* 99, 287, 50, 99 */
{298600, 99500, 0x1c1, 0, CCLKCFG, -1, -1}, /* 199, 287, 99, 99 */
{398100, 99500, 0x241, 0, CCLKCFG, -1, -1}, /* 199, 398, 99, 99 */
};
#define NUM_PXA25x_RUN_FREQS ARRAY_SIZE(pxa255_run_freqs)
#define NUM_PXA25x_TURBO_FREQS ARRAY_SIZE(pxa255_turbo_freqs)
static struct cpufreq_frequency_table
pxa255_run_freq_table[NUM_PXA25x_RUN_FREQS+1];
static struct cpufreq_frequency_table
pxa255_turbo_freq_table[NUM_PXA25x_TURBO_FREQS+1];
static unsigned int pxa255_turbo_table;
module_param(pxa255_turbo_table, uint, 0);
MODULE_PARM_DESC(pxa255_turbo_table, "Selects the frequency table (0 = run table, !0 = turbo table)");
/*
* PXA270 definitions
*
* For the PXA27x:
* Control variables are A, L, 2N for CCCR; B, HT, T for CLKCFG.
*
* A = 0 => memory controller clock from table 3-7,
* A = 1 => memory controller clock = system bus clock
* Run mode frequency = 13 MHz * L
* Turbo mode frequency = 13 MHz * L * N
* System bus frequency = 13 MHz * L / (B + 1)
*
* In CCCR:
* A = 1
* L = 16 oscillator to run mode ratio
* 2N = 6 2 * (turbo mode to run mode ratio)
*
* In CCLKCFG:
* B = 1 Fast bus mode
* HT = 0 Half-Turbo mode
* T = 1 Turbo mode
*
* For now, just support some of the combinations in table 3-7 of
* PXA27x Processor Family Developer's Manual to simplify frequency
* change sequences.
*/
#define PXA27x_CCCR(A, L, N2) (A << 25 | N2 << 7 | L)
#define CCLKCFG2(B, HT, T) \
(CCLKCFG_FCS | \
((B) ? CCLKCFG_FASTBUS : 0) | \
((HT) ? CCLKCFG_HALFTURBO : 0) | \
((T) ? CCLKCFG_TURBO : 0))
static pxa_freqs_t pxa27x_freqs[] = {
{104000, 104000, PXA27x_CCCR(1, 8, 2), 0, CCLKCFG2(1, 0, 1), 900000, 1705000 },
{156000, 104000, PXA27x_CCCR(1, 8, 3), 0, CCLKCFG2(1, 0, 1), 1000000, 1705000 },
{208000, 208000, PXA27x_CCCR(0, 16, 2), 1, CCLKCFG2(0, 0, 1), 1180000, 1705000 },
{312000, 208000, PXA27x_CCCR(1, 16, 3), 1, CCLKCFG2(1, 0, 1), 1250000, 1705000 },
{416000, 208000, PXA27x_CCCR(1, 16, 4), 1, CCLKCFG2(1, 0, 1), 1350000, 1705000 },
{520000, 208000, PXA27x_CCCR(1, 16, 5), 1, CCLKCFG2(1, 0, 1), 1450000, 1705000 },
{624000, 208000, PXA27x_CCCR(1, 16, 6), 1, CCLKCFG2(1, 0, 1), 1550000, 1705000 }
};
#define NUM_PXA27x_FREQS ARRAY_SIZE(pxa27x_freqs)
static struct cpufreq_frequency_table
pxa27x_freq_table[NUM_PXA27x_FREQS+1];
extern unsigned get_clk_frequency_khz(int info);
#ifdef CONFIG_REGULATOR
static int pxa_cpufreq_change_voltage(pxa_freqs_t *pxa_freq)
{
int ret = 0;
int vmin, vmax;
if (!cpu_is_pxa27x())
return 0;
vmin = pxa_freq->vmin;
vmax = pxa_freq->vmax;
if ((vmin == -1) || (vmax == -1))
return 0;
ret = regulator_set_voltage(vcc_core, vmin, vmax);
if (ret)
pr_err("cpufreq: Failed to set vcc_core in [%dmV..%dmV]\n",
vmin, vmax);
return ret;
}
static __init void pxa_cpufreq_init_voltages(void)
{
vcc_core = regulator_get(NULL, "vcc_core");
if (IS_ERR(vcc_core)) {
pr_info("cpufreq: Didn't find vcc_core regulator\n");
vcc_core = NULL;
} else {
pr_info("cpufreq: Found vcc_core regulator\n");
}
}
#else
static int pxa_cpufreq_change_voltage(pxa_freqs_t *pxa_freq)
{
return 0;
}
static __init void pxa_cpufreq_init_voltages(void) { }
#endif
static void find_freq_tables(struct cpufreq_frequency_table **freq_table,
pxa_freqs_t **pxa_freqs)
{
if (cpu_is_pxa25x()) {
if (!pxa255_turbo_table) {
*pxa_freqs = pxa255_run_freqs;
*freq_table = pxa255_run_freq_table;
} else {
*pxa_freqs = pxa255_turbo_freqs;
*freq_table = pxa255_turbo_freq_table;
}
}
if (cpu_is_pxa27x()) {
*pxa_freqs = pxa27x_freqs;
*freq_table = pxa27x_freq_table;
}
}
static void pxa27x_guess_max_freq(void)
{
if (!pxa27x_maxfreq) {
pxa27x_maxfreq = 416000;
printk(KERN_INFO "PXA CPU 27x max frequency not defined "
"(pxa27x_maxfreq), assuming pxa271 with %dkHz maxfreq\n",
pxa27x_maxfreq);
} else {
pxa27x_maxfreq *= 1000;
}
}
static void init_sdram_rows(void)
{
uint32_t mdcnfg = __raw_readl(MDCNFG);
unsigned int drac2 = 0, drac0 = 0;
if (mdcnfg & (MDCNFG_DE2 | MDCNFG_DE3))
drac2 = MDCNFG_DRAC2(mdcnfg);
if (mdcnfg & (MDCNFG_DE0 | MDCNFG_DE1))
drac0 = MDCNFG_DRAC0(mdcnfg);
sdram_rows = 1 << (11 + max(drac0, drac2));
}
static u32 mdrefr_dri(unsigned int freq)
{
u32 interval = freq * SDRAM_TREF / sdram_rows;
return (interval - (cpu_is_pxa27x() ? 31 : 0)) / 32;
}
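/*
* Worked example (illustrative, assuming sdram_rows == 4096): at a
* 99500 kHz memory clock, interval = 99500 * 64 / 4096 = 1554, so
* DRI = 1554 / 32 = 48 on pxa25x, or (1554 - 31) / 32 = 47 on pxa27x.
*/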
/* find a valid frequency point */
static int pxa_verify_policy(struct cpufreq_policy *policy)
{
struct cpufreq_frequency_table *pxa_freqs_table;
pxa_freqs_t *pxa_freqs;
int ret;
find_freq_tables(&pxa_freqs_table, &pxa_freqs);
ret = cpufreq_frequency_table_verify(policy, pxa_freqs_table);
if (freq_debug)
pr_debug("Verified CPU policy: %dKhz min to %dKhz max\n",
policy->min, policy->max);
return ret;
}
static unsigned int pxa_cpufreq_get(unsigned int cpu)
{
return get_clk_frequency_khz(0);
}
static int pxa_set_target(struct cpufreq_policy *policy,
unsigned int target_freq,
unsigned int relation)
{
struct cpufreq_frequency_table *pxa_freqs_table;
pxa_freqs_t *pxa_freq_settings;
struct cpufreq_freqs freqs;
unsigned int idx;
unsigned long flags;
unsigned int new_freq_cpu, new_freq_mem;
unsigned int unused, preset_mdrefr, postset_mdrefr, cclkcfg;
int ret = 0;
/* Get the current policy */
find_freq_tables(&pxa_freqs_table, &pxa_freq_settings);
/* Lookup the next frequency */
if (cpufreq_frequency_table_target(policy, pxa_freqs_table,
target_freq, relation, &idx)) {
return -EINVAL;
}
new_freq_cpu = pxa_freq_settings[idx].khz;
new_freq_mem = pxa_freq_settings[idx].membus;
freqs.old = policy->cur;
freqs.new = new_freq_cpu;
if (freq_debug)
pr_debug("Changing CPU frequency to %d Mhz, (SDRAM %d Mhz)\n",
freqs.new / 1000, (pxa_freq_settings[idx].div2) ?
(new_freq_mem / 2000) : (new_freq_mem / 1000));
if (vcc_core && freqs.new > freqs.old)
ret = pxa_cpufreq_change_voltage(&pxa_freq_settings[idx]);
if (ret)
return ret;
/*
* Tell everyone what we're about to do...
* you should add a notify client with any platform specific
* Vcc changing capability
*/
cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
/* Calculate the next MDREFR. If we're slowing down the SDRAM clock
* we need to preset the smaller DRI before the change. If we're
* speeding up we need to set the larger DRI value after the change.
*/
preset_mdrefr = postset_mdrefr = __raw_readl(MDREFR);
if ((preset_mdrefr & MDREFR_DRI_MASK) > mdrefr_dri(new_freq_mem)) {
preset_mdrefr = (preset_mdrefr & ~MDREFR_DRI_MASK);
preset_mdrefr |= mdrefr_dri(new_freq_mem);
}
postset_mdrefr =
(postset_mdrefr & ~MDREFR_DRI_MASK) | mdrefr_dri(new_freq_mem);
/* If we're dividing the memory clock by two for the SDRAM clock, this
* must be set prior to the change. Clearing the divide must be done
* after the change.
*/
if (pxa_freq_settings[idx].div2) {
preset_mdrefr |= MDREFR_DB2_MASK;
postset_mdrefr |= MDREFR_DB2_MASK;
} else {
postset_mdrefr &= ~MDREFR_DB2_MASK;
}
local_irq_save(flags);
/* Set the new CCCR and prepare CCLKCFG */
CCCR = pxa_freq_settings[idx].cccr;
cclkcfg = pxa_freq_settings[idx].cclkcfg;
asm volatile(" \n\
ldr r4, [%1] /* load MDREFR */ \n\
b 2f \n\
.align 5 \n\
1: \n\
str %3, [%1] /* preset the MDREFR */ \n\
mcr p14, 0, %2, c6, c0, 0 /* set CCLKCFG[FCS] */ \n\
str %4, [%1] /* postset the MDREFR */ \n\
\n\
b 3f \n\
2: b 1b \n\
3: nop \n\
"
: "=&r" (unused)
: "r" (MDREFR), "r" (cclkcfg),
"r" (preset_mdrefr), "r" (postset_mdrefr)
: "r4", "r5");
local_irq_restore(flags);
/*
* Tell everyone what we've just done...
* you should add a notify client with any platform specific
* SDRAM refresh timer adjustments
*/
cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
/*
* Even if voltage setting fails, we don't report it, as the frequency
* change succeeded. The voltage reduction is not a critical failure,
* only power savings will suffer from this.
*
* Note: if the voltage change fails and its error code is returned, a
* bug is triggered (apparently a deadlock). Should anybody find out where,
* the "return 0" should become a "return ret".
*/
if (vcc_core && freqs.new < freqs.old)
ret = pxa_cpufreq_change_voltage(&pxa_freq_settings[idx]);
return 0;
}
static int pxa_cpufreq_init(struct cpufreq_policy *policy)
{
int i;
unsigned int freq;
struct cpufreq_frequency_table *pxa255_freq_table;
pxa_freqs_t *pxa255_freqs;
/* try to guess pxa27x cpu */
if (cpu_is_pxa27x())
pxa27x_guess_max_freq();
pxa_cpufreq_init_voltages();
init_sdram_rows();
/* set default policy and cpuinfo */
policy->cpuinfo.transition_latency = 1000; /* FIXME: 1 ms, assumed */
policy->cur = get_clk_frequency_khz(0); /* current freq */
policy->min = policy->max = policy->cur;
/* Generate the pxa25x run cpufreq_frequency_table struct */
for (i = 0; i < NUM_PXA25x_RUN_FREQS; i++) {
pxa255_run_freq_table[i].frequency = pxa255_run_freqs[i].khz;
pxa255_run_freq_table[i].index = i;
}
pxa255_run_freq_table[i].frequency = CPUFREQ_TABLE_END;
/* Generate the pxa25x turbo cpufreq_frequency_table struct */
for (i = 0; i < NUM_PXA25x_TURBO_FREQS; i++) {
pxa255_turbo_freq_table[i].frequency =
pxa255_turbo_freqs[i].khz;
pxa255_turbo_freq_table[i].index = i;
}
pxa255_turbo_freq_table[i].frequency = CPUFREQ_TABLE_END;
pxa255_turbo_table = !!pxa255_turbo_table;
/* Generate the pxa27x cpufreq_frequency_table struct */
for (i = 0; i < NUM_PXA27x_FREQS; i++) {
freq = pxa27x_freqs[i].khz;
if (freq > pxa27x_maxfreq)
break;
pxa27x_freq_table[i].frequency = freq;
pxa27x_freq_table[i].index = i;
}
pxa27x_freq_table[i].index = i;
pxa27x_freq_table[i].frequency = CPUFREQ_TABLE_END;
/*
* Set the policy's minimum and maximum frequencies from the tables
* just constructed. This sets cpuinfo.min_freq/max_freq and the
* policy's min and max.
*/
if (cpu_is_pxa25x()) {
find_freq_tables(&pxa255_freq_table, &pxa255_freqs);
pr_info("PXA255 cpufreq using %s frequency table\n",
pxa255_turbo_table ? "turbo" : "run");
cpufreq_frequency_table_cpuinfo(policy, pxa255_freq_table);
}
else if (cpu_is_pxa27x())
cpufreq_frequency_table_cpuinfo(policy, pxa27x_freq_table);
printk(KERN_INFO "PXA CPU frequency change support initialized\n");
return 0;
}
static struct cpufreq_driver pxa_cpufreq_driver = {
.verify = pxa_verify_policy,
.target = pxa_set_target,
.init = pxa_cpufreq_init,
.get = pxa_cpufreq_get,
.name = "PXA2xx",
};
static int __init pxa_cpu_init(void)
{
int ret = -ENODEV;
if (cpu_is_pxa25x() || cpu_is_pxa27x())
ret = cpufreq_register_driver(&pxa_cpufreq_driver);
return ret;
}
static void __exit pxa_cpu_exit(void)
{
cpufreq_unregister_driver(&pxa_cpufreq_driver);
}
MODULE_AUTHOR("Intrinsyc Software Inc.");
MODULE_DESCRIPTION("CPU frequency changing driver for the PXA architecture");
MODULE_LICENSE("GPL");
module_init(pxa_cpu_init);
module_exit(pxa_cpu_exit);
| gpl-2.0 |
aidfarh/android_kernel_sony_apq8064 | drivers/net/wireless/rtlwifi/pci.c | 937 | 54479 | /******************************************************************************
*
* Copyright(c) 2009-2012 Realtek Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
*
* The full GNU General Public License is included in this distribution in the
* file called LICENSE.
*
* Contact Information:
* wlanfae <wlanfae@realtek.com>
* Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
* Hsinchu 300, Taiwan.
*
* Larry Finger <Larry.Finger@lwfinger.net>
*
*****************************************************************************/
#include "wifi.h"
#include "core.h"
#include "pci.h"
#include "base.h"
#include "ps.h"
#include "efuse.h"
#include <linux/export.h>
static const u16 pcibridge_vendors[PCI_BRIDGE_VENDOR_MAX] = {
PCI_VENDOR_ID_INTEL,
PCI_VENDOR_ID_ATI,
PCI_VENDOR_ID_AMD,
PCI_VENDOR_ID_SI
};
static const u8 ac_to_hwq[] = {
VO_QUEUE,
VI_QUEUE,
BE_QUEUE,
BK_QUEUE
};
static u8 _rtl_mac_to_hwqueue(struct ieee80211_hw *hw,
struct sk_buff *skb)
{
struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
__le16 fc = rtl_get_fc(skb);
u8 queue_index = skb_get_queue_mapping(skb);
if (unlikely(ieee80211_is_beacon(fc)))
return BEACON_QUEUE;
if (ieee80211_is_mgmt(fc))
return MGNT_QUEUE;
if (rtlhal->hw_type == HARDWARE_TYPE_RTL8192SE)
if (ieee80211_is_nullfunc(fc))
return HIGH_QUEUE;
return ac_to_hwq[queue_index];
}
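/*
* Worked example (illustrative): mac80211 assigns voice (AC_VO) traffic
* queue index 0, so a QoS voice data frame lands in VO_QUEUE via
* ac_to_hwq[]; beacons and management frames bypass the table entirely.
*/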
/* Update PCI dependent default settings*/
static void _rtl_pci_update_default_setting(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
u8 pcibridge_vendor = pcipriv->ndis_adapter.pcibridge_vendor;
u8 init_aspm;
ppsc->reg_rfps_level = 0;
ppsc->support_aspm = false;
/*Update PCI ASPM setting */
ppsc->const_amdpci_aspm = rtlpci->const_amdpci_aspm;
switch (rtlpci->const_pci_aspm) {
case 0:
/*No ASPM */
break;
case 1:
/*ASPM dynamically enabled/disabled. */
ppsc->reg_rfps_level |= RT_RF_LPS_LEVEL_ASPM;
break;
case 2:
/*ASPM with Clock Req dynamically enabled/disabled. */
ppsc->reg_rfps_level |= (RT_RF_LPS_LEVEL_ASPM |
RT_RF_OFF_LEVL_CLK_REQ);
break;
case 3:
/*
* Always enable ASPM and Clock Req
* from initialization to halt.
*/
ppsc->reg_rfps_level &= ~(RT_RF_LPS_LEVEL_ASPM);
ppsc->reg_rfps_level |= (RT_RF_PS_LEVEL_ALWAYS_ASPM |
RT_RF_OFF_LEVL_CLK_REQ);
break;
case 4:
/*
* Always enable ASPM without Clock Req
* from initialization to halt.
*/
ppsc->reg_rfps_level &= ~(RT_RF_LPS_LEVEL_ASPM |
RT_RF_OFF_LEVL_CLK_REQ);
ppsc->reg_rfps_level |= RT_RF_PS_LEVEL_ALWAYS_ASPM;
break;
}
ppsc->reg_rfps_level |= RT_RF_OFF_LEVL_HALT_NIC;
/*Update Radio OFF setting */
switch (rtlpci->const_hwsw_rfoff_d3) {
case 1:
if (ppsc->reg_rfps_level & RT_RF_LPS_LEVEL_ASPM)
ppsc->reg_rfps_level |= RT_RF_OFF_LEVL_ASPM;
break;
case 2:
if (ppsc->reg_rfps_level & RT_RF_LPS_LEVEL_ASPM)
ppsc->reg_rfps_level |= RT_RF_OFF_LEVL_ASPM;
ppsc->reg_rfps_level |= RT_RF_OFF_LEVL_HALT_NIC;
break;
case 3:
ppsc->reg_rfps_level |= RT_RF_OFF_LEVL_PCI_D3;
break;
}
/*Set HW definition to determine if it supports ASPM. */
switch (rtlpci->const_support_pciaspm) {
case 0:{
/*Not support ASPM. */
bool support_aspm = false;
ppsc->support_aspm = support_aspm;
break;
}
case 1:{
/*Support ASPM. */
bool support_aspm = true;
bool support_backdoor = true;
ppsc->support_aspm = support_aspm;
/*if (priv->oem_id == RT_CID_TOSHIBA &&
!priv->ndis_adapter.amd_l1_patch)
support_backdoor = false; */
ppsc->support_backdoor = support_backdoor;
break;
}
case 2:
/*ASPM value set by chipset. */
if (pcibridge_vendor == PCI_BRIDGE_VENDOR_INTEL) {
bool support_aspm = true;
ppsc->support_aspm = support_aspm;
}
break;
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
"switch case not processed\n");
break;
}
/* Toshiba ASPM issue: Toshiba platforms set ASPM themselves,
* so we should not set ASPM in the driver */
pci_read_config_byte(rtlpci->pdev, 0x80, &init_aspm);
if (rtlpriv->rtlhal.hw_type == HARDWARE_TYPE_RTL8192SE &&
init_aspm == 0x43)
ppsc->support_aspm = false;
}
static bool _rtl_pci_platform_switch_device_pci_aspm(
struct ieee80211_hw *hw,
u8 value)
{
struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
if (rtlhal->hw_type != HARDWARE_TYPE_RTL8192SE)
value |= 0x40;
pci_write_config_byte(rtlpci->pdev, 0x80, value);
return false;
}
/* Write 0x01 to enable the clock request; write 0x0 to disable it. */
static void _rtl_pci_switch_clk_req(struct ieee80211_hw *hw, u8 value)
{
struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
pci_write_config_byte(rtlpci->pdev, 0x81, value);
if (rtlhal->hw_type == HARDWARE_TYPE_RTL8192SE)
udelay(100);
}
/*Disable RTL8192SE ASPM & Disable Pci Bridge ASPM*/
static void rtl_pci_disable_aspm(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
u8 pcibridge_vendor = pcipriv->ndis_adapter.pcibridge_vendor;
u8 num4bytes = pcipriv->ndis_adapter.num4bytes;
/*Retrieve original configuration settings. */
u8 linkctrl_reg = pcipriv->ndis_adapter.linkctrl_reg;
u16 pcibridge_linkctrlreg = pcipriv->ndis_adapter.
pcibridge_linkctrlreg;
u16 aspmlevel = 0;
u8 tmp_u1b = 0;
if (!ppsc->support_aspm)
return;
if (pcibridge_vendor == PCI_BRIDGE_VENDOR_UNKNOWN) {
RT_TRACE(rtlpriv, COMP_POWER, DBG_TRACE,
"PCI(Bridge) UNKNOWN\n");
return;
}
if (ppsc->reg_rfps_level & RT_RF_OFF_LEVL_CLK_REQ) {
RT_CLEAR_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_CLK_REQ);
_rtl_pci_switch_clk_req(hw, 0x0);
}
/* dummy config read: ensures the device will be in L0 state after an I/O */
pci_read_config_byte(rtlpci->pdev, 0x80, &tmp_u1b);
/*Set corresponding value. */
aspmlevel |= BIT(0) | BIT(1);
linkctrl_reg &= ~aspmlevel;
pcibridge_linkctrlreg &= ~(BIT(0) | BIT(1));
_rtl_pci_platform_switch_device_pci_aspm(hw, linkctrl_reg);
udelay(50);
/*4 Disable Pci Bridge ASPM */
pci_write_config_byte(rtlpci->pdev, (num4bytes << 2),
pcibridge_linkctrlreg);
udelay(50);
}
/*
*Enable RTL8192SE ASPM and PCI bridge ASPM for power saving.
*We must follow the sequence: enable the RTL8192SE ASPM first
*and only then the PCI bridge ASPM, or the system will
*bluescreen.
*/
static void rtl_pci_enable_aspm(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
u8 pcibridge_busnum = pcipriv->ndis_adapter.pcibridge_busnum;
u8 pcibridge_devnum = pcipriv->ndis_adapter.pcibridge_devnum;
u8 pcibridge_funcnum = pcipriv->ndis_adapter.pcibridge_funcnum;
u8 pcibridge_vendor = pcipriv->ndis_adapter.pcibridge_vendor;
u8 num4bytes = pcipriv->ndis_adapter.num4bytes;
u16 aspmlevel;
u8 u_pcibridge_aspmsetting;
u8 u_device_aspmsetting;
if (!ppsc->support_aspm)
return;
if (pcibridge_vendor == PCI_BRIDGE_VENDOR_UNKNOWN) {
RT_TRACE(rtlpriv, COMP_POWER, DBG_TRACE,
"PCI(Bridge) UNKNOWN\n");
return;
}
/*4 Enable Pci Bridge ASPM */
u_pcibridge_aspmsetting =
pcipriv->ndis_adapter.pcibridge_linkctrlreg |
rtlpci->const_hostpci_aspm_setting;
if (pcibridge_vendor == PCI_BRIDGE_VENDOR_INTEL)
u_pcibridge_aspmsetting &= ~BIT(0);
pci_write_config_byte(rtlpci->pdev, (num4bytes << 2),
u_pcibridge_aspmsetting);
RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
"PlatformEnableASPM():PciBridge busnumber[%x], DevNumbe[%x], funcnumber[%x], Write reg[%x] = %x\n",
pcibridge_busnum, pcibridge_devnum, pcibridge_funcnum,
(pcipriv->ndis_adapter.pcibridge_pciehdr_offset + 0x10),
u_pcibridge_aspmsetting);
udelay(50);
/*Get ASPM level (with/without Clock Req) */
aspmlevel = rtlpci->const_devicepci_aspm_setting;
u_device_aspmsetting = pcipriv->ndis_adapter.linkctrl_reg;
/*_rtl_pci_platform_switch_device_pci_aspm(dev,*/
/*(priv->ndis_adapter.linkctrl_reg | ASPMLevel)); */
u_device_aspmsetting |= aspmlevel;
_rtl_pci_platform_switch_device_pci_aspm(hw, u_device_aspmsetting);
if (ppsc->reg_rfps_level & RT_RF_OFF_LEVL_CLK_REQ) {
_rtl_pci_switch_clk_req(hw, (ppsc->reg_rfps_level &
RT_RF_OFF_LEVL_CLK_REQ) ? 1 : 0);
RT_SET_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_CLK_REQ);
}
udelay(100);
}
static bool rtl_pci_get_amd_l1_patch(struct ieee80211_hw *hw)
{
struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
bool status = false;
u8 offset_e0;
unsigned offset_e4;
pci_write_config_byte(rtlpci->pdev, 0xe0, 0xa0);
pci_read_config_byte(rtlpci->pdev, 0xe0, &offset_e0);
if (offset_e0 == 0xA0) {
pci_read_config_dword(rtlpci->pdev, 0xe4, &offset_e4);
if (offset_e4 & BIT(23))
status = true;
}
return status;
}
static void rtl_pci_get_linkcontrol_field(struct ieee80211_hw *hw)
{
struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
struct rtl_pci *rtlpci = rtl_pcidev(pcipriv);
u8 capabilityoffset = pcipriv->ndis_adapter.pcibridge_pciehdr_offset;
u8 linkctrl_reg;
u8 num4bbytes;
num4bbytes = (capabilityoffset + 0x10) / 4;
/*Read Link Control Register */
pci_read_config_byte(rtlpci->pdev, (num4bbytes << 2), &linkctrl_reg);
pcipriv->ndis_adapter.pcibridge_linkctrlreg = linkctrl_reg;
}
static void rtl_pci_parse_configuration(struct pci_dev *pdev,
struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
u8 tmp;
int pos;
u8 linkctrl_reg;
/*Link Control Register */
pos = pci_pcie_cap(pdev);
pci_read_config_byte(pdev, pos + PCI_EXP_LNKCTL, &linkctrl_reg);
pcipriv->ndis_adapter.linkctrl_reg = linkctrl_reg;
RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, "Link Control Register =%x\n",
pcipriv->ndis_adapter.linkctrl_reg);
pci_read_config_byte(pdev, 0x98, &tmp);
tmp |= BIT(4);
pci_write_config_byte(pdev, 0x98, tmp);
tmp = 0x17;
pci_write_config_byte(pdev, 0x70f, tmp);
}
static void rtl_pci_init_aspm(struct ieee80211_hw *hw)
{
struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
_rtl_pci_update_default_setting(hw);
if (ppsc->reg_rfps_level & RT_RF_PS_LEVEL_ALWAYS_ASPM) {
/*Always enable ASPM & Clock Req. */
rtl_pci_enable_aspm(hw);
RT_SET_PS_LEVEL(ppsc, RT_RF_PS_LEVEL_ALWAYS_ASPM);
}
}
static void _rtl_pci_io_handler_init(struct device *dev,
struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
rtlpriv->io.dev = dev;
rtlpriv->io.write8_async = pci_write8_async;
rtlpriv->io.write16_async = pci_write16_async;
rtlpriv->io.write32_async = pci_write32_async;
rtlpriv->io.read8_sync = pci_read8_sync;
rtlpriv->io.read16_sync = pci_read16_sync;
rtlpriv->io.read32_sync = pci_read32_sync;
}
static void _rtl_pci_io_handler_release(struct ieee80211_hw *hw)
{
}
static bool _rtl_update_earlymode_info(struct ieee80211_hw *hw,
struct sk_buff *skb, struct rtl_tcb_desc *tcb_desc, u8 tid)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
u8 additionlen = FCS_LEN;
struct sk_buff *next_skb;
/* here open is 4, wep/tkip is 8, aes is 12*/
if (info->control.hw_key)
additionlen += info->control.hw_key->icv_len;
/* The most skb num is 6 */
tcb_desc->empkt_num = 0;
spin_lock_bh(&rtlpriv->locks.waitq_lock);
skb_queue_walk(&rtlpriv->mac80211.skb_waitq[tid], next_skb) {
struct ieee80211_tx_info *next_info;
next_info = IEEE80211_SKB_CB(next_skb);
if (next_info->flags & IEEE80211_TX_CTL_AMPDU) {
tcb_desc->empkt_len[tcb_desc->empkt_num] =
next_skb->len + additionlen;
tcb_desc->empkt_num++;
} else {
break;
}
if (skb_queue_is_last(&rtlpriv->mac80211.skb_waitq[tid],
next_skb))
break;
if (tcb_desc->empkt_num >= 5)
break;
}
spin_unlock_bh(&rtlpriv->locks.waitq_lock);
return true;
}
/* just for early mode now */
static void _rtl_pci_tx_chk_waitq(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
struct sk_buff *skb = NULL;
struct ieee80211_tx_info *info = NULL;
int tid;
if (!rtlpriv->rtlhal.earlymode_enable)
return;
/* we just use early mode (em) for BE/BK/VI/VO */
for (tid = 7; tid >= 0; tid--) {
u8 hw_queue = ac_to_hwq[rtl_tid_to_ac(hw, tid)];
struct rtl8192_tx_ring *ring = &rtlpci->tx_ring[hw_queue];
while (!mac->act_scanning &&
rtlpriv->psc.rfpwr_state == ERFON) {
struct rtl_tcb_desc tcb_desc;
memset(&tcb_desc, 0, sizeof(struct rtl_tcb_desc));
spin_lock_bh(&rtlpriv->locks.waitq_lock);
if (!skb_queue_empty(&mac->skb_waitq[tid]) &&
(ring->entries - skb_queue_len(&ring->queue) > 5)) {
skb = skb_dequeue(&mac->skb_waitq[tid]);
} else {
spin_unlock_bh(&rtlpriv->locks.waitq_lock);
break;
}
spin_unlock_bh(&rtlpriv->locks.waitq_lock);
/* Some frames can't use early mode, e.g.
* multicast/broadcast/non-QoS data */
info = IEEE80211_SKB_CB(skb);
if (info->flags & IEEE80211_TX_CTL_AMPDU)
_rtl_update_earlymode_info(hw, skb,
&tcb_desc, tid);
rtlpriv->intf_ops->adapter_tx(hw, skb, &tcb_desc);
}
}
}
static void _rtl_pci_tx_isr(struct ieee80211_hw *hw, int prio)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
struct rtl8192_tx_ring *ring = &rtlpci->tx_ring[prio];
while (skb_queue_len(&ring->queue)) {
struct rtl_tx_desc *entry = &ring->desc[ring->idx];
struct sk_buff *skb;
struct ieee80211_tx_info *info;
__le16 fc;
u8 tid;
u8 own = (u8) rtlpriv->cfg->ops->get_desc((u8 *) entry, true,
HW_DESC_OWN);
/*
*a beacon packet will only use the first
*descriptor by default, and the OWN bit may not
*be cleared by the hardware
*/
if (own)
return;
ring->idx = (ring->idx + 1) % ring->entries;
skb = __skb_dequeue(&ring->queue);
pci_unmap_single(rtlpci->pdev,
rtlpriv->cfg->ops->
get_desc((u8 *) entry, true,
HW_DESC_TXBUFF_ADDR),
skb->len, PCI_DMA_TODEVICE);
/* remove early mode header */
if (rtlpriv->rtlhal.earlymode_enable)
skb_pull(skb, EM_HDR_LEN);
RT_TRACE(rtlpriv, (COMP_INTR | COMP_SEND), DBG_TRACE,
"new ring->idx:%d, free: skb_queue_len:%d, free: seq:%x\n",
ring->idx,
skb_queue_len(&ring->queue),
*(u16 *) (skb->data + 22));
if (prio == TXCMD_QUEUE) {
dev_kfree_skb(skb);
goto tx_status_ok;
}
/* For SW LPS: only after the NULL data frame has gone out can
* we be sure the AP knows we are asleep, so we must not let
* the RF sleep before then */
fc = rtl_get_fc(skb);
if (ieee80211_is_nullfunc(fc)) {
if (ieee80211_has_pm(fc)) {
rtlpriv->mac80211.offchan_delay = true;
rtlpriv->psc.state_inap = true;
} else {
rtlpriv->psc.state_inap = false;
}
}
/* update tid tx pkt num */
tid = rtl_get_tid(skb);
if (tid <= 7)
rtlpriv->link_info.tidtx_inperiod[tid]++;
info = IEEE80211_SKB_CB(skb);
ieee80211_tx_info_clear_status(info);
info->flags |= IEEE80211_TX_STAT_ACK;
/*info->status.rates[0].count = 1; */
ieee80211_tx_status_irqsafe(hw, skb);
if ((ring->entries - skb_queue_len(&ring->queue))
== 2) {
RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD,
"more desc left, wake skb_queue@%d, ring->idx = %d, skb_queue_len = 0x%d\n",
prio, ring->idx,
skb_queue_len(&ring->queue));
ieee80211_wake_queue(hw,
skb_get_queue_mapping
(skb));
}
tx_status_ok:
skb = NULL;
}
if (((rtlpriv->link_info.num_rx_inperiod +
rtlpriv->link_info.num_tx_inperiod) > 8) ||
(rtlpriv->link_info.num_rx_inperiod > 2)) {
schedule_work(&rtlpriv->works.lps_leave_work);
}
}
static void _rtl_receive_one(struct ieee80211_hw *hw, struct sk_buff *skb,
struct ieee80211_rx_status rx_status)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct ieee80211_hdr *hdr = rtl_get_hdr(skb);
__le16 fc = rtl_get_fc(skb);
bool unicast = false;
struct sk_buff *uskb = NULL;
u8 *pdata;
memcpy(IEEE80211_SKB_RXCB(skb), &rx_status, sizeof(rx_status));
if (is_broadcast_ether_addr(hdr->addr1)) {
;/*TODO*/
} else if (is_multicast_ether_addr(hdr->addr1)) {
;/*TODO*/
} else {
unicast = true;
rtlpriv->stats.rxbytesunicast += skb->len;
}
rtl_is_special_data(hw, skb, false);
if (ieee80211_is_data(fc)) {
rtlpriv->cfg->ops->led_control(hw, LED_CTL_RX);
if (unicast)
rtlpriv->link_info.num_rx_inperiod++;
}
/* for sw lps */
rtl_swlps_beacon(hw, (void *)skb->data, skb->len);
rtl_recognize_peer(hw, (void *)skb->data, skb->len);
if ((rtlpriv->mac80211.opmode == NL80211_IFTYPE_AP) &&
(rtlpriv->rtlhal.current_bandtype == BAND_ON_2_4G) &&
(ieee80211_is_beacon(fc) || ieee80211_is_probe_resp(fc)))
return;
if (unlikely(!rtl_action_proc(hw, skb, false)))
return;
uskb = dev_alloc_skb(skb->len + 128);
if (!uskb)
return; /* exit if allocation failed */
memcpy(IEEE80211_SKB_RXCB(uskb), &rx_status, sizeof(rx_status));
pdata = (u8 *)skb_put(uskb, skb->len);
memcpy(pdata, skb->data, skb->len);
ieee80211_rx_irqsafe(hw, uskb);
}
static void _rtl_pci_rx_interrupt(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
int rx_queue_idx = RTL_PCI_RX_MPDU_QUEUE;
struct ieee80211_rx_status rx_status = { 0 };
unsigned int count = rtlpci->rxringcount;
u8 own;
u8 tmp_one;
u32 bufferaddress;
struct rtl_stats stats = {
.signal = 0,
.noise = -98,
.rate = 0,
};
int index = rtlpci->rx_ring[rx_queue_idx].idx;
if (rtlpci->driver_is_goingto_unload)
return;
/*RX NORMAL PKT */
while (count--) {
/*rx descriptor */
struct rtl_rx_desc *pdesc = &rtlpci->rx_ring[rx_queue_idx].desc[
index];
/*rx pkt */
struct sk_buff *skb = rtlpci->rx_ring[rx_queue_idx].rx_buf[
index];
struct sk_buff *new_skb = NULL;
own = (u8) rtlpriv->cfg->ops->get_desc((u8 *) pdesc,
false, HW_DESC_OWN);
/*wait for the data to be filled in by hardware */
if (own)
break;
rtlpriv->cfg->ops->query_rx_desc(hw, &stats,
&rx_status,
(u8 *) pdesc, skb);
if (stats.crc || stats.hwerror)
goto done;
new_skb = dev_alloc_skb(rtlpci->rxbuffersize);
if (unlikely(!new_skb)) {
RT_TRACE(rtlpriv, (COMP_INTR | COMP_RECV), DBG_DMESG,
"can't alloc skb for rx\n");
goto done;
}
pci_unmap_single(rtlpci->pdev,
*((dma_addr_t *) skb->cb),
rtlpci->rxbuffersize,
PCI_DMA_FROMDEVICE);
skb_put(skb, rtlpriv->cfg->ops->get_desc((u8 *) pdesc, false,
HW_DESC_RXPKT_LEN));
skb_reserve(skb, stats.rx_drvinfo_size + stats.rx_bufshift);
/*
* NOTICE: do not trim the FCS here; mac80211 already
* does this itself. If it were also done here, DHCP on
* secured connections would fail:
* skb_trim(skb, skb->len - 4);
*/
_rtl_receive_one(hw, skb, rx_status);
if (((rtlpriv->link_info.num_rx_inperiod +
rtlpriv->link_info.num_tx_inperiod) > 8) ||
(rtlpriv->link_info.num_rx_inperiod > 2)) {
schedule_work(&rtlpriv->works.lps_leave_work);
}
dev_kfree_skb_any(skb);
skb = new_skb;
rtlpci->rx_ring[rx_queue_idx].rx_buf[index] = skb;
*((dma_addr_t *) skb->cb) =
pci_map_single(rtlpci->pdev, skb_tail_pointer(skb),
rtlpci->rxbuffersize,
PCI_DMA_FROMDEVICE);
done:
bufferaddress = (*((dma_addr_t *)skb->cb));
tmp_one = 1;
rtlpriv->cfg->ops->set_desc((u8 *) pdesc, false,
HW_DESC_RXBUFF_ADDR,
(u8 *)&bufferaddress);
rtlpriv->cfg->ops->set_desc((u8 *)pdesc, false,
HW_DESC_RXPKT_LEN,
(u8 *)&rtlpci->rxbuffersize);
if (index == rtlpci->rxringcount - 1)
rtlpriv->cfg->ops->set_desc((u8 *)pdesc, false,
HW_DESC_RXERO,
(u8 *)&tmp_one);
rtlpriv->cfg->ops->set_desc((u8 *)pdesc, false, HW_DESC_RXOWN,
(u8 *)&tmp_one);
index = (index + 1) % rtlpci->rxringcount;
}
rtlpci->rx_ring[rx_queue_idx].idx = index;
}
static irqreturn_t _rtl_pci_interrupt(int irq, void *dev_id)
{
struct ieee80211_hw *hw = dev_id;
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
unsigned long flags;
u32 inta = 0;
u32 intb = 0;
irqreturn_t ret = IRQ_HANDLED;
spin_lock_irqsave(&rtlpriv->locks.irq_th_lock, flags);
/*read ISR: 4/8bytes */
rtlpriv->cfg->ops->interrupt_recognized(hw, &inta, &intb);
/*Shared IRQ or HW disappeared */
if (!inta || inta == 0xffff) {
ret = IRQ_NONE;
goto done;
}
/*<1> beacon related */
if (inta & rtlpriv->cfg->maps[RTL_IMR_TBDOK]) {
RT_TRACE(rtlpriv, COMP_INTR, DBG_TRACE,
"beacon ok interrupt!\n");
}
if (unlikely(inta & rtlpriv->cfg->maps[RTL_IMR_TBDER])) {
RT_TRACE(rtlpriv, COMP_INTR, DBG_TRACE,
"beacon err interrupt!\n");
}
if (inta & rtlpriv->cfg->maps[RTL_IMR_BDOK]) {
RT_TRACE(rtlpriv, COMP_INTR, DBG_TRACE, "beacon interrupt!\n");
}
if (inta & rtlpriv->cfg->maps[RTL_IMR_BcnInt]) {
RT_TRACE(rtlpriv, COMP_INTR, DBG_TRACE,
"prepare beacon for interrupt!\n");
tasklet_schedule(&rtlpriv->works.irq_prepare_bcn_tasklet);
}
/*<3> Tx related */
if (unlikely(inta & rtlpriv->cfg->maps[RTL_IMR_TXFOVW]))
RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING, "IMR_TXFOVW!\n");
if (inta & rtlpriv->cfg->maps[RTL_IMR_MGNTDOK]) {
RT_TRACE(rtlpriv, COMP_INTR, DBG_TRACE,
"Manage ok interrupt!\n");
_rtl_pci_tx_isr(hw, MGNT_QUEUE);
}
if (inta & rtlpriv->cfg->maps[RTL_IMR_HIGHDOK]) {
RT_TRACE(rtlpriv, COMP_INTR, DBG_TRACE,
"HIGH_QUEUE ok interrupt!\n");
_rtl_pci_tx_isr(hw, HIGH_QUEUE);
}
if (inta & rtlpriv->cfg->maps[RTL_IMR_BKDOK]) {
rtlpriv->link_info.num_tx_inperiod++;
RT_TRACE(rtlpriv, COMP_INTR, DBG_TRACE,
"BK Tx OK interrupt!\n");
_rtl_pci_tx_isr(hw, BK_QUEUE);
}
if (inta & rtlpriv->cfg->maps[RTL_IMR_BEDOK]) {
rtlpriv->link_info.num_tx_inperiod++;
RT_TRACE(rtlpriv, COMP_INTR, DBG_TRACE,
"BE TX OK interrupt!\n");
_rtl_pci_tx_isr(hw, BE_QUEUE);
}
if (inta & rtlpriv->cfg->maps[RTL_IMR_VIDOK]) {
rtlpriv->link_info.num_tx_inperiod++;
RT_TRACE(rtlpriv, COMP_INTR, DBG_TRACE,
"VI TX OK interrupt!\n");
_rtl_pci_tx_isr(hw, VI_QUEUE);
}
if (inta & rtlpriv->cfg->maps[RTL_IMR_VODOK]) {
rtlpriv->link_info.num_tx_inperiod++;
RT_TRACE(rtlpriv, COMP_INTR, DBG_TRACE,
"Vo TX OK interrupt!\n");
_rtl_pci_tx_isr(hw, VO_QUEUE);
}
if (rtlhal->hw_type == HARDWARE_TYPE_RTL8192SE) {
if (inta & rtlpriv->cfg->maps[RTL_IMR_COMDOK]) {
rtlpriv->link_info.num_tx_inperiod++;
RT_TRACE(rtlpriv, COMP_INTR, DBG_TRACE,
"CMD TX OK interrupt!\n");
_rtl_pci_tx_isr(hw, TXCMD_QUEUE);
}
}
/*<2> Rx related */
if (inta & rtlpriv->cfg->maps[RTL_IMR_ROK]) {
RT_TRACE(rtlpriv, COMP_INTR, DBG_TRACE, "Rx ok interrupt!\n");
_rtl_pci_rx_interrupt(hw);
}
if (unlikely(inta & rtlpriv->cfg->maps[RTL_IMR_RDU])) {
RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
"rx descriptor unavailable!\n");
_rtl_pci_rx_interrupt(hw);
}
if (unlikely(inta & rtlpriv->cfg->maps[RTL_IMR_RXFOVW])) {
RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING, "rx overflow !\n");
_rtl_pci_rx_interrupt(hw);
}
if (rtlpriv->rtlhal.earlymode_enable)
tasklet_schedule(&rtlpriv->works.irq_tasklet);
done:
spin_unlock_irqrestore(&rtlpriv->locks.irq_th_lock, flags);
return ret;
}
static void _rtl_pci_irq_tasklet(struct ieee80211_hw *hw)
{
_rtl_pci_tx_chk_waitq(hw);
}
static void _rtl_pci_prepare_bcn_tasklet(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
struct rtl8192_tx_ring *ring = NULL;
struct ieee80211_hdr *hdr = NULL;
struct ieee80211_tx_info *info = NULL;
struct sk_buff *pskb = NULL;
struct rtl_tx_desc *pdesc = NULL;
struct rtl_tcb_desc tcb_desc;
u8 temp_one = 1;
memset(&tcb_desc, 0, sizeof(struct rtl_tcb_desc));
ring = &rtlpci->tx_ring[BEACON_QUEUE];
pskb = __skb_dequeue(&ring->queue);
if (pskb) {
struct rtl_tx_desc *entry = &ring->desc[ring->idx];
pci_unmap_single(rtlpci->pdev, rtlpriv->cfg->ops->get_desc(
(u8 *) entry, true, HW_DESC_TXBUFF_ADDR),
pskb->len, PCI_DMA_TODEVICE);
kfree_skb(pskb);
}
/*NB: the beacon data buffer must be 32-bit aligned. */
pskb = ieee80211_beacon_get(hw, mac->vif);
if (pskb == NULL)
return;
hdr = rtl_get_hdr(pskb);
info = IEEE80211_SKB_CB(pskb);
pdesc = &ring->desc[0];
rtlpriv->cfg->ops->fill_tx_desc(hw, hdr, (u8 *) pdesc,
info, pskb, BEACON_QUEUE, &tcb_desc);
__skb_queue_tail(&ring->queue, pskb);
rtlpriv->cfg->ops->set_desc((u8 *) pdesc, true, HW_DESC_OWN,
(u8 *)&temp_one);
return;
}
static void rtl_lps_leave_work_callback(struct work_struct *work)
{
struct rtl_works *rtlworks =
container_of(work, struct rtl_works, lps_leave_work);
struct ieee80211_hw *hw = rtlworks->hw;
rtl_lps_leave(hw);
}
static void _rtl_pci_init_trx_var(struct ieee80211_hw *hw)
{
struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
u8 i;
for (i = 0; i < RTL_PCI_MAX_TX_QUEUE_COUNT; i++)
rtlpci->txringcount[i] = RT_TXDESC_NUM;
/*
*we allocate only 2 descriptors for the beacon queue,
*because the hw beacon needs just the first descriptor.
*/
rtlpci->txringcount[BEACON_QUEUE] = 2;
/*
*the BE queue needs more descriptors for performance;
*otherwise we may run out of tx descriptors, which can
*cause mac80211 memory leakage.
*/
rtlpci->txringcount[BE_QUEUE] = RT_TXDESC_NUM_BE_QUEUE;
rtlpci->rxbuffersize = 9100; /*2048/1024; */
rtlpci->rxringcount = RTL_PCI_MAX_RX_COUNT; /*64; */
}
static void _rtl_pci_init_struct(struct ieee80211_hw *hw,
struct pci_dev *pdev)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
rtlpci->up_first_time = true;
rtlpci->being_init_adapter = false;
rtlhal->hw = hw;
rtlpci->pdev = pdev;
/*Tx/Rx related var */
_rtl_pci_init_trx_var(hw);
/*IBSS*/
mac->beacon_interval = 100;
/*AMPDU*/
mac->min_space_cfg = 0;
mac->max_mss_density = 0;
/*set sane AMPDU defaults */
mac->current_ampdu_density = 7;
mac->current_ampdu_factor = 3;
/*QOS*/
rtlpci->acm_method = eAcmWay2_SW;
/*task */
tasklet_init(&rtlpriv->works.irq_tasklet,
(void (*)(unsigned long))_rtl_pci_irq_tasklet,
(unsigned long)hw);
tasklet_init(&rtlpriv->works.irq_prepare_bcn_tasklet,
(void (*)(unsigned long))_rtl_pci_prepare_bcn_tasklet,
(unsigned long)hw);
INIT_WORK(&rtlpriv->works.lps_leave_work, rtl_lps_leave_work_callback);
}
static int _rtl_pci_init_tx_ring(struct ieee80211_hw *hw,
unsigned int prio, unsigned int entries)
{
struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_tx_desc *ring;
dma_addr_t dma;
u32 nextdescaddress;
int i;
ring = pci_alloc_consistent(rtlpci->pdev,
sizeof(*ring) * entries, &dma);
if (!ring || (unsigned long)ring & 0xFF) {
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
"Cannot allocate TX ring (prio = %d)\n", prio);
return -ENOMEM;
}
memset(ring, 0, sizeof(*ring) * entries);
rtlpci->tx_ring[prio].desc = ring;
rtlpci->tx_ring[prio].dma = dma;
rtlpci->tx_ring[prio].idx = 0;
rtlpci->tx_ring[prio].entries = entries;
skb_queue_head_init(&rtlpci->tx_ring[prio].queue);
RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "queue:%d, ring_addr:%p\n",
prio, ring);
for (i = 0; i < entries; i++) {
nextdescaddress = (u32) dma +
((i + 1) % entries) *
sizeof(*ring);
rtlpriv->cfg->ops->set_desc((u8 *)&(ring[i]),
true, HW_DESC_TX_NEXTDESC_ADDR,
(u8 *)&nextdescaddress);
}
return 0;
}
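/*
* Worked example (illustrative): with entries == 2 (the beacon queue),
* desc[0]'s next-descriptor pointer is dma + sizeof(*ring) and desc[1]'s
* is dma + ((1 + 1) % 2) * sizeof(*ring) == dma, which closes the ring.
*/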
static int _rtl_pci_init_rx_ring(struct ieee80211_hw *hw)
{
struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_rx_desc *entry = NULL;
int i, rx_queue_idx;
u8 tmp_one = 1;
/*
*rx_queue_idx 0:RX_MPDU_QUEUE
*rx_queue_idx 1:RX_CMD_QUEUE
*/
for (rx_queue_idx = 0; rx_queue_idx < RTL_PCI_MAX_RX_QUEUE;
rx_queue_idx++) {
rtlpci->rx_ring[rx_queue_idx].desc =
pci_alloc_consistent(rtlpci->pdev,
sizeof(*rtlpci->rx_ring[rx_queue_idx].
desc) * rtlpci->rxringcount,
&rtlpci->rx_ring[rx_queue_idx].dma);
if (!rtlpci->rx_ring[rx_queue_idx].desc ||
(unsigned long)rtlpci->rx_ring[rx_queue_idx].desc & 0xFF) {
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
"Cannot allocate RX ring\n");
return -ENOMEM;
}
memset(rtlpci->rx_ring[rx_queue_idx].desc, 0,
sizeof(*rtlpci->rx_ring[rx_queue_idx].desc) *
rtlpci->rxringcount);
rtlpci->rx_ring[rx_queue_idx].idx = 0;
/* If amsdu_8k is disabled, set buffersize to 4096. This
* change will reduce memory fragmentation.
*/
if (rtlpci->rxbuffersize > 4096 &&
rtlpriv->rtlhal.disable_amsdu_8k)
rtlpci->rxbuffersize = 4096;
for (i = 0; i < rtlpci->rxringcount; i++) {
struct sk_buff *skb =
dev_alloc_skb(rtlpci->rxbuffersize);
u32 bufferaddress;
if (!skb)
return 0;
entry = &rtlpci->rx_ring[rx_queue_idx].desc[i];
/*skb->dev = dev; */
rtlpci->rx_ring[rx_queue_idx].rx_buf[i] = skb;
/*
*just set skb->cb to mapping addr
*for pci_unmap_single use
*/
*((dma_addr_t *) skb->cb) =
pci_map_single(rtlpci->pdev, skb_tail_pointer(skb),
rtlpci->rxbuffersize,
PCI_DMA_FROMDEVICE);
bufferaddress = (*((dma_addr_t *)skb->cb));
rtlpriv->cfg->ops->set_desc((u8 *)entry, false,
HW_DESC_RXBUFF_ADDR,
(u8 *)&bufferaddress);
rtlpriv->cfg->ops->set_desc((u8 *)entry, false,
HW_DESC_RXPKT_LEN,
(u8 *)&rtlpci->
rxbuffersize);
rtlpriv->cfg->ops->set_desc((u8 *) entry, false,
HW_DESC_RXOWN,
(u8 *)&tmp_one);
}
rtlpriv->cfg->ops->set_desc((u8 *) entry, false,
HW_DESC_RXERO, (u8 *)&tmp_one);
}
return 0;
}
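/*
 * Illustrative sketch (not part of the original driver): the RX setup
 * above stashes the DMA handle in skb->cb so the teardown path can
 * recover it for pci_unmap_single() without a separate bookkeeping
 * array. The bare pattern, with hypothetical names:
 */
#if 0
static void stash_dma_example(struct pci_dev *pdev, struct sk_buff *skb,
			      int len)
{
	dma_addr_t dma = pci_map_single(pdev, skb_tail_pointer(skb),
					len, PCI_DMA_FROMDEVICE);

	/* skb->cb is 48 bytes of per-skb scratch space */
	*((dma_addr_t *)skb->cb) = dma;
	/* ... later, on teardown ... */
	pci_unmap_single(pdev, *((dma_addr_t *)skb->cb),
			 len, PCI_DMA_FROMDEVICE);
}
#endif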
static void _rtl_pci_free_tx_ring(struct ieee80211_hw *hw,
unsigned int prio)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
struct rtl8192_tx_ring *ring = &rtlpci->tx_ring[prio];
while (skb_queue_len(&ring->queue)) {
struct rtl_tx_desc *entry = &ring->desc[ring->idx];
struct sk_buff *skb = __skb_dequeue(&ring->queue);
pci_unmap_single(rtlpci->pdev,
rtlpriv->cfg->
ops->get_desc((u8 *) entry, true,
HW_DESC_TXBUFF_ADDR),
skb->len, PCI_DMA_TODEVICE);
kfree_skb(skb);
ring->idx = (ring->idx + 1) % ring->entries;
}
if (ring->desc) {
pci_free_consistent(rtlpci->pdev,
sizeof(*ring->desc) * ring->entries,
ring->desc, ring->dma);
ring->desc = NULL;
}
}
static void _rtl_pci_free_rx_ring(struct rtl_pci *rtlpci)
{
int i, rx_queue_idx;
/*rx_queue_idx 0:RX_MPDU_QUEUE */
/*rx_queue_idx 1:RX_CMD_QUEUE */
for (rx_queue_idx = 0; rx_queue_idx < RTL_PCI_MAX_RX_QUEUE;
rx_queue_idx++) {
for (i = 0; i < rtlpci->rxringcount; i++) {
struct sk_buff *skb =
rtlpci->rx_ring[rx_queue_idx].rx_buf[i];
if (!skb)
continue;
pci_unmap_single(rtlpci->pdev,
*((dma_addr_t *) skb->cb),
rtlpci->rxbuffersize,
PCI_DMA_FROMDEVICE);
kfree_skb(skb);
}
if (rtlpci->rx_ring[rx_queue_idx].desc) {
pci_free_consistent(rtlpci->pdev,
sizeof(*rtlpci->rx_ring[rx_queue_idx].
desc) * rtlpci->rxringcount,
rtlpci->rx_ring[rx_queue_idx].desc,
rtlpci->rx_ring[rx_queue_idx].dma);
rtlpci->rx_ring[rx_queue_idx].desc = NULL;
}
}
}
static int _rtl_pci_init_trx_ring(struct ieee80211_hw *hw)
{
struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
int ret;
int i;
ret = _rtl_pci_init_rx_ring(hw);
if (ret)
return ret;
for (i = 0; i < RTL_PCI_MAX_TX_QUEUE_COUNT; i++) {
ret = _rtl_pci_init_tx_ring(hw, i,
rtlpci->txringcount[i]);
if (ret)
goto err_free_rings;
}
return 0;
err_free_rings:
_rtl_pci_free_rx_ring(rtlpci);
for (i = 0; i < RTL_PCI_MAX_TX_QUEUE_COUNT; i++)
if (rtlpci->tx_ring[i].desc)
_rtl_pci_free_tx_ring(hw, i);
return ret;
}
static int _rtl_pci_deinit_trx_ring(struct ieee80211_hw *hw)
{
struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
u32 i;
/*free rx rings */
_rtl_pci_free_rx_ring(rtlpci);
/*free tx rings */
for (i = 0; i < RTL_PCI_MAX_TX_QUEUE_COUNT; i++)
_rtl_pci_free_tx_ring(hw, i);
return 0;
}
int rtl_pci_reset_trx_ring(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
int i, rx_queue_idx;
unsigned long flags;
u8 tmp_one = 1;
/*rx_queue_idx 0:RX_MPDU_QUEUE */
/*rx_queue_idx 1:RX_CMD_QUEUE */
for (rx_queue_idx = 0; rx_queue_idx < RTL_PCI_MAX_RX_QUEUE;
rx_queue_idx++) {
/*
 * force the rx_ring[RX_MPDU_QUEUE/RX_CMD_QUEUE].idx
 * to the first one
 */
if (rtlpci->rx_ring[rx_queue_idx].desc) {
struct rtl_rx_desc *entry = NULL;
for (i = 0; i < rtlpci->rxringcount; i++) {
entry = &rtlpci->rx_ring[rx_queue_idx].desc[i];
rtlpriv->cfg->ops->set_desc((u8 *) entry,
false,
HW_DESC_RXOWN,
(u8 *)&tmp_one);
}
rtlpci->rx_ring[rx_queue_idx].idx = 0;
}
}
/*
 * After reset, release previously pending packets and
 * force the tx idx back to the first one.
 */
spin_lock_irqsave(&rtlpriv->locks.irq_th_lock, flags);
for (i = 0; i < RTL_PCI_MAX_TX_QUEUE_COUNT; i++) {
if (rtlpci->tx_ring[i].desc) {
struct rtl8192_tx_ring *ring = &rtlpci->tx_ring[i];
while (skb_queue_len(&ring->queue)) {
struct rtl_tx_desc *entry =
&ring->desc[ring->idx];
struct sk_buff *skb =
__skb_dequeue(&ring->queue);
pci_unmap_single(rtlpci->pdev,
rtlpriv->cfg->ops->
get_desc((u8 *)
entry,
true,
HW_DESC_TXBUFF_ADDR),
skb->len, PCI_DMA_TODEVICE);
kfree_skb(skb);
ring->idx = (ring->idx + 1) % ring->entries;
}
ring->idx = 0;
}
}
spin_unlock_irqrestore(&rtlpriv->locks.irq_th_lock, flags);
return 0;
}
static bool rtl_pci_tx_chk_waitq_insert(struct ieee80211_hw *hw,
struct sk_buff *skb)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
struct ieee80211_sta *sta = info->control.sta;
struct rtl_sta_info *sta_entry = NULL;
u8 tid = rtl_get_tid(skb);
if (!sta)
return false;
sta_entry = (struct rtl_sta_info *)sta->drv_priv;
if (!rtlpriv->rtlhal.earlymode_enable)
return false;
if (sta_entry->tids[tid].agg.agg_state != RTL_AGG_OPERATIONAL)
return false;
if (_rtl_mac_to_hwqueue(hw, skb) > VO_QUEUE)
return false;
if (tid > 7)
return false;
/* maybe every tid should be checked */
if (!rtlpriv->link_info.higher_busytxtraffic[tid])
return false;
spin_lock_bh(&rtlpriv->locks.waitq_lock);
skb_queue_tail(&rtlpriv->mac80211.skb_waitq[tid], skb);
spin_unlock_bh(&rtlpriv->locks.waitq_lock);
return true;
}
static int rtl_pci_tx(struct ieee80211_hw *hw, struct sk_buff *skb,
struct rtl_tcb_desc *ptcb_desc)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_sta_info *sta_entry = NULL;
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
struct ieee80211_sta *sta = info->control.sta;
struct rtl8192_tx_ring *ring;
struct rtl_tx_desc *pdesc;
u8 idx;
u8 hw_queue = _rtl_mac_to_hwqueue(hw, skb);
unsigned long flags;
struct ieee80211_hdr *hdr = rtl_get_hdr(skb);
__le16 fc = rtl_get_fc(skb);
u8 *pda_addr = hdr->addr1;
struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
/*ssn */
u8 tid = 0;
u16 seq_number = 0;
u8 own;
u8 temp_one = 1;
if (ieee80211_is_auth(fc)) {
RT_TRACE(rtlpriv, COMP_SEND, DBG_DMESG, "MAC80211_LINKING\n");
rtl_ips_nic_on(hw);
}
if (rtlpriv->psc.sw_ps_enabled) {
if (ieee80211_is_data(fc) && !ieee80211_is_nullfunc(fc) &&
!ieee80211_has_pm(fc))
hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_PM);
}
rtl_action_proc(hw, skb, true);
if (is_multicast_ether_addr(pda_addr))
rtlpriv->stats.txbytesmulticast += skb->len;
else if (is_broadcast_ether_addr(pda_addr))
rtlpriv->stats.txbytesbroadcast += skb->len;
else
rtlpriv->stats.txbytesunicast += skb->len;
spin_lock_irqsave(&rtlpriv->locks.irq_th_lock, flags);
ring = &rtlpci->tx_ring[hw_queue];
if (hw_queue != BEACON_QUEUE)
idx = (ring->idx + skb_queue_len(&ring->queue)) %
ring->entries;
else
idx = 0;
pdesc = &ring->desc[idx];
own = (u8) rtlpriv->cfg->ops->get_desc((u8 *) pdesc,
true, HW_DESC_OWN);
if ((own == 1) && (hw_queue != BEACON_QUEUE)) {
RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
"No more TX desc@%d, ring->idx = %d, idx = %d, skb_queue_len = 0x%d\n",
hw_queue, ring->idx, idx,
skb_queue_len(&ring->queue));
spin_unlock_irqrestore(&rtlpriv->locks.irq_th_lock, flags);
return skb->len;
}
if (ieee80211_is_data_qos(fc)) {
tid = rtl_get_tid(skb);
if (sta) {
sta_entry = (struct rtl_sta_info *)sta->drv_priv;
seq_number = (le16_to_cpu(hdr->seq_ctrl) &
IEEE80211_SCTL_SEQ) >> 4;
seq_number += 1;
if (!ieee80211_has_morefrags(hdr->frame_control))
sta_entry->tids[tid].seq_number = seq_number;
}
}
if (ieee80211_is_data(fc))
rtlpriv->cfg->ops->led_control(hw, LED_CTL_TX);
rtlpriv->cfg->ops->fill_tx_desc(hw, hdr, (u8 *)pdesc,
info, skb, hw_queue, ptcb_desc);
__skb_queue_tail(&ring->queue, skb);
rtlpriv->cfg->ops->set_desc((u8 *)pdesc, true,
HW_DESC_OWN, (u8 *)&temp_one);
if ((ring->entries - skb_queue_len(&ring->queue)) < 2 &&
hw_queue != BEACON_QUEUE) {
RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD,
"less desc left, stop skb_queue@%d, ring->idx = %d, idx = %d, skb_queue_len = 0x%d\n",
hw_queue, ring->idx, idx,
skb_queue_len(&ring->queue));
ieee80211_stop_queue(hw, skb_get_queue_mapping(skb));
}
spin_unlock_irqrestore(&rtlpriv->locks.irq_th_lock, flags);
rtlpriv->cfg->ops->tx_polling(hw, hw_queue);
return 0;
}
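/*
 * Illustrative sketch (not part of the original driver): rtl_pci_tx()
 * finds the next free TX slot as (ring->idx + queue_len) % entries.
 * ring->idx is the oldest un-reclaimed descriptor and queue_len frames
 * sit after it, so their sum modulo the ring size is the first empty
 * slot. Standalone model with hypothetical names:
 */
#if 0
static unsigned int next_free_slot_example(unsigned int oldest,
					   unsigned int in_flight,
					   unsigned int entries)
{
	/* e.g. oldest = 6, in_flight = 3, entries = 8 -> slot 1 */
	return (oldest + in_flight) % entries;
}
#endif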
static void rtl_pci_flush(struct ieee80211_hw *hw, bool drop)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
u16 i = 0;
int queue_id;
struct rtl8192_tx_ring *ring;
for (queue_id = RTL_PCI_MAX_TX_QUEUE_COUNT - 1; queue_id >= 0;) {
u32 queue_len;
ring = &pcipriv->dev.tx_ring[queue_id];
queue_len = skb_queue_len(&ring->queue);
if (queue_len == 0 || queue_id == BEACON_QUEUE ||
queue_id == TXCMD_QUEUE) {
queue_id--;
continue;
} else {
msleep(20);
i++;
}
/* wait at most 200 * 20ms = ~4s for all queues */
if (rtlpriv->psc.rfpwr_state == ERFOFF ||
is_hal_stop(rtlhal) || i >= 200)
return;
}
}
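/*
 * Illustrative sketch (not part of the original driver): the flush
 * above is a bounded poll -- sleep in 20 ms slices and give up after
 * 200 iterations (about 4 seconds) or when the radio is off. The bare
 * pattern, with a hypothetical queue_busy() predicate:
 */
#if 0
static void bounded_flush_example(void)
{
	int i;

	for (i = 0; i < 200 && queue_busy(); i++)
		msleep(20);	/* 200 * 20 ms = ~4 s upper bound */
}
#endif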
static void rtl_pci_deinit(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
_rtl_pci_deinit_trx_ring(hw);
synchronize_irq(rtlpci->pdev->irq);
tasklet_kill(&rtlpriv->works.irq_tasklet);
cancel_work_sync(&rtlpriv->works.lps_leave_work);
flush_workqueue(rtlpriv->works.rtl_wq);
destroy_workqueue(rtlpriv->works.rtl_wq);
}
static int rtl_pci_init(struct ieee80211_hw *hw, struct pci_dev *pdev)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
int err;
_rtl_pci_init_struct(hw, pdev);
err = _rtl_pci_init_trx_ring(hw);
if (err) {
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
"tx ring initialization failed\n");
return err;
}
return 0;
}
static int rtl_pci_start(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
int err;
rtl_pci_reset_trx_ring(hw);
rtlpci->driver_is_goingto_unload = false;
err = rtlpriv->cfg->ops->hw_init(hw);
if (err) {
RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
"Failed to config hardware!\n");
return err;
}
rtlpriv->cfg->ops->enable_interrupt(hw);
RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "enable_interrupt OK\n");
rtl_init_rx_config(hw);
/* should be done after adapter start and interrupt enable */
set_hal_start(rtlhal);
RT_CLEAR_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_HALT_NIC);
rtlpci->up_first_time = false;
RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "OK\n");
return 0;
}
static void rtl_pci_stop(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
unsigned long flags;
u8 RFInProgressTimeOut = 0;
/*
 * This must be done before disabling the interrupt and
 * the adapter, and it takes effect immediately.
 */
set_hal_stop(rtlhal);
rtlpci->driver_is_goingto_unload = true;
rtlpriv->cfg->ops->disable_interrupt(hw);
cancel_work_sync(&rtlpriv->works.lps_leave_work);
spin_lock_irqsave(&rtlpriv->locks.rf_ps_lock, flags);
while (ppsc->rfchange_inprogress) {
spin_unlock_irqrestore(&rtlpriv->locks.rf_ps_lock, flags);
if (RFInProgressTimeOut > 100) {
spin_lock_irqsave(&rtlpriv->locks.rf_ps_lock, flags);
break;
}
mdelay(1);
RFInProgressTimeOut++;
spin_lock_irqsave(&rtlpriv->locks.rf_ps_lock, flags);
}
ppsc->rfchange_inprogress = true;
spin_unlock_irqrestore(&rtlpriv->locks.rf_ps_lock, flags);
rtlpriv->cfg->ops->hw_disable(hw);
/* some things are not needed if firmware not available */
if (!rtlpriv->max_fw_size)
return;
rtlpriv->cfg->ops->led_control(hw, LED_CTL_POWER_OFF);
spin_lock_irqsave(&rtlpriv->locks.rf_ps_lock, flags);
ppsc->rfchange_inprogress = false;
spin_unlock_irqrestore(&rtlpriv->locks.rf_ps_lock, flags);
rtl_pci_enable_aspm(hw);
}
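/*
 * Illustrative sketch (not part of the original driver): rtl_pci_stop()
 * waits for rfchange_inprogress with the drop-the-lock-and-retry
 * pattern, so the flag's owner can make progress while we poll.
 * Skeleton with hypothetical names:
 */
#if 0
static void wait_flag_example(spinlock_t *lock, bool *busy)
{
	unsigned long flags;
	u8 timeout = 0;

	spin_lock_irqsave(lock, flags);
	while (*busy) {
		spin_unlock_irqrestore(lock, flags);	/* let the owner run */
		if (timeout++ > 100) {
			spin_lock_irqsave(lock, flags);
			break;		/* give up after ~100 ms */
		}
		mdelay(1);
		spin_lock_irqsave(lock, flags);
	}
	spin_unlock_irqrestore(lock, flags);
}
#endif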
static bool _rtl_pci_find_adapter(struct pci_dev *pdev,
struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
struct pci_dev *bridge_pdev = pdev->bus->self;
u16 venderid;
u16 deviceid;
u8 revisionid;
u16 irqline;
u8 tmp;
pcipriv->ndis_adapter.pcibridge_vendor = PCI_BRIDGE_VENDOR_UNKNOWN;
venderid = pdev->vendor;
deviceid = pdev->device;
pci_read_config_byte(pdev, 0x8, &revisionid);
pci_read_config_word(pdev, 0x3C, &irqline);
/* PCI ID 0x10ec:0x8192 occurs for both RTL8192E, which uses
* r8192e_pci, and RTL8192SE, which uses this driver. If the
* revision ID is RTL_PCI_REVISION_ID_8192PCIE (0x01), then
* the correct driver is r8192e_pci, thus this routine should
* return false.
*/
if (deviceid == RTL_PCI_8192SE_DID &&
revisionid == RTL_PCI_REVISION_ID_8192PCIE)
return false;
if (deviceid == RTL_PCI_8192_DID ||
deviceid == RTL_PCI_0044_DID ||
deviceid == RTL_PCI_0047_DID ||
deviceid == RTL_PCI_8192SE_DID ||
deviceid == RTL_PCI_8174_DID ||
deviceid == RTL_PCI_8173_DID ||
deviceid == RTL_PCI_8172_DID ||
deviceid == RTL_PCI_8171_DID) {
switch (revisionid) {
case RTL_PCI_REVISION_ID_8192PCIE:
RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
"8192 PCI-E is found - vid/did=%x/%x\n",
venderid, deviceid);
rtlhal->hw_type = HARDWARE_TYPE_RTL8192E;
break;
case RTL_PCI_REVISION_ID_8192SE:
RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
"8192SE is found - vid/did=%x/%x\n",
venderid, deviceid);
rtlhal->hw_type = HARDWARE_TYPE_RTL8192SE;
break;
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
"Err: Unknown device - vid/did=%x/%x\n",
venderid, deviceid);
rtlhal->hw_type = HARDWARE_TYPE_RTL8192SE;
break;
}
} else if (deviceid == RTL_PCI_8192CET_DID ||
deviceid == RTL_PCI_8192CE_DID ||
deviceid == RTL_PCI_8191CE_DID ||
deviceid == RTL_PCI_8188CE_DID) {
rtlhal->hw_type = HARDWARE_TYPE_RTL8192CE;
RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
"8192C PCI-E is found - vid/did=%x/%x\n",
venderid, deviceid);
} else if (deviceid == RTL_PCI_8192DE_DID ||
deviceid == RTL_PCI_8192DE_DID2) {
rtlhal->hw_type = HARDWARE_TYPE_RTL8192DE;
RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
"8192D PCI-E is found - vid/did=%x/%x\n",
venderid, deviceid);
} else {
RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
"Err: Unknown device - vid/did=%x/%x\n",
venderid, deviceid);
rtlhal->hw_type = RTL_DEFAULT_HARDWARE_TYPE;
}
if (rtlhal->hw_type == HARDWARE_TYPE_RTL8192DE) {
if (revisionid == 0 || revisionid == 1) {
if (revisionid == 0) {
RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
"Find 92DE MAC0\n");
rtlhal->interfaceindex = 0;
} else if (revisionid == 1) {
RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
"Find 92DE MAC1\n");
rtlhal->interfaceindex = 1;
}
} else {
RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
"Unknown device - VendorID/DeviceID=%x/%x, Revision=%x\n",
venderid, deviceid, revisionid);
rtlhal->interfaceindex = 0;
}
}
/*find bus info */
pcipriv->ndis_adapter.busnumber = pdev->bus->number;
pcipriv->ndis_adapter.devnumber = PCI_SLOT(pdev->devfn);
pcipriv->ndis_adapter.funcnumber = PCI_FUNC(pdev->devfn);
if (bridge_pdev) {
/*find bridge info if available */
pcipriv->ndis_adapter.pcibridge_vendorid = bridge_pdev->vendor;
for (tmp = 0; tmp < PCI_BRIDGE_VENDOR_MAX; tmp++) {
if (bridge_pdev->vendor == pcibridge_vendors[tmp]) {
pcipriv->ndis_adapter.pcibridge_vendor = tmp;
RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
"Pci Bridge Vendor is found index: %d\n",
tmp);
break;
}
}
}
if (pcipriv->ndis_adapter.pcibridge_vendor !=
PCI_BRIDGE_VENDOR_UNKNOWN) {
pcipriv->ndis_adapter.pcibridge_busnum =
bridge_pdev->bus->number;
pcipriv->ndis_adapter.pcibridge_devnum =
PCI_SLOT(bridge_pdev->devfn);
pcipriv->ndis_adapter.pcibridge_funcnum =
PCI_FUNC(bridge_pdev->devfn);
pcipriv->ndis_adapter.pcibridge_pciehdr_offset =
pci_pcie_cap(bridge_pdev);
pcipriv->ndis_adapter.num4bytes =
(pcipriv->ndis_adapter.pcibridge_pciehdr_offset + 0x10) / 4;
rtl_pci_get_linkcontrol_field(hw);
if (pcipriv->ndis_adapter.pcibridge_vendor ==
PCI_BRIDGE_VENDOR_AMD) {
pcipriv->ndis_adapter.amd_l1_patch =
rtl_pci_get_amd_l1_patch(hw);
}
}
RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
"pcidev busnumber:devnumber:funcnumber:vendor:link_ctl %d:%d:%d:%x:%x\n",
pcipriv->ndis_adapter.busnumber,
pcipriv->ndis_adapter.devnumber,
pcipriv->ndis_adapter.funcnumber,
pdev->vendor, pcipriv->ndis_adapter.linkctrl_reg);
RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
"pci_bridge busnumber:devnumber:funcnumber:vendor:pcie_cap:link_ctl_reg:amd %d:%d:%d:%x:%x:%x:%x\n",
pcipriv->ndis_adapter.pcibridge_busnum,
pcipriv->ndis_adapter.pcibridge_devnum,
pcipriv->ndis_adapter.pcibridge_funcnum,
pcibridge_vendors[pcipriv->ndis_adapter.pcibridge_vendor],
pcipriv->ndis_adapter.pcibridge_pciehdr_offset,
pcipriv->ndis_adapter.pcibridge_linkctrlreg,
pcipriv->ndis_adapter.amd_l1_patch);
rtl_pci_parse_configuration(pdev, hw);
return true;
}
int __devinit rtl_pci_probe(struct pci_dev *pdev,
const struct pci_device_id *id)
{
struct ieee80211_hw *hw = NULL;
struct rtl_priv *rtlpriv = NULL;
struct rtl_pci_priv *pcipriv = NULL;
struct rtl_pci *rtlpci;
unsigned long pmem_start, pmem_len, pmem_flags;
int err;
err = pci_enable_device(pdev);
if (err) {
RT_ASSERT(false, "%s : Cannot enable new PCI device\n",
pci_name(pdev));
return err;
}
if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) {
if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32))) {
RT_ASSERT(false,
"Unable to obtain 32bit DMA for consistent allocations\n");
err = -ENOMEM;
goto fail1;
}
}
pci_set_master(pdev);
hw = ieee80211_alloc_hw(sizeof(struct rtl_pci_priv) +
sizeof(struct rtl_priv), &rtl_ops);
if (!hw) {
RT_ASSERT(false,
"%s : ieee80211 alloc failed\n", pci_name(pdev));
err = -ENOMEM;
goto fail1;
}
SET_IEEE80211_DEV(hw, &pdev->dev);
pci_set_drvdata(pdev, hw);
rtlpriv = hw->priv;
pcipriv = (void *)rtlpriv->priv;
pcipriv->dev.pdev = pdev;
init_completion(&rtlpriv->firmware_loading_complete);
/* init cfg & intf_ops */
rtlpriv->rtlhal.interface = INTF_PCI;
rtlpriv->cfg = (struct rtl_hal_cfg *)(id->driver_data);
rtlpriv->intf_ops = &rtl_pci_ops;
/*
 * Init dbgp flags before all other functions, because we
 * will use them in other functions such as
 * RT_TRACE/RT_PRINT/RTL_PRINT_DATA; these macros cannot
 * be used before this point.
 */
rtl_dbgp_flag_init(hw);
/* MEM map */
err = pci_request_regions(pdev, KBUILD_MODNAME);
if (err) {
RT_ASSERT(false, "Can't obtain PCI resources\n");
goto fail1;
}
pmem_start = pci_resource_start(pdev, rtlpriv->cfg->bar_id);
pmem_len = pci_resource_len(pdev, rtlpriv->cfg->bar_id);
pmem_flags = pci_resource_flags(pdev, rtlpriv->cfg->bar_id);
/*shared mem start */
rtlpriv->io.pci_mem_start =
(unsigned long)pci_iomap(pdev,
rtlpriv->cfg->bar_id, pmem_len);
if (rtlpriv->io.pci_mem_start == 0) {
RT_ASSERT(false, "Can't map PCI mem\n");
err = -ENOMEM;
goto fail2;
}
RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
"mem mapped space: start: 0x%08lx len:%08lx flags:%08lx, after map:0x%08lx\n",
pmem_start, pmem_len, pmem_flags,
rtlpriv->io.pci_mem_start);
/* Disable Clk Request */
pci_write_config_byte(pdev, 0x81, 0);
/* leave D3 mode */
pci_write_config_byte(pdev, 0x44, 0);
pci_write_config_byte(pdev, 0x04, 0x06);
pci_write_config_byte(pdev, 0x04, 0x07);
/* find adapter */
if (!_rtl_pci_find_adapter(pdev, hw)) {
err = -ENODEV;
goto fail3;
}
/* Init IO handler */
_rtl_pci_io_handler_init(&pdev->dev, hw);
/*like read eeprom and so on */
rtlpriv->cfg->ops->read_eeprom_info(hw);
/*aspm */
rtl_pci_init_aspm(hw);
/* Init mac80211 sw */
err = rtl_init_core(hw);
if (err) {
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
"Can't allocate sw for mac80211\n");
goto fail3;
}
/* Init PCI sw */
err = rtl_pci_init(hw, pdev);
if (err) {
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "Failed to init PCI\n");
goto fail3;
}
if (rtlpriv->cfg->ops->init_sw_vars(hw)) {
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "Can't init_sw_vars\n");
err = -ENODEV;
goto fail3;
}
rtlpriv->cfg->ops->init_sw_leds(hw);
err = sysfs_create_group(&pdev->dev.kobj, &rtl_attribute_group);
if (err) {
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
"failed to create sysfs device attributes\n");
goto fail3;
}
rtlpci = rtl_pcidev(pcipriv);
err = request_irq(rtlpci->pdev->irq, &_rtl_pci_interrupt,
IRQF_SHARED, KBUILD_MODNAME, hw);
if (err) {
RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
"%s: failed to register IRQ handler\n",
wiphy_name(hw->wiphy));
goto fail3;
}
rtlpci->irq_alloc = 1;
return 0;
fail3:
rtl_deinit_core(hw);
_rtl_pci_io_handler_release(hw);
if (rtlpriv->io.pci_mem_start != 0)
pci_iounmap(pdev, (void __iomem *)rtlpriv->io.pci_mem_start);
fail2:
pci_release_regions(pdev);
complete(&rtlpriv->firmware_loading_complete);
fail1:
if (hw)
ieee80211_free_hw(hw);
pci_set_drvdata(pdev, NULL);
pci_disable_device(pdev);
return err;
}
EXPORT_SYMBOL(rtl_pci_probe);
void rtl_pci_disconnect(struct pci_dev *pdev)
{
struct ieee80211_hw *hw = pci_get_drvdata(pdev);
struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_pci *rtlpci = rtl_pcidev(pcipriv);
struct rtl_mac *rtlmac = rtl_mac(rtlpriv);
/* just in case driver is removed before firmware callback */
wait_for_completion(&rtlpriv->firmware_loading_complete);
clear_bit(RTL_STATUS_INTERFACE_START, &rtlpriv->status);
sysfs_remove_group(&pdev->dev.kobj, &rtl_attribute_group);
/*ieee80211_unregister_hw will call ops_stop */
if (rtlmac->mac80211_registered == 1) {
ieee80211_unregister_hw(hw);
rtlmac->mac80211_registered = 0;
} else {
rtl_deinit_deferred_work(hw);
rtlpriv->intf_ops->adapter_stop(hw);
}
rtlpriv->cfg->ops->disable_interrupt(hw);
/*deinit rfkill */
rtl_deinit_rfkill(hw);
rtl_pci_deinit(hw);
rtl_deinit_core(hw);
_rtl_pci_io_handler_release(hw);
rtlpriv->cfg->ops->deinit_sw_vars(hw);
if (rtlpci->irq_alloc) {
free_irq(rtlpci->pdev->irq, hw);
rtlpci->irq_alloc = 0;
}
if (rtlpriv->io.pci_mem_start != 0) {
pci_iounmap(pdev, (void __iomem *)rtlpriv->io.pci_mem_start);
pci_release_regions(pdev);
}
pci_disable_device(pdev);
rtl_pci_disable_aspm(hw);
pci_set_drvdata(pdev, NULL);
ieee80211_free_hw(hw);
}
EXPORT_SYMBOL(rtl_pci_disconnect);
/***************************************
kernel pci power state define:
PCI_D0 ((pci_power_t __force) 0)
PCI_D1 ((pci_power_t __force) 1)
PCI_D2 ((pci_power_t __force) 2)
PCI_D3hot ((pci_power_t __force) 3)
PCI_D3cold ((pci_power_t __force) 4)
PCI_UNKNOWN ((pci_power_t __force) 5)
This function is called when the system
goes into suspend state. mac80211 will
call rtl_mac_stop() from its suspend
function first, so there is no need
to call hw_disable here.
****************************************/
int rtl_pci_suspend(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
struct ieee80211_hw *hw = pci_get_drvdata(pdev);
struct rtl_priv *rtlpriv = rtl_priv(hw);
rtlpriv->cfg->ops->hw_suspend(hw);
rtl_deinit_rfkill(hw);
return 0;
}
EXPORT_SYMBOL(rtl_pci_suspend);
int rtl_pci_resume(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
struct ieee80211_hw *hw = pci_get_drvdata(pdev);
struct rtl_priv *rtlpriv = rtl_priv(hw);
rtlpriv->cfg->ops->hw_resume(hw);
rtl_init_rfkill(hw);
return 0;
}
EXPORT_SYMBOL(rtl_pci_resume);
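/*
 * Illustrative sketch (not part of this file): a chip driver would
 * typically wire rtl_pci_suspend()/rtl_pci_resume() into its pci_driver
 * through dev_pm_ops, roughly as below. The rtl92xx names and the ID
 * table are hypothetical placeholders.
 */
#if 0
static SIMPLE_DEV_PM_OPS(rtl92xx_pm_ops, rtl_pci_suspend, rtl_pci_resume);

static struct pci_driver rtl92xx_pci_driver = {
	.name		= "rtl92xx_pci",
	.id_table	= rtl92xx_pci_ids,
	.probe		= rtl_pci_probe,
	.remove		= rtl_pci_disconnect,
	.driver.pm	= &rtl92xx_pm_ops,
};
#endif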
struct rtl_intf_ops rtl_pci_ops = {
.read_efuse_byte = read_efuse_byte,
.adapter_start = rtl_pci_start,
.adapter_stop = rtl_pci_stop,
.adapter_tx = rtl_pci_tx,
.flush = rtl_pci_flush,
.reset_trx_ring = rtl_pci_reset_trx_ring,
.waitq_insert = rtl_pci_tx_chk_waitq_insert,
.disable_aspm = rtl_pci_disable_aspm,
.enable_aspm = rtl_pci_enable_aspm,
};
| gpl-2.0 |
ProjectOpenCannibal/GingerKernel-VM701-2.6.35 | drivers/gpu/drm/drm_memory.c | 937 | 5039 | /**
* \file drm_memory.c
* Memory management wrappers for DRM
*
* \author Rickard E. (Rik) Faith <faith@valinux.com>
* \author Gareth Hughes <gareth@valinux.com>
*/
/*
* Created: Thu Feb 4 14:00:34 1999 by faith@valinux.com
*
* Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
* Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include <linux/highmem.h>
#include "drmP.h"
/**
* Called when "/proc/dri/%dev%/mem" is read.
*
* \param buf output buffer.
* \param start start of output data.
* \param offset requested start offset.
* \param len requested number of bytes.
* \param eof whether there is no more data to return.
* \param data private data.
* \return number of written bytes.
*
* No-op.
*/
int drm_mem_info(char *buf, char **start, off_t offset,
int len, int *eof, void *data)
{
return 0;
}
#if __OS_HAS_AGP
static void *agp_remap(unsigned long offset, unsigned long size,
struct drm_device * dev)
{
unsigned long i, num_pages =
PAGE_ALIGN(size) / PAGE_SIZE;
struct drm_agp_mem *agpmem;
struct page **page_map;
struct page **phys_page_map;
void *addr;
size = PAGE_ALIGN(size);
#ifdef __alpha__
offset -= dev->hose->mem_space->start;
#endif
list_for_each_entry(agpmem, &dev->agp->memory, head)
if (agpmem->bound <= offset
&& (agpmem->bound + (agpmem->pages << PAGE_SHIFT)) >=
(offset + size))
break;
if (&agpmem->head == &dev->agp->memory)
return NULL;
/*
* OK, we're mapping AGP space on a chipset/platform on which memory accesses by
* the CPU do not get remapped by the GART. We fix this by using the kernel's
* page-table instead (that's probably faster anyhow...).
*/
/* note: use vmalloc() because num_pages could be large... */
page_map = vmalloc(num_pages * sizeof(struct page *));
if (!page_map)
return NULL;
phys_page_map = (agpmem->memory->pages + (offset - agpmem->bound) / PAGE_SIZE);
for (i = 0; i < num_pages; ++i)
page_map[i] = phys_page_map[i];
addr = vmap(page_map, num_pages, VM_IOREMAP, PAGE_AGP);
vfree(page_map);
return addr;
}
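/*
 * Illustrative sketch (not part of this file): agp_remap() above uses
 * the vmalloc()/vmap()/vfree() trio -- build a temporary page-pointer
 * array, map it into one contiguous kernel virtual range, then discard
 * the array. The core of the pattern, names hypothetical:
 */
#if 0
static void *map_pages_example(struct page **pages, unsigned long num_pages)
{
	/* VM_IOREMAP + PAGE_AGP match the AGP aperture's mapping semantics */
	void *addr = vmap(pages, num_pages, VM_IOREMAP, PAGE_AGP);

	/* undo later with vunmap(addr), as drm_core_ioremapfree() does */
	return addr;
}
#endif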
/** Wrapper around agp_allocate_memory() */
DRM_AGP_MEM *drm_alloc_agp(struct drm_device * dev, int pages, u32 type)
{
return drm_agp_allocate_memory(dev->agp->bridge, pages, type);
}
/** Wrapper around agp_free_memory() */
int drm_free_agp(DRM_AGP_MEM * handle, int pages)
{
return drm_agp_free_memory(handle) ? 0 : -EINVAL;
}
EXPORT_SYMBOL(drm_free_agp);
/** Wrapper around agp_bind_memory() */
int drm_bind_agp(DRM_AGP_MEM * handle, unsigned int start)
{
return drm_agp_bind_memory(handle, start);
}
/** Wrapper around agp_unbind_memory() */
int drm_unbind_agp(DRM_AGP_MEM * handle)
{
return drm_agp_unbind_memory(handle);
}
EXPORT_SYMBOL(drm_unbind_agp);
#else /* __OS_HAS_AGP */
static inline void *agp_remap(unsigned long offset, unsigned long size,
struct drm_device * dev)
{
return NULL;
}
#endif /* agp */
void drm_core_ioremap(struct drm_local_map *map, struct drm_device *dev)
{
if (drm_core_has_AGP(dev) &&
dev->agp && dev->agp->cant_use_aperture && map->type == _DRM_AGP)
map->handle = agp_remap(map->offset, map->size, dev);
else
map->handle = ioremap(map->offset, map->size);
}
EXPORT_SYMBOL(drm_core_ioremap);
void drm_core_ioremap_wc(struct drm_local_map *map, struct drm_device *dev)
{
if (drm_core_has_AGP(dev) &&
dev->agp && dev->agp->cant_use_aperture && map->type == _DRM_AGP)
map->handle = agp_remap(map->offset, map->size, dev);
else
map->handle = ioremap_wc(map->offset, map->size);
}
EXPORT_SYMBOL(drm_core_ioremap_wc);
void drm_core_ioremapfree(struct drm_local_map *map, struct drm_device *dev)
{
if (!map->handle || !map->size)
return;
if (drm_core_has_AGP(dev) &&
dev->agp && dev->agp->cant_use_aperture && map->type == _DRM_AGP)
vunmap(map->handle);
else
iounmap(map->handle);
}
EXPORT_SYMBOL(drm_core_ioremapfree);
| gpl-2.0 |
davidmueller13/f2fs-backport | arch/arm/mach-davinci/board-sffsdr.c | 2473 | 4508 | /*
* Lyrtech SFFSDR board support.
*
* Copyright (C) 2008 Philip Balister, OpenSDR <philip@opensdr.com>
* Copyright (C) 2008 Lyrtech <www.lyrtech.com>
*
* Based on DV-EVM platform, original copyright follows:
*
* Copyright (C) 2007 MontaVista Software, Inc.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/i2c.h>
#include <linux/i2c/at24.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/nand.h>
#include <linux/mtd/partitions.h>
#include <asm/mach-types.h>
#include <asm/mach/arch.h>
#include <asm/mach/flash.h>
#include <mach/dm644x.h>
#include <mach/common.h>
#include <mach/i2c.h>
#include <mach/serial.h>
#include <mach/mux.h>
#include <mach/usb.h>
#define SFFSDR_PHY_ID "0:01"
static struct mtd_partition davinci_sffsdr_nandflash_partition[] = {
/* U-Boot Environment: Block 0
* UBL: Block 1
* U-Boot: Blocks 6-7 (256 kb)
* Integrity Kernel: Blocks 8-31 (3 Mb)
* Integrity Data: Blocks 100-END
*/
{
.name = "Linux Kernel",
.offset = 32 * SZ_128K,
.size = 16 * SZ_128K, /* 2 Mb */
.mask_flags = MTD_WRITEABLE, /* Force read-only */
},
{
.name = "Linux ROOT",
.offset = MTDPART_OFS_APPEND,
.size = 256 * SZ_128K, /* 32 Mb */
.mask_flags = 0, /* R/W */
},
};
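/*
 * Worked layout (illustrative, derived from the comment above): with
 * 128 KiB erase blocks, "Linux Kernel" starts at block 32
 * (32 * 128 KiB = 4 MiB) and spans 16 blocks (2 MiB), so the appended
 * "Linux ROOT" partition starts at 6 MiB and spans 256 blocks (32 MiB).
 */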
static struct flash_platform_data davinci_sffsdr_nandflash_data = {
.parts = davinci_sffsdr_nandflash_partition,
.nr_parts = ARRAY_SIZE(davinci_sffsdr_nandflash_partition),
};
static struct resource davinci_sffsdr_nandflash_resource[] = {
{
.start = DM644X_ASYNC_EMIF_DATA_CE0_BASE,
.end = DM644X_ASYNC_EMIF_DATA_CE0_BASE + SZ_16M - 1,
.flags = IORESOURCE_MEM,
}, {
.start = DM644X_ASYNC_EMIF_CONTROL_BASE,
.end = DM644X_ASYNC_EMIF_CONTROL_BASE + SZ_4K - 1,
.flags = IORESOURCE_MEM,
},
};
static struct platform_device davinci_sffsdr_nandflash_device = {
.name = "davinci_nand", /* Name of driver */
.id = 0,
.dev = {
.platform_data = &davinci_sffsdr_nandflash_data,
},
.num_resources = ARRAY_SIZE(davinci_sffsdr_nandflash_resource),
.resource = davinci_sffsdr_nandflash_resource,
};
static struct at24_platform_data eeprom_info = {
.byte_len = (64*1024) / 8,
.page_size = 32,
.flags = AT24_FLAG_ADDR16,
};
static struct i2c_board_info __initdata i2c_info[] = {
{
I2C_BOARD_INFO("24lc64", 0x50),
.platform_data = &eeprom_info,
},
/* Other I2C devices:
* MSP430, addr 0x23 (not used)
* PCA9543, addr 0x70 (setup done by U-Boot)
* ADS7828, addr 0x48 (ADC for voltage monitoring.)
*/
};
static struct davinci_i2c_platform_data i2c_pdata = {
.bus_freq = 20 /* kHz */,
.bus_delay = 100 /* usec */,
};
static void __init sffsdr_init_i2c(void)
{
davinci_init_i2c(&i2c_pdata);
i2c_register_board_info(1, i2c_info, ARRAY_SIZE(i2c_info));
}
static struct platform_device *davinci_sffsdr_devices[] __initdata = {
&davinci_sffsdr_nandflash_device,
};
static struct davinci_uart_config uart_config __initdata = {
.enabled_uarts = (1 << 0),
};
static void __init davinci_sffsdr_map_io(void)
{
dm644x_init();
}
static __init void davinci_sffsdr_init(void)
{
struct davinci_soc_info *soc_info = &davinci_soc_info;
platform_add_devices(davinci_sffsdr_devices,
ARRAY_SIZE(davinci_sffsdr_devices));
sffsdr_init_i2c();
davinci_serial_init(&uart_config);
soc_info->emac_pdata->phy_id = SFFSDR_PHY_ID;
davinci_setup_usb(0, 0); /* We support only peripheral mode. */
/* mux VLYNQ pins */
davinci_cfg_reg(DM644X_VLYNQEN);
davinci_cfg_reg(DM644X_VLYNQWD);
}
MACHINE_START(SFFSDR, "Lyrtech SFFSDR")
/* Maintainer: Hugo Villeneuve hugo.villeneuve@lyrtech.com */
.boot_params = (DAVINCI_DDR_BASE + 0x100),
.map_io = davinci_sffsdr_map_io,
.init_irq = davinci_irq_init,
.timer = &davinci_timer,
.init_machine = davinci_sffsdr_init,
MACHINE_END
| gpl-2.0 |
CyanogenMod/android_kernel_samsung_lt03lte | drivers/misc/atmel_pwm.c | 5801 | 9378 | #include <linux/module.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/io.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/atmel_pwm.h>
/*
* This is a simple driver for the PWM controller found in various newer
* Atmel SOCs, including the AVR32 series and the AT91sam9263.
*
* Chips with current Linux ports have only 4 PWM channels, out of max 32.
* AT32UC3A and AT32UC3B chips have 7 channels (but currently no Linux).
* Docs are inconsistent about the width of the channel counter registers;
* it's at least 16 bits, but several places say 20 bits.
*/
#define PWM_NCHAN 4 /* max 32 */
struct pwm {
spinlock_t lock;
struct platform_device *pdev;
u32 mask;
int irq;
void __iomem *base;
struct clk *clk;
struct pwm_channel *channel[PWM_NCHAN];
void (*handler[PWM_NCHAN])(struct pwm_channel *);
};
/* global PWM controller registers */
#define PWM_MR 0x00
#define PWM_ENA 0x04
#define PWM_DIS 0x08
#define PWM_SR 0x0c
#define PWM_IER 0x10
#define PWM_IDR 0x14
#define PWM_IMR 0x18
#define PWM_ISR 0x1c
static inline void pwm_writel(const struct pwm *p, unsigned offset, u32 val)
{
__raw_writel(val, p->base + offset);
}
static inline u32 pwm_readl(const struct pwm *p, unsigned offset)
{
return __raw_readl(p->base + offset);
}
static inline void __iomem *pwmc_regs(const struct pwm *p, int index)
{
return p->base + 0x200 + index * 0x20;
}
static struct pwm *pwm;
static void pwm_dumpregs(struct pwm_channel *ch, char *tag)
{
struct device *dev = &pwm->pdev->dev;
dev_dbg(dev, "%s: mr %08x, sr %08x, imr %08x\n",
tag,
pwm_readl(pwm, PWM_MR),
pwm_readl(pwm, PWM_SR),
pwm_readl(pwm, PWM_IMR));
dev_dbg(dev,
"pwm ch%d - mr %08x, dty %u, prd %u, cnt %u\n",
ch->index,
pwm_channel_readl(ch, PWM_CMR),
pwm_channel_readl(ch, PWM_CDTY),
pwm_channel_readl(ch, PWM_CPRD),
pwm_channel_readl(ch, PWM_CCNT));
}
/**
* pwm_channel_alloc - allocate an unused PWM channel
* @index: identifies the channel
* @ch: structure to be initialized
*
* Drivers allocate PWM channels according to the board's wiring, and
* matching board-specific setup code. Returns zero or negative errno.
*/
int pwm_channel_alloc(int index, struct pwm_channel *ch)
{
unsigned long flags;
int status = 0;
/* insist on PWM init, with this signal pinned out */
if (!pwm || !(pwm->mask & 1 << index))
return -ENODEV;
if (index < 0 || index >= PWM_NCHAN || !ch)
return -EINVAL;
memset(ch, 0, sizeof *ch);
spin_lock_irqsave(&pwm->lock, flags);
if (pwm->channel[index])
status = -EBUSY;
else {
clk_enable(pwm->clk);
ch->regs = pwmc_regs(pwm, index);
ch->index = index;
/* REVISIT: ap7000 seems to go 2x as fast as we expect!! */
ch->mck = clk_get_rate(pwm->clk);
pwm->channel[index] = ch;
pwm->handler[index] = NULL;
/* channel and irq are always disabled when we return */
pwm_writel(pwm, PWM_DIS, 1 << index);
pwm_writel(pwm, PWM_IDR, 1 << index);
}
spin_unlock_irqrestore(&pwm->lock, flags);
return status;
}
EXPORT_SYMBOL(pwm_channel_alloc);
static int pwmcheck(struct pwm_channel *ch)
{
int index;
if (!pwm)
return -ENODEV;
if (!ch)
return -EINVAL;
index = ch->index;
if (index < 0 || index >= PWM_NCHAN || pwm->channel[index] != ch)
return -EINVAL;
return index;
}
/**
* pwm_channel_free - release a previously allocated channel
* @ch: the channel being released
*
* The channel is completely shut down (counter and IRQ disabled),
* and made available for re-use. Returns zero, or negative errno.
*/
int pwm_channel_free(struct pwm_channel *ch)
{
unsigned long flags;
int t;
spin_lock_irqsave(&pwm->lock, flags);
t = pwmcheck(ch);
if (t >= 0) {
pwm->channel[t] = NULL;
pwm->handler[t] = NULL;
/* channel and irq are always disabled when we return */
pwm_writel(pwm, PWM_DIS, 1 << t);
pwm_writel(pwm, PWM_IDR, 1 << t);
clk_disable(pwm->clk);
t = 0;
}
spin_unlock_irqrestore(&pwm->lock, flags);
return t;
}
EXPORT_SYMBOL(pwm_channel_free);
int __pwm_channel_onoff(struct pwm_channel *ch, int enabled)
{
unsigned long flags;
int t;
/* OMITTED FUNCTIONALITY: starting several channels in synch */
spin_lock_irqsave(&pwm->lock, flags);
t = pwmcheck(ch);
if (t >= 0) {
pwm_writel(pwm, enabled ? PWM_ENA : PWM_DIS, 1 << t);
t = 0;
pwm_dumpregs(ch, enabled ? "enable" : "disable");
}
spin_unlock_irqrestore(&pwm->lock, flags);
return t;
}
EXPORT_SYMBOL(__pwm_channel_onoff);
/**
* pwm_clk_alloc - allocate and configure CLKA or CLKB
 * @prescale: from 0..9, the power of two used to divide MCK
* @div: from 1..255, the linear divisor to use
*
* Returns PWM_CPR_CLKA, PWM_CPR_CLKB, or negative errno. The allocated
* clock will run with a period of (2^prescale * div) / MCK, or twice as
* long if center aligned PWM output is used. The clock must later be
* deconfigured using pwm_clk_free().
*/
int pwm_clk_alloc(unsigned prescale, unsigned div)
{
unsigned long flags;
u32 mr;
u32 val = (prescale << 8) | div;
int ret = -EBUSY;
if (prescale >= 10 || div == 0 || div > 255)
return -EINVAL;
spin_lock_irqsave(&pwm->lock, flags);
mr = pwm_readl(pwm, PWM_MR);
if ((mr & 0xffff) == 0) {
mr |= val;
ret = PWM_CPR_CLKA;
} else if ((mr & (0xffff << 16)) == 0) {
mr |= val << 16;
ret = PWM_CPR_CLKB;
}
if (ret > 0)
pwm_writel(pwm, PWM_MR, mr);
spin_unlock_irqrestore(&pwm->lock, flags);
return ret;
}
EXPORT_SYMBOL(pwm_clk_alloc);
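/*
 * Worked example (illustrative): assuming MCK = 60 MHz,
 * pwm_clk_alloc(5, 150) requests a divided clock of
 * MCK / (2^5 * 150) = 60000000 / 4800 = 12.5 kHz; the returned
 * PWM_CPR_CLKA/CLKB value is then used in a channel's CMR to select
 * that clock. The 60 MHz figure is an assumption for the arithmetic.
 */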
/**
* pwm_clk_free - deconfigure and release CLKA or CLKB
*
* Reverses the effect of pwm_clk_alloc().
*/
void pwm_clk_free(unsigned clk)
{
unsigned long flags;
u32 mr;
spin_lock_irqsave(&pwm->lock, flags);
mr = pwm_readl(pwm, PWM_MR);
if (clk == PWM_CPR_CLKA)
pwm_writel(pwm, PWM_MR, mr & ~(0xffff << 0));
if (clk == PWM_CPR_CLKB)
pwm_writel(pwm, PWM_MR, mr & ~(0xffff << 16));
spin_unlock_irqrestore(&pwm->lock, flags);
}
EXPORT_SYMBOL(pwm_clk_free);
/**
* pwm_channel_handler - manage channel's IRQ handler
* @ch: the channel
* @handler: the handler to use, possibly NULL
*
* If the handler is non-null, the handler will be called after every
* period of this PWM channel. If the handler is null, this channel
* won't generate an IRQ.
*/
int pwm_channel_handler(struct pwm_channel *ch,
void (*handler)(struct pwm_channel *ch))
{
unsigned long flags;
int t;
spin_lock_irqsave(&pwm->lock, flags);
t = pwmcheck(ch);
if (t >= 0) {
pwm->handler[t] = handler;
pwm_writel(pwm, handler ? PWM_IER : PWM_IDR, 1 << t);
t = 0;
}
spin_unlock_irqrestore(&pwm->lock, flags);
return t;
}
EXPORT_SYMBOL(pwm_channel_handler);
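/*
 * Illustrative usage sketch (not from the original driver): board code
 * might claim a channel, install a per-period handler and start it
 * roughly like this; my_pwm_ch and my_period_handler are hypothetical,
 * and pwm_channel_enable() is assumed to be the inline wrapper around
 * __pwm_channel_onoff() from <linux/atmel_pwm.h>.
 */
#if 0
static struct pwm_channel my_pwm_ch;

static void my_period_handler(struct pwm_channel *ch)
{
	/* called after every PWM period on this channel */
}

static int my_pwm_setup(void)
{
	int ret = pwm_channel_alloc(2, &my_pwm_ch);

	if (ret < 0)
		return ret;
	pwm_channel_handler(&my_pwm_ch, my_period_handler);
	return pwm_channel_enable(&my_pwm_ch);
}
#endif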
static irqreturn_t pwm_irq(int id, void *_pwm)
{
struct pwm *p = _pwm;
irqreturn_t handled = IRQ_NONE;
u32 irqstat;
int index;
spin_lock(&p->lock);
/* ack irqs, then handle them */
irqstat = pwm_readl(pwm, PWM_ISR);
while (irqstat) {
struct pwm_channel *ch;
void (*handler)(struct pwm_channel *ch);
index = ffs(irqstat) - 1;
irqstat &= ~(1 << index);
ch = pwm->channel[index];
handler = pwm->handler[index];
if (handler && ch) {
spin_unlock(&p->lock);
handler(ch);
spin_lock(&p->lock);
handled = IRQ_HANDLED;
}
}
spin_unlock(&p->lock);
return handled;
}
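/*
 * Illustrative sketch (not part of the original driver): the IRQ
 * handler above drains a pending-bit mask with ffs() -- find the
 * lowest set bit, clear it, dispatch, repeat:
 */
#if 0
static void dispatch_mask_example(u32 pending)
{
	while (pending) {
		int index = ffs(pending) - 1;	/* lowest pending channel */

		pending &= ~(1 << index);	/* consume that bit */
		/* ... handle channel 'index' ... */
	}
}
#endif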
static int __init pwm_probe(struct platform_device *pdev)
{
struct resource *r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
int irq = platform_get_irq(pdev, 0);
u32 *mp = pdev->dev.platform_data;
struct pwm *p;
int status = -EIO;
if (pwm)
return -EBUSY;
if (!r || irq < 0 || !mp || !*mp)
return -ENODEV;
if (*mp & ~((1<<PWM_NCHAN)-1)) {
dev_warn(&pdev->dev, "mask 0x%x ... more than %d channels\n",
*mp, PWM_NCHAN);
return -EINVAL;
}
p = kzalloc(sizeof(*p), GFP_KERNEL);
if (!p)
return -ENOMEM;
spin_lock_init(&p->lock);
p->pdev = pdev;
p->mask = *mp;
p->irq = irq;
p->base = ioremap(r->start, resource_size(r));
if (!p->base)
goto fail;
p->clk = clk_get(&pdev->dev, "pwm_clk");
if (IS_ERR(p->clk)) {
status = PTR_ERR(p->clk);
p->clk = NULL;
goto fail;
}
status = request_irq(irq, pwm_irq, 0, pdev->name, p);
if (status < 0)
goto fail;
pwm = p;
platform_set_drvdata(pdev, p);
return 0;
fail:
if (p->clk)
clk_put(p->clk);
if (p->base)
iounmap(p->base);
kfree(p);
return status;
}
static int __exit pwm_remove(struct platform_device *pdev)
{
struct pwm *p = platform_get_drvdata(pdev);
if (p != pwm)
return -EINVAL;
clk_enable(pwm->clk);
pwm_writel(pwm, PWM_DIS, (1 << PWM_NCHAN) - 1);
pwm_writel(pwm, PWM_IDR, (1 << PWM_NCHAN) - 1);
clk_disable(pwm->clk);
pwm = NULL;
free_irq(p->irq, p);
clk_put(p->clk);
iounmap(p->base);
kfree(p);
return 0;
}
static struct platform_driver atmel_pwm_driver = {
.driver = {
.name = "atmel_pwm",
.owner = THIS_MODULE,
},
.remove = __exit_p(pwm_remove),
/* NOTE: PWM can keep running in AVR32 "idle" and "frozen" states;
* and all AT91sam9263 states, albeit at reduced clock rate if
* MCK becomes the slow clock (i.e. what Linux labels STR).
*/
};
static int __init pwm_init(void)
{
return platform_driver_probe(&atmel_pwm_driver, pwm_probe);
}
module_init(pwm_init);
static void __exit pwm_exit(void)
{
platform_driver_unregister(&atmel_pwm_driver);
}
module_exit(pwm_exit);
MODULE_DESCRIPTION("Driver for AT32/AT91 PWM module");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:atmel_pwm");
| gpl-2.0 |
MikeUYSCUTI/surnia_msm8916 | arch/h8300/kernel/irq.c | 6825 | 3637 | /*
* linux/arch/h8300/kernel/irq.c
*
* Copyright 2007 Yoshinori Sato <ysato@users.sourceforge.jp>
*/
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/kernel_stat.h>
#include <linux/seq_file.h>
#include <linux/init.h>
#include <linux/random.h>
#include <linux/bootmem.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <asm/traps.h>
#include <asm/io.h>
#include <asm/setup.h>
#include <asm/errno.h>
/*#define DEBUG*/
extern unsigned long *interrupt_redirect_table;
extern const int h8300_saved_vectors[];
extern const h8300_vector h8300_trap_table[];
int h8300_enable_irq_pin(unsigned int irq);
void h8300_disable_irq_pin(unsigned int irq);
#define CPU_VECTOR ((unsigned long *)0x000000)
#define ADDR_MASK (0xffffff)
static inline int is_ext_irq(unsigned int irq)
{
return (irq >= EXT_IRQ0 && irq <= (EXT_IRQ0 + EXT_IRQS));
}
static void h8300_enable_irq(struct irq_data *data)
{
if (is_ext_irq(data->irq))
IER_REGS |= 1 << (data->irq - EXT_IRQ0);
}
static void h8300_disable_irq(struct irq_data *data)
{
if (is_ext_irq(data->irq))
IER_REGS &= ~(1 << (data->irq - EXT_IRQ0));
}
static unsigned int h8300_startup_irq(struct irq_data *data)
{
if (is_ext_irq(data->irq))
return h8300_enable_irq_pin(data->irq);
else
return 0;
}
static void h8300_shutdown_irq(struct irq_data *data)
{
if (is_ext_irq(data->irq))
h8300_disable_irq_pin(data->irq);
}
/*
* h8300 interrupt controller implementation
*/
struct irq_chip h8300irq_chip = {
.name = "H8300-INTC",
.irq_startup = h8300_startup_irq,
.irq_shutdown = h8300_shutdown_irq,
.irq_enable = h8300_enable_irq,
.irq_disable = h8300_disable_irq,
};
#if defined(CONFIG_RAMKERNEL)
static unsigned long __init *get_vector_address(void)
{
unsigned long *rom_vector = CPU_VECTOR;
unsigned long base,tmp;
int vec_no;
base = rom_vector[EXT_IRQ0] & ADDR_MASK;
/* check romvector format */
for (vec_no = EXT_IRQ1; vec_no <= EXT_IRQ0+EXT_IRQS; vec_no++) {
if ((base+(vec_no - EXT_IRQ0)*4) != (rom_vector[vec_no] & ADDR_MASK))
return NULL;
}
/* ramvector base address */
base -= EXT_IRQ0*4;
/* writable check */
tmp = ~(*(volatile unsigned long *)base);
(*(volatile unsigned long *)base) = tmp;
if ((*(volatile unsigned long *)base) != tmp)
return NULL;
return (unsigned long *)base;
}
static void __init setup_vector(void)
{
int i;
unsigned long *ramvec,*ramvec_p;
const h8300_vector *trap_entry;
const int *saved_vector;
ramvec = get_vector_address();
if (ramvec == NULL)
panic("interrupt vector serup failed.");
else
printk(KERN_INFO "virtual vector at 0x%08lx\n",(unsigned long)ramvec);
/* create redirect table */
ramvec_p = ramvec;
trap_entry = h8300_trap_table;
saved_vector = h8300_saved_vectors;
for ( i = 0; i < NR_IRQS; i++) {
if (i == *saved_vector) {
ramvec_p++;
saved_vector++;
} else {
if ( i < NR_TRAPS ) {
if (*trap_entry)
*ramvec_p = VECTOR(*trap_entry);
ramvec_p++;
trap_entry++;
} else
*ramvec_p++ = REDIRECT(interrupt_entry);
}
}
interrupt_redirect_table = ramvec;
#ifdef DEBUG
ramvec_p = ramvec;
for (i = 0; i < NR_IRQS; i++) {
if ((i % 8) == 0)
printk(KERN_DEBUG "\n%p: ",ramvec_p);
printk(KERN_DEBUG "%p ",*ramvec_p);
ramvec_p++;
}
printk(KERN_DEBUG "\n");
#endif
}
#else
#define setup_vector() do { } while(0)
#endif
void __init init_IRQ(void)
{
int c;
setup_vector();
for (c = 0; c < NR_IRQS; c++)
irq_set_chip_and_handler(c, &h8300irq_chip, handle_simple_irq);
}
asmlinkage void do_IRQ(int irq)
{
irq_enter();
generic_handle_irq(irq);
irq_exit();
}
| gpl-2.0 |
DooMLoRD/android_kernel_sony_msm8974 | arch/powerpc/oprofile/op_model_pa6t.c | 11689 | 5937 | /*
* Copyright (C) 2006-2007 PA Semi, Inc
*
* Author: Shashi Rao, PA Semi
*
* Maintained by: Olof Johansson <olof@lixom.net>
*
* Based on arch/powerpc/oprofile/op_model_power4.c
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/oprofile.h>
#include <linux/init.h>
#include <linux/smp.h>
#include <linux/percpu.h>
#include <asm/processor.h>
#include <asm/cputable.h>
#include <asm/oprofile_impl.h>
#include <asm/reg.h>
static unsigned char oprofile_running;
/* mmcr values are set in pa6t_reg_setup, used in pa6t_cpu_setup */
static u64 mmcr0_val;
static u64 mmcr1_val;
/* inited in pa6t_reg_setup */
static u64 reset_value[OP_MAX_COUNTER];
static inline u64 ctr_read(unsigned int i)
{
switch (i) {
case 0:
return mfspr(SPRN_PA6T_PMC0);
case 1:
return mfspr(SPRN_PA6T_PMC1);
case 2:
return mfspr(SPRN_PA6T_PMC2);
case 3:
return mfspr(SPRN_PA6T_PMC3);
case 4:
return mfspr(SPRN_PA6T_PMC4);
case 5:
return mfspr(SPRN_PA6T_PMC5);
default:
printk(KERN_ERR "ctr_read called with bad arg %u\n", i);
return 0;
}
}
static inline void ctr_write(unsigned int i, u64 val)
{
switch (i) {
case 0:
mtspr(SPRN_PA6T_PMC0, val);
break;
case 1:
mtspr(SPRN_PA6T_PMC1, val);
break;
case 2:
mtspr(SPRN_PA6T_PMC2, val);
break;
case 3:
mtspr(SPRN_PA6T_PMC3, val);
break;
case 4:
mtspr(SPRN_PA6T_PMC4, val);
break;
case 5:
mtspr(SPRN_PA6T_PMC5, val);
break;
default:
printk(KERN_ERR "ctr_write called with bad arg %u\n", i);
break;
}
}
/* precompute the values to stuff in the hardware registers */
static int pa6t_reg_setup(struct op_counter_config *ctr,
struct op_system_config *sys,
int num_ctrs)
{
int pmc;
/*
* adjust the mmcr0.en[0-5] and mmcr0.inten[0-5] values obtained from the
* event_mappings file by turning off the counters that the user doesn't
* care about
*
* setup user and kernel profiling
*/
for (pmc = 0; pmc < cur_cpu_spec->num_pmcs; pmc++)
if (!ctr[pmc].enabled) {
sys->mmcr0 &= ~(0x1UL << pmc);
sys->mmcr0 &= ~(0x1UL << (pmc+12));
pr_debug("turned off counter %u\n", pmc);
}
if (sys->enable_kernel)
sys->mmcr0 |= PA6T_MMCR0_SUPEN | PA6T_MMCR0_HYPEN;
else
sys->mmcr0 &= ~(PA6T_MMCR0_SUPEN | PA6T_MMCR0_HYPEN);
if (sys->enable_user)
sys->mmcr0 |= PA6T_MMCR0_PREN;
else
sys->mmcr0 &= ~PA6T_MMCR0_PREN;
/*
* The performance counter event settings are given in the mmcr0 and
* mmcr1 values passed from the user in the op_system_config
* structure (sys variable).
*/
mmcr0_val = sys->mmcr0;
mmcr1_val = sys->mmcr1;
pr_debug("mmcr0_val inited to %016lx\n", sys->mmcr0);
pr_debug("mmcr1_val inited to %016lx\n", sys->mmcr1);
for (pmc = 0; pmc < cur_cpu_spec->num_pmcs; pmc++) {
/* counters are 40 bit. Move to cputable at some point? */
reset_value[pmc] = (0x1UL << 39) - ctr[pmc].count;
pr_debug("reset_value for pmc%u inited to 0x%llx\n",
pmc, reset_value[pmc]);
}
return 0;
}
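/*
 * Worked example (illustrative): the counters are 40 bits wide and an
 * overflow is signalled once bit 39 becomes set, so a reset value of
 * (1 << 39) - count reaches that bit after exactly 'count' events.
 * E.g. count = 100000 gives 0x8000000000 - 0x186A0 = 0x7FFFFE7960.
 */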
/* configure registers on this cpu */
static int pa6t_cpu_setup(struct op_counter_config *ctr)
{
u64 mmcr0 = mmcr0_val;
u64 mmcr1 = mmcr1_val;
/* Default is all PMCs off */
mmcr0 &= ~(0x3FUL);
mtspr(SPRN_PA6T_MMCR0, mmcr0);
/* program selected programmable events in */
mtspr(SPRN_PA6T_MMCR1, mmcr1);
pr_debug("setup on cpu %d, mmcr0 %016lx\n", smp_processor_id(),
mfspr(SPRN_PA6T_MMCR0));
pr_debug("setup on cpu %d, mmcr1 %016lx\n", smp_processor_id(),
mfspr(SPRN_PA6T_MMCR1));
return 0;
}
static int pa6t_start(struct op_counter_config *ctr)
{
int i;
/* Hold off event counting until rfid */
u64 mmcr0 = mmcr0_val | PA6T_MMCR0_HANDDIS;
for (i = 0; i < cur_cpu_spec->num_pmcs; i++)
if (ctr[i].enabled)
ctr_write(i, reset_value[i]);
else
ctr_write(i, 0UL);
mtspr(SPRN_PA6T_MMCR0, mmcr0);
oprofile_running = 1;
pr_debug("start on cpu %d, mmcr0 %llx\n", smp_processor_id(), mmcr0);
return 0;
}
static void pa6t_stop(void)
{
u64 mmcr0;
/* freeze counters */
mmcr0 = mfspr(SPRN_PA6T_MMCR0);
mmcr0 |= PA6T_MMCR0_FCM0;
mtspr(SPRN_PA6T_MMCR0, mmcr0);
oprofile_running = 0;
pr_debug("stop on cpu %d, mmcr0 %llx\n", smp_processor_id(), mmcr0);
}
/* handle the perfmon overflow vector */
static void pa6t_handle_interrupt(struct pt_regs *regs,
struct op_counter_config *ctr)
{
unsigned long pc = mfspr(SPRN_PA6T_SIAR);
int is_kernel = is_kernel_addr(pc);
u64 val;
int i;
u64 mmcr0;
/* disable perfmon counting until rfid */
mmcr0 = mfspr(SPRN_PA6T_MMCR0);
mtspr(SPRN_PA6T_MMCR0, mmcr0 | PA6T_MMCR0_HANDDIS);
/* Record samples. We've got one global bit for whether a sample
* was taken, so add it for any counter that triggered overflow.
*/
for (i = 0; i < cur_cpu_spec->num_pmcs; i++) {
val = ctr_read(i);
if (val & (0x1UL << 39)) { /* Overflow bit set */
if (oprofile_running && ctr[i].enabled) {
if (mmcr0 & PA6T_MMCR0_SIARLOG)
oprofile_add_ext_sample(pc, regs, i, is_kernel);
ctr_write(i, reset_value[i]);
} else {
ctr_write(i, 0UL);
}
}
}
/* Restore mmcr0 to a good known value since the PMI changes it */
mmcr0 = mmcr0_val | PA6T_MMCR0_HANDDIS;
mtspr(SPRN_PA6T_MMCR0, mmcr0);
}
struct op_powerpc_model op_model_pa6t = {
.reg_setup = pa6t_reg_setup,
.cpu_setup = pa6t_cpu_setup,
.start = pa6t_start,
.stop = pa6t_stop,
.handle_interrupt = pa6t_handle_interrupt,
};
| gpl-2.0 |
jmztaylor/android_kernel_htc_primoc_backup | arch/sparc/prom/memory.c | 12201 | 1937 | /* memory.c: Prom routine for acquiring various bits of information
* about RAM on the machine, both virtual and physical.
*
* Copyright (C) 1995, 2008 David S. Miller (davem@davemloft.net)
* Copyright (C) 1997 Michael A. Griffith (grif@acm.org)
*/
#include <linux/kernel.h>
#include <linux/sort.h>
#include <linux/init.h>
#include <asm/openprom.h>
#include <asm/oplib.h>
#include <asm/page.h>
static int __init prom_meminit_v0(void)
{
struct linux_mlist_v0 *p;
int index;
index = 0;
for (p = *(romvec->pv_v0mem.v0_available); p; p = p->theres_more) {
sp_banks[index].base_addr = (unsigned long) p->start_adr;
sp_banks[index].num_bytes = p->num_bytes;
index++;
}
return index;
}
static int __init prom_meminit_v2(void)
{
struct linux_prom_registers reg[64];
phandle node;
int size, num_ents, i;
node = prom_searchsiblings(prom_getchild(prom_root_node), "memory");
size = prom_getproperty(node, "available", (char *) reg, sizeof(reg));
num_ents = size / sizeof(struct linux_prom_registers);
for (i = 0; i < num_ents; i++) {
sp_banks[i].base_addr = reg[i].phys_addr;
sp_banks[i].num_bytes = reg[i].reg_size;
}
return num_ents;
}
static int sp_banks_cmp(const void *a, const void *b)
{
const struct sparc_phys_banks *x = a, *y = b;
if (x->base_addr > y->base_addr)
return 1;
if (x->base_addr < y->base_addr)
return -1;
return 0;
}
/* Initialize the memory lists based upon the prom version. */
void __init prom_meminit(void)
{
int i, num_ents = 0;
switch (prom_vers) {
case PROM_V0:
num_ents = prom_meminit_v0();
break;
case PROM_V2:
case PROM_V3:
num_ents = prom_meminit_v2();
break;
default:
break;
}
sort(sp_banks, num_ents, sizeof(struct sparc_phys_banks),
sp_banks_cmp, NULL);
/* Sentinel. */
sp_banks[num_ents].base_addr = 0xdeadbeef;
sp_banks[num_ents].num_bytes = 0;
for (i = 0; i < num_ents; i++)
sp_banks[i].num_bytes &= PAGE_MASK;
}
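/*
 * Illustrative sketch (not part of the original file): sort() above is
 * the kernel's generic in-place heapsort; it only needs the element
 * size and a three-way comparator like sp_banks_cmp(). Generic form,
 * names hypothetical:
 */
#if 0
static int u32_cmp_example(const void *a, const void *b)
{
	const u32 *x = a, *y = b;

	return (*x > *y) - (*x < *y);	/* 1, 0 or -1 */
}
/* invoked as: sort(array, nmemb, sizeof(u32), u32_cmp_example, NULL); */
#endif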
| gpl-2.0 |
NamelessRom/android_kernel_oppo_msm8939 | arch/sparc/prom/memory.c | 12201 | 1937 | /* memory.c: Prom routine for acquiring various bits of information
* about RAM on the machine, both virtual and physical.
*
* Copyright (C) 1995, 2008 David S. Miller (davem@davemloft.net)
* Copyright (C) 1997 Michael A. Griffith (grif@acm.org)
*/
#include <linux/kernel.h>
#include <linux/sort.h>
#include <linux/init.h>
#include <asm/openprom.h>
#include <asm/oplib.h>
#include <asm/page.h>
static int __init prom_meminit_v0(void)
{
struct linux_mlist_v0 *p;
int index;
index = 0;
for (p = *(romvec->pv_v0mem.v0_available); p; p = p->theres_more) {
sp_banks[index].base_addr = (unsigned long) p->start_adr;
sp_banks[index].num_bytes = p->num_bytes;
index++;
}
return index;
}
static int __init prom_meminit_v2(void)
{
struct linux_prom_registers reg[64];
phandle node;
int size, num_ents, i;
node = prom_searchsiblings(prom_getchild(prom_root_node), "memory");
size = prom_getproperty(node, "available", (char *) reg, sizeof(reg));
num_ents = size / sizeof(struct linux_prom_registers);
for (i = 0; i < num_ents; i++) {
sp_banks[i].base_addr = reg[i].phys_addr;
sp_banks[i].num_bytes = reg[i].reg_size;
}
return num_ents;
}
static int sp_banks_cmp(const void *a, const void *b)
{
const struct sparc_phys_banks *x = a, *y = b;
if (x->base_addr > y->base_addr)
return 1;
if (x->base_addr < y->base_addr)
return -1;
return 0;
}
/* Initialize the memory lists based upon the prom version. */
void __init prom_meminit(void)
{
int i, num_ents = 0;
switch (prom_vers) {
case PROM_V0:
num_ents = prom_meminit_v0();
break;
case PROM_V2:
case PROM_V3:
num_ents = prom_meminit_v2();
break;
default:
break;
}
sort(sp_banks, num_ents, sizeof(struct sparc_phys_banks),
sp_banks_cmp, NULL);
/* Sentinel. */
sp_banks[num_ents].base_addr = 0xdeadbeef;
sp_banks[num_ents].num_bytes = 0;
for (i = 0; i < num_ents; i++)
sp_banks[i].num_bytes &= PAGE_MASK;
}
| gpl-2.0 |
ayeric/android_kernel_motorola_ghost | drivers/ide/ide-sysfs.c | 12457 | 2835 | #include <linux/kernel.h>
#include <linux/ide.h>
char *ide_media_string(ide_drive_t *drive)
{
switch (drive->media) {
case ide_disk:
return "disk";
case ide_cdrom:
return "cdrom";
case ide_tape:
return "tape";
case ide_floppy:
return "floppy";
case ide_optical:
return "optical";
default:
return "UNKNOWN";
}
}
static ssize_t media_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
ide_drive_t *drive = to_ide_device(dev);
return sprintf(buf, "%s\n", ide_media_string(drive));
}
static ssize_t drivename_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
ide_drive_t *drive = to_ide_device(dev);
return sprintf(buf, "%s\n", drive->name);
}
static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
ide_drive_t *drive = to_ide_device(dev);
return sprintf(buf, "ide:m-%s\n", ide_media_string(drive));
}
static ssize_t model_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
ide_drive_t *drive = to_ide_device(dev);
return sprintf(buf, "%s\n", (char *)&drive->id[ATA_ID_PROD]);
}
static ssize_t firmware_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
ide_drive_t *drive = to_ide_device(dev);
return sprintf(buf, "%s\n", (char *)&drive->id[ATA_ID_FW_REV]);
}
static ssize_t serial_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
ide_drive_t *drive = to_ide_device(dev);
return sprintf(buf, "%s\n", (char *)&drive->id[ATA_ID_SERNO]);
}
struct device_attribute ide_dev_attrs[] = {
__ATTR_RO(media),
__ATTR_RO(drivename),
__ATTR_RO(modalias),
__ATTR_RO(model),
__ATTR_RO(firmware),
__ATTR(serial, 0400, serial_show, NULL),
__ATTR(unload_heads, 0644, ide_park_show, ide_park_store),
__ATTR_NULL
};
static ssize_t store_delete_devices(struct device *portdev,
struct device_attribute *attr,
const char *buf, size_t n)
{
ide_hwif_t *hwif = dev_get_drvdata(portdev);
if (strncmp(buf, "1", n))
return -EINVAL;
ide_port_unregister_devices(hwif);
return n;
}
static DEVICE_ATTR(delete_devices, S_IWUSR, NULL, store_delete_devices);
static ssize_t store_scan(struct device *portdev,
struct device_attribute *attr,
const char *buf, size_t n)
{
ide_hwif_t *hwif = dev_get_drvdata(portdev);
if (strncmp(buf, "1", n))
return -EINVAL;
ide_port_unregister_devices(hwif);
ide_port_scan(hwif);
return n;
}
static DEVICE_ATTR(scan, S_IWUSR, NULL, store_scan);
static struct device_attribute *ide_port_attrs[] = {
&dev_attr_delete_devices,
&dev_attr_scan,
NULL
};
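/*
 * Illustrative usage from userspace (the ide0 port name and the
 * /sys/class/ide_port path are assumptions for the example). Note
 * that the store handlers above compare the whole buffer against
 * "1", so a trailing newline is rejected:
 *
 * echo -n 1 > /sys/class/ide_port/ide0/delete_devices
 * echo -n 1 > /sys/class/ide_port/ide0/scan
 */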
int ide_sysfs_register_port(ide_hwif_t *hwif)
{
int i, uninitialized_var(rc);
for (i = 0; ide_port_attrs[i]; i++) {
rc = device_create_file(hwif->portdev, ide_port_attrs[i]);
if (rc)
break;
}
return rc;
}
| gpl-2.0 |
smipi1/linux-tinification | drivers/hwspinlock/hwspinlock_core.c | 170 | 17938 | /*
* Hardware spinlock framework
*
* Copyright (C) 2010 Texas Instruments Incorporated - http://www.ti.com
*
* Contact: Ohad Ben-Cohen <ohad@wizery.com>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
* by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#define pr_fmt(fmt) "%s: " fmt, __func__
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/err.h>
#include <linux/jiffies.h>
#include <linux/radix-tree.h>
#include <linux/hwspinlock.h>
#include <linux/pm_runtime.h>
#include <linux/mutex.h>
#include "hwspinlock_internal.h"
/* radix tree tags */
#define HWSPINLOCK_UNUSED (0) /* tags an hwspinlock as unused */
/*
* A radix tree is used to maintain the available hwspinlock instances.
* The tree associates hwspinlock pointers with their integer key id,
* and provides easy-to-use API which makes the hwspinlock core code simple
* and easy to read.
*
* Radix trees are quick on lookups, and reasonably efficient in terms of
* storage, especially with high density usages such as this framework
* requires (a continuous range of integer keys, beginning with zero, is
* used as the ID's of the hwspinlock instances).
*
* The radix tree API supports tagging items in the tree, which this
* framework uses to mark unused hwspinlock instances (see the
* HWSPINLOCK_UNUSED tag above). As a result, the process of querying the
* tree, looking for an unused hwspinlock instance, is now reduced to a
* single radix tree API call.
*/
static RADIX_TREE(hwspinlock_tree, GFP_KERNEL);
/*
* Synchronization of access to the tree is achieved using this mutex,
* as the radix-tree API requires that users provide all synchronisation.
* A mutex is needed because we're using non-atomic radix tree allocations.
*/
static DEFINE_MUTEX(hwspinlock_tree_lock);
/**
* __hwspin_trylock() - attempt to lock a specific hwspinlock
* @hwlock: an hwspinlock which we want to trylock
* @mode: controls whether local interrupts are disabled or not
* @flags: a pointer where the caller's interrupt state will be saved (if
* requested)
*
* This function attempts to lock an hwspinlock, and will immediately
* fail if the hwspinlock is already taken.
*
* Upon a successful return from this function, preemption (and possibly
* interrupts) is disabled, so the caller must not sleep, and is advised to
* release the hwspinlock as soon as possible. This is required in order to
* minimize remote cores polling on the hardware interconnect.
*
* The user decides whether local interrupts are disabled or not, and if yes,
* whether he wants their previous state to be saved. It is up to the user
* to choose the appropriate @mode of operation, exactly the same way users
* should decide between spin_trylock, spin_trylock_irq and
* spin_trylock_irqsave.
*
* Returns 0 if we successfully locked the hwspinlock or -EBUSY if
* the hwspinlock was already taken.
* This function will never sleep.
*/
int __hwspin_trylock(struct hwspinlock *hwlock, int mode, unsigned long *flags)
{
int ret;
BUG_ON(!hwlock);
BUG_ON(!flags && mode == HWLOCK_IRQSTATE);
/*
* This spin_lock{_irq, _irqsave} serves three purposes:
*
* 1. Disable preemption, in order to minimize the period of time
* in which the hwspinlock is taken. This is important in order
* to minimize the possible polling on the hardware interconnect
* by a remote user of this lock.
* 2. Make the hwspinlock SMP-safe (so we can take it from
* additional contexts on the local host).
* 3. Ensure that in_atomic/might_sleep checks catch potential
* problems with hwspinlock usage (e.g. scheduler checks like
* 'scheduling while atomic' etc.)
*/
if (mode == HWLOCK_IRQSTATE)
ret = spin_trylock_irqsave(&hwlock->lock, *flags);
else if (mode == HWLOCK_IRQ)
ret = spin_trylock_irq(&hwlock->lock);
else
ret = spin_trylock(&hwlock->lock);
/* is lock already taken by another context on the local cpu? */
if (!ret)
return -EBUSY;
/* try to take the hwspinlock device */
ret = hwlock->bank->ops->trylock(hwlock);
/* if hwlock is already taken, undo spin_trylock_* and exit */
if (!ret) {
if (mode == HWLOCK_IRQSTATE)
spin_unlock_irqrestore(&hwlock->lock, *flags);
else if (mode == HWLOCK_IRQ)
spin_unlock_irq(&hwlock->lock);
else
spin_unlock(&hwlock->lock);
return -EBUSY;
}
/*
* We can be sure the other core's memory operations
* are observable to us only _after_ we successfully take
* the hwspinlock, and we must make sure that subsequent memory
* operations (both reads and writes) will not be reordered before
* we actually took the hwspinlock.
*
* Note: the implicit memory barrier of the spinlock above is too
* early, so we need this additional explicit memory barrier.
*/
mb();
return 0;
}
EXPORT_SYMBOL_GPL(__hwspin_trylock);
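/*
 * Illustrative caller, assuming a hwlock obtained from
 * hwspin_lock_request() and a hypothetical counter shared with a
 * remote core; drivers normally reach this function through the
 * hwspin_trylock_irqsave() wrapper in <linux/hwspinlock.h>:
 */
#if 0
static int example_bump_shared_counter(struct hwspinlock *hwlock,
unsigned int *shared_counter)
{
unsigned long flags;
int ret;
/* HWLOCK_IRQSTATE: disable local interrupts and save their state */
ret = __hwspin_trylock(hwlock, HWLOCK_IRQSTATE, &flags);
if (ret)
return ret; /* -EBUSY: held by another context or remote core */
(*shared_counter)++; /* keep the critical section short */
__hwspin_unlock(hwlock, HWLOCK_IRQSTATE, &flags);
return 0;
}
#endif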
/**
* __hwspin_lock_timeout() - lock an hwspinlock with timeout limit
* @hwlock: the hwspinlock to be locked
* @to: timeout value in msecs
* @mode: mode which controls whether local interrupts are disabled or not
* @flags: a pointer to where the caller's interrupt state will be saved (if
* requested)
*
* This function locks the given @hwlock. If the @hwlock
* is already taken, the function will busy loop waiting for it to
* be released, but give up after @to msecs have elapsed.
*
* Upon a successful return from this function, preemption is disabled
* (and possibly local interrupts, too), so the caller must not sleep,
* and is advised to release the hwspinlock as soon as possible.
* This is required in order to minimize remote cores polling on the
* hardware interconnect.
*
* The user decides whether local interrupts are disabled or not, and if yes,
* whether he wants their previous state to be saved. It is up to the user
* to choose the appropriate @mode of operation, exactly the same way users
* should decide between spin_lock, spin_lock_irq and spin_lock_irqsave.
*
* Returns 0 when the @hwlock was successfully taken, and an appropriate
* error code otherwise (most notably -ETIMEDOUT if the @hwlock is still
* busy after @to msecs). The function will never sleep.
*/
int __hwspin_lock_timeout(struct hwspinlock *hwlock, unsigned int to,
int mode, unsigned long *flags)
{
int ret;
unsigned long expire;
expire = msecs_to_jiffies(to) + jiffies;
for (;;) {
/* Try to take the hwspinlock */
ret = __hwspin_trylock(hwlock, mode, flags);
if (ret != -EBUSY)
break;
/*
* The lock is already taken, let's check if the user wants
* us to try again
*/
if (time_is_before_eq_jiffies(expire))
return -ETIMEDOUT;
/*
* Allow platform-specific relax handlers to prevent
* hogging the interconnect (no sleeping, though)
*/
if (hwlock->bank->ops->relax)
hwlock->bank->ops->relax(hwlock);
}
return ret;
}
EXPORT_SYMBOL_GPL(__hwspin_lock_timeout);
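/*
 * Illustrative caller; the 100 msecs budget is an arbitrary example,
 * and @flags may be NULL for any mode other than HWLOCK_IRQSTATE:
 */
#if 0
static int example_update_shared_state(struct hwspinlock *hwlock)
{
int ret;
ret = __hwspin_lock_timeout(hwlock, 100, HWLOCK_IRQ, NULL);
if (ret)
return ret; /* -ETIMEDOUT: still busy after 100 msecs */
/* ... short critical section ... */
__hwspin_unlock(hwlock, HWLOCK_IRQ, NULL);
return 0;
}
#endif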
/**
* __hwspin_unlock() - unlock a specific hwspinlock
* @hwlock: a previously-acquired hwspinlock which we want to unlock
* @mode: controls whether local interrupts needs to be restored or not
* @flags: previous caller's interrupt state to restore (if requested)
*
* This function will unlock a specific hwspinlock, enable preemption and
* (possibly) enable interrupts or restore their previous state.
* @hwlock must be already locked before calling this function: it is a bug
* to call unlock on a @hwlock that is already unlocked.
*
* The user decides whether local interrupts should be enabled or not, and
* if yes, whether he wants their previous state to be restored. It is up
* to the user to choose the appropriate @mode of operation, exactly the
* same way users decide between spin_unlock, spin_unlock_irq and
* spin_unlock_irqrestore.
*
* The function will never sleep.
*/
void __hwspin_unlock(struct hwspinlock *hwlock, int mode, unsigned long *flags)
{
BUG_ON(!hwlock);
BUG_ON(!flags && mode == HWLOCK_IRQSTATE);
/*
* We must make sure that memory operations (both reads and writes),
* done before unlocking the hwspinlock, will not be reordered
* after the lock is released.
*
* That's the purpose of this explicit memory barrier.
*
* Note: the memory barrier induced by the spin_unlock below is too
* late; the other core is going to access memory soon after it will
* take the hwspinlock, and by then we want to be sure our memory
* operations are already observable.
*/
mb();
hwlock->bank->ops->unlock(hwlock);
/* Undo the spin_trylock{_irq, _irqsave} called while locking */
if (mode == HWLOCK_IRQSTATE)
spin_unlock_irqrestore(&hwlock->lock, *flags);
else if (mode == HWLOCK_IRQ)
spin_unlock_irq(&hwlock->lock);
else
spin_unlock(&hwlock->lock);
}
EXPORT_SYMBOL_GPL(__hwspin_unlock);
static int hwspin_lock_register_single(struct hwspinlock *hwlock, int id)
{
struct hwspinlock *tmp;
int ret;
mutex_lock(&hwspinlock_tree_lock);
ret = radix_tree_insert(&hwspinlock_tree, id, hwlock);
if (ret) {
if (ret == -EEXIST)
pr_err("hwspinlock id %d already exists!\n", id);
goto out;
}
/* mark this hwspinlock as available */
tmp = radix_tree_tag_set(&hwspinlock_tree, id, HWSPINLOCK_UNUSED);
/* self-sanity check which should never fail */
WARN_ON(tmp != hwlock);
out:
mutex_unlock(&hwspinlock_tree_lock);
return ret;
}
static struct hwspinlock *hwspin_lock_unregister_single(unsigned int id)
{
struct hwspinlock *hwlock = NULL;
int ret;
mutex_lock(&hwspinlock_tree_lock);
/* make sure the hwspinlock is not in use (tag is set) */
ret = radix_tree_tag_get(&hwspinlock_tree, id, HWSPINLOCK_UNUSED);
if (ret == 0) {
pr_err("hwspinlock %d still in use (or not present)\n", id);
goto out;
}
hwlock = radix_tree_delete(&hwspinlock_tree, id);
if (!hwlock) {
pr_err("failed to delete hwspinlock %d\n", id);
goto out;
}
out:
mutex_unlock(&hwspinlock_tree_lock);
return hwlock;
}
/**
* hwspin_lock_register() - register a new hw spinlock device
* @bank: the hwspinlock device, which usually provides numerous hw locks
* @dev: the backing device
* @ops: hwspinlock handlers for this device
* @base_id: id of the first hardware spinlock in this bank
* @num_locks: number of hwspinlocks provided by this device
*
* This function should be called from the underlying platform-specific
* implementation, to register a new hwspinlock device instance.
*
* Should be called from a process context (might sleep)
*
* Returns 0 on success, or an appropriate error code on failure
*/
int hwspin_lock_register(struct hwspinlock_device *bank, struct device *dev,
const struct hwspinlock_ops *ops, int base_id, int num_locks)
{
struct hwspinlock *hwlock;
int ret = 0, i;
if (!bank || !ops || !dev || !num_locks || !ops->trylock ||
!ops->unlock) {
pr_err("invalid parameters\n");
return -EINVAL;
}
bank->dev = dev;
bank->ops = ops;
bank->base_id = base_id;
bank->num_locks = num_locks;
for (i = 0; i < num_locks; i++) {
hwlock = &bank->lock[i];
spin_lock_init(&hwlock->lock);
hwlock->bank = bank;
ret = hwspin_lock_register_single(hwlock, base_id + i);
if (ret)
goto reg_failed;
}
return 0;
reg_failed:
while (--i >= 0)
hwspin_lock_unregister_single(base_id + i);
return ret;
}
EXPORT_SYMBOL_GPL(hwspin_lock_register);
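/*
 * Minimal registration sketch for a platform-specific backend; the
 * probe function, ops bodies, lock count and devm allocation are
 * illustrative assumptions (headers for platform_device and devm
 * helpers are omitted here):
 */
#if 0
static int example_trylock(struct hwspinlock *lock)
{
return 1; /* nonzero reports the hardware lock as taken */
}
static void example_unlock(struct hwspinlock *lock)
{
}
static const struct hwspinlock_ops example_ops = {
.trylock = example_trylock,
.unlock = example_unlock,
};
static int example_probe(struct platform_device *pdev)
{
const int num_locks = 8; /* illustrative bank size */
struct hwspinlock_device *bank;
bank = devm_kzalloc(&pdev->dev, sizeof(*bank) +
num_locks * sizeof(struct hwspinlock), GFP_KERNEL);
if (!bank)
return -ENOMEM;
return hwspin_lock_register(bank, &pdev->dev, &example_ops,
0, num_locks);
}
#endif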
/**
* hwspin_lock_unregister() - unregister an hw spinlock device
* @bank: the hwspinlock device, which usually provides numerous hw locks
*
* This function should be called from the underlying platform-specific
* implementation, to unregister an existing (and unused) hwspinlock.
*
* Should be called from a process context (might sleep)
*
* Returns 0 on success, or an appropriate error code on failure
*/
int hwspin_lock_unregister(struct hwspinlock_device *bank)
{
struct hwspinlock *hwlock, *tmp;
int i;
for (i = 0; i < bank->num_locks; i++) {
hwlock = &bank->lock[i];
tmp = hwspin_lock_unregister_single(bank->base_id + i);
if (!tmp)
return -EBUSY;
/* self-sanity check that should never fail */
WARN_ON(tmp != hwlock);
}
return 0;
}
EXPORT_SYMBOL_GPL(hwspin_lock_unregister);
/**
* __hwspin_lock_request() - tag an hwspinlock as used and power it up
*
* This is an internal function that prepares an hwspinlock instance
* before it is given to the user. The function assumes that
* hwspinlock_tree_lock is taken.
*
* Returns 0 or positive to indicate success, and a negative value to
* indicate an error (with the appropriate error code)
*/
static int __hwspin_lock_request(struct hwspinlock *hwlock)
{
struct device *dev = hwlock->bank->dev;
struct hwspinlock *tmp;
int ret;
/* prevent underlying implementation from being removed */
if (!try_module_get(dev->driver->owner)) {
dev_err(dev, "%s: can't get owner\n", __func__);
return -EINVAL;
}
/* notify PM core that power is now needed */
ret = pm_runtime_get_sync(dev);
if (ret < 0) {
dev_err(dev, "%s: can't power on device\n", __func__);
/* undo the refcounts taken above before failing */
pm_runtime_put_noidle(dev);
module_put(dev->driver->owner);
return ret;
}
/* mark hwspinlock as used, should not fail */
tmp = radix_tree_tag_clear(&hwspinlock_tree, hwlock_to_id(hwlock),
HWSPINLOCK_UNUSED);
/* self-sanity check that should never fail */
WARN_ON(tmp != hwlock);
return ret;
}
/**
* hwspin_lock_get_id() - retrieve id number of a given hwspinlock
* @hwlock: a valid hwspinlock instance
*
* Returns the id number of a given @hwlock, or -EINVAL if @hwlock is invalid.
*/
int hwspin_lock_get_id(struct hwspinlock *hwlock)
{
if (!hwlock) {
pr_err("invalid hwlock\n");
return -EINVAL;
}
return hwlock_to_id(hwlock);
}
EXPORT_SYMBOL_GPL(hwspin_lock_get_id);
/**
* hwspin_lock_request() - request an hwspinlock
*
* This function should be called by users of the hwspinlock device,
* in order to dynamically assign them an unused hwspinlock.
* Usually the user of this lock will then have to communicate the lock's id
* to the remote core before it can be used for synchronization (to get the
* id of a given hwlock, use hwspin_lock_get_id()).
*
* Should be called from a process context (might sleep)
*
* Returns the address of the assigned hwspinlock, or NULL on error
*/
struct hwspinlock *hwspin_lock_request(void)
{
struct hwspinlock *hwlock;
int ret;
mutex_lock(&hwspinlock_tree_lock);
/* look for an unused lock */
ret = radix_tree_gang_lookup_tag(&hwspinlock_tree, (void **)&hwlock,
0, 1, HWSPINLOCK_UNUSED);
if (ret == 0) {
pr_warn("a free hwspinlock is not available\n");
hwlock = NULL;
goto out;
}
/* sanity check that should never fail */
WARN_ON(ret > 1);
/* mark as used and power up */
ret = __hwspin_lock_request(hwlock);
if (ret < 0)
hwlock = NULL;
out:
mutex_unlock(&hwspinlock_tree_lock);
return hwlock;
}
EXPORT_SYMBOL_GPL(hwspin_lock_request);
/**
* hwspin_lock_request_specific() - request for a specific hwspinlock
* @id: index of the specific hwspinlock that is requested
*
* This function should be called by users of the hwspinlock module,
* in order to assign them a specific hwspinlock.
* Usually early board code will be calling this function in order to
* reserve specific hwspinlock ids for predefined purposes.
*
* Should be called from a process context (might sleep)
*
* Returns the address of the assigned hwspinlock, or NULL on error
*/
struct hwspinlock *hwspin_lock_request_specific(unsigned int id)
{
struct hwspinlock *hwlock;
int ret;
mutex_lock(&hwspinlock_tree_lock);
/* make sure this hwspinlock exists */
hwlock = radix_tree_lookup(&hwspinlock_tree, id);
if (!hwlock) {
pr_warn("hwspinlock %u does not exist\n", id);
goto out;
}
/* sanity check (this shouldn't happen) */
WARN_ON(hwlock_to_id(hwlock) != id);
/* make sure this hwspinlock is unused */
ret = radix_tree_tag_get(&hwspinlock_tree, id, HWSPINLOCK_UNUSED);
if (ret == 0) {
pr_warn("hwspinlock %u is already in use\n", id);
hwlock = NULL;
goto out;
}
/* mark as used and power up */
ret = __hwspin_lock_request(hwlock);
if (ret < 0)
hwlock = NULL;
out:
mutex_unlock(&hwspinlock_tree_lock);
return hwlock;
}
EXPORT_SYMBOL_GPL(hwspin_lock_request_specific);
/**
* hwspin_lock_free() - free a specific hwspinlock
* @hwlock: the specific hwspinlock to free
*
* This function marks @hwlock as free again.
* Should only be called with an @hwlock that was retrieved from
* an earlier call to hwspin_lock_request{_specific}.
*
* Should be called from a process context (might sleep)
*
* Returns 0 on success, or an appropriate error code on failure
*/
int hwspin_lock_free(struct hwspinlock *hwlock)
{
struct device *dev;
struct hwspinlock *tmp;
int ret;
if (!hwlock) {
pr_err("invalid hwlock\n");
return -EINVAL;
}
dev = hwlock->bank->dev;
mutex_lock(&hwspinlock_tree_lock);
/* make sure the hwspinlock is used */
ret = radix_tree_tag_get(&hwspinlock_tree, hwlock_to_id(hwlock),
HWSPINLOCK_UNUSED);
if (ret == 1) {
dev_err(dev, "%s: hwlock is already free\n", __func__);
dump_stack();
ret = -EINVAL;
goto out;
}
/* notify the underlying device that power is not needed */
ret = pm_runtime_put(dev);
if (ret < 0)
goto out;
/* mark this hwspinlock as available */
tmp = radix_tree_tag_set(&hwspinlock_tree, hwlock_to_id(hwlock),
HWSPINLOCK_UNUSED);
/* sanity check (this shouldn't happen) */
WARN_ON(tmp != hwlock);
module_put(dev->driver->owner);
out:
mutex_unlock(&hwspinlock_tree_lock);
return ret;
}
EXPORT_SYMBOL_GPL(hwspin_lock_free);
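/*
 * End-to-end lifecycle sketch (illustrative, process context):
 * dynamically request a lock, publish its id to the remote core,
 * use it, then release it.
 */
#if 0
static void example_lifecycle(void)
{
struct hwspinlock *hwlock;
unsigned long flags;
hwlock = hwspin_lock_request();
if (!hwlock)
return;
/* the remote core addresses the same lock by this id */
pr_info("using hwspinlock %d\n", hwspin_lock_get_id(hwlock));
if (!__hwspin_trylock(hwlock, HWLOCK_IRQSTATE, &flags)) {
/* ... critical section ... */
__hwspin_unlock(hwlock, HWLOCK_IRQSTATE, &flags);
}
hwspin_lock_free(hwlock);
}
#endif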
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Hardware spinlock interface");
MODULE_AUTHOR("Ohad Ben-Cohen <ohad@wizery.com>");
| gpl-2.0 |
coredumb/linux-grsecurity | net/sched/cls_tcindex.c | 170 | 12326 | /*
* net/sched/cls_tcindex.c Packet classifier for skb->tc_index
*
* Written 1998,1999 by Werner Almesberger, EPFL ICA
*/
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <net/act_api.h>
#include <net/netlink.h>
#include <net/pkt_cls.h>
/*
* Passing parameters to the root seems to be done more awkwardly than really
* necessary. At least, u32 doesn't seem to use such dirty hacks. To be
* verified. FIXME.
*/
#define PERFECT_HASH_THRESHOLD 64 /* use perfect hash if not bigger */
#define DEFAULT_HASH_SIZE 64 /* optimized for diffserv */
struct tcindex_filter_result {
struct tcf_exts exts;
struct tcf_result res;
};
struct tcindex_filter {
u16 key;
struct tcindex_filter_result result;
struct tcindex_filter *next;
};
struct tcindex_data {
struct tcindex_filter_result *perfect; /* perfect hash; NULL if none */
struct tcindex_filter **h; /* imperfect hash; only used if !perfect;
NULL if unused */
u16 mask; /* AND key with mask */
int shift; /* shift ANDed key to the right */
int hash; /* hash table size; 0 if undefined */
int alloc_hash; /* allocated size */
int fall_through; /* 0: only classify if explicit match */
};
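/*
 * Worked example: with mask = 0x00f0 and shift = 4, an skb carrying
 * tc_index = 0x0123 is looked up under
 * key = (0x0123 & 0x00f0) >> 4 = 2, indexing either the perfect
 * hash array or the h[key % hash] chain.
 */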
static inline int
tcindex_filter_is_set(struct tcindex_filter_result *r)
{
return tcf_exts_is_predicative(&r->exts) || r->res.classid;
}
static struct tcindex_filter_result *
tcindex_lookup(struct tcindex_data *p, u16 key)
{
struct tcindex_filter *f;
if (p->perfect)
return tcindex_filter_is_set(p->perfect + key) ?
p->perfect + key : NULL;
else if (p->h) {
for (f = p->h[key % p->hash]; f; f = f->next)
if (f->key == key)
return &f->result;
}
return NULL;
}
static int tcindex_classify(struct sk_buff *skb, const struct tcf_proto *tp,
struct tcf_result *res)
{
struct tcindex_data *p = tp->root;
struct tcindex_filter_result *f;
int key = (skb->tc_index & p->mask) >> p->shift;
pr_debug("tcindex_classify(skb %p,tp %p,res %p),p %p\n",
skb, tp, res, p);
f = tcindex_lookup(p, key);
if (!f) {
if (!p->fall_through)
return -1;
res->classid = TC_H_MAKE(TC_H_MAJ(tp->q->handle), key);
res->class = 0;
pr_debug("alg 0x%x\n", res->classid);
return 0;
}
*res = f->res;
pr_debug("map 0x%x\n", res->classid);
return tcf_exts_exec(skb, &f->exts, res);
}
static unsigned long tcindex_get(struct tcf_proto *tp, u32 handle)
{
struct tcindex_data *p = tp->root;
struct tcindex_filter_result *r;
pr_debug("tcindex_get(tp %p,handle 0x%08x)\n", tp, handle);
if (p->perfect && handle >= p->alloc_hash)
return 0;
r = tcindex_lookup(p, handle);
return r && tcindex_filter_is_set(r) ? (unsigned long) r : 0UL;
}
static void tcindex_put(struct tcf_proto *tp, unsigned long f)
{
pr_debug("tcindex_put(tp %p,f 0x%lx)\n", tp, f);
}
static int tcindex_init(struct tcf_proto *tp)
{
struct tcindex_data *p;
pr_debug("tcindex_init(tp %p)\n", tp);
p = kzalloc(sizeof(struct tcindex_data), GFP_KERNEL);
if (!p)
return -ENOMEM;
p->mask = 0xffff;
p->hash = DEFAULT_HASH_SIZE;
p->fall_through = 1;
tp->root = p;
return 0;
}
static int
__tcindex_delete(struct tcf_proto *tp, unsigned long arg, int lock)
{
struct tcindex_data *p = tp->root;
struct tcindex_filter_result *r = (struct tcindex_filter_result *) arg;
struct tcindex_filter *f = NULL;
pr_debug("tcindex_delete(tp %p,arg 0x%lx),p %p,f %p\n", tp, arg, p, f);
if (p->perfect) {
if (!r->res.class)
return -ENOENT;
} else {
int i;
struct tcindex_filter **walk = NULL;
for (i = 0; i < p->hash; i++)
for (walk = p->h+i; *walk; walk = &(*walk)->next)
if (&(*walk)->result == r)
goto found;
return -ENOENT;
found:
f = *walk;
if (lock)
tcf_tree_lock(tp);
*walk = f->next;
if (lock)
tcf_tree_unlock(tp);
}
tcf_unbind_filter(tp, &r->res);
tcf_exts_destroy(tp, &r->exts);
kfree(f);
return 0;
}
static int tcindex_delete(struct tcf_proto *tp, unsigned long arg)
{
return __tcindex_delete(tp, arg, 1);
}
static inline int
valid_perfect_hash(struct tcindex_data *p)
{
return p->hash > (p->mask >> p->shift);
}
static const struct nla_policy tcindex_policy[TCA_TCINDEX_MAX + 1] = {
[TCA_TCINDEX_HASH] = { .type = NLA_U32 },
[TCA_TCINDEX_MASK] = { .type = NLA_U16 },
[TCA_TCINDEX_SHIFT] = { .type = NLA_U32 },
[TCA_TCINDEX_FALL_THROUGH] = { .type = NLA_U32 },
[TCA_TCINDEX_CLASSID] = { .type = NLA_U32 },
};
static void tcindex_filter_result_init(struct tcindex_filter_result *r)
{
memset(r, 0, sizeof(*r));
tcf_exts_init(&r->exts, TCA_TCINDEX_ACT, TCA_TCINDEX_POLICE);
}
static int
tcindex_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base,
u32 handle, struct tcindex_data *p,
struct tcindex_filter_result *r, struct nlattr **tb,
struct nlattr *est)
{
int err, balloc = 0;
struct tcindex_filter_result new_filter_result, *old_r = r;
struct tcindex_filter_result cr;
struct tcindex_data cp;
struct tcindex_filter *f = NULL; /* make gcc behave */
struct tcf_exts e;
tcf_exts_init(&e, TCA_TCINDEX_ACT, TCA_TCINDEX_POLICE);
err = tcf_exts_validate(net, tp, tb, est, &e);
if (err < 0)
return err;
memcpy(&cp, p, sizeof(cp));
tcindex_filter_result_init(&new_filter_result);
tcindex_filter_result_init(&cr);
if (old_r)
cr.res = r->res;
if (tb[TCA_TCINDEX_HASH])
cp.hash = nla_get_u32(tb[TCA_TCINDEX_HASH]);
if (tb[TCA_TCINDEX_MASK])
cp.mask = nla_get_u16(tb[TCA_TCINDEX_MASK]);
if (tb[TCA_TCINDEX_SHIFT])
cp.shift = nla_get_u32(tb[TCA_TCINDEX_SHIFT]);
err = -EBUSY;
/* Hash already allocated, make sure that we still meet the
* requirements for the allocated hash.
*/
if (cp.perfect) {
if (!valid_perfect_hash(&cp) ||
cp.hash > cp.alloc_hash)
goto errout;
} else if (cp.h && cp.hash != cp.alloc_hash)
goto errout;
err = -EINVAL;
if (tb[TCA_TCINDEX_FALL_THROUGH])
cp.fall_through = nla_get_u32(tb[TCA_TCINDEX_FALL_THROUGH]);
if (!cp.hash) {
/* Hash not specified, use perfect hash if the upper limit
* of the hashing index is below the threshold.
*/
if ((cp.mask >> cp.shift) < PERFECT_HASH_THRESHOLD)
cp.hash = (cp.mask >> cp.shift) + 1;
else
cp.hash = DEFAULT_HASH_SIZE;
}
if (!cp.perfect && !cp.h)
cp.alloc_hash = cp.hash;
/* Note: this could be as restrictive as if (handle & ~(mask >> shift))
* but then, we'd fail handles that may become valid after some future
* mask change. While this is extremely unlikely to ever matter,
* the check below is safer (and also more backwards-compatible).
*/
if (cp.perfect || valid_perfect_hash(&cp))
if (handle >= cp.alloc_hash)
goto errout;
err = -ENOMEM;
if (!cp.perfect && !cp.h) {
if (valid_perfect_hash(&cp)) {
int i;
cp.perfect = kcalloc(cp.hash, sizeof(*r), GFP_KERNEL);
if (!cp.perfect)
goto errout;
for (i = 0; i < cp.hash; i++)
tcf_exts_init(&cp.perfect[i].exts, TCA_TCINDEX_ACT,
TCA_TCINDEX_POLICE);
balloc = 1;
} else {
cp.h = kcalloc(cp.hash, sizeof(f), GFP_KERNEL);
if (!cp.h)
goto errout;
balloc = 2;
}
}
if (cp.perfect)
r = cp.perfect + handle;
else
r = tcindex_lookup(&cp, handle) ? : &new_filter_result;
if (r == &new_filter_result) {
f = kzalloc(sizeof(*f), GFP_KERNEL);
if (!f)
goto errout_alloc;
}
if (tb[TCA_TCINDEX_CLASSID]) {
cr.res.classid = nla_get_u32(tb[TCA_TCINDEX_CLASSID]);
tcf_bind_filter(tp, &cr.res, base);
}
if (old_r)
tcf_exts_change(tp, &r->exts, &e);
else
tcf_exts_change(tp, &cr.exts, &e);
tcf_tree_lock(tp);
if (old_r && old_r != r)
tcindex_filter_result_init(old_r);
memcpy(p, &cp, sizeof(cp));
r->res = cr.res;
if (r == &new_filter_result) {
struct tcindex_filter **fp;
f->key = handle;
f->result = new_filter_result;
f->next = NULL;
for (fp = p->h+(handle % p->hash); *fp; fp = &(*fp)->next)
/* nothing */;
*fp = f;
}
tcf_tree_unlock(tp);
return 0;
errout_alloc:
if (balloc == 1)
kfree(cp.perfect);
else if (balloc == 2)
kfree(cp.h);
errout:
tcf_exts_destroy(tp, &e);
return err;
}
static int
tcindex_change(struct net *net, struct sk_buff *in_skb,
struct tcf_proto *tp, unsigned long base, u32 handle,
struct nlattr **tca, unsigned long *arg)
{
struct nlattr *opt = tca[TCA_OPTIONS];
struct nlattr *tb[TCA_TCINDEX_MAX + 1];
struct tcindex_data *p = tp->root;
struct tcindex_filter_result *r = (struct tcindex_filter_result *) *arg;
int err;
pr_debug("tcindex_change(tp %p,handle 0x%08x,tca %p,arg %p),opt %p,"
"p %p,r %p,*arg 0x%lx\n",
tp, handle, tca, arg, opt, p, r, arg ? *arg : 0L);
if (!opt)
return 0;
err = nla_parse_nested(tb, TCA_TCINDEX_MAX, opt, tcindex_policy);
if (err < 0)
return err;
return tcindex_set_parms(net, tp, base, handle, p, r, tb,
tca[TCA_RATE]);
}
static void tcindex_walk(struct tcf_proto *tp, struct tcf_walker *walker)
{
struct tcindex_data *p = tp->root;
struct tcindex_filter *f, *next;
int i;
pr_debug("tcindex_walk(tp %p,walker %p),p %p\n", tp, walker, p);
if (p->perfect) {
for (i = 0; i < p->hash; i++) {
if (!p->perfect[i].res.class)
continue;
if (walker->count >= walker->skip) {
if (walker->fn(tp,
(unsigned long) (p->perfect+i), walker)
< 0) {
walker->stop = 1;
return;
}
}
walker->count++;
}
}
if (!p->h)
return;
for (i = 0; i < p->hash; i++) {
for (f = p->h[i]; f; f = next) {
next = f->next;
if (walker->count >= walker->skip) {
if (walker->fn(tp, (unsigned long) &f->result,
walker) < 0) {
walker->stop = 1;
return;
}
}
walker->count++;
}
}
}
static int tcindex_destroy_element(struct tcf_proto *tp,
unsigned long arg, struct tcf_walker *walker)
{
return __tcindex_delete(tp, arg, 0);
}
static void tcindex_destroy(struct tcf_proto *tp)
{
struct tcindex_data *p = tp->root;
struct tcf_walker walker;
pr_debug("tcindex_destroy(tp %p),p %p\n", tp, p);
walker.count = 0;
walker.skip = 0;
walker.fn = &tcindex_destroy_element;
tcindex_walk(tp, &walker);
kfree(p->perfect);
kfree(p->h);
kfree(p);
tp->root = NULL;
}
static int tcindex_dump(struct net *net, struct tcf_proto *tp, unsigned long fh,
struct sk_buff *skb, struct tcmsg *t)
{
struct tcindex_data *p = tp->root;
struct tcindex_filter_result *r = (struct tcindex_filter_result *) fh;
unsigned char *b = skb_tail_pointer(skb);
struct nlattr *nest;
pr_debug("tcindex_dump(tp %p,fh 0x%lx,skb %p,t %p),p %p,r %p,b %p\n",
tp, fh, skb, t, p, r, b);
pr_debug("p->perfect %p p->h %p\n", p->perfect, p->h);
nest = nla_nest_start(skb, TCA_OPTIONS);
if (nest == NULL)
goto nla_put_failure;
if (!fh) {
t->tcm_handle = ~0; /* whatever ... */
if (nla_put_u32(skb, TCA_TCINDEX_HASH, p->hash) ||
nla_put_u16(skb, TCA_TCINDEX_MASK, p->mask) ||
nla_put_u32(skb, TCA_TCINDEX_SHIFT, p->shift) ||
nla_put_u32(skb, TCA_TCINDEX_FALL_THROUGH, p->fall_through))
goto nla_put_failure;
nla_nest_end(skb, nest);
} else {
if (p->perfect) {
t->tcm_handle = r-p->perfect;
} else {
struct tcindex_filter *f;
int i;
t->tcm_handle = 0;
for (i = 0; !t->tcm_handle && i < p->hash; i++) {
for (f = p->h[i]; !t->tcm_handle && f;
f = f->next) {
if (&f->result == r)
t->tcm_handle = f->key;
}
}
}
pr_debug("handle = %d\n", t->tcm_handle);
if (r->res.class &&
nla_put_u32(skb, TCA_TCINDEX_CLASSID, r->res.classid))
goto nla_put_failure;
if (tcf_exts_dump(skb, &r->exts) < 0)
goto nla_put_failure;
nla_nest_end(skb, nest);
if (tcf_exts_dump_stats(skb, &r->exts) < 0)
goto nla_put_failure;
}
return skb->len;
nla_put_failure:
nlmsg_trim(skb, b);
return -1;
}
static struct tcf_proto_ops cls_tcindex_ops __read_mostly = {
.kind = "tcindex",
.classify = tcindex_classify,
.init = tcindex_init,
.destroy = tcindex_destroy,
.get = tcindex_get,
.put = tcindex_put,
.change = tcindex_change,
.delete = tcindex_delete,
.walk = tcindex_walk,
.dump = tcindex_dump,
.owner = THIS_MODULE,
};
static int __init init_tcindex(void)
{
return register_tcf_proto_ops(&cls_tcindex_ops);
}
static void __exit exit_tcindex(void)
{
unregister_tcf_proto_ops(&cls_tcindex_ops);
}
module_init(init_tcindex)
module_exit(exit_tcindex)
MODULE_LICENSE("GPL");
| gpl-2.0 |
sudipm-mukherjee/parport | arch/x86/um/signal.c | 426 | 14727 | /*
* Copyright (C) 2003 PathScale, Inc.
* Copyright (C) 2003 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
* Licensed under the GPL
*/
#include <linux/personality.h>
#include <linux/ptrace.h>
#include <linux/kernel.h>
#include <asm/unistd.h>
#include <asm/uaccess.h>
#include <asm/ucontext.h>
#include <frame_kern.h>
#include <skas.h>
#ifdef CONFIG_X86_32
/*
* FPU tag word conversions.
*/
static inline unsigned short twd_i387_to_fxsr(unsigned short twd)
{
unsigned int tmp; /* to avoid 16 bit prefixes in the code */
/* Transform each pair of bits into 01 (valid) or 00 (empty) */
tmp = ~twd;
tmp = (tmp | (tmp>>1)) & 0x5555; /* 0V0V0V0V0V0V0V0V */
/* and move the valid bits to the lower byte. */
tmp = (tmp | (tmp >> 1)) & 0x3333; /* 00VV00VV00VV00VV */
tmp = (tmp | (tmp >> 2)) & 0x0f0f; /* 0000VVVV0000VVVV */
tmp = (tmp | (tmp >> 4)) & 0x00ff; /* 00000000VVVVVVVV */
return tmp;
}
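/*
 * Worked example: an i387 tag word of 0xffff (all eight registers
 * tagged 0b11 = empty) maps to 0x00, while 0x0000 (all 0b00 = valid)
 * maps to 0xff: each 2-bit i387 tag is first folded to a single
 * non-empty bit, and the eight bits are then compacted into the low
 * byte, matching the 1-bit-per-register FXSR format.
 */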
static inline unsigned long twd_fxsr_to_i387(struct user_fxsr_struct *fxsave)
{
struct _fpxreg *st = NULL;
unsigned long twd = (unsigned long) fxsave->twd;
unsigned long tag;
unsigned long ret = 0xffff0000;
int i;
#define FPREG_ADDR(f, n) ((char *)&(f)->st_space + (n) * 16)
for (i = 0; i < 8; i++) {
if (twd & 0x1) {
st = (struct _fpxreg *) FPREG_ADDR(fxsave, i);
switch (st->exponent & 0x7fff) {
case 0x7fff:
tag = 2; /* Special */
break;
case 0x0000:
if ( !st->significand[0] &&
!st->significand[1] &&
!st->significand[2] &&
!st->significand[3] ) {
tag = 1; /* Zero */
} else {
tag = 2; /* Special */
}
break;
default:
if (st->significand[3] & 0x8000) {
tag = 0; /* Valid */
} else {
tag = 2; /* Special */
}
break;
}
} else {
tag = 3; /* Empty */
}
ret |= (tag << (2 * i));
twd = twd >> 1;
}
return ret;
}
static int convert_fxsr_to_user(struct _fpstate __user *buf,
struct user_fxsr_struct *fxsave)
{
unsigned long env[7];
struct _fpreg __user *to;
struct _fpxreg *from;
int i;
env[0] = (unsigned long)fxsave->cwd | 0xffff0000ul;
env[1] = (unsigned long)fxsave->swd | 0xffff0000ul;
env[2] = twd_fxsr_to_i387(fxsave);
env[3] = fxsave->fip;
env[4] = fxsave->fcs | ((unsigned long)fxsave->fop << 16);
env[5] = fxsave->foo;
env[6] = fxsave->fos;
if (__copy_to_user(buf, env, 7 * sizeof(unsigned long)))
return 1;
to = &buf->_st[0];
from = (struct _fpxreg *) &fxsave->st_space[0];
for (i = 0; i < 8; i++, to++, from++) {
unsigned long __user *t = (unsigned long __user *)to;
unsigned long *f = (unsigned long *)from;
if (__put_user(*f, t) ||
__put_user(*(f + 1), t + 1) ||
__put_user(from->exponent, &to->exponent))
return 1;
}
return 0;
}
static int convert_fxsr_from_user(struct user_fxsr_struct *fxsave,
struct _fpstate __user *buf)
{
unsigned long env[7];
struct _fpxreg *to;
struct _fpreg __user *from;
int i;
if (copy_from_user( env, buf, 7 * sizeof(long)))
return 1;
fxsave->cwd = (unsigned short)(env[0] & 0xffff);
fxsave->swd = (unsigned short)(env[1] & 0xffff);
fxsave->twd = twd_i387_to_fxsr((unsigned short)(env[2] & 0xffff));
fxsave->fip = env[3];
fxsave->fop = (unsigned short)((env[4] & 0xffff0000ul) >> 16);
fxsave->fcs = (env[4] & 0xffff);
fxsave->foo = env[5];
fxsave->fos = env[6];
to = (struct _fpxreg *) &fxsave->st_space[0];
from = &buf->_st[0];
for (i = 0; i < 8; i++, to++, from++) {
unsigned long *t = (unsigned long *)to;
unsigned long __user *f = (unsigned long __user *)from;
if (__get_user(*t, f) ||
__get_user(*(t + 1), f + 1) ||
__get_user(to->exponent, &from->exponent))
return 1;
}
return 0;
}
extern int have_fpx_regs;
#endif
static int copy_sc_from_user(struct pt_regs *regs,
struct sigcontext __user *from)
{
struct sigcontext sc;
int err, pid;
/* Always make any pending restarted system calls return -EINTR */
current->restart_block.fn = do_no_restart_syscall;
err = copy_from_user(&sc, from, sizeof(sc));
if (err)
return err;
#define GETREG(regno, regname) regs->regs.gp[HOST_##regno] = sc.regname
#ifdef CONFIG_X86_32
GETREG(GS, gs);
GETREG(FS, fs);
GETREG(ES, es);
GETREG(DS, ds);
#endif
GETREG(DI, di);
GETREG(SI, si);
GETREG(BP, bp);
GETREG(SP, sp);
GETREG(BX, bx);
GETREG(DX, dx);
GETREG(CX, cx);
GETREG(AX, ax);
GETREG(IP, ip);
#ifdef CONFIG_X86_64
GETREG(R8, r8);
GETREG(R9, r9);
GETREG(R10, r10);
GETREG(R11, r11);
GETREG(R12, r12);
GETREG(R13, r13);
GETREG(R14, r14);
GETREG(R15, r15);
#endif
GETREG(CS, cs);
GETREG(EFLAGS, flags);
#ifdef CONFIG_X86_32
GETREG(SS, ss);
#endif
#undef GETREG
pid = userspace_pid[current_thread_info()->cpu];
#ifdef CONFIG_X86_32
if (have_fpx_regs) {
struct user_fxsr_struct fpx;
err = copy_from_user(&fpx,
&((struct _fpstate __user *)sc.fpstate)->_fxsr_env[0],
sizeof(struct user_fxsr_struct));
if (err)
return 1;
err = convert_fxsr_from_user(&fpx, sc.fpstate);
if (err)
return 1;
err = restore_fpx_registers(pid, (unsigned long *) &fpx);
if (err < 0) {
printk(KERN_ERR "copy_sc_from_user - "
"restore_fpx_registers failed, errno = %d\n",
-err);
return 1;
}
} else
#endif
{
struct user_i387_struct fp;
err = copy_from_user(&fp, sc.fpstate,
sizeof(struct user_i387_struct));
if (err)
return 1;
err = restore_fp_registers(pid, (unsigned long *) &fp);
if (err < 0) {
printk(KERN_ERR "copy_sc_from_user - "
"restore_fp_registers failed, errno = %d\n",
-err);
return 1;
}
}
return 0;
}
static int copy_sc_to_user(struct sigcontext __user *to,
struct _fpstate __user *to_fp, struct pt_regs *regs,
unsigned long mask)
{
struct sigcontext sc;
struct faultinfo *fi = &current->thread.arch.faultinfo;
int err, pid;
memset(&sc, 0, sizeof(struct sigcontext));
#define PUTREG(regno, regname) sc.regname = regs->regs.gp[HOST_##regno]
#ifdef CONFIG_X86_32
PUTREG(GS, gs);
PUTREG(FS, fs);
PUTREG(ES, es);
PUTREG(DS, ds);
#endif
PUTREG(DI, di);
PUTREG(SI, si);
PUTREG(BP, bp);
PUTREG(SP, sp);
PUTREG(BX, bx);
PUTREG(DX, dx);
PUTREG(CX, cx);
PUTREG(AX, ax);
#ifdef CONFIG_X86_64
PUTREG(R8, r8);
PUTREG(R9, r9);
PUTREG(R10, r10);
PUTREG(R11, r11);
PUTREG(R12, r12);
PUTREG(R13, r13);
PUTREG(R14, r14);
PUTREG(R15, r15);
#endif
sc.cr2 = fi->cr2;
sc.err = fi->error_code;
sc.trapno = fi->trap_no;
PUTREG(IP, ip);
PUTREG(CS, cs);
PUTREG(EFLAGS, flags);
#ifdef CONFIG_X86_32
PUTREG(SP, sp_at_signal);
PUTREG(SS, ss);
#endif
#undef PUTREG
sc.oldmask = mask;
sc.fpstate = to_fp;
err = copy_to_user(to, &sc, sizeof(struct sigcontext));
if (err)
return 1;
pid = userspace_pid[current_thread_info()->cpu];
#ifdef CONFIG_X86_32
if (have_fpx_regs) {
struct user_fxsr_struct fpx;
err = save_fpx_registers(pid, (unsigned long *) &fpx);
if (err < 0){
printk(KERN_ERR "copy_sc_to_user - save_fpx_registers "
"failed, errno = %d\n", err);
return 1;
}
err = convert_fxsr_to_user(to_fp, &fpx);
if (err)
return 1;
err |= __put_user(fpx.swd, &to_fp->status);
err |= __put_user(X86_FXSR_MAGIC, &to_fp->magic);
if (err)
return 1;
if (copy_to_user(&to_fp->_fxsr_env[0], &fpx,
sizeof(struct user_fxsr_struct)))
return 1;
} else
#endif
{
struct user_i387_struct fp;
err = save_fp_registers(pid, (unsigned long *) &fp);
if (copy_to_user(to_fp, &fp, sizeof(struct user_i387_struct)))
return 1;
}
return 0;
}
#ifdef CONFIG_X86_32
static int copy_ucontext_to_user(struct ucontext __user *uc,
struct _fpstate __user *fp, sigset_t *set,
unsigned long sp)
{
int err = 0;
err |= __save_altstack(&uc->uc_stack, sp);
err |= copy_sc_to_user(&uc->uc_mcontext, fp, &current->thread.regs, 0);
err |= copy_to_user(&uc->uc_sigmask, set, sizeof(*set));
return err;
}
struct sigframe
{
char __user *pretcode;
int sig;
struct sigcontext sc;
struct _fpstate fpstate;
unsigned long extramask[_NSIG_WORDS-1];
char retcode[8];
};
struct rt_sigframe
{
char __user *pretcode;
int sig;
struct siginfo __user *pinfo;
void __user *puc;
struct siginfo info;
struct ucontext uc;
struct _fpstate fpstate;
char retcode[8];
};
int setup_signal_stack_sc(unsigned long stack_top, struct ksignal *ksig,
struct pt_regs *regs, sigset_t *mask)
{
struct sigframe __user *frame;
void __user *restorer;
int err = 0, sig = ksig->sig;
/* This is the same calculation as i386 - ((sp + 4) & 15) == 0 */
stack_top = ((stack_top + 4) & -16UL) - 4;
frame = (struct sigframe __user *) stack_top - 1;
if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
return 1;
restorer = frame->retcode;
if (ksig->ka.sa.sa_flags & SA_RESTORER)
restorer = ksig->ka.sa.sa_restorer;
err |= __put_user(restorer, &frame->pretcode);
err |= __put_user(sig, &frame->sig);
err |= copy_sc_to_user(&frame->sc, &frame->fpstate, regs, mask->sig[0]);
if (_NSIG_WORDS > 1)
err |= __copy_to_user(&frame->extramask, &mask->sig[1],
sizeof(frame->extramask));
/*
* This is popl %eax ; movl $__NR_sigreturn,%eax ; int $0x80
*
* WE DO NOT USE IT ANY MORE! It's only left here for historical
* reasons and because gdb uses it as a signature to notice
* signal handler stack frames.
*/
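/*
 * Byte layout of the stores below (little-endian): 0xb858 emits
 * 0x58 (popl %eax) followed by 0xb8 (movl imm32, %eax), the int
 * written at retcode+2 supplies the 32-bit immediate, and 0x80cd
 * emits 0xcd 0x80 (int $0x80).
 */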
err |= __put_user(0xb858, (short __user *)(frame->retcode+0));
err |= __put_user(__NR_sigreturn, (int __user *)(frame->retcode+2));
err |= __put_user(0x80cd, (short __user *)(frame->retcode+6));
if (err)
return err;
PT_REGS_SP(regs) = (unsigned long) frame;
PT_REGS_IP(regs) = (unsigned long) ksig->ka.sa.sa_handler;
PT_REGS_AX(regs) = (unsigned long) sig;
PT_REGS_DX(regs) = (unsigned long) 0;
PT_REGS_CX(regs) = (unsigned long) 0;
return 0;
}
int setup_signal_stack_si(unsigned long stack_top, struct ksignal *ksig,
struct pt_regs *regs, sigset_t *mask)
{
struct rt_sigframe __user *frame;
void __user *restorer;
int err = 0, sig = ksig->sig;
stack_top &= -8UL;
frame = (struct rt_sigframe __user *) stack_top - 1;
if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
return 1;
restorer = frame->retcode;
if (ksig->ka.sa.sa_flags & SA_RESTORER)
restorer = ksig->ka.sa.sa_restorer;
err |= __put_user(restorer, &frame->pretcode);
err |= __put_user(sig, &frame->sig);
err |= __put_user(&frame->info, &frame->pinfo);
err |= __put_user(&frame->uc, &frame->puc);
err |= copy_siginfo_to_user(&frame->info, &ksig->info);
err |= copy_ucontext_to_user(&frame->uc, &frame->fpstate, mask,
PT_REGS_SP(regs));
/*
* This is movl $__NR_rt_sigreturn,%eax ; int $0x80
*
* WE DO NOT USE IT ANY MORE! It's only left here for historical
* reasons and because gdb uses it as a signature to notice
* signal handler stack frames.
*/
err |= __put_user(0xb8, (char __user *)(frame->retcode+0));
err |= __put_user(__NR_rt_sigreturn, (int __user *)(frame->retcode+1));
err |= __put_user(0x80cd, (short __user *)(frame->retcode+5));
if (err)
return err;
PT_REGS_SP(regs) = (unsigned long) frame;
PT_REGS_IP(regs) = (unsigned long) ksig->ka.sa.sa_handler;
PT_REGS_AX(regs) = (unsigned long) sig;
PT_REGS_DX(regs) = (unsigned long) &frame->info;
PT_REGS_CX(regs) = (unsigned long) &frame->uc;
return 0;
}
long sys_sigreturn(void)
{
unsigned long sp = PT_REGS_SP(&current->thread.regs);
struct sigframe __user *frame = (struct sigframe __user *)(sp - 8);
sigset_t set;
struct sigcontext __user *sc = &frame->sc;
unsigned long __user *oldmask = &sc->oldmask;
unsigned long __user *extramask = frame->extramask;
int sig_size = (_NSIG_WORDS - 1) * sizeof(unsigned long);
if (copy_from_user(&set.sig[0], oldmask, sizeof(set.sig[0])) ||
copy_from_user(&set.sig[1], extramask, sig_size))
goto segfault;
set_current_blocked(&set);
if (copy_sc_from_user(&current->thread.regs, sc))
goto segfault;
/* Avoid ERESTART handling */
PT_REGS_SYSCALL_NR(&current->thread.regs) = -1;
return PT_REGS_SYSCALL_RET(&current->thread.regs);
segfault:
force_sig(SIGSEGV, current);
return 0;
}
#else
struct rt_sigframe
{
char __user *pretcode;
struct ucontext uc;
struct siginfo info;
struct _fpstate fpstate;
};
int setup_signal_stack_si(unsigned long stack_top, struct ksignal *ksig,
struct pt_regs *regs, sigset_t *set)
{
struct rt_sigframe __user *frame;
int err = 0, sig = ksig->sig;
frame = (struct rt_sigframe __user *)
round_down(stack_top - sizeof(struct rt_sigframe), 16);
/* Subtract 128 for a red zone and 8 for proper alignment */
frame = (struct rt_sigframe __user *) ((unsigned long) frame - 128 - 8);
if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
goto out;
if (ksig->ka.sa.sa_flags & SA_SIGINFO) {
err |= copy_siginfo_to_user(&frame->info, &ksig->info);
if (err)
goto out;
}
/* Create the ucontext. */
err |= __put_user(0, &frame->uc.uc_flags);
err |= __put_user(0, &frame->uc.uc_link);
err |= __save_altstack(&frame->uc.uc_stack, PT_REGS_SP(regs));
err |= copy_sc_to_user(&frame->uc.uc_mcontext, &frame->fpstate, regs,
set->sig[0]);
err |= __put_user(&frame->fpstate, &frame->uc.uc_mcontext.fpstate);
if (sizeof(*set) == 16) {
err |= __put_user(set->sig[0], &frame->uc.uc_sigmask.sig[0]);
err |= __put_user(set->sig[1], &frame->uc.uc_sigmask.sig[1]);
}
else
err |= __copy_to_user(&frame->uc.uc_sigmask, set,
sizeof(*set));
/*
* Set up to return from userspace. If provided, use a stub
* already in userspace.
*/
/* x86-64 should always use SA_RESTORER. */
if (ksig->ka.sa.sa_flags & SA_RESTORER)
err |= __put_user((void *)ksig->ka.sa.sa_restorer,
&frame->pretcode);
else
/* could use a vstub here */
return err;
if (err)
return err;
PT_REGS_SP(regs) = (unsigned long) frame;
PT_REGS_DI(regs) = sig;
/* In case the signal handler was declared without prototypes */
PT_REGS_AX(regs) = 0;
/*
* This also works for non SA_SIGINFO handlers because they expect the
* next argument after the signal number on the stack.
*/
PT_REGS_SI(regs) = (unsigned long) &frame->info;
PT_REGS_DX(regs) = (unsigned long) &frame->uc;
PT_REGS_IP(regs) = (unsigned long) ksig->ka.sa.sa_handler;
out:
return err;
}
#endif
long sys_rt_sigreturn(void)
{
unsigned long sp = PT_REGS_SP(&current->thread.regs);
struct rt_sigframe __user *frame =
(struct rt_sigframe __user *)(sp - sizeof(long));
struct ucontext __user *uc = &frame->uc;
sigset_t set;
if (copy_from_user(&set, &uc->uc_sigmask, sizeof(set)))
goto segfault;
set_current_blocked(&set);
if (copy_sc_from_user(&current->thread.regs, &uc->uc_mcontext))
goto segfault;
/* Avoid ERESTART handling */
PT_REGS_SYSCALL_NR(&current->thread.regs) = -1;
return PT_REGS_SYSCALL_RET(&current->thread.regs);
segfault:
force_sig(SIGSEGV, current);
return 0;
}
| gpl-2.0 |
RidaShamasneh/nethunter_kernel_g5 | drivers/staging/rtl8723au/hal/rtl8723au_recv.c | 682 | 7453 | /******************************************************************************
*
* Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
******************************************************************************/
#define _RTL8192CU_RECV_C_
#include <osdep_service.h>
#include <drv_types.h>
#include <recv_osdep.h>
#include <mlme_osdep.h>
#include <linux/ip.h>
#include <linux/if_ether.h>
#include <usb_ops.h>
#include <wifi.h>
#include <rtl8723a_hal.h>
int rtl8723au_init_recv_priv(struct rtw_adapter *padapter)
{
struct recv_priv *precvpriv = &padapter->recvpriv;
int i, size, res = _SUCCESS;
struct recv_buf *precvbuf;
unsigned long tmpaddr;
unsigned long alignment;
struct sk_buff *pskb;
tasklet_init(&precvpriv->recv_tasklet,
(void(*)(unsigned long))rtl8723au_recv_tasklet,
(unsigned long)padapter);
precvpriv->int_in_urb = usb_alloc_urb(0, GFP_KERNEL);
if (!precvpriv->int_in_urb)
DBG_8723A("alloc_urb for interrupt in endpoint fail !!!!\n");
precvpriv->int_in_buf = kzalloc(USB_INTR_CONTENT_LENGTH, GFP_KERNEL);
if (!precvpriv->int_in_buf)
DBG_8723A("alloc_mem for interrupt in endpoint fail !!!!\n");
size = NR_RECVBUFF * sizeof(struct recv_buf);
precvpriv->precv_buf = kzalloc(size, GFP_KERNEL);
if (!precvpriv->precv_buf) {
res = _FAIL;
RT_TRACE(_module_rtl871x_recv_c_, _drv_err_,
("alloc recv_buf fail!\n"));
goto exit;
}
precvbuf = (struct recv_buf *)precvpriv->precv_buf;
for (i = 0; i < NR_RECVBUFF; i++) {
INIT_LIST_HEAD(&precvbuf->list);
precvbuf->purb = usb_alloc_urb(0, GFP_KERNEL);
if (!precvbuf->purb)
break;
precvbuf->adapter = padapter;
precvbuf++;
}
skb_queue_head_init(&precvpriv->rx_skb_queue);
skb_queue_head_init(&precvpriv->free_recv_skb_queue);
for (i = 0; i < NR_PREALLOC_RECV_SKB; i++) {
size = MAX_RECVBUF_SZ + RECVBUFF_ALIGN_SZ;
pskb = __netdev_alloc_skb(padapter->pnetdev, size, GFP_KERNEL);
if (pskb) {
pskb->dev = padapter->pnetdev;
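/*
 * Reserve up to RECVBUFF_ALIGN_SZ bytes so skb->data starts on
 * a RECVBUFF_ALIGN_SZ boundary; the over-allocation above
 * leaves room for this headroom.
 */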
tmpaddr = (unsigned long)pskb->data;
alignment = tmpaddr & (RECVBUFF_ALIGN_SZ-1);
skb_reserve(pskb, (RECVBUFF_ALIGN_SZ - alignment));
skb_queue_tail(&precvpriv->free_recv_skb_queue, pskb);
}
pskb = NULL;
}
exit:
return res;
}
void rtl8723au_free_recv_priv(struct rtw_adapter *padapter)
{
int i;
struct recv_buf *precvbuf;
struct recv_priv *precvpriv = &padapter->recvpriv;
precvbuf = (struct recv_buf *)precvpriv->precv_buf;
for (i = 0; i < NR_RECVBUFF; i++) {
usb_free_urb(precvbuf->purb);
if (precvbuf->pskb)
dev_kfree_skb_any(precvbuf->pskb);
precvbuf++;
}
kfree(precvpriv->precv_buf);
usb_free_urb(precvpriv->int_in_urb);
kfree(precvpriv->int_in_buf);
if (skb_queue_len(&precvpriv->rx_skb_queue))
DBG_8723A(KERN_WARNING "rx_skb_queue not empty\n");
skb_queue_purge(&precvpriv->rx_skb_queue);
if (skb_queue_len(&precvpriv->free_recv_skb_queue)) {
DBG_8723A(KERN_WARNING "free_recv_skb_queue not empty, %d\n",
skb_queue_len(&precvpriv->free_recv_skb_queue));
}
skb_queue_purge(&precvpriv->free_recv_skb_queue);
}
struct recv_stat_cpu {
u32 rxdw0;
u32 rxdw1;
u32 rxdw2;
u32 rxdw3;
u32 rxdw4;
u32 rxdw5;
};
void update_recvframe_attrib(struct recv_frame *precvframe,
struct recv_stat *prxstat)
{
struct rx_pkt_attrib *pattrib;
struct recv_stat_cpu report;
struct rxreport_8723a *prxreport;
report.rxdw0 = le32_to_cpu(prxstat->rxdw0);
report.rxdw1 = le32_to_cpu(prxstat->rxdw1);
report.rxdw2 = le32_to_cpu(prxstat->rxdw2);
report.rxdw3 = le32_to_cpu(prxstat->rxdw3);
report.rxdw4 = le32_to_cpu(prxstat->rxdw4);
report.rxdw5 = le32_to_cpu(prxstat->rxdw5);
prxreport = (struct rxreport_8723a *)&report;
pattrib = &precvframe->attrib;
memset(pattrib, 0, sizeof(struct rx_pkt_attrib));
/* update rx report to recv_frame attribute */
pattrib->pkt_len = (u16)prxreport->pktlen;
pattrib->drvinfo_sz = (u8)(prxreport->drvinfosize << 3);
pattrib->physt = (u8)prxreport->physt;
pattrib->crc_err = (u8)prxreport->crc32;
pattrib->icv_err = (u8)prxreport->icverr;
pattrib->bdecrypted = (u8)(prxreport->swdec ? 0 : 1);
pattrib->encrypt = (u8)prxreport->security;
pattrib->qos = (u8)prxreport->qos;
pattrib->priority = (u8)prxreport->tid;
pattrib->amsdu = (u8)prxreport->amsdu;
pattrib->seq_num = (u16)prxreport->seq;
pattrib->frag_num = (u8)prxreport->frag;
pattrib->mfrag = (u8)prxreport->mf;
pattrib->mdata = (u8)prxreport->md;
pattrib->mcs_rate = (u8)prxreport->rxmcs;
pattrib->rxht = (u8)prxreport->rxht;
}
void update_recvframe_phyinfo(struct recv_frame *precvframe,
struct phy_stat *pphy_status)
{
struct rtw_adapter *padapter = precvframe->adapter;
struct rx_pkt_attrib *pattrib = &precvframe->attrib;
struct hal_data_8723a *pHalData = GET_HAL_DATA(padapter);
struct phy_info *pPHYInfo = &pattrib->phy_info;
struct odm_packet_info pkt_info;
u8 *sa = NULL, *da;
struct sta_priv *pstapriv;
struct sta_info *psta;
struct sk_buff *skb = precvframe->pkt;
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
bool matchbssid = false;
u8 *bssid;
matchbssid = (!ieee80211_is_ctl(hdr->frame_control) &&
!pattrib->icv_err && !pattrib->crc_err);
if (matchbssid) {
switch (hdr->frame_control &
cpu_to_le16(IEEE80211_FCTL_TODS |
IEEE80211_FCTL_FROMDS)) {
case cpu_to_le16(IEEE80211_FCTL_TODS):
bssid = hdr->addr1;
break;
case cpu_to_le16(IEEE80211_FCTL_FROMDS):
bssid = hdr->addr2;
break;
case cpu_to_le16(0):
bssid = hdr->addr3;
break;
default:
bssid = NULL;
matchbssid = false;
}
if (bssid)
matchbssid = ether_addr_equal(
get_bssid(&padapter->mlmepriv), bssid);
}
pkt_info.bPacketMatchBSSID = matchbssid;
da = ieee80211_get_DA(hdr);
pkt_info.bPacketToSelf = pkt_info.bPacketMatchBSSID &&
(!memcmp(da, myid(&padapter->eeprompriv), ETH_ALEN));
pkt_info.bPacketBeacon = pkt_info.bPacketMatchBSSID &&
ieee80211_is_beacon(hdr->frame_control);
pkt_info.StationID = 0xFF;
if (pkt_info.bPacketBeacon) {
if (check_fwstate(&padapter->mlmepriv, WIFI_STATION_STATE) == true)
sa = padapter->mlmepriv.cur_network.network.MacAddress;
/* to do Ad-hoc */
} else {
sa = ieee80211_get_SA(hdr);
}
pstapriv = &padapter->stapriv;
psta = rtw_get_stainfo23a(pstapriv, sa);
if (psta) {
pkt_info.StationID = psta->mac_id;
/* printk("%s ==> StationID(%d)\n", __func__, pkt_info.StationID); */
}
pkt_info.Rate = pattrib->mcs_rate;
ODM_PhyStatusQuery23a(&pHalData->odmpriv, pPHYInfo,
(u8 *)pphy_status, &pkt_info);
precvframe->psta = NULL;
if (pkt_info.bPacketMatchBSSID &&
(check_fwstate(&padapter->mlmepriv, WIFI_AP_STATE) == true)) {
if (psta) {
precvframe->psta = psta;
rtl8723a_process_phy_info(padapter, precvframe);
}
} else if (pkt_info.bPacketToSelf || pkt_info.bPacketBeacon) {
if (check_fwstate(&padapter->mlmepriv,
WIFI_ADHOC_STATE|WIFI_ADHOC_MASTER_STATE) ==
true) {
if (psta)
precvframe->psta = psta;
}
rtl8723a_process_phy_info(padapter, precvframe);
}
}
| gpl-2.0 |
jstotero/lge_e510_kernel | arch/arm/mach-msm/modem_notifier.c | 938 | 5198 | /* Copyright (c) 2008-2010, Code Aurora Forum. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA.
*
*/
/*
* Modem Restart Notifier -- Provides notification
* of modem restart events.
*/
#include <linux/notifier.h>
#include <linux/init.h>
#include <linux/debugfs.h>
#include <linux/module.h>
#include <linux/workqueue.h>
#include "modem_notifier.h"
#define DEBUG
static struct srcu_notifier_head modem_notifier_list;
static struct workqueue_struct *modem_notifier_wq;
static void notify_work_smsm_init(struct work_struct *work)
{
modem_notify(0, MODEM_NOTIFIER_SMSM_INIT);
}
static DECLARE_WORK(modem_notifier_smsm_init_work, &notify_work_smsm_init);
void modem_queue_smsm_init_notify(void)
{
int ret;
ret = queue_work(modem_notifier_wq, &modem_notifier_smsm_init_work);
if (!ret)
printk(KERN_ERR "%s\n", __func__);
}
EXPORT_SYMBOL(modem_queue_smsm_init_notify);
static void notify_work_start_reset(struct work_struct *work)
{
modem_notify(0, MODEM_NOTIFIER_START_RESET);
}
static DECLARE_WORK(modem_notifier_start_reset_work, &notify_work_start_reset);
void modem_queue_start_reset_notify(void)
{
int ret;
ret = queue_work(modem_notifier_wq, &modem_notifier_start_reset_work);
if (!ret)
printk(KERN_ERR "%s\n", __func__);
}
EXPORT_SYMBOL(modem_queue_start_reset_notify);
static void notify_work_end_reset(struct work_struct *work)
{
modem_notify(0, MODEM_NOTIFIER_END_RESET);
}
static DECLARE_WORK(modem_notifier_end_reset_work, &notify_work_end_reset);
void modem_queue_end_reset_notify(void)
{
int ret;
ret = queue_work(modem_notifier_wq, &modem_notifier_end_reset_work);
if (!ret)
printk(KERN_ERR "%s\n", __func__);
}
EXPORT_SYMBOL(modem_queue_end_reset_notify);
int modem_register_notifier(struct notifier_block *nb)
{
int ret;
ret = srcu_notifier_chain_register(
&modem_notifier_list, nb);
return ret;
}
EXPORT_SYMBOL(modem_register_notifier);
int modem_unregister_notifier(struct notifier_block *nb)
{
int ret;
ret = srcu_notifier_chain_unregister(
&modem_notifier_list, nb);
return ret;
}
EXPORT_SYMBOL(modem_unregister_notifier);
void modem_notify(void *data, unsigned int state)
{
srcu_notifier_call_chain(&modem_notifier_list, state, data);
}
EXPORT_SYMBOL(modem_notify);
#if defined(CONFIG_DEBUG_FS)
static int debug_reset_start(const char __user *buf, int count)
{
modem_queue_start_reset_notify();
return 0;
}
static int debug_reset_end(const char __user *buf, int count)
{
modem_queue_end_reset_notify();
return 0;
}
static ssize_t debug_write(struct file *file, const char __user *buf,
size_t count, loff_t *ppos)
{
int (*fling)(const char __user *buf, int max) = file->private_data;
fling(buf, count);
return count;
}
static int debug_open(struct inode *inode, struct file *file)
{
file->private_data = inode->i_private;
return 0;
}
static const struct file_operations debug_ops = {
.write = debug_write,
.open = debug_open,
};
static void debug_create(const char *name, mode_t mode,
struct dentry *dent,
int (*fling)(const char __user *buf, int max))
{
debugfs_create_file(name, mode, dent, fling, &debug_ops);
}
static void modem_notifier_debugfs_init(void)
{
struct dentry *dent;
dent = debugfs_create_dir("modem_notifier", 0);
if (IS_ERR(dent))
return;
debug_create("reset_start", 0444, dent, debug_reset_start);
debug_create("reset_end", 0444, dent, debug_reset_end);
}
#else
static void modem_notifier_debugfs_init(void) {}
#endif
#if defined(DEBUG)
static int modem_notifier_test_call(struct notifier_block *this,
unsigned long code,
void *_cmd)
{
switch (code) {
case MODEM_NOTIFIER_START_RESET:
printk(KERN_ERR "Notify: start reset\n");
break;
case MODEM_NOTIFIER_END_RESET:
printk(KERN_ERR "Notify: end reset\n");
break;
case MODEM_NOTIFIER_SMSM_INIT:
printk(KERN_ERR "Notify: smsm init\n");
break;
default:
printk(KERN_ERR "Notify: general\n");
break;
}
return NOTIFY_DONE;
}
static struct notifier_block nb = {
.notifier_call = modem_notifier_test_call,
};
static void register_test_notifier(void)
{
modem_register_notifier(&nb);
}
#endif
static int __init init_modem_notifier_list(void)
{
srcu_init_notifier_head(&modem_notifier_list);
modem_notifier_debugfs_init();
#if defined(DEBUG)
register_test_notifier();
#endif
/* Create the workqueue */
modem_notifier_wq = create_singlethread_workqueue("modem_notifier");
if (!modem_notifier_wq) {
srcu_cleanup_notifier_head(&modem_notifier_list);
return -ENOMEM;
}
return 0;
}
module_init(init_modem_notifier_list);
| gpl-2.0 |
arunthomas/linux | drivers/input/keyboard/st-keyscan.c | 1706 | 6729 | /*
* STMicroelectronics Key Scanning driver
*
* Copyright (c) 2014 STMicroelectonics Ltd.
* Author: Stuart Menefy <stuart.menefy@st.com>
*
* Based on sh_keysc.c, copyright 2008 Magnus Damm
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/input/matrix_keypad.h>
#define ST_KEYSCAN_MAXKEYS 16
#define KEYSCAN_CONFIG_OFF 0x0
#define KEYSCAN_CONFIG_ENABLE 0x1
#define KEYSCAN_DEBOUNCE_TIME_OFF 0x4
#define KEYSCAN_MATRIX_STATE_OFF 0x8
#define KEYSCAN_MATRIX_DIM_OFF 0xc
#define KEYSCAN_MATRIX_DIM_X_SHIFT 0x0
#define KEYSCAN_MATRIX_DIM_Y_SHIFT 0x2
struct st_keyscan {
void __iomem *base;
int irq;
struct clk *clk;
struct input_dev *input_dev;
unsigned long last_state;
unsigned int n_rows;
unsigned int n_cols;
unsigned int debounce_us;
};
static irqreturn_t keyscan_isr(int irq, void *dev_id)
{
struct st_keyscan *keypad = dev_id;
unsigned short *keycode = keypad->input_dev->keycode;
unsigned long state, change;
int bit_nr;
state = readl(keypad->base + KEYSCAN_MATRIX_STATE_OFF) & 0xffff;
change = keypad->last_state ^ state;
keypad->last_state = state;
for_each_set_bit(bit_nr, &change, BITS_PER_LONG)
input_report_key(keypad->input_dev,
keycode[bit_nr], state & BIT(bit_nr));
input_sync(keypad->input_dev);
return IRQ_HANDLED;
}
static int keyscan_start(struct st_keyscan *keypad)
{
int error;
error = clk_enable(keypad->clk);
if (error)
return error;
writel(keypad->debounce_us * (clk_get_rate(keypad->clk) / 1000000),
keypad->base + KEYSCAN_DEBOUNCE_TIME_OFF);
writel(((keypad->n_cols - 1) << KEYSCAN_MATRIX_DIM_X_SHIFT) |
((keypad->n_rows - 1) << KEYSCAN_MATRIX_DIM_Y_SHIFT),
keypad->base + KEYSCAN_MATRIX_DIM_OFF);
writel(KEYSCAN_CONFIG_ENABLE, keypad->base + KEYSCAN_CONFIG_OFF);
return 0;
}
static void keyscan_stop(struct st_keyscan *keypad)
{
writel(0, keypad->base + KEYSCAN_CONFIG_OFF);
clk_disable(keypad->clk);
}
static int keyscan_open(struct input_dev *dev)
{
struct st_keyscan *keypad = input_get_drvdata(dev);
return keyscan_start(keypad);
}
static void keyscan_close(struct input_dev *dev)
{
struct st_keyscan *keypad = input_get_drvdata(dev);
keyscan_stop(keypad);
}
static int keypad_matrix_key_parse_dt(struct st_keyscan *keypad_data)
{
struct device *dev = keypad_data->input_dev->dev.parent;
struct device_node *np = dev->of_node;
int error;
error = matrix_keypad_parse_of_params(dev, &keypad_data->n_rows,
&keypad_data->n_cols);
if (error) {
dev_err(dev, "failed to parse keypad params\n");
return error;
}
of_property_read_u32(np, "st,debounce-us", &keypad_data->debounce_us);
dev_dbg(dev, "n_rows=%d n_cols=%d debounce=%d\n",
keypad_data->n_rows, keypad_data->n_cols,
keypad_data->debounce_us);
return 0;
}
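/*
 * Illustrative device tree node matching the properties parsed above;
 * addresses, interrupt and keymap values are hypothetical, see the
 * st,sti-keyscan binding for the canonical form:
 *
 *	keyscan: keyscan@fe4b0000 {
 *		compatible = "st,sti-keyscan";
 *		reg = <0xfe4b0000 0x2000>;
 *		interrupts = <GIC_SPI 212 IRQ_TYPE_NONE>;
 *		keypad,num-rows = <4>;
 *		keypad,num-columns = <4>;
 *		st,debounce-us = <5000>;
 *		linux,keymap = <MATRIX_KEY(0x00, 0x00, KEY_F13)>;
 *	};
 */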
static int keyscan_probe(struct platform_device *pdev)
{
struct st_keyscan *keypad_data;
struct input_dev *input_dev;
struct resource *res;
int error;
if (!pdev->dev.of_node) {
dev_err(&pdev->dev, "no DT data present\n");
return -EINVAL;
}
keypad_data = devm_kzalloc(&pdev->dev, sizeof(*keypad_data),
GFP_KERNEL);
if (!keypad_data)
return -ENOMEM;
input_dev = devm_input_allocate_device(&pdev->dev);
if (!input_dev) {
dev_err(&pdev->dev, "failed to allocate the input device\n");
return -ENOMEM;
}
input_dev->name = pdev->name;
input_dev->phys = "keyscan-keys/input0";
input_dev->dev.parent = &pdev->dev;
input_dev->open = keyscan_open;
input_dev->close = keyscan_close;
input_dev->id.bustype = BUS_HOST;
/* set before parsing: keypad_matrix_key_parse_dt reaches dev through it */
keypad_data->input_dev = input_dev;
error = keypad_matrix_key_parse_dt(keypad_data);
if (error)
return error;
error = matrix_keypad_build_keymap(NULL, NULL,
keypad_data->n_rows,
keypad_data->n_cols,
NULL, input_dev);
if (error) {
dev_err(&pdev->dev, "failed to build keymap\n");
return error;
}
input_set_drvdata(input_dev, keypad_data);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
keypad_data->base = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(keypad_data->base))
return PTR_ERR(keypad_data->base);
keypad_data->clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(keypad_data->clk)) {
dev_err(&pdev->dev, "cannot get clock\n");
return PTR_ERR(keypad_data->clk);
}
error = clk_enable(keypad_data->clk);
if (error) {
dev_err(&pdev->dev, "failed to enable clock\n");
return error;
}
keyscan_stop(keypad_data);
keypad_data->irq = platform_get_irq(pdev, 0);
if (keypad_data->irq < 0) {
dev_err(&pdev->dev, "no IRQ specified\n");
return -EINVAL;
}
error = devm_request_irq(&pdev->dev, keypad_data->irq, keyscan_isr, 0,
pdev->name, keypad_data);
if (error) {
dev_err(&pdev->dev, "failed to request IRQ\n");
return error;
}
error = input_register_device(input_dev);
if (error) {
dev_err(&pdev->dev, "failed to register input device\n");
return error;
}
platform_set_drvdata(pdev, keypad_data);
device_set_wakeup_capable(&pdev->dev, 1);
return 0;
}
#ifdef CONFIG_PM_SLEEP
static int keyscan_suspend(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
struct st_keyscan *keypad = platform_get_drvdata(pdev);
struct input_dev *input = keypad->input_dev;
mutex_lock(&input->mutex);
if (device_may_wakeup(dev))
enable_irq_wake(keypad->irq);
else if (input->users)
keyscan_stop(keypad);
mutex_unlock(&input->mutex);
return 0;
}
static int keyscan_resume(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
struct st_keyscan *keypad = platform_get_drvdata(pdev);
struct input_dev *input = keypad->input_dev;
int retval = 0;
mutex_lock(&input->mutex);
if (device_may_wakeup(dev))
disable_irq_wake(keypad->irq);
else if (input->users)
retval = keyscan_start(keypad);
mutex_unlock(&input->mutex);
return retval;
}
#endif
static SIMPLE_DEV_PM_OPS(keyscan_dev_pm_ops, keyscan_suspend, keyscan_resume);
static const struct of_device_id keyscan_of_match[] = {
{ .compatible = "st,sti-keyscan" },
{ },
};
MODULE_DEVICE_TABLE(of, keyscan_of_match);
static struct platform_driver keyscan_device_driver = {
.probe = keyscan_probe,
.driver = {
.name = "st-keyscan",
.pm = &keyscan_dev_pm_ops,
.of_match_table = of_match_ptr(keyscan_of_match),
}
};
module_platform_driver(keyscan_device_driver);
MODULE_AUTHOR("Stuart Menefy <stuart.menefy@st.com>");
MODULE_DESCRIPTION("STMicroelectronics keyscan device driver");
MODULE_LICENSE("GPL");
| gpl-2.0 |
varchild/vigor_aosp_kernel | arch/mips/kernel/unaligned.c | 2474 | 13884 | /*
* Handle unaligned accesses by emulation.
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 1996, 1998, 1999, 2002 by Ralf Baechle
* Copyright (C) 1999 Silicon Graphics, Inc.
*
* This file contains exception handler for address error exception with the
* special capability to execute faulting instructions in software. The
* handler does not try to handle the case when the program counter points
* to an address not aligned to a word boundary.
*
* Putting data to unaligned addresses is a bad practice even on Intel where
* only the performance is affected. Much worse is that such code is non-
* portable. Due to several programs that die on MIPS due to alignment
* problems I decided to implement this handler anyway though I originally
* didn't intend to do this at all for user code.
*
* For now I enable fixing of address errors by default to make life easier.
* I however intend to disable this sometime in the future when the alignment
* problems with user programs have been fixed. For programmers this is the
* right way to go.
*
* Fixing address errors is a per process option. The option is inherited
* across fork(2) and execve(2) calls. If you really want to use the
* option in your user programs - I discourage the use of the software
* emulation strongly - use the following code in your userland stuff:
*
* #include <sys/sysmips.h>
*
* ...
* sysmips(MIPS_FIXADE, x);
* ...
*
* The argument x is 0 for disabling software emulation, enabled otherwise.
*
* Below is a little program to play around with this feature.
*
* #include <stdio.h>
* #include <sys/sysmips.h>
*
* struct foo {
* unsigned char bar[8];
* };
*
* main(int argc, char *argv[])
* {
* struct foo x = {0, 1, 2, 3, 4, 5, 6, 7};
* unsigned int *p = (unsigned int *) (x.bar + 3);
* int i;
*
* if (argc > 1)
* sysmips(MIPS_FIXADE, atoi(argv[1]));
*
* printf("*p = %08lx\n", *p);
*
* *p = 0xdeadface;
*
* for(i = 0; i <= 7; i++)
* printf("%02x ", x.bar[i]);
* printf("\n");
* }
*
* Coprocessor loads are not supported; I think this case is unimportant
* in practice.
*
* TODO: Handle ndc (attempted store to doubleword in uncached memory)
* exception for the R6000.
* A store crossing a page boundary might be executed only partially.
* Undo the partial store in this case.
*/
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/signal.h>
#include <linux/smp.h>
#include <linux/sched.h>
#include <linux/debugfs.h>
#include <linux/perf_event.h>
#include <asm/asm.h>
#include <asm/branch.h>
#include <asm/byteorder.h>
#include <asm/cop2.h>
#include <asm/inst.h>
#include <asm/uaccess.h>
#include <asm/system.h>
#define STR(x) __STR(x)
#define __STR(x) #x
enum {
UNALIGNED_ACTION_QUIET,
UNALIGNED_ACTION_SIGNAL,
UNALIGNED_ACTION_SHOW,
};
#ifdef CONFIG_DEBUG_FS
static u32 unaligned_instructions;
static u32 unaligned_action;
#else
#define unaligned_action UNALIGNED_ACTION_QUIET
#endif
extern void show_registers(struct pt_regs *regs);
static void emulate_load_store_insn(struct pt_regs *regs,
void __user *addr, unsigned int __user *pc)
{
union mips_instruction insn;
unsigned long value;
unsigned int res;
perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS,
1, 0, regs, 0);
/*
* This load never faults.
*/
__get_user(insn.word, pc);
switch (insn.i_format.opcode) {
/*
* These are instructions that a compiler doesn't generate. We
* can assume therefore that the code is MIPS-aware and
* really buggy. Emulating these instructions would break the
* semantics anyway.
*/
case ll_op:
case lld_op:
case sc_op:
case scd_op:
/*
* For these instructions the only way to create an address
* error is an attempted access to kernel/supervisor address
* space.
*/
case ldl_op:
case ldr_op:
case lwl_op:
case lwr_op:
case sdl_op:
case sdr_op:
case swl_op:
case swr_op:
case lb_op:
case lbu_op:
case sb_op:
goto sigbus;
/*
* The remaining opcodes are the ones that are really of interest.
*/
case lh_op:
if (!access_ok(VERIFY_READ, addr, 2))
goto sigbus;
__asm__ __volatile__ (".set\tnoat\n"
#ifdef __BIG_ENDIAN
"1:\tlb\t%0, 0(%2)\n"
"2:\tlbu\t$1, 1(%2)\n\t"
#endif
#ifdef __LITTLE_ENDIAN
"1:\tlb\t%0, 1(%2)\n"
"2:\tlbu\t$1, 0(%2)\n\t"
#endif
"sll\t%0, 0x8\n\t"
"or\t%0, $1\n\t"
"li\t%1, 0\n"
"3:\t.set\tat\n\t"
".section\t.fixup,\"ax\"\n\t"
"4:\tli\t%1, %3\n\t"
"j\t3b\n\t"
".previous\n\t"
".section\t__ex_table,\"a\"\n\t"
STR(PTR)"\t1b, 4b\n\t"
STR(PTR)"\t2b, 4b\n\t"
".previous"
: "=&r" (value), "=r" (res)
: "r" (addr), "i" (-EFAULT));
if (res)
goto fault;
compute_return_epc(regs);
regs->regs[insn.i_format.rt] = value;
break;
case lw_op:
if (!access_ok(VERIFY_READ, addr, 4))
goto sigbus;
__asm__ __volatile__ (
#ifdef __BIG_ENDIAN
"1:\tlwl\t%0, (%2)\n"
"2:\tlwr\t%0, 3(%2)\n\t"
#endif
#ifdef __LITTLE_ENDIAN
"1:\tlwl\t%0, 3(%2)\n"
"2:\tlwr\t%0, (%2)\n\t"
#endif
"li\t%1, 0\n"
"3:\t.section\t.fixup,\"ax\"\n\t"
"4:\tli\t%1, %3\n\t"
"j\t3b\n\t"
".previous\n\t"
".section\t__ex_table,\"a\"\n\t"
STR(PTR)"\t1b, 4b\n\t"
STR(PTR)"\t2b, 4b\n\t"
".previous"
: "=&r" (value), "=r" (res)
: "r" (addr), "i" (-EFAULT));
if (res)
goto fault;
compute_return_epc(regs);
regs->regs[insn.i_format.rt] = value;
break;
case lhu_op:
if (!access_ok(VERIFY_READ, addr, 2))
goto sigbus;
__asm__ __volatile__ (
".set\tnoat\n"
#ifdef __BIG_ENDIAN
"1:\tlbu\t%0, 0(%2)\n"
"2:\tlbu\t$1, 1(%2)\n\t"
#endif
#ifdef __LITTLE_ENDIAN
"1:\tlbu\t%0, 1(%2)\n"
"2:\tlbu\t$1, 0(%2)\n\t"
#endif
"sll\t%0, 0x8\n\t"
"or\t%0, $1\n\t"
"li\t%1, 0\n"
"3:\t.set\tat\n\t"
".section\t.fixup,\"ax\"\n\t"
"4:\tli\t%1, %3\n\t"
"j\t3b\n\t"
".previous\n\t"
".section\t__ex_table,\"a\"\n\t"
STR(PTR)"\t1b, 4b\n\t"
STR(PTR)"\t2b, 4b\n\t"
".previous"
: "=&r" (value), "=r" (res)
: "r" (addr), "i" (-EFAULT));
if (res)
goto fault;
compute_return_epc(regs);
regs->regs[insn.i_format.rt] = value;
break;
case lwu_op:
#ifdef CONFIG_64BIT
/*
* A 32-bit kernel might be running on a 64-bit processor. But
* if we're on a 32-bit processor and an i-cache incoherency
* or race makes us see a 64-bit instruction here the sdl/sdr
* would blow up, so for now we don't handle unaligned 64-bit
* instructions on 32-bit kernels.
*/
if (!access_ok(VERIFY_READ, addr, 4))
goto sigbus;
__asm__ __volatile__ (
#ifdef __BIG_ENDIAN
"1:\tlwl\t%0, (%2)\n"
"2:\tlwr\t%0, 3(%2)\n\t"
#endif
#ifdef __LITTLE_ENDIAN
"1:\tlwl\t%0, 3(%2)\n"
"2:\tlwr\t%0, (%2)\n\t"
#endif
"dsll\t%0, %0, 32\n\t"
"dsrl\t%0, %0, 32\n\t"
"li\t%1, 0\n"
"3:\t.section\t.fixup,\"ax\"\n\t"
"4:\tli\t%1, %3\n\t"
"j\t3b\n\t"
".previous\n\t"
".section\t__ex_table,\"a\"\n\t"
STR(PTR)"\t1b, 4b\n\t"
STR(PTR)"\t2b, 4b\n\t"
".previous"
: "=&r" (value), "=r" (res)
: "r" (addr), "i" (-EFAULT));
if (res)
goto fault;
compute_return_epc(regs);
regs->regs[insn.i_format.rt] = value;
break;
#endif /* CONFIG_64BIT */
/* Cannot handle 64-bit instructions in 32-bit kernel */
goto sigill;
case ld_op:
#ifdef CONFIG_64BIT
/*
* A 32-bit kernel might be running on a 64-bit processor. But
* if we're on a 32-bit processor and an i-cache incoherency
* or race makes us see a 64-bit instruction here the sdl/sdr
* would blow up, so for now we don't handle unaligned 64-bit
* instructions on 32-bit kernels.
*/
if (!access_ok(VERIFY_READ, addr, 8))
goto sigbus;
__asm__ __volatile__ (
#ifdef __BIG_ENDIAN
"1:\tldl\t%0, (%2)\n"
"2:\tldr\t%0, 7(%2)\n\t"
#endif
#ifdef __LITTLE_ENDIAN
"1:\tldl\t%0, 7(%2)\n"
"2:\tldr\t%0, (%2)\n\t"
#endif
"li\t%1, 0\n"
"3:\t.section\t.fixup,\"ax\"\n\t"
"4:\tli\t%1, %3\n\t"
"j\t3b\n\t"
".previous\n\t"
".section\t__ex_table,\"a\"\n\t"
STR(PTR)"\t1b, 4b\n\t"
STR(PTR)"\t2b, 4b\n\t"
".previous"
: "=&r" (value), "=r" (res)
: "r" (addr), "i" (-EFAULT));
if (res)
goto fault;
compute_return_epc(regs);
regs->regs[insn.i_format.rt] = value;
break;
#endif /* CONFIG_64BIT */
/* Cannot handle 64-bit instructions in 32-bit kernel */
goto sigill;
case sh_op:
if (!access_ok(VERIFY_WRITE, addr, 2))
goto sigbus;
value = regs->regs[insn.i_format.rt];
__asm__ __volatile__ (
#ifdef __BIG_ENDIAN
".set\tnoat\n"
"1:\tsb\t%1, 1(%2)\n\t"
"srl\t$1, %1, 0x8\n"
"2:\tsb\t$1, 0(%2)\n\t"
".set\tat\n\t"
#endif
#ifdef __LITTLE_ENDIAN
".set\tnoat\n"
"1:\tsb\t%1, 0(%2)\n\t"
"srl\t$1,%1, 0x8\n"
"2:\tsb\t$1, 1(%2)\n\t"
".set\tat\n\t"
#endif
"li\t%0, 0\n"
"3:\n\t"
".section\t.fixup,\"ax\"\n\t"
"4:\tli\t%0, %3\n\t"
"j\t3b\n\t"
".previous\n\t"
".section\t__ex_table,\"a\"\n\t"
STR(PTR)"\t1b, 4b\n\t"
STR(PTR)"\t2b, 4b\n\t"
".previous"
: "=r" (res)
: "r" (value), "r" (addr), "i" (-EFAULT));
if (res)
goto fault;
compute_return_epc(regs);
break;
case sw_op:
if (!access_ok(VERIFY_WRITE, addr, 4))
goto sigbus;
value = regs->regs[insn.i_format.rt];
__asm__ __volatile__ (
#ifdef __BIG_ENDIAN
"1:\tswl\t%1,(%2)\n"
"2:\tswr\t%1, 3(%2)\n\t"
#endif
#ifdef __LITTLE_ENDIAN
"1:\tswl\t%1, 3(%2)\n"
"2:\tswr\t%1, (%2)\n\t"
#endif
"li\t%0, 0\n"
"3:\n\t"
".section\t.fixup,\"ax\"\n\t"
"4:\tli\t%0, %3\n\t"
"j\t3b\n\t"
".previous\n\t"
".section\t__ex_table,\"a\"\n\t"
STR(PTR)"\t1b, 4b\n\t"
STR(PTR)"\t2b, 4b\n\t"
".previous"
: "=r" (res)
: "r" (value), "r" (addr), "i" (-EFAULT));
if (res)
goto fault;
compute_return_epc(regs);
break;
case sd_op:
#ifdef CONFIG_64BIT
/*
* A 32-bit kernel might be running on a 64-bit processor. But
* if we're on a 32-bit processor and an i-cache incoherency
* or race makes us see a 64-bit instruction here the sdl/sdr
* would blow up, so for now we don't handle unaligned 64-bit
* instructions on 32-bit kernels.
*/
if (!access_ok(VERIFY_WRITE, addr, 8))
goto sigbus;
value = regs->regs[insn.i_format.rt];
__asm__ __volatile__ (
#ifdef __BIG_ENDIAN
"1:\tsdl\t%1,(%2)\n"
"2:\tsdr\t%1, 7(%2)\n\t"
#endif
#ifdef __LITTLE_ENDIAN
"1:\tsdl\t%1, 7(%2)\n"
"2:\tsdr\t%1, (%2)\n\t"
#endif
"li\t%0, 0\n"
"3:\n\t"
".section\t.fixup,\"ax\"\n\t"
"4:\tli\t%0, %3\n\t"
"j\t3b\n\t"
".previous\n\t"
".section\t__ex_table,\"a\"\n\t"
STR(PTR)"\t1b, 4b\n\t"
STR(PTR)"\t2b, 4b\n\t"
".previous"
: "=r" (res)
: "r" (value), "r" (addr), "i" (-EFAULT));
if (res)
goto fault;
compute_return_epc(regs);
break;
#endif /* CONFIG_64BIT */
/* Cannot handle 64-bit instructions in 32-bit kernel */
goto sigill;
case lwc1_op:
case ldc1_op:
case swc1_op:
case sdc1_op:
/*
* I herewith declare: this does not happen. So send SIGBUS.
*/
goto sigbus;
/*
* COP2 is available to implementor for application specific use.
* It's up to applications to register a notifier chain and do
* whatever they have to do, including possible sending of signals.
*/
case lwc2_op:
cu2_notifier_call_chain(CU2_LWC2_OP, regs);
break;
case ldc2_op:
cu2_notifier_call_chain(CU2_LDC2_OP, regs);
break;
case swc2_op:
cu2_notifier_call_chain(CU2_SWC2_OP, regs);
break;
case sdc2_op:
cu2_notifier_call_chain(CU2_SDC2_OP, regs);
break;
default:
/*
* Pheeee... We encountered an as yet unknown instruction or
* cache coherence problem. Die sucker, die ...
*/
goto sigill;
}
#ifdef CONFIG_DEBUG_FS
unaligned_instructions++;
#endif
return;
fault:
/* Did we have an exception handler installed? */
if (fixup_exception(regs))
return;
die_if_kernel("Unhandled kernel unaligned access", regs);
force_sig(SIGSEGV, current);
return;
sigbus:
die_if_kernel("Unhandled kernel unaligned access", regs);
force_sig(SIGBUS, current);
return;
sigill:
die_if_kernel("Unhandled kernel unaligned access or invalid instruction", regs);
force_sig(SIGILL, current);
}
asmlinkage void do_ade(struct pt_regs *regs)
{
unsigned int __user *pc;
mm_segment_t seg;
perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS,
1, 0, regs, regs->cp0_badvaddr);
/*
* Did we catch a fault trying to load an instruction?
* Or are we running in MIPS16 mode?
*/
if ((regs->cp0_badvaddr == regs->cp0_epc) || (regs->cp0_epc & 0x1))
goto sigbus;
pc = (unsigned int __user *) exception_epc(regs);
if (user_mode(regs) && !test_thread_flag(TIF_FIXADE))
goto sigbus;
if (unaligned_action == UNALIGNED_ACTION_SIGNAL)
goto sigbus;
else if (unaligned_action == UNALIGNED_ACTION_SHOW)
show_registers(regs);
/*
* Do branch emulation only if we didn't forward the exception.
* This is all so very ugly ...
*/
seg = get_fs();
if (!user_mode(regs))
set_fs(KERNEL_DS);
emulate_load_store_insn(regs, (void __user *)regs->cp0_badvaddr, pc);
set_fs(seg);
return;
sigbus:
die_if_kernel("Kernel unaligned instruction access", regs);
force_sig(SIGBUS, current);
/*
* XXX On return from the signal handler we should advance the epc
*/
}
#ifdef CONFIG_DEBUG_FS
extern struct dentry *mips_debugfs_dir;
static int __init debugfs_unaligned(void)
{
struct dentry *d;
if (!mips_debugfs_dir)
return -ENODEV;
d = debugfs_create_u32("unaligned_instructions", S_IRUGO,
mips_debugfs_dir, &unaligned_instructions);
if (!d)
return -ENOMEM;
d = debugfs_create_u32("unaligned_action", S_IRUGO | S_IWUSR,
mips_debugfs_dir, &unaligned_action);
if (!d)
return -ENOMEM;
return 0;
}
__initcall(debugfs_unaligned);
#endif
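/*
 * Illustrative use of the knobs above, assuming debugfs is mounted at
 * /sys/kernel/debug and mips_debugfs_dir is named "mips":
 *	cat /sys/kernel/debug/mips/unaligned_instructions
 *	echo 2 > /sys/kernel/debug/mips/unaligned_action
 * Action values: 0 quiet fixup, 1 send a signal, 2 fix up and show registers.
 */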
| gpl-2.0 |
THEindian/glacier_kernel | arch/powerpc/platforms/powermac/low_i2c.c | 2730 | 37112 | /*
* arch/powerpc/platforms/powermac/low_i2c.c
*
* Copyright (C) 2003-2005 Ben. Herrenschmidt (benh@kernel.crashing.org)
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*
* The linux i2c layer isn't completely suitable for our needs for various
* reasons ranging from too late initialisation to semantics not perfectly
* matching some requirements of the apple platform functions etc...
*
* This file thus provides a simple low level unified i2c interface for
* powermac that covers the various types of i2c busses used in Apple machines.
* For now, keywest, PMU and SMU, though we could add Cuda, or other bit
* banging busses found on older chipsets in earlier machines if we ever need
* one of them.
*
* The drivers in this file are synchronous/blocking. In addition, the
* keywest one is fairly slow due to the use of msleep instead of interrupts
* as the interrupt is currently used by i2c-keywest. In the long run, we
* might want to get rid of those high-level interfaces to linux i2c layer
* either completely (converting all drivers) or replacing them all with a
* single stub driver on top of this one. Once done, the interrupt will be
* available for our use.
*/
#undef DEBUG
#undef DEBUG_LOW
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/adb.h>
#include <linux/pmu.h>
#include <linux/delay.h>
#include <linux/completion.h>
#include <linux/platform_device.h>
#include <linux/interrupt.h>
#include <linux/timer.h>
#include <linux/mutex.h>
#include <linux/i2c.h>
#include <linux/slab.h>
#include <asm/keylargo.h>
#include <asm/uninorth.h>
#include <asm/io.h>
#include <asm/prom.h>
#include <asm/machdep.h>
#include <asm/smu.h>
#include <asm/pmac_pfunc.h>
#include <asm/pmac_low_i2c.h>
#ifdef DEBUG
#define DBG(x...) do {\
printk(KERN_DEBUG "low_i2c:" x); \
} while(0)
#else
#define DBG(x...)
#endif
#ifdef DEBUG_LOW
#define DBG_LOW(x...) do {\
printk(KERN_DEBUG "low_i2c:" x); \
} while(0)
#else
#define DBG_LOW(x...)
#endif
static int pmac_i2c_force_poll = 1;
/*
* A bus structure. Each bus in the system has such a structure associated.
*/
struct pmac_i2c_bus
{
struct list_head link;
struct device_node *controller;
struct device_node *busnode;
int type;
int flags;
struct i2c_adapter adapter;
void *hostdata;
int channel; /* some hosts have multiple */
int mode; /* current mode */
struct mutex mutex;
int opened;
int polled; /* open mode */
struct platform_device *platform_dev;
/* ops */
int (*open)(struct pmac_i2c_bus *bus);
void (*close)(struct pmac_i2c_bus *bus);
int (*xfer)(struct pmac_i2c_bus *bus, u8 addrdir, int subsize,
u32 subaddr, u8 *data, int len);
};
static LIST_HEAD(pmac_i2c_busses);
/*
* Keywest implementation
*/
struct pmac_i2c_host_kw
{
struct mutex mutex; /* Access mutex for use by
* i2c-keywest */
void __iomem *base; /* register base address */
int bsteps; /* register stepping */
int speed; /* speed */
int irq;
u8 *data;
unsigned len;
int state;
int rw;
int polled;
int result;
struct completion complete;
spinlock_t lock;
struct timer_list timeout_timer;
};
/* Register indices */
typedef enum {
reg_mode = 0,
reg_control,
reg_status,
reg_isr,
reg_ier,
reg_addr,
reg_subaddr,
reg_data
} reg_t;
/* The Tumbler audio equalizer can be really slow sometimes */
#define KW_POLL_TIMEOUT (2*HZ)
/* Mode register */
#define KW_I2C_MODE_100KHZ 0x00
#define KW_I2C_MODE_50KHZ 0x01
#define KW_I2C_MODE_25KHZ 0x02
#define KW_I2C_MODE_DUMB 0x00
#define KW_I2C_MODE_STANDARD 0x04
#define KW_I2C_MODE_STANDARDSUB 0x08
#define KW_I2C_MODE_COMBINED 0x0C
#define KW_I2C_MODE_MODE_MASK 0x0C
#define KW_I2C_MODE_CHAN_MASK 0xF0
/* Control register */
#define KW_I2C_CTL_AAK 0x01
#define KW_I2C_CTL_XADDR 0x02
#define KW_I2C_CTL_STOP 0x04
#define KW_I2C_CTL_START 0x08
/* Status register */
#define KW_I2C_STAT_BUSY 0x01
#define KW_I2C_STAT_LAST_AAK 0x02
#define KW_I2C_STAT_LAST_RW 0x04
#define KW_I2C_STAT_SDA 0x08
#define KW_I2C_STAT_SCL 0x10
/* IER & ISR registers */
#define KW_I2C_IRQ_DATA 0x01
#define KW_I2C_IRQ_ADDR 0x02
#define KW_I2C_IRQ_STOP 0x04
#define KW_I2C_IRQ_START 0x08
#define KW_I2C_IRQ_MASK 0x0F
/* State machine states */
enum {
state_idle,
state_addr,
state_read,
state_write,
state_stop,
state_dead
};
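/*
 * Typical transfer flow: state_addr, then state_read or state_write,
 * then state_stop and back to state_idle; state_dead is a terminal
 * error state.
 */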
#define WRONG_STATE(name) do {\
printk(KERN_DEBUG "KW: wrong state. Got %s, state: %s " \
"(isr: %02x)\n", \
name, __kw_state_names[host->state], isr); \
} while(0)
static const char *__kw_state_names[] = {
"state_idle",
"state_addr",
"state_read",
"state_write",
"state_stop",
"state_dead"
};
static inline u8 __kw_read_reg(struct pmac_i2c_host_kw *host, reg_t reg)
{
return readb(host->base + (((unsigned int)reg) << host->bsteps));
}
static inline void __kw_write_reg(struct pmac_i2c_host_kw *host,
reg_t reg, u8 val)
{
writeb(val, host->base + (((unsigned)reg) << host->bsteps));
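/* read back a register to post the write; presumably flushes it to the bus */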
(void)__kw_read_reg(host, reg_subaddr);
}
#define kw_write_reg(reg, val) __kw_write_reg(host, reg, val)
#define kw_read_reg(reg) __kw_read_reg(host, reg)
static u8 kw_i2c_wait_interrupt(struct pmac_i2c_host_kw *host)
{
int i, j;
u8 isr;
for (i = 0; i < 1000; i++) {
isr = kw_read_reg(reg_isr) & KW_I2C_IRQ_MASK;
if (isr != 0)
return isr;
/* This code is used with the timebase frozen, we cannot rely
* on udelay nor schedule when in polled mode !
* For now, just use a bogus loop....
*/
if (host->polled) {
for (j = 1; j < 100000; j++)
mb();
} else
msleep(1);
}
return isr;
}
static void kw_i2c_do_stop(struct pmac_i2c_host_kw *host, int result)
{
kw_write_reg(reg_control, KW_I2C_CTL_STOP);
host->state = state_stop;
host->result = result;
}
static void kw_i2c_handle_interrupt(struct pmac_i2c_host_kw *host, u8 isr)
{
u8 ack;
DBG_LOW("kw_handle_interrupt(%s, isr: %x)\n",
__kw_state_names[host->state], isr);
if (host->state == state_idle) {
printk(KERN_WARNING "low_i2c: Keywest got an out of state"
" interrupt, ignoring\n");
kw_write_reg(reg_isr, isr);
return;
}
if (isr == 0) {
printk(KERN_WARNING "low_i2c: Timeout in i2c transfer"
" on keywest !\n");
if (host->state != state_stop) {
kw_i2c_do_stop(host, -EIO);
return;
}
ack = kw_read_reg(reg_status);
if (ack & KW_I2C_STAT_BUSY)
kw_write_reg(reg_status, 0);
host->state = state_idle;
kw_write_reg(reg_ier, 0x00);
if (!host->polled)
complete(&host->complete);
return;
}
if (isr & KW_I2C_IRQ_ADDR) {
ack = kw_read_reg(reg_status);
if (host->state != state_addr) {
WRONG_STATE("KW_I2C_IRQ_ADDR");
kw_i2c_do_stop(host, -EIO);
}
if ((ack & KW_I2C_STAT_LAST_AAK) == 0) {
host->result = -ENXIO;
host->state = state_stop;
DBG_LOW("KW: NAK on address\n");
} else {
if (host->len == 0)
kw_i2c_do_stop(host, 0);
else if (host->rw) {
host->state = state_read;
if (host->len > 1)
kw_write_reg(reg_control,
KW_I2C_CTL_AAK);
} else {
host->state = state_write;
kw_write_reg(reg_data, *(host->data++));
host->len--;
}
}
kw_write_reg(reg_isr, KW_I2C_IRQ_ADDR);
}
if (isr & KW_I2C_IRQ_DATA) {
if (host->state == state_read) {
*(host->data++) = kw_read_reg(reg_data);
host->len--;
kw_write_reg(reg_isr, KW_I2C_IRQ_DATA);
if (host->len == 0)
host->state = state_stop;
else if (host->len == 1)
kw_write_reg(reg_control, 0);
} else if (host->state == state_write) {
ack = kw_read_reg(reg_status);
if ((ack & KW_I2C_STAT_LAST_AAK) == 0) {
DBG_LOW("KW: nack on data write\n");
host->result = -EFBIG;
host->state = state_stop;
} else if (host->len) {
kw_write_reg(reg_data, *(host->data++));
host->len--;
} else
kw_i2c_do_stop(host, 0);
} else {
WRONG_STATE("KW_I2C_IRQ_DATA");
if (host->state != state_stop)
kw_i2c_do_stop(host, -EIO);
}
kw_write_reg(reg_isr, KW_I2C_IRQ_DATA);
}
if (isr & KW_I2C_IRQ_STOP) {
kw_write_reg(reg_isr, KW_I2C_IRQ_STOP);
if (host->state != state_stop) {
WRONG_STATE("KW_I2C_IRQ_STOP");
host->result = -EIO;
}
host->state = state_idle;
if (!host->polled)
complete(&host->complete);
}
/* Below should only happen in manual mode which we don't use ... */
if (isr & KW_I2C_IRQ_START)
kw_write_reg(reg_isr, KW_I2C_IRQ_START);
}
/* Interrupt handler */
static irqreturn_t kw_i2c_irq(int irq, void *dev_id)
{
struct pmac_i2c_host_kw *host = dev_id;
unsigned long flags;
spin_lock_irqsave(&host->lock, flags);
del_timer(&host->timeout_timer);
kw_i2c_handle_interrupt(host, kw_read_reg(reg_isr));
if (host->state != state_idle) {
host->timeout_timer.expires = jiffies + KW_POLL_TIMEOUT;
add_timer(&host->timeout_timer);
}
spin_unlock_irqrestore(&host->lock, flags);
return IRQ_HANDLED;
}
static void kw_i2c_timeout(unsigned long data)
{
struct pmac_i2c_host_kw *host = (struct pmac_i2c_host_kw *)data;
unsigned long flags;
spin_lock_irqsave(&host->lock, flags);
kw_i2c_handle_interrupt(host, kw_read_reg(reg_isr));
if (host->state != state_idle) {
host->timeout_timer.expires = jiffies + KW_POLL_TIMEOUT;
add_timer(&host->timeout_timer);
}
spin_unlock_irqrestore(&host->lock, flags);
}
static int kw_i2c_open(struct pmac_i2c_bus *bus)
{
struct pmac_i2c_host_kw *host = bus->hostdata;
mutex_lock(&host->mutex);
return 0;
}
static void kw_i2c_close(struct pmac_i2c_bus *bus)
{
struct pmac_i2c_host_kw *host = bus->hostdata;
mutex_unlock(&host->mutex);
}
static int kw_i2c_xfer(struct pmac_i2c_bus *bus, u8 addrdir, int subsize,
u32 subaddr, u8 *data, int len)
{
struct pmac_i2c_host_kw *host = bus->hostdata;
u8 mode_reg = host->speed;
int use_irq = host->irq != NO_IRQ && !bus->polled;
/* Setup mode & subaddress if any */
switch(bus->mode) {
case pmac_i2c_mode_dumb:
return -EINVAL;
case pmac_i2c_mode_std:
mode_reg |= KW_I2C_MODE_STANDARD;
if (subsize != 0)
return -EINVAL;
break;
case pmac_i2c_mode_stdsub:
mode_reg |= KW_I2C_MODE_STANDARDSUB;
if (subsize != 1)
return -EINVAL;
break;
case pmac_i2c_mode_combined:
mode_reg |= KW_I2C_MODE_COMBINED;
if (subsize != 1)
return -EINVAL;
break;
}
/* Setup channel & clear pending irqs */
kw_write_reg(reg_isr, kw_read_reg(reg_isr));
kw_write_reg(reg_mode, mode_reg | (bus->channel << 4));
kw_write_reg(reg_status, 0);
/* Set up address and r/w bit, strip possible stale bus number from
* address top bits
*/
kw_write_reg(reg_addr, addrdir & 0xff);
/* Set up the sub address */
if ((mode_reg & KW_I2C_MODE_MODE_MASK) == KW_I2C_MODE_STANDARDSUB
|| (mode_reg & KW_I2C_MODE_MODE_MASK) == KW_I2C_MODE_COMBINED)
kw_write_reg(reg_subaddr, subaddr);
/* Prepare for async operations */
host->data = data;
host->len = len;
host->state = state_addr;
host->result = 0;
host->rw = (addrdir & 1);
host->polled = bus->polled;
/* Enable interrupt if not using polled mode and interrupt is
* available
*/
if (use_irq) {
/* Clear completion */
INIT_COMPLETION(host->complete);
/* Ack stale interrupts */
kw_write_reg(reg_isr, kw_read_reg(reg_isr));
/* Arm timeout */
host->timeout_timer.expires = jiffies + KW_POLL_TIMEOUT;
add_timer(&host->timeout_timer);
/* Enable emission */
kw_write_reg(reg_ier, KW_I2C_IRQ_MASK);
}
/* Start sending address */
kw_write_reg(reg_control, KW_I2C_CTL_XADDR);
/* Wait for completion */
if (use_irq)
wait_for_completion(&host->complete);
else {
while(host->state != state_idle) {
unsigned long flags;
u8 isr = kw_i2c_wait_interrupt(host);
spin_lock_irqsave(&host->lock, flags);
kw_i2c_handle_interrupt(host, isr);
spin_unlock_irqrestore(&host->lock, flags);
}
}
/* Disable emission */
kw_write_reg(reg_ier, 0);
return host->result;
}
static struct pmac_i2c_host_kw *__init kw_i2c_host_init(struct device_node *np)
{
struct pmac_i2c_host_kw *host;
const u32 *psteps, *prate, *addrp;
u32 steps;
host = kzalloc(sizeof(struct pmac_i2c_host_kw), GFP_KERNEL);
if (host == NULL) {
printk(KERN_ERR "low_i2c: Can't allocate host for %s\n",
np->full_name);
return NULL;
}
/* Apple is kind enough to provide a valid AAPL,address property
* on all i2c keywest nodes so far ... we would have to fallback
* to macio parsing if that wasn't the case
*/
addrp = of_get_property(np, "AAPL,address", NULL);
if (addrp == NULL) {
printk(KERN_ERR "low_i2c: Can't find address for %s\n",
np->full_name);
kfree(host);
return NULL;
}
mutex_init(&host->mutex);
init_completion(&host->complete);
spin_lock_init(&host->lock);
init_timer(&host->timeout_timer);
host->timeout_timer.function = kw_i2c_timeout;
host->timeout_timer.data = (unsigned long)host;
psteps = of_get_property(np, "AAPL,address-step", NULL);
steps = psteps ? (*psteps) : 0x10;
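/* derive bsteps = log2(register stride): shift until the low bit is set */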
for (host->bsteps = 0; (steps & 0x01) == 0; host->bsteps++)
steps >>= 1;
/* Select interface rate */
host->speed = KW_I2C_MODE_25KHZ;
prate = of_get_property(np, "AAPL,i2c-rate", NULL);
if (prate) switch(*prate) {
case 100:
host->speed = KW_I2C_MODE_100KHZ;
break;
case 50:
host->speed = KW_I2C_MODE_50KHZ;
break;
case 25:
host->speed = KW_I2C_MODE_25KHZ;
break;
}
host->irq = irq_of_parse_and_map(np, 0);
if (host->irq == NO_IRQ)
printk(KERN_WARNING
"low_i2c: Failed to map interrupt for %s\n",
np->full_name);
host->base = ioremap((*addrp), 0x1000);
if (host->base == NULL) {
printk(KERN_ERR "low_i2c: Can't map registers for %s\n",
np->full_name);
kfree(host);
return NULL;
}
/* Make sure IRQ is disabled */
kw_write_reg(reg_ier, 0);
/* Request chip interrupt. We set IRQF_NO_SUSPEND because we don't
* want that interrupt disabled between the 2 passes of driver
* suspend or we'll have issues running the pfuncs
*/
if (request_irq(host->irq, kw_i2c_irq, IRQF_NO_SUSPEND,
"keywest i2c", host))
host->irq = NO_IRQ;
printk(KERN_INFO "KeyWest i2c @0x%08x irq %d %s\n",
*addrp, host->irq, np->full_name);
return host;
}
static void __init kw_i2c_add(struct pmac_i2c_host_kw *host,
struct device_node *controller,
struct device_node *busnode,
int channel)
{
struct pmac_i2c_bus *bus;
bus = kzalloc(sizeof(struct pmac_i2c_bus), GFP_KERNEL);
if (bus == NULL)
return;
bus->controller = of_node_get(controller);
bus->busnode = of_node_get(busnode);
bus->type = pmac_i2c_bus_keywest;
bus->hostdata = host;
bus->channel = channel;
bus->mode = pmac_i2c_mode_std;
bus->open = kw_i2c_open;
bus->close = kw_i2c_close;
bus->xfer = kw_i2c_xfer;
mutex_init(&bus->mutex);
if (controller == busnode)
bus->flags = pmac_i2c_multibus;
list_add(&bus->link, &pmac_i2c_busses);
printk(KERN_INFO " channel %d bus %s\n", channel,
(controller == busnode) ? "<multibus>" : busnode->full_name);
}
static void __init kw_i2c_probe(void)
{
struct device_node *np, *child, *parent;
/* Probe keywest-i2c busses */
for_each_compatible_node(np, "i2c","keywest-i2c") {
struct pmac_i2c_host_kw *host;
int multibus;
/* Found one, init a host structure */
host = kw_i2c_host_init(np);
if (host == NULL)
continue;
/* Now check if we have a multibus setup (old style) or if we
* have proper bus nodes. Note that the "new" way (proper bus
* nodes) might cause us to not create some busses that are
* kept hidden in the device-tree. In the future, we might
* want to work around that by creating busses without a node
* but not for now
*/
child = of_get_next_child(np, NULL);
multibus = !child || strcmp(child->name, "i2c-bus");
of_node_put(child);
/* For a multibus setup, we get the bus count based on the
* parent type
*/
if (multibus) {
int chans, i;
parent = of_get_parent(np);
if (parent == NULL)
continue;
chans = parent->name[0] == 'u' ? 2 : 1;
for (i = 0; i < chans; i++)
kw_i2c_add(host, np, np, i);
} else {
for (child = NULL;
(child = of_get_next_child(np, child)) != NULL;) {
const u32 *reg = of_get_property(child,
"reg", NULL);
if (reg == NULL)
continue;
kw_i2c_add(host, np, child, *reg);
}
}
}
}
/*
*
* PMU implementation
*
*/
#ifdef CONFIG_ADB_PMU
/*
* i2c command block to the PMU
*/
struct pmu_i2c_hdr {
u8 bus;
u8 mode;
u8 bus2;
u8 address;
u8 sub_addr;
u8 comb_addr;
u8 count;
u8 data[];
};
static void pmu_i2c_complete(struct adb_request *req)
{
complete(req->arg);
}
static int pmu_i2c_xfer(struct pmac_i2c_bus *bus, u8 addrdir, int subsize,
u32 subaddr, u8 *data, int len)
{
struct adb_request *req = bus->hostdata;
struct pmu_i2c_hdr *hdr = (struct pmu_i2c_hdr *)&req->data[1];
struct completion comp;
int read = addrdir & 1;
int retry;
int rc = 0;
/* For now, limit ourselves to 16 bytes transfers */
if (len > 16)
return -EINVAL;
init_completion(&comp);
for (retry = 0; retry < 16; retry++) {
memset(req, 0, sizeof(struct adb_request));
hdr->bus = bus->channel;
hdr->count = len;
switch(bus->mode) {
case pmac_i2c_mode_std:
if (subsize != 0)
return -EINVAL;
hdr->address = addrdir;
hdr->mode = PMU_I2C_MODE_SIMPLE;
break;
case pmac_i2c_mode_stdsub:
case pmac_i2c_mode_combined:
if (subsize != 1)
return -EINVAL;
hdr->address = addrdir & 0xfe;
hdr->comb_addr = addrdir;
hdr->sub_addr = subaddr;
if (bus->mode == pmac_i2c_mode_stdsub)
hdr->mode = PMU_I2C_MODE_STDSUB;
else
hdr->mode = PMU_I2C_MODE_COMBINED;
break;
default:
return -EINVAL;
}
INIT_COMPLETION(comp);
req->data[0] = PMU_I2C_CMD;
req->reply[0] = 0xff;
req->nbytes = sizeof(struct pmu_i2c_hdr) + 1;
req->done = pmu_i2c_complete;
req->arg = &comp;
if (!read && len) {
memcpy(hdr->data, data, len);
req->nbytes += len;
}
rc = pmu_queue_request(req);
if (rc)
return rc;
wait_for_completion(&comp);
if (req->reply[0] == PMU_I2C_STATUS_OK)
break;
msleep(15);
}
if (req->reply[0] != PMU_I2C_STATUS_OK)
return -EIO;
for (retry = 0; retry < 16; retry++) {
memset(req, 0, sizeof(struct adb_request));
/* I know that looks like a lot, slow as hell, but darwin
* does it so let's be on the safe side for now
*/
msleep(15);
hdr->bus = PMU_I2C_BUS_STATUS;
INIT_COMPLETION(comp);
req->data[0] = PMU_I2C_CMD;
req->reply[0] = 0xff;
req->nbytes = 2;
req->done = pmu_i2c_complete;
req->arg = &comp;
rc = pmu_queue_request(req);
if (rc)
return rc;
wait_for_completion(&comp);
if (req->reply[0] == PMU_I2C_STATUS_OK && !read)
return 0;
if (req->reply[0] == PMU_I2C_STATUS_DATAREAD && read) {
int rlen = req->reply_len - 1;
if (rlen != len) {
printk(KERN_WARNING "low_i2c: PMU returned %d"
" bytes, expected %d !\n", rlen, len);
return -EIO;
}
if (len)
memcpy(data, &req->reply[1], len);
return 0;
}
}
return -EIO;
}
static void __init pmu_i2c_probe(void)
{
struct pmac_i2c_bus *bus;
struct device_node *busnode;
int channel, sz;
if (!pmu_present())
return;
/* There might or might not be a "pmu-i2c" node, we use that
* or via-pmu itself, whatever we find. I haven't seen a machine
* with separate bus nodes, so we assume a multibus setup
*/
busnode = of_find_node_by_name(NULL, "pmu-i2c");
if (busnode == NULL)
busnode = of_find_node_by_name(NULL, "via-pmu");
if (busnode == NULL)
return;
printk(KERN_INFO "PMU i2c %s\n", busnode->full_name);
/*
* We add bus 1 and 2 only for now, bus 0 is "special"
*/
for (channel = 1; channel <= 2; channel++) {
sz = sizeof(struct pmac_i2c_bus) + sizeof(struct adb_request);
bus = kzalloc(sz, GFP_KERNEL);
if (bus == NULL)
return;
bus->controller = busnode;
bus->busnode = busnode;
bus->type = pmac_i2c_bus_pmu;
bus->channel = channel;
bus->mode = pmac_i2c_mode_std;
bus->hostdata = bus + 1;
bus->xfer = pmu_i2c_xfer;
mutex_init(&bus->mutex);
bus->flags = pmac_i2c_multibus;
list_add(&bus->link, &pmac_i2c_busses);
printk(KERN_INFO " channel %d bus <multibus>\n", channel);
}
}
#endif /* CONFIG_ADB_PMU */
/*
*
* SMU implementation
*
*/
#ifdef CONFIG_PMAC_SMU
static void smu_i2c_complete(struct smu_i2c_cmd *cmd, void *misc)
{
complete(misc);
}
static int smu_i2c_xfer(struct pmac_i2c_bus *bus, u8 addrdir, int subsize,
u32 subaddr, u8 *data, int len)
{
struct smu_i2c_cmd *cmd = bus->hostdata;
struct completion comp;
int read = addrdir & 1;
int rc = 0;
if ((read && len > SMU_I2C_READ_MAX) ||
((!read) && len > SMU_I2C_WRITE_MAX))
return -EINVAL;
memset(cmd, 0, sizeof(struct smu_i2c_cmd));
cmd->info.bus = bus->channel;
cmd->info.devaddr = addrdir;
cmd->info.datalen = len;
switch(bus->mode) {
case pmac_i2c_mode_std:
if (subsize != 0)
return -EINVAL;
cmd->info.type = SMU_I2C_TRANSFER_SIMPLE;
break;
case pmac_i2c_mode_stdsub:
case pmac_i2c_mode_combined:
if (subsize > 3 || subsize < 1)
return -EINVAL;
cmd->info.sublen = subsize;
/* that's big-endian only but heh ! */
memcpy(&cmd->info.subaddr, ((char *)&subaddr) + (4 - subsize),
subsize);
if (bus->mode == pmac_i2c_mode_stdsub)
cmd->info.type = SMU_I2C_TRANSFER_STDSUB;
else
cmd->info.type = SMU_I2C_TRANSFER_COMBINED;
break;
default:
return -EINVAL;
}
if (!read && len)
memcpy(cmd->info.data, data, len);
init_completion(&comp);
cmd->done = smu_i2c_complete;
cmd->misc = &comp;
rc = smu_queue_i2c(cmd);
if (rc < 0)
return rc;
wait_for_completion(&comp);
rc = cmd->status;
if (read && len)
memcpy(data, cmd->info.data, len);
return rc < 0 ? rc : 0;
}
static void __init smu_i2c_probe(void)
{
struct device_node *controller, *busnode;
struct pmac_i2c_bus *bus;
const u32 *reg;
int sz;
if (!smu_present())
return;
controller = of_find_node_by_name(NULL, "smu-i2c-control");
if (controller == NULL)
controller = of_find_node_by_name(NULL, "smu");
if (controller == NULL)
return;
printk(KERN_INFO "SMU i2c %s\n", controller->full_name);
/* Look for children; note that they might not be of the right
* type as older device trees mix i2c busses and other things
* at the same level
*/
for (busnode = NULL;
(busnode = of_get_next_child(controller, busnode)) != NULL;) {
if (strcmp(busnode->type, "i2c") &&
strcmp(busnode->type, "i2c-bus"))
continue;
reg = of_get_property(busnode, "reg", NULL);
if (reg == NULL)
continue;
sz = sizeof(struct pmac_i2c_bus) + sizeof(struct smu_i2c_cmd);
bus = kzalloc(sz, GFP_KERNEL);
if (bus == NULL)
return;
bus->controller = controller;
bus->busnode = of_node_get(busnode);
bus->type = pmac_i2c_bus_smu;
bus->channel = *reg;
bus->mode = pmac_i2c_mode_std;
bus->hostdata = bus + 1;
bus->xfer = smu_i2c_xfer;
mutex_init(&bus->mutex);
bus->flags = 0;
list_add(&bus->link, &pmac_i2c_busses);
printk(KERN_INFO " channel %x bus %s\n",
bus->channel, busnode->full_name);
}
}
#endif /* CONFIG_PMAC_SMU */
/*
*
* Core code
*
*/
struct pmac_i2c_bus *pmac_i2c_find_bus(struct device_node *node)
{
struct device_node *p = of_node_get(node);
struct device_node *prev = NULL;
struct pmac_i2c_bus *bus;
while(p) {
list_for_each_entry(bus, &pmac_i2c_busses, link) {
if (p == bus->busnode) {
if (prev && bus->flags & pmac_i2c_multibus) {
const u32 *reg;
reg = of_get_property(prev, "reg",
NULL);
if (!reg)
continue;
if (((*reg) >> 8) != bus->channel)
continue;
}
of_node_put(p);
of_node_put(prev);
return bus;
}
}
of_node_put(prev);
prev = p;
p = of_get_parent(p);
}
return NULL;
}
EXPORT_SYMBOL_GPL(pmac_i2c_find_bus);
u8 pmac_i2c_get_dev_addr(struct device_node *device)
{
const u32 *reg = of_get_property(device, "reg", NULL);
if (reg == NULL)
return 0;
return (*reg) & 0xff;
}
EXPORT_SYMBOL_GPL(pmac_i2c_get_dev_addr);
struct device_node *pmac_i2c_get_controller(struct pmac_i2c_bus *bus)
{
return bus->controller;
}
EXPORT_SYMBOL_GPL(pmac_i2c_get_controller);
struct device_node *pmac_i2c_get_bus_node(struct pmac_i2c_bus *bus)
{
return bus->busnode;
}
EXPORT_SYMBOL_GPL(pmac_i2c_get_bus_node);
int pmac_i2c_get_type(struct pmac_i2c_bus *bus)
{
return bus->type;
}
EXPORT_SYMBOL_GPL(pmac_i2c_get_type);
int pmac_i2c_get_flags(struct pmac_i2c_bus *bus)
{
return bus->flags;
}
EXPORT_SYMBOL_GPL(pmac_i2c_get_flags);
int pmac_i2c_get_channel(struct pmac_i2c_bus *bus)
{
return bus->channel;
}
EXPORT_SYMBOL_GPL(pmac_i2c_get_channel);
struct i2c_adapter *pmac_i2c_get_adapter(struct pmac_i2c_bus *bus)
{
return &bus->adapter;
}
EXPORT_SYMBOL_GPL(pmac_i2c_get_adapter);
struct pmac_i2c_bus *pmac_i2c_adapter_to_bus(struct i2c_adapter *adapter)
{
struct pmac_i2c_bus *bus;
list_for_each_entry(bus, &pmac_i2c_busses, link)
if (&bus->adapter == adapter)
return bus;
return NULL;
}
EXPORT_SYMBOL_GPL(pmac_i2c_adapter_to_bus);
int pmac_i2c_match_adapter(struct device_node *dev, struct i2c_adapter *adapter)
{
struct pmac_i2c_bus *bus = pmac_i2c_find_bus(dev);
if (bus == NULL)
return 0;
return (&bus->adapter == adapter);
}
EXPORT_SYMBOL_GPL(pmac_i2c_match_adapter);
int pmac_low_i2c_lock(struct device_node *np)
{
struct pmac_i2c_bus *bus, *found = NULL;
list_for_each_entry(bus, &pmac_i2c_busses, link) {
if (np == bus->controller) {
found = bus;
break;
}
}
if (!found)
return -ENODEV;
return pmac_i2c_open(found, 0);
}
EXPORT_SYMBOL_GPL(pmac_low_i2c_lock);
int pmac_low_i2c_unlock(struct device_node *np)
{
struct pmac_i2c_bus *bus, *found = NULL;
list_for_each_entry(bus, &pmac_i2c_busses, link) {
if (np == bus->controller) {
found = bus;
break;
}
}
if (!found)
return -ENODEV;
pmac_i2c_close(bus);
return 0;
}
EXPORT_SYMBOL_GPL(pmac_low_i2c_unlock);
int pmac_i2c_open(struct pmac_i2c_bus *bus, int polled)
{
int rc;
mutex_lock(&bus->mutex);
bus->polled = polled || pmac_i2c_force_poll;
bus->opened = 1;
bus->mode = pmac_i2c_mode_std;
if (bus->open && (rc = bus->open(bus)) != 0) {
bus->opened = 0;
mutex_unlock(&bus->mutex);
return rc;
}
return 0;
}
EXPORT_SYMBOL_GPL(pmac_i2c_open);
void pmac_i2c_close(struct pmac_i2c_bus *bus)
{
WARN_ON(!bus->opened);
if (bus->close)
bus->close(bus);
bus->opened = 0;
mutex_unlock(&bus->mutex);
}
EXPORT_SYMBOL_GPL(pmac_i2c_close);
int pmac_i2c_setmode(struct pmac_i2c_bus *bus, int mode)
{
WARN_ON(!bus->opened);
/* Report me if you see the error below as there might be a new
* "combined4" mode that I need to implement for the SMU bus
*/
if (mode < pmac_i2c_mode_dumb || mode > pmac_i2c_mode_combined) {
printk(KERN_ERR "low_i2c: Invalid mode %d requested on"
" bus %s !\n", mode, bus->busnode->full_name);
return -EINVAL;
}
bus->mode = mode;
return 0;
}
EXPORT_SYMBOL_GPL(pmac_i2c_setmode);
int pmac_i2c_xfer(struct pmac_i2c_bus *bus, u8 addrdir, int subsize,
u32 subaddr, u8 *data, int len)
{
int rc;
WARN_ON(!bus->opened);
DBG("xfer() chan=%d, addrdir=0x%x, mode=%d, subsize=%d, subaddr=0x%x,"
" %d bytes, bus %s\n", bus->channel, addrdir, bus->mode, subsize,
subaddr, len, bus->busnode->full_name);
rc = bus->xfer(bus, addrdir, subsize, subaddr, data, len);
#ifdef DEBUG
if (rc)
DBG("xfer error %d\n", rc);
#endif
return rc;
}
EXPORT_SYMBOL_GPL(pmac_i2c_xfer);
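/*
 * Illustrative client sequence (hypothetical device node "np"; error
 * handling elided). A combined-mode read of len bytes at sub-address 0:
 *
 *	struct pmac_i2c_bus *bus = pmac_i2c_find_bus(np);
 *	u8 addr = pmac_i2c_get_dev_addr(np);
 *	int rc;
 *
 *	rc = pmac_i2c_open(bus, 0);
 *	pmac_i2c_setmode(bus, pmac_i2c_mode_combined);
 *	rc = pmac_i2c_xfer(bus, addr | pmac_i2c_read, 1, 0, buf, len);
 *	pmac_i2c_close(bus);
 */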
/* some quirks for platform function decoding */
enum {
pmac_i2c_quirk_invmask = 0x00000001u,
pmac_i2c_quirk_skip = 0x00000002u,
};
static void pmac_i2c_devscan(void (*callback)(struct device_node *dev,
int quirks))
{
struct pmac_i2c_bus *bus;
struct device_node *np;
static struct whitelist_ent {
char *name;
char *compatible;
int quirks;
} whitelist[] = {
/* XXX Study the device trees & Apple drivers to get the quirks
* right !
*/
/* Workaround: It seems that running the clockspreading
* properties on the eMac will cause lockups during boot.
* The machine seems to work fine without that. So for now,
* let's make sure i2c-hwclock doesn't match about "imic"
* clocks and we'll figure out if we really need to do
* something special about those later.
*/
{ "i2c-hwclock", "imic5002", pmac_i2c_quirk_skip },
{ "i2c-hwclock", "imic5003", pmac_i2c_quirk_skip },
{ "i2c-hwclock", NULL, pmac_i2c_quirk_invmask },
{ "i2c-cpu-voltage", NULL, 0},
{ "temp-monitor", NULL, 0 },
{ "supply-monitor", NULL, 0 },
{ NULL, NULL, 0 },
};
/* Only some devices need to have platform functions instantiated
* here. For now, we have a table. Others, like 9554 i2c GPIOs used
* on Xserve, if we ever do a driver for them, will use their own
* platform function instance
*/
list_for_each_entry(bus, &pmac_i2c_busses, link) {
for (np = NULL;
(np = of_get_next_child(bus->busnode, np)) != NULL;) {
struct whitelist_ent *p;
/* If multibus, check if device is on that bus */
if (bus->flags & pmac_i2c_multibus)
if (bus != pmac_i2c_find_bus(np))
continue;
for (p = whitelist; p->name != NULL; p++) {
if (strcmp(np->name, p->name))
continue;
if (p->compatible &&
!of_device_is_compatible(np, p->compatible))
continue;
if (p->quirks & pmac_i2c_quirk_skip)
break;
callback(np, p->quirks);
break;
}
}
}
}
#define MAX_I2C_DATA 64
struct pmac_i2c_pf_inst
{
struct pmac_i2c_bus *bus;
u8 addr;
u8 buffer[MAX_I2C_DATA];
u8 scratch[MAX_I2C_DATA];
int bytes;
int quirks;
};
static void* pmac_i2c_do_begin(struct pmf_function *func, struct pmf_args *args)
{
struct pmac_i2c_pf_inst *inst;
struct pmac_i2c_bus *bus;
bus = pmac_i2c_find_bus(func->node);
if (bus == NULL) {
printk(KERN_ERR "low_i2c: Can't find bus for %s (pfunc)\n",
func->node->full_name);
return NULL;
}
if (pmac_i2c_open(bus, 0)) {
printk(KERN_ERR "low_i2c: Can't open i2c bus for %s (pfunc)\n",
func->node->full_name);
return NULL;
}
/* XXX might need GFP_ATOMIC when called during the suspend process,
* but then, there are already lots of issues with suspending when
* near OOM that need to be resolved, the allocator itself should
* probably make GFP_NOIO implicit during suspend
*/
inst = kzalloc(sizeof(struct pmac_i2c_pf_inst), GFP_KERNEL);
if (inst == NULL) {
pmac_i2c_close(bus);
return NULL;
}
inst->bus = bus;
inst->addr = pmac_i2c_get_dev_addr(func->node);
inst->quirks = (int)(long)func->driver_data;
return inst;
}
static void pmac_i2c_do_end(struct pmf_function *func, void *instdata)
{
struct pmac_i2c_pf_inst *inst = instdata;
if (inst == NULL)
return;
pmac_i2c_close(inst->bus);
kfree(inst);
}
static int pmac_i2c_do_read(PMF_STD_ARGS, u32 len)
{
struct pmac_i2c_pf_inst *inst = instdata;
inst->bytes = len;
return pmac_i2c_xfer(inst->bus, inst->addr | pmac_i2c_read, 0, 0,
inst->buffer, len);
}
static int pmac_i2c_do_write(PMF_STD_ARGS, u32 len, const u8 *data)
{
struct pmac_i2c_pf_inst *inst = instdata;
return pmac_i2c_xfer(inst->bus, inst->addr | pmac_i2c_write, 0, 0,
(u8 *)data, len);
}
/* This function is used to do the masking & OR'ing for the "rmw" type
* callbacks. We should apply the mask and OR in the values in the
* buffer before writing back. The problem is that it seems that
* various darwin drivers implement the mask/or differently, thus
* we need to check the quirks first
*/
static void pmac_i2c_do_apply_rmw(struct pmac_i2c_pf_inst *inst,
u32 len, const u8 *mask, const u8 *val)
{
int i;
if (inst->quirks & pmac_i2c_quirk_invmask) {
for (i = 0; i < len; i ++)
inst->scratch[i] = (inst->buffer[i] & mask[i]) | val[i];
} else {
for (i = 0; i < len; i ++)
inst->scratch[i] = (inst->buffer[i] & ~mask[i])
| (val[i] & mask[i]);
}
}
static int pmac_i2c_do_rmw(PMF_STD_ARGS, u32 masklen, u32 valuelen,
u32 totallen, const u8 *maskdata,
const u8 *valuedata)
{
struct pmac_i2c_pf_inst *inst = instdata;
if (masklen > inst->bytes || valuelen > inst->bytes ||
totallen > inst->bytes || valuelen > masklen)
return -EINVAL;
pmac_i2c_do_apply_rmw(inst, masklen, maskdata, valuedata);
return pmac_i2c_xfer(inst->bus, inst->addr | pmac_i2c_write, 0, 0,
inst->scratch, totallen);
}
static int pmac_i2c_do_read_sub(PMF_STD_ARGS, u8 subaddr, u32 len)
{
struct pmac_i2c_pf_inst *inst = instdata;
inst->bytes = len;
return pmac_i2c_xfer(inst->bus, inst->addr | pmac_i2c_read, 1, subaddr,
inst->buffer, len);
}
static int pmac_i2c_do_write_sub(PMF_STD_ARGS, u8 subaddr, u32 len,
const u8 *data)
{
struct pmac_i2c_pf_inst *inst = instdata;
return pmac_i2c_xfer(inst->bus, inst->addr | pmac_i2c_write, 1,
subaddr, (u8 *)data, len);
}
static int pmac_i2c_do_set_mode(PMF_STD_ARGS, int mode)
{
struct pmac_i2c_pf_inst *inst = instdata;
return pmac_i2c_setmode(inst->bus, mode);
}
static int pmac_i2c_do_rmw_sub(PMF_STD_ARGS, u8 subaddr, u32 masklen,
u32 valuelen, u32 totallen, const u8 *maskdata,
const u8 *valuedata)
{
struct pmac_i2c_pf_inst *inst = instdata;
if (masklen > inst->bytes || valuelen > inst->bytes ||
totallen > inst->bytes || valuelen > masklen)
return -EINVAL;
pmac_i2c_do_apply_rmw(inst, masklen, maskdata, valuedata);
return pmac_i2c_xfer(inst->bus, inst->addr | pmac_i2c_write, 1,
subaddr, inst->scratch, totallen);
}
static int pmac_i2c_do_mask_and_comp(PMF_STD_ARGS, u32 len,
const u8 *maskdata,
const u8 *valuedata)
{
struct pmac_i2c_pf_inst *inst = instdata;
int i, match;
/* Get return value pointer, it's assumed to be a u32 */
if (!args || !args->count || !args->u[0].p)
return -EINVAL;
/* Check buffer */
if (len > inst->bytes)
return -EINVAL;
for (i = 0, match = 1; match && i < len; i ++)
if ((inst->buffer[i] & maskdata[i]) != valuedata[i])
match = 0;
*args->u[0].p = match;
return 0;
}
static int pmac_i2c_do_delay(PMF_STD_ARGS, u32 duration)
{
msleep((duration + 999) / 1000);
return 0;
}
static struct pmf_handlers pmac_i2c_pfunc_handlers = {
.begin = pmac_i2c_do_begin,
.end = pmac_i2c_do_end,
.read_i2c = pmac_i2c_do_read,
.write_i2c = pmac_i2c_do_write,
.rmw_i2c = pmac_i2c_do_rmw,
.read_i2c_sub = pmac_i2c_do_read_sub,
.write_i2c_sub = pmac_i2c_do_write_sub,
.rmw_i2c_sub = pmac_i2c_do_rmw_sub,
.set_i2c_mode = pmac_i2c_do_set_mode,
.mask_and_compare = pmac_i2c_do_mask_and_comp,
.delay = pmac_i2c_do_delay,
};
static void __init pmac_i2c_dev_create(struct device_node *np, int quirks)
{
DBG("dev_create(%s)\n", np->full_name);
pmf_register_driver(np, &pmac_i2c_pfunc_handlers,
(void *)(long)quirks);
}
static void __init pmac_i2c_dev_init(struct device_node *np, int quirks)
{
DBG("dev_create(%s)\n", np->full_name);
pmf_do_functions(np, NULL, 0, PMF_FLAGS_ON_INIT, NULL);
}
static void pmac_i2c_dev_suspend(struct device_node *np, int quirks)
{
DBG("dev_suspend(%s)\n", np->full_name);
pmf_do_functions(np, NULL, 0, PMF_FLAGS_ON_SLEEP, NULL);
}
static void pmac_i2c_dev_resume(struct device_node *np, int quirks)
{
DBG("dev_resume(%s)\n", np->full_name);
pmf_do_functions(np, NULL, 0, PMF_FLAGS_ON_WAKE, NULL);
}
void pmac_pfunc_i2c_suspend(void)
{
pmac_i2c_devscan(pmac_i2c_dev_suspend);
}
void pmac_pfunc_i2c_resume(void)
{
pmac_i2c_devscan(pmac_i2c_dev_resume);
}
/*
* Initialize us: probe all i2c busses on the machine, instantiate
* busses and platform functions as needed.
*/
/* This is non-static as it might be called early by smp code */
int __init pmac_i2c_init(void)
{
static int i2c_inited;
if (i2c_inited)
return 0;
i2c_inited = 1;
/* Probe keywest-i2c busses */
kw_i2c_probe();
#ifdef CONFIG_ADB_PMU
/* Probe PMU i2c busses */
pmu_i2c_probe();
#endif
#ifdef CONFIG_PMAC_SMU
/* Probe SMU i2c busses */
smu_i2c_probe();
#endif
/* Now add platform functions for some known devices */
pmac_i2c_devscan(pmac_i2c_dev_create);
return 0;
}
machine_arch_initcall(powermac, pmac_i2c_init);
/* Since pmac_i2c_init can be called too early for the platform device
* registration, we need to do it at a later time. In our case, subsys
* happens to fit well, though I agree it's a bit of a hack...
*/
static int __init pmac_i2c_create_platform_devices(void)
{
struct pmac_i2c_bus *bus;
int i = 0;
/* In the case where we are initialized from smp_init(), we must
* not use the timer (and thus the irq). It's safe from now on
* though
*/
pmac_i2c_force_poll = 0;
/* Create platform devices */
list_for_each_entry(bus, &pmac_i2c_busses, link) {
bus->platform_dev =
platform_device_alloc("i2c-powermac", i++);
if (bus->platform_dev == NULL)
return -ENOMEM;
bus->platform_dev->dev.platform_data = bus;
platform_device_add(bus->platform_dev);
}
/* Now call platform "init" functions */
pmac_i2c_devscan(pmac_i2c_dev_init);
return 0;
}
machine_subsys_initcall(powermac, pmac_i2c_create_platform_devices);
| gpl-2.0 |
sleshepic/L900-MA7-Kernel | arch/m32r/platforms/usrv/setup.c | 2986 | 5451 | /*
* linux/arch/m32r/platforms/usrv/setup.c
*
* Setup routines for MITSUBISHI uServer
*
* Copyright (c) 2001, 2002, 2003 Hiroyuki Kondo, Hirokazu Takata,
* Hitoshi Yamamoto
*/
#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <asm/system.h>
#include <asm/m32r.h>
#include <asm/io.h>
#define irq2port(x) (M32R_ICU_CR1_PORTL + ((x - 1) * sizeof(unsigned long)))
icu_data_t icu_data[M32700UT_NUM_CPU_IRQ];
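/* convention below: ILEVEL7 masks an interrupt, IEN|ILEVEL6 enables it */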
static void disable_mappi_irq(unsigned int irq)
{
unsigned long port, data;
port = irq2port(irq);
data = icu_data[irq].icucr|M32R_ICUCR_ILEVEL7;
outl(data, port);
}
static void enable_mappi_irq(unsigned int irq)
{
unsigned long port, data;
port = irq2port(irq);
data = icu_data[irq].icucr|M32R_ICUCR_IEN|M32R_ICUCR_ILEVEL6;
outl(data, port);
}
static void mask_mappi(struct irq_data *data)
{
disable_mappi_irq(data->irq);
}
static void unmask_mappi(struct irq_data *data)
{
enable_mappi_irq(data->irq);
}
static void shutdown_mappi(struct irq_data *data)
{
unsigned long port;
port = irq2port(data->irq);
outl(M32R_ICUCR_ILEVEL7, port);
}
static struct irq_chip mappi_irq_type =
{
.name = "M32700-IRQ",
.irq_shutdown = shutdown_mappi,
.irq_mask = mask_mappi,
.irq_unmask = unmask_mappi,
};
/*
* Interrupt Control Unit of PLD on M32700UT (Level 2)
*/
#define irq2pldirq(x) ((x) - M32700UT_PLD_IRQ_BASE)
#define pldirq2port(x) (unsigned long)((int)PLD_ICUCR1 + \
(((x) - 1) * sizeof(unsigned short)))
typedef struct {
unsigned short icucr; /* ICU Control Register */
} pld_icu_data_t;
static pld_icu_data_t pld_icu_data[M32700UT_NUM_PLD_IRQ];
static void disable_m32700ut_pld_irq(unsigned int irq)
{
unsigned long port, data;
unsigned int pldirq;
pldirq = irq2pldirq(irq);
port = pldirq2port(pldirq);
data = pld_icu_data[pldirq].icucr|PLD_ICUCR_ILEVEL7;
outw(data, port);
}
static void enable_m32700ut_pld_irq(unsigned int irq)
{
unsigned long port, data;
unsigned int pldirq;
pldirq = irq2pldirq(irq);
port = pldirq2port(pldirq);
data = pld_icu_data[pldirq].icucr|PLD_ICUCR_IEN|PLD_ICUCR_ILEVEL6;
outw(data, port);
}
static void mask_m32700ut_pld(struct irq_data *data)
{
disable_m32700ut_pld_irq(data->irq);
}
static void unmask_m32700ut_pld(struct irq_data *data)
{
enable_m32700ut_pld_irq(data->irq);
enable_mappi_irq(M32R_IRQ_INT1);
}
static void shutdown_m32700ut_pld(struct irq_data *data)
{
unsigned long port;
unsigned int pldirq;
pldirq = irq2pldirq(data->irq);
port = pldirq2port(pldirq);
outw(PLD_ICUCR_ILEVEL7, port);
}
static struct irq_chip m32700ut_pld_irq_type =
{
.name = "USRV-PLD-IRQ",
.irq_shutdown = shutdown_m32700ut_pld,
.irq_mask = mask_m32700ut_pld,
.irq_unmask = unmask_m32700ut_pld,
};
void __init init_IRQ(void)
{
static int once = 0;
int i;
if (once)
return;
once = 1;
/* MFT2 : system timer */
irq_set_chip_and_handler(M32R_IRQ_MFT2, &mappi_irq_type,
handle_level_irq);
icu_data[M32R_IRQ_MFT2].icucr = M32R_ICUCR_IEN;
disable_mappi_irq(M32R_IRQ_MFT2);
#if defined(CONFIG_SERIAL_M32R_SIO)
/* SIO0_R : uart receive data */
irq_set_chip_and_handler(M32R_IRQ_SIO0_R, &mappi_irq_type,
handle_level_irq);
icu_data[M32R_IRQ_SIO0_R].icucr = 0;
disable_mappi_irq(M32R_IRQ_SIO0_R);
/* SIO0_S : uart send data */
irq_set_chip_and_handler(M32R_IRQ_SIO0_S, &mappi_irq_type,
handle_level_irq);
icu_data[M32R_IRQ_SIO0_S].icucr = 0;
disable_mappi_irq(M32R_IRQ_SIO0_S);
/* SIO1_R : uart receive data */
irq_set_chip_and_handler(M32R_IRQ_SIO1_R, &mappi_irq_type,
handle_level_irq);
icu_data[M32R_IRQ_SIO1_R].icucr = 0;
disable_mappi_irq(M32R_IRQ_SIO1_R);
/* SIO1_S : uart send data */
irq_set_chip_and_handler(M32R_IRQ_SIO1_S, &mappi_irq_type,
handle_level_irq);
icu_data[M32R_IRQ_SIO1_S].icucr = 0;
disable_mappi_irq(M32R_IRQ_SIO1_S);
#endif /* CONFIG_SERIAL_M32R_SIO */
/* INT#67-#71: CFC#0 IREQ on PLD */
for (i = 0 ; i < CONFIG_M32R_CFC_NUM ; i++ ) {
irq_set_chip_and_handler(PLD_IRQ_CF0 + i,
&m32700ut_pld_irq_type,
handle_level_irq);
pld_icu_data[irq2pldirq(PLD_IRQ_CF0 + i)].icucr
= PLD_ICUCR_ISMOD01; /* 'L' level sense */
disable_m32700ut_pld_irq(PLD_IRQ_CF0 + i);
}
#if defined(CONFIG_SERIAL_8250) || defined(CONFIG_SERIAL_8250_MODULE)
/* INT#76: 16552D#0 IREQ on PLD */
irq_set_chip_and_handler(PLD_IRQ_UART0, &m32700ut_pld_irq_type,
handle_level_irq);
pld_icu_data[irq2pldirq(PLD_IRQ_UART0)].icucr
= PLD_ICUCR_ISMOD03; /* 'H' level sense */
disable_m32700ut_pld_irq(PLD_IRQ_UART0);
/* INT#77: 16552D#1 IREQ on PLD */
irq_set_chip_and_handler(PLD_IRQ_UART1, &m32700ut_pld_irq_type,
handle_level_irq);
pld_icu_data[irq2pldirq(PLD_IRQ_UART1)].icucr
= PLD_ICUCR_ISMOD03; /* 'H' level sense */
disable_m32700ut_pld_irq(PLD_IRQ_UART1);
#endif /* CONFIG_SERIAL_8250 || CONFIG_SERIAL_8250_MODULE */
#if defined(CONFIG_IDC_AK4524) || defined(CONFIG_IDC_AK4524_MODULE)
/* INT#80: AK4524 IREQ on PLD */
irq_set_chip_and_handler(PLD_IRQ_SNDINT, &m32700ut_pld_irq_type,
handle_level_irq);
pld_icu_data[irq2pldirq(PLD_IRQ_SNDINT)].icucr
= PLD_ICUCR_ISMOD01; /* 'L' level sense */
disable_m32700ut_pld_irq(PLD_IRQ_SNDINT);
#endif /* CONFIG_IDC_AK4524 || CONFIG_IDC_AK4524_MODULE */
/*
* INT1# is used for UART, MMC, CF Controller in FPGA.
* We enable it here.
*/
icu_data[M32R_IRQ_INT1].icucr = M32R_ICUCR_ISMOD11;
enable_mappi_irq(M32R_IRQ_INT1);
}
| gpl-2.0 |
tux-mind/tf201-kernel | arch/arm/plat-s3c24xx/spi-bus1-gpg5_6_7.c | 3242 | 1199 | /* linux/arch/arm/plat-s3c24xx/spi-bus1-gpg5_6_7.c
*
* Copyright (c) 2008 Simtec Electronics
* http://armlinux.simtec.co.uk/
* Ben Dooks <ben@simtec.co.uk>
*
* S3C24XX SPI - gpio configuration for bus 1 on gpg5,6,7
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License.
*/
#include <linux/kernel.h>
#include <linux/gpio.h>
#include <mach/spi.h>
#include <mach/regs-gpio.h>
void s3c24xx_spi_gpiocfg_bus1_gpg5_6_7(struct s3c2410_spi_info *spi,
int enable)
{
if (enable) {
s3c_gpio_cfgpin(S3C2410_GPG(7), S3C2410_GPG7_SPICLK1);
s3c_gpio_cfgpin(S3C2410_GPG(6), S3C2410_GPG6_SPIMOSI1);
s3c_gpio_cfgpin(S3C2410_GPG(5), S3C2410_GPG5_SPIMISO1);
s3c2410_gpio_pullup(S3C2410_GPG(5), 0);
s3c2410_gpio_pullup(S3C2410_GPG(6), 0);
} else {
s3c_gpio_cfgpin(S3C2410_GPG(7), S3C2410_GPIO_INPUT);
s3c_gpio_cfgpin(S3C2410_GPG(5), S3C2410_GPIO_INPUT);
s3c_gpio_setpull(S3C2410_GPG(5), S3C_GPIO_PULL_NONE);
s3c_gpio_setpull(S3C2410_GPG(6), S3C_GPIO_PULL_NONE);
s3c_gpio_setpull(S3C2410_GPG(7), S3C_GPIO_PULL_NONE);
}
}
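/*
 * Usage sketch (illustrative, not part of this file): a board file
 * would typically hand this helper to the bus 1 controller through the
 * gpio_setup hook of struct s3c2410_spi_info, e.g.:
 *
 * static struct s3c2410_spi_info board_spi1_info = {
 * .bus_num = 1,
 * .gpio_setup = s3c24xx_spi_gpiocfg_bus1_gpg5_6_7,
 * };
 */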
| gpl-2.0 |
AndroPlus-org/sony_sources-H2_2014 | drivers/gpu/drm/radeon/atombios_i2c.c | 4266 | 3959 | /*
* Copyright 2011 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Alex Deucher
*
*/
#include "drmP.h"
#include "radeon_drm.h"
#include "radeon.h"
#include "atom.h"
#define TARGET_HW_I2C_CLOCK 50
/* these are a limitation of ProcessI2cChannelTransaction not the hw */
#define ATOM_MAX_HW_I2C_WRITE 2
#define ATOM_MAX_HW_I2C_READ 255
static int radeon_process_i2c_ch(struct radeon_i2c_chan *chan,
u8 slave_addr, u8 flags,
u8 *buf, u8 num)
{
struct drm_device *dev = chan->dev;
struct radeon_device *rdev = dev->dev_private;
PROCESS_I2C_CHANNEL_TRANSACTION_PS_ALLOCATION args;
int index = GetIndexIntoMasterTable(COMMAND, ProcessI2cChannelTransaction);
unsigned char *base;
u16 out;
memset(&args, 0, sizeof(args));
base = (unsigned char *)rdev->mode_info.atom_context->scratch;
if (flags & HW_I2C_WRITE) {
if (num > ATOM_MAX_HW_I2C_WRITE) {
DRM_ERROR("hw i2c: tried to write too many bytes (%d vs 2)\n", num);
return -EINVAL;
}
memcpy(&out, buf, num);
args.lpI2CDataOut = cpu_to_le16(out);
} else {
if (num > ATOM_MAX_HW_I2C_READ) {
DRM_ERROR("hw i2c: tried to read too many bytes (%d vs 255)\n", num);
return -EINVAL;
}
}
args.ucI2CSpeed = TARGET_HW_I2C_CLOCK;
args.ucRegIndex = 0;
args.ucTransBytes = num;
args.ucSlaveAddr = slave_addr << 1;
args.ucLineNumber = chan->rec.i2c_id;
atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
/* error */
if (args.ucStatus != HW_ASSISTED_I2C_STATUS_SUCCESS) {
DRM_DEBUG_KMS("hw_i2c error\n");
return -EIO;
}
if (!(flags & HW_I2C_WRITE))
memcpy(buf, base, num);
return 0;
}
int radeon_atom_hw_i2c_xfer(struct i2c_adapter *i2c_adap,
struct i2c_msg *msgs, int num)
{
struct radeon_i2c_chan *i2c = i2c_get_adapdata(i2c_adap);
struct i2c_msg *p;
int i, remaining, current_count, buffer_offset, max_bytes, ret;
u8 buf = 0, flags;
/* check for bus probe */
p = &msgs[0];
if ((num == 1) && (p->len == 0)) {
ret = radeon_process_i2c_ch(i2c,
p->addr, HW_I2C_WRITE,
&buf, 1);
if (ret)
return ret;
else
return num;
}
for (i = 0; i < num; i++) {
p = &msgs[i];
remaining = p->len;
buffer_offset = 0;
/* max_bytes are a limitation of ProcessI2cChannelTransaction not the hw */
if (p->flags & I2C_M_RD) {
max_bytes = ATOM_MAX_HW_I2C_READ;
flags = HW_I2C_READ;
} else {
max_bytes = ATOM_MAX_HW_I2C_WRITE;
flags = HW_I2C_WRITE;
}
while (remaining) {
if (remaining > max_bytes)
current_count = max_bytes;
else
current_count = remaining;
ret = radeon_process_i2c_ch(i2c,
p->addr, flags,
&p->buf[buffer_offset], current_count);
if (ret)
return ret;
remaining -= current_count;
buffer_offset += current_count;
}
}
return num;
}
u32 radeon_atom_hw_i2c_func(struct i2c_adapter *adap)
{
return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
}
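/*
 * Usage sketch (illustrative; the actual hookup lives elsewhere in the
 * driver): an i2c_algorithm for the ATOM-assisted bus would point at
 * the two handlers above, e.g.:
 *
 * static const struct i2c_algorithm radeon_atom_algo = {
 * .master_xfer = radeon_atom_hw_i2c_xfer,
 * .functionality = radeon_atom_hw_i2c_func,
 * };
 */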
| gpl-2.0 |
S3neos/android_kernel_samsung_s3ve3g | drivers/net/wireless/ath/main.c | 4778 | 2386 | /*
* Copyright (c) 2009 Atheros Communications Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include "ath.h"
MODULE_AUTHOR("Atheros Communications");
MODULE_DESCRIPTION("Shared library for Atheros wireless LAN cards.");
MODULE_LICENSE("Dual BSD/GPL");
struct sk_buff *ath_rxbuf_alloc(struct ath_common *common,
u32 len,
gfp_t gfp_mask)
{
struct sk_buff *skb;
u32 off;
/*
* Cache-line-align. This is important (for the
* 5210 at least) as not doing so causes bogus data
* in rx'd frames.
*/
/* Note: the kernel can allocate more than what we ask it to
* give us. We really only need 4 KB, as that is the most this
* hardware supports, and in fact we need at least 3849 bytes,
* as that is the max AMSDU size this hardware supports.
* Unfortunately this means we may get 8 KB here from the
* kernel... and that is actually what is observed on some
* systems :( */
skb = __dev_alloc_skb(len + common->cachelsz - 1, gfp_mask);
if (skb != NULL) {
off = ((unsigned long) skb->data) % common->cachelsz;
if (off != 0)
skb_reserve(skb, common->cachelsz - off);
} else {
printk(KERN_ERR "skbuff alloc of size %u failed\n", len);
return NULL;
}
return skb;
}
EXPORT_SYMBOL(ath_rxbuf_alloc);
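/*
 * Usage sketch (illustrative; bufsize stands for a driver-chosen RX
 * buffer size): a receive-ring refill path would do something like
 *
 * struct sk_buff *skb = ath_rxbuf_alloc(common, bufsize, GFP_ATOMIC);
 * if (!skb)
 * return -ENOMEM;
 *
 * and rely on skb->data then starting on a cache-line boundary.
 */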
void ath_printk(const char *level, const struct ath_common* common,
const char *fmt, ...)
{
struct va_format vaf;
va_list args;
va_start(args, fmt);
vaf.fmt = fmt;
vaf.va = &args;
if (common && common->hw && common->hw->wiphy)
printk("%sath: %s: %pV",
level, wiphy_name(common->hw->wiphy), &vaf);
else
printk("%sath: %pV", level, &vaf);
va_end(args);
}
EXPORT_SYMBOL(ath_printk);
| gpl-2.0 |
guoyingbo/Monarudo_GPE_M7_port | drivers/staging/sep/sep_main.c | 4778 | 126224 | /*
*
* sep_main.c - Security Processor Driver main group of functions
*
* Copyright(c) 2009-2011 Intel Corporation. All rights reserved.
* Contributions(c) 2009-2011 Discretix. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
* Software Foundation; version 2 of the License.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program; if not, write to the Free Software Foundation, Inc., 59
* Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*
* CONTACTS:
*
* Mark Allyn mark.a.allyn@intel.com
* Jayant Mangalampalli jayant.mangalampalli@intel.com
*
* CHANGES:
*
* 2009.06.26 Initial publish
* 2010.09.14 Upgrade to Medfield
* 2011.01.21 Move to sep_main.c to allow for sep_crypto.c
* 2011.02.22 Enable kernel crypto operation
*
* Please note that this driver is based on information in the Discretix
* CryptoCell 5.2 Driver Implementation Guide; the Discretix CryptoCell 5.2
* Integration Intel Medfield appendix; the Discretix CryptoCell 5.2
* Linux Driver Integration Guide; and the Discretix CryptoCell 5.2 System
* Overview and Integration Guide.
*/
/* #define DEBUG */
/* #define SEP_PERF_DEBUG */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/miscdevice.h>
#include <linux/fs.h>
#include <linux/cdev.h>
#include <linux/kdev_t.h>
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/poll.h>
#include <linux/wait.h>
#include <linux/pci.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/ioctl.h>
#include <asm/current.h>
#include <linux/ioport.h>
#include <linux/io.h>
#include <linux/interrupt.h>
#include <linux/pagemap.h>
#include <asm/cacheflush.h>
#include <linux/sched.h>
#include <linux/delay.h>
#include <linux/jiffies.h>
#include <linux/async.h>
#include <linux/crypto.h>
#include <crypto/internal/hash.h>
#include <crypto/scatterwalk.h>
#include <crypto/sha.h>
#include <crypto/md5.h>
#include <crypto/aes.h>
#include <crypto/des.h>
#include <crypto/hash.h>
#include "sep_driver_hw_defs.h"
#include "sep_driver_config.h"
#include "sep_driver_api.h"
#include "sep_dev.h"
#include "sep_crypto.h"
#define CREATE_TRACE_POINTS
#include "sep_trace_events.h"
/*
* Let's not spend cycles iterating over message
* area contents if debugging not enabled
*/
#ifdef DEBUG
#define sep_dump_message(sep) _sep_dump_message(sep)
#else
#define sep_dump_message(sep)
#endif
/**
* Currently, there is only one SEP device per platform;
* in the event that future platforms have more than one SEP
* device, this will become a linked list
*/
struct sep_device *sep_dev;
/**
* sep_queue_status_remove - Removes transaction from status queue
* @sep: SEP device
* @sep_queue_info: pointer to status queue
*
* This function removes information about a transaction from the queue.
*/
void sep_queue_status_remove(struct sep_device *sep,
struct sep_queue_info **queue_elem)
{
unsigned long lck_flags;
dev_dbg(&sep->pdev->dev, "[PID%d] sep_queue_status_remove\n",
current->pid);
if (!queue_elem || !(*queue_elem)) {
dev_dbg(&sep->pdev->dev, "PID%d %s null\n",
current->pid, __func__);
return;
}
spin_lock_irqsave(&sep->sep_queue_lock, lck_flags);
list_del(&(*queue_elem)->list);
sep->sep_queue_num--;
spin_unlock_irqrestore(&sep->sep_queue_lock, lck_flags);
kfree(*queue_elem);
*queue_elem = NULL;
dev_dbg(&sep->pdev->dev, "[PID%d] sep_queue_status_remove return\n",
current->pid);
return;
}
/**
* sep_queue_status_add - Adds transaction to status queue
* @sep: SEP device
* @opcode: transaction opcode
* @size: input data size
* @pid: pid of current process
* @name: current process name
* @name_len: length of name (current process)
*
* This function adds information about a started transaction to the status
* queue.
*/
struct sep_queue_info *sep_queue_status_add(
struct sep_device *sep,
u32 opcode,
u32 size,
u32 pid,
u8 *name, size_t name_len)
{
unsigned long lck_flags;
struct sep_queue_info *my_elem = NULL;
my_elem = kzalloc(sizeof(struct sep_queue_info), GFP_KERNEL);
if (!my_elem)
return NULL;
dev_dbg(&sep->pdev->dev, "[PID%d] kzalloc ok\n", current->pid);
my_elem->data.opcode = opcode;
my_elem->data.size = size;
my_elem->data.pid = pid;
if (name_len > TASK_COMM_LEN)
name_len = TASK_COMM_LEN;
memcpy(&my_elem->data.name, name, name_len);
spin_lock_irqsave(&sep->sep_queue_lock, lck_flags);
list_add_tail(&my_elem->list, &sep->sep_queue_status);
sep->sep_queue_num++;
spin_unlock_irqrestore(&sep->sep_queue_lock, lck_flags);
return my_elem;
}
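/*
 * Usage sketch (illustrative): a transaction is typically bracketed by
 * an add/remove pair, e.g.:
 *
 * my_queue_elem = sep_queue_status_add(sep, opcode, size,
 * current->pid, (u8 *)current->comm, strlen(current->comm));
 * ...
 * sep_queue_status_remove(sep, &my_queue_elem);
 */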
/**
* sep_allocate_dmatables_region - Allocates buf for the MLLI/DMA tables
* @sep: SEP device
* @dmatables_region: Destination pointer for the buffer
* @dma_ctx: DMA context for the transaction
* @table_count: Number of MLLI/DMA tables to create
* The buffer created will not work as-is for DMA operations,
* it needs to be copied over to the appropriate place in the
* shared area.
*/
static int sep_allocate_dmatables_region(struct sep_device *sep,
void **dmatables_region,
struct sep_dma_context *dma_ctx,
const u32 table_count)
{
const size_t new_len =
SYNCHRONIC_DMA_TABLES_AREA_SIZE_BYTES - 1;
void *tmp_region = NULL;
dev_dbg(&sep->pdev->dev, "[PID%d] dma_ctx = 0x%p\n",
current->pid, dma_ctx);
dev_dbg(&sep->pdev->dev, "[PID%d] dmatables_region = 0x%p\n",
current->pid, dmatables_region);
if (!dma_ctx || !dmatables_region) {
dev_warn(&sep->pdev->dev,
"[PID%d] dma context/region uninitialized\n",
current->pid);
return -EINVAL;
}
dev_dbg(&sep->pdev->dev, "[PID%d] newlen = 0x%08zX\n",
current->pid, new_len);
dev_dbg(&sep->pdev->dev, "[PID%d] oldlen = 0x%08X\n", current->pid,
dma_ctx->dmatables_len);
tmp_region = kzalloc(new_len + dma_ctx->dmatables_len, GFP_KERNEL);
if (!tmp_region) {
dev_warn(&sep->pdev->dev,
"[PID%d] no mem for dma tables region\n",
current->pid);
return -ENOMEM;
}
/* Were there any previous tables that need to be preserved? */
if (*dmatables_region) {
memcpy(tmp_region, *dmatables_region, dma_ctx->dmatables_len);
kfree(*dmatables_region);
*dmatables_region = NULL;
}
*dmatables_region = tmp_region;
dma_ctx->dmatables_len += new_len;
return 0;
}
/**
* sep_wait_transaction - Used for synchronizing transactions
* @sep: SEP device
*/
int sep_wait_transaction(struct sep_device *sep)
{
int error = 0;
DEFINE_WAIT(wait);
if (0 == test_and_set_bit(SEP_TRANSACTION_STARTED_LOCK_BIT,
&sep->in_use_flags)) {
dev_dbg(&sep->pdev->dev,
"[PID%d] no transactions, returning\n",
current->pid);
goto end_function_setpid;
}
/*
* Looping is needed even for exclusive waitq entries
* due to process wakeup latencies: a previous process
* might have already created another transaction.
*/
for (;;) {
/*
* Exclusive waitq entry, so that only one process is
* woken up from the queue at a time.
*/
prepare_to_wait_exclusive(&sep->event_transactions,
&wait,
TASK_INTERRUPTIBLE);
if (0 == test_and_set_bit(SEP_TRANSACTION_STARTED_LOCK_BIT,
&sep->in_use_flags)) {
dev_dbg(&sep->pdev->dev,
"[PID%d] no transactions, breaking\n",
current->pid);
break;
}
dev_dbg(&sep->pdev->dev,
"[PID%d] transactions ongoing, sleeping\n",
current->pid);
schedule();
dev_dbg(&sep->pdev->dev, "[PID%d] woken up\n", current->pid);
if (signal_pending(current)) {
dev_dbg(&sep->pdev->dev, "[PID%d] received signal\n",
current->pid);
error = -EINTR;
goto end_function;
}
}
end_function_setpid:
/*
* The pid_doing_transaction indicates that this process
* now owns the facilities to perform a transaction with
* the SEP. While this process is performing a transaction,
* no other process that has the SEP device open can perform
* any transactions. This method allows more than one process
* to have the device open at any given time, which provides
* finer granularity for device utilization by multiple
* processes.
*/
/* Only one process is able to progress here at a time */
sep->pid_doing_transaction = current->pid;
end_function:
finish_wait(&sep->event_transactions, &wait);
return error;
}
/**
* sep_check_transaction_owner - Checks if current process owns transaction
* @sep: SEP device
*/
static inline int sep_check_transaction_owner(struct sep_device *sep)
{
dev_dbg(&sep->pdev->dev, "[PID%d] transaction pid = %d\n",
current->pid,
sep->pid_doing_transaction);
if ((sep->pid_doing_transaction == 0) ||
(current->pid != sep->pid_doing_transaction)) {
return -EACCES;
}
/* We own the transaction */
return 0;
}
#ifdef DEBUG
/**
* sep_dump_message - dump the message that is pending
* @sep: SEP device
* This will only print the dump if DEBUG is set; it does
* follow the kernel debug print enabling
*/
static void _sep_dump_message(struct sep_device *sep)
{
int count;
u32 *p = sep->shared_addr;
for (count = 0; count < 10 * 4; count += 4)
dev_dbg(&sep->pdev->dev,
"[PID%d] Word %d of the message is %x\n",
current->pid, count/4, *p++);
}
#endif
/**
* sep_map_and_alloc_shared_area - allocate shared block
* @sep: security processor
*
* Allocates a coherent block of sep->shared_size bytes.
*/
static int sep_map_and_alloc_shared_area(struct sep_device *sep)
{
sep->shared_addr = dma_alloc_coherent(&sep->pdev->dev,
sep->shared_size,
&sep->shared_bus, GFP_KERNEL);
if (!sep->shared_addr) {
dev_dbg(&sep->pdev->dev,
"[PID%d] shared memory dma_alloc_coherent failed\n",
current->pid);
return -ENOMEM;
}
dev_dbg(&sep->pdev->dev,
"[PID%d] shared_addr %zx bytes @%p (bus %llx)\n",
current->pid,
sep->shared_size, sep->shared_addr,
(unsigned long long)sep->shared_bus);
return 0;
}
/**
* sep_unmap_and_free_shared_area - free shared block
* @sep: security processor
*/
static void sep_unmap_and_free_shared_area(struct sep_device *sep)
{
dma_free_coherent(&sep->pdev->dev, sep->shared_size,
sep->shared_addr, sep->shared_bus);
}
#ifdef DEBUG
/**
* sep_shared_bus_to_virt - convert bus/virt addresses
* @sep: pointer to struct sep_device
* @bus_address: address to convert
*
* Returns virtual address inside the shared area according
* to the bus address.
*/
static void *sep_shared_bus_to_virt(struct sep_device *sep,
dma_addr_t bus_address)
{
return sep->shared_addr + (bus_address - sep->shared_bus);
}
#endif
/**
* sep_open - device open method
* @inode: inode of SEP device
* @filp: file handle to SEP device
*
* Open method for the SEP device. Called when userspace opens
* the SEP device node.
*
* Returns zero on success otherwise an error code.
*/
static int sep_open(struct inode *inode, struct file *filp)
{
struct sep_device *sep;
struct sep_private_data *priv;
dev_dbg(&sep_dev->pdev->dev, "[PID%d] open\n", current->pid);
if (filp->f_flags & O_NONBLOCK)
return -ENOTSUPP;
/*
* Get the SEP device structure and use it for the
* private_data field in filp for other methods
*/
priv = kzalloc(sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
sep = sep_dev;
priv->device = sep;
filp->private_data = priv;
dev_dbg(&sep_dev->pdev->dev, "[PID%d] priv is 0x%p\n",
current->pid, priv);
/* Anyone can open; locking takes place at transaction level */
return 0;
}
/**
* sep_free_dma_table_data_handler - free DMA table
* @sep: pointer to struct sep_device
* @dma_ctx: dma context
*
* Handles the request to free DMA table for synchronic actions
*/
int sep_free_dma_table_data_handler(struct sep_device *sep,
struct sep_dma_context **dma_ctx)
{
int count;
int dcb_counter;
/* Pointer to the current dma_resource struct */
struct sep_dma_resource *dma;
dev_dbg(&sep->pdev->dev,
"[PID%d] sep_free_dma_table_data_handler\n",
current->pid);
if (!dma_ctx || !(*dma_ctx)) {
/* No context or context already freed */
dev_dbg(&sep->pdev->dev,
"[PID%d] no DMA context or context already freed\n",
current->pid);
return 0;
}
dev_dbg(&sep->pdev->dev, "[PID%d] (*dma_ctx)->nr_dcb_creat 0x%x\n",
current->pid,
(*dma_ctx)->nr_dcb_creat);
for (dcb_counter = 0;
dcb_counter < (*dma_ctx)->nr_dcb_creat; dcb_counter++) {
dma = &(*dma_ctx)->dma_res_arr[dcb_counter];
/* Unmap and free input map array */
if (dma->in_map_array) {
for (count = 0; count < dma->in_num_pages; count++) {
dma_unmap_page(&sep->pdev->dev,
dma->in_map_array[count].dma_addr,
dma->in_map_array[count].size,
DMA_TO_DEVICE);
}
kfree(dma->in_map_array);
}
/**
* Output is handled differently. If
* this was a secure dma into restricted memory,
* then we skip this step altogether as restricted
* memory is not available to the o/s at all.
*/
if (((*dma_ctx)->secure_dma == false) &&
(dma->out_map_array)) {
for (count = 0; count < dma->out_num_pages; count++) {
dma_unmap_page(&sep->pdev->dev,
dma->out_map_array[count].dma_addr,
dma->out_map_array[count].size,
DMA_FROM_DEVICE);
}
kfree(dma->out_map_array);
}
/* Free page cache for output */
if (dma->in_page_array) {
for (count = 0; count < dma->in_num_pages; count++) {
flush_dcache_page(dma->in_page_array[count]);
page_cache_release(dma->in_page_array[count]);
}
kfree(dma->in_page_array);
}
/* Again, we do this only for non secure dma */
if (((*dma_ctx)->secure_dma == false) &&
(dma->out_page_array)) {
for (count = 0; count < dma->out_num_pages; count++) {
if (!PageReserved(dma->out_page_array[count]))
SetPageDirty(dma->
out_page_array[count]);
flush_dcache_page(dma->out_page_array[count]);
page_cache_release(dma->out_page_array[count]);
}
kfree(dma->out_page_array);
}
/**
* Note that here we use in_map_num_entries because we
* don't have a page array; the page array is generated
* only in lock_user_pages, which is not called
* for kernel crypto, which is what the sg (scatter
* gather) representation is used for exclusively
*/
if (dma->src_sg) {
dma_unmap_sg(&sep->pdev->dev, dma->src_sg,
dma->in_map_num_entries, DMA_TO_DEVICE);
dma->src_sg = NULL;
}
if (dma->dst_sg) {
dma_unmap_sg(&sep->pdev->dev, dma->dst_sg,
dma->in_map_num_entries, DMA_FROM_DEVICE);
dma->dst_sg = NULL;
}
/* Reset all the values */
dma->in_page_array = NULL;
dma->out_page_array = NULL;
dma->in_num_pages = 0;
dma->out_num_pages = 0;
dma->in_map_array = NULL;
dma->out_map_array = NULL;
dma->in_map_num_entries = 0;
dma->out_map_num_entries = 0;
}
(*dma_ctx)->nr_dcb_creat = 0;
(*dma_ctx)->num_lli_tables_created = 0;
kfree(*dma_ctx);
*dma_ctx = NULL;
dev_dbg(&sep->pdev->dev,
"[PID%d] sep_free_dma_table_data_handler end\n",
current->pid);
return 0;
}
/**
* sep_end_transaction_handler - end transaction
* @sep: pointer to struct sep_device
* @dma_ctx: DMA context
* @call_status: Call status
* @my_queue_elem: pointer to the element in the status queue
*
* This API handles the end transaction request.
*/
static int sep_end_transaction_handler(struct sep_device *sep,
struct sep_dma_context **dma_ctx,
struct sep_call_status *call_status,
struct sep_queue_info **my_queue_elem)
{
dev_dbg(&sep->pdev->dev, "[PID%d] ending transaction\n", current->pid);
/*
* Extraneous transaction clearing would mess up PM
* device usage counters and SEP would get suspended
* just before we send a command to SEP in the next
* transaction
*/
if (sep_check_transaction_owner(sep)) {
dev_dbg(&sep->pdev->dev, "[PID%d] not transaction owner\n",
current->pid);
return 0;
}
/* Update queue status */
sep_queue_status_remove(sep, my_queue_elem);
/* Check that all the DMA resources were freed */
if (dma_ctx)
sep_free_dma_table_data_handler(sep, dma_ctx);
/* Reset call status for next transaction */
if (call_status)
call_status->status = 0;
/* Clear the message area to avoid next transaction reading
* sensitive results from previous transaction */
memset(sep->shared_addr, 0,
SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES);
/* start suspend delay */
#ifdef SEP_ENABLE_RUNTIME_PM
if (sep->in_use) {
sep->in_use = 0;
pm_runtime_mark_last_busy(&sep->pdev->dev);
pm_runtime_put_autosuspend(&sep->pdev->dev);
}
#endif
clear_bit(SEP_WORKING_LOCK_BIT, &sep->in_use_flags);
sep->pid_doing_transaction = 0;
/* Now it's safe for next process to proceed */
dev_dbg(&sep->pdev->dev, "[PID%d] waking up next transaction\n",
current->pid);
clear_bit(SEP_TRANSACTION_STARTED_LOCK_BIT, &sep->in_use_flags);
wake_up(&sep->event_transactions);
return 0;
}
/**
* sep_release - close a SEP device
* @inode: inode of SEP device
* @filp: file handle being closed
*
* Called on the final close of a SEP device.
*/
static int sep_release(struct inode *inode, struct file *filp)
{
struct sep_private_data * const private_data = filp->private_data;
struct sep_call_status *call_status = &private_data->call_status;
struct sep_device *sep = private_data->device;
struct sep_dma_context **dma_ctx = &private_data->dma_ctx;
struct sep_queue_info **my_queue_elem = &private_data->my_queue_elem;
dev_dbg(&sep->pdev->dev, "[PID%d] release\n", current->pid);
sep_end_transaction_handler(sep, dma_ctx, call_status,
my_queue_elem);
kfree(filp->private_data);
return 0;
}
/**
* sep_mmap - maps the shared area to user space
* @filp: pointer to struct file
* @vma: pointer to vm_area_struct
*
* Called on an mmap of our space via the normal SEP device
*/
static int sep_mmap(struct file *filp, struct vm_area_struct *vma)
{
struct sep_private_data * const private_data = filp->private_data;
struct sep_call_status *call_status = &private_data->call_status;
struct sep_device *sep = private_data->device;
struct sep_queue_info **my_queue_elem = &private_data->my_queue_elem;
dma_addr_t bus_addr;
unsigned long error = 0;
dev_dbg(&sep->pdev->dev, "[PID%d] sep_mmap\n", current->pid);
/* Set the transaction busy (own the device) */
/*
* The problem for multithreaded applications is that here we're
* possibly going to sleep while holding a write lock on
* current->mm->mmap_sem, which will cause deadlock for ongoing
* transaction trying to create DMA tables
*/
error = sep_wait_transaction(sep);
if (error)
/* Interrupted by signal, don't clear transaction */
goto end_function;
/* Clear the message area to avoid next transaction reading
* sensitive results from previous transaction */
memset(sep->shared_addr, 0,
SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES);
/*
* Check that the size of the mapped range does not exceed the size of the
* message shared area
*/
if ((vma->vm_end - vma->vm_start) > SEP_DRIVER_MMMAP_AREA_SIZE) {
error = -EINVAL;
goto end_function_with_error;
}
dev_dbg(&sep->pdev->dev, "[PID%d] shared_addr is %p\n",
current->pid, sep->shared_addr);
/* Get bus address */
bus_addr = sep->shared_bus;
if (remap_pfn_range(vma, vma->vm_start, bus_addr >> PAGE_SHIFT,
vma->vm_end - vma->vm_start, vma->vm_page_prot)) {
dev_dbg(&sep->pdev->dev, "[PID%d] remap_page_range failed\n",
current->pid);
error = -EAGAIN;
goto end_function_with_error;
}
/* Update call status */
set_bit(SEP_LEGACY_MMAP_DONE_OFFSET, &call_status->status);
goto end_function;
end_function_with_error:
/* Clear our transaction */
sep_end_transaction_handler(sep, NULL, call_status,
my_queue_elem);
end_function:
return error;
}
/**
* sep_poll - poll handler
* @filp: pointer to struct file
* @wait: pointer to poll_table
*
* Called by the OS when the kernel is asked to do a poll on
* a SEP file handle.
*/
static unsigned int sep_poll(struct file *filp, poll_table *wait)
{
struct sep_private_data * const private_data = filp->private_data;
struct sep_call_status *call_status = &private_data->call_status;
struct sep_device *sep = private_data->device;
u32 mask = 0;
u32 retval = 0;
u32 retval2 = 0;
unsigned long lock_irq_flag;
/* Am I the process that owns the transaction? */
if (sep_check_transaction_owner(sep)) {
dev_dbg(&sep->pdev->dev, "[PID%d] poll pid not owner\n",
current->pid);
mask = POLLERR;
goto end_function;
}
/* Check if send command or send_reply were activated previously */
if (0 == test_bit(SEP_LEGACY_SENDMSG_DONE_OFFSET,
&call_status->status)) {
dev_warn(&sep->pdev->dev, "[PID%d] sendmsg not called\n",
current->pid);
mask = POLLERR;
goto end_function;
}
/* Add the event to the polling wait table */
dev_dbg(&sep->pdev->dev, "[PID%d] poll: calling wait sep_event\n",
current->pid);
poll_wait(filp, &sep->event_interrupt, wait);
dev_dbg(&sep->pdev->dev,
"[PID%d] poll: send_ct is %lx reply ct is %lx\n",
current->pid, sep->send_ct, sep->reply_ct);
/* Check if an error occurred during poll */
retval2 = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR3_REG_ADDR);
if ((retval2 != 0x0) && (retval2 != 0x8)) {
dev_dbg(&sep->pdev->dev, "[PID%d] poll; poll error %x\n",
current->pid, retval2);
mask |= POLLERR;
goto end_function;
}
spin_lock_irqsave(&sep->snd_rply_lck, lock_irq_flag);
if (sep->send_ct == sep->reply_ct) {
spin_unlock_irqrestore(&sep->snd_rply_lck, lock_irq_flag);
retval = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR2_REG_ADDR);
dev_dbg(&sep->pdev->dev,
"[PID%d] poll: data ready check (GPR2) %x\n",
current->pid, retval);
/* Check if printf request */
if ((retval >> 30) & 0x1) {
dev_dbg(&sep->pdev->dev,
"[PID%d] poll: SEP printf request\n",
current->pid);
goto end_function;
}
/* Check if this is a SEP reply or a request */
if (retval >> 31) {
dev_dbg(&sep->pdev->dev,
"[PID%d] poll: SEP request\n",
current->pid);
} else {
dev_dbg(&sep->pdev->dev,
"[PID%d] poll: normal return\n",
current->pid);
sep_dump_message(sep);
dev_dbg(&sep->pdev->dev,
"[PID%d] poll; SEP reply POLLIN|POLLRDNORM\n",
current->pid);
mask |= POLLIN | POLLRDNORM;
}
set_bit(SEP_LEGACY_POLL_DONE_OFFSET, &call_status->status);
} else {
spin_unlock_irqrestore(&sep->snd_rply_lck, lock_irq_flag);
dev_dbg(&sep->pdev->dev,
"[PID%d] poll; no reply; returning mask of 0\n",
current->pid);
mask = 0;
}
end_function:
return mask;
}
/**
* sep_time_address - address in SEP memory of time
* @sep: SEP device we want the address from
*
* Return the address of the two dwords in memory used for time
* setting.
*/
static u32 *sep_time_address(struct sep_device *sep)
{
return sep->shared_addr +
SEP_DRIVER_SYSTEM_TIME_MEMORY_OFFSET_IN_BYTES;
}
/**
* sep_set_time - set the SEP time
* @sep: the SEP we are setting the time for
*
* Calculates time and sets it at the predefined address.
* Called with the SEP mutex held.
*/
static unsigned long sep_set_time(struct sep_device *sep)
{
struct timeval time;
u32 *time_addr; /* Address of time as seen by the kernel */
do_gettimeofday(&time);
/* Set value in the SYSTEM MEMORY offset */
time_addr = sep_time_address(sep);
time_addr[0] = SEP_TIME_VAL_TOKEN;
time_addr[1] = time.tv_sec;
dev_dbg(&sep->pdev->dev, "[PID%d] time.tv_sec is %lu\n",
current->pid, time.tv_sec);
dev_dbg(&sep->pdev->dev, "[PID%d] time_addr is %p\n",
current->pid, time_addr);
dev_dbg(&sep->pdev->dev, "[PID%d] sep->shared_addr is %p\n",
current->pid, sep->shared_addr);
return time.tv_sec;
}
/**
* sep_send_command_handler - kick off a command
* @sep: SEP being signalled
*
* This function raises an interrupt to the SEP, signalling that it has a new
* command from the host
*
* Note that this function does fall under the ioctl lock
*/
int sep_send_command_handler(struct sep_device *sep)
{
unsigned long lock_irq_flag;
u32 *msg_pool;
int error = 0;
/* Basic sanity check; set msg pool to start of shared area */
msg_pool = (u32 *)sep->shared_addr;
msg_pool += 2;
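/*
 * Illustrative layout assumed by the checks below: word 2 of the
 * shared area holds SEP_START_MSG_TOKEN, word 3 the message size in
 * bytes and word 4 the opcode.
 */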
/* Look for start msg token */
if (*msg_pool != SEP_START_MSG_TOKEN) {
dev_warn(&sep->pdev->dev, "start message token not present\n");
error = -EPROTO;
goto end_function;
}
/* Do we have a reasonable size? */
msg_pool += 1;
if ((*msg_pool < 2) ||
(*msg_pool > SEP_DRIVER_MAX_MESSAGE_SIZE_IN_BYTES)) {
dev_warn(&sep->pdev->dev, "invalid message size\n");
error = -EPROTO;
goto end_function;
}
/* Does the command look reasonable? */
msg_pool += 1;
if (*msg_pool < 2) {
dev_warn(&sep->pdev->dev, "invalid message opcode\n");
error = -EPROTO;
goto end_function;
}
#if defined(CONFIG_PM_RUNTIME) && defined(SEP_ENABLE_RUNTIME_PM)
dev_dbg(&sep->pdev->dev, "[PID%d] before pm sync status 0x%X\n",
current->pid,
sep->pdev->dev.power.runtime_status);
sep->in_use = 1; /* device is about to be used */
pm_runtime_get_sync(&sep->pdev->dev);
#endif
if (test_and_set_bit(SEP_WORKING_LOCK_BIT, &sep->in_use_flags)) {
error = -EPROTO;
goto end_function;
}
sep->in_use = 1; /* device is about to be used */
sep_set_time(sep);
sep_dump_message(sep);
/* Update counter */
spin_lock_irqsave(&sep->snd_rply_lck, lock_irq_flag);
sep->send_ct++;
spin_unlock_irqrestore(&sep->snd_rply_lck, lock_irq_flag);
dev_dbg(&sep->pdev->dev,
"[PID%d] sep_send_command_handler send_ct %lx reply_ct %lx\n",
current->pid, sep->send_ct, sep->reply_ct);
/* Send interrupt to SEP */
sep_write_reg(sep, HW_HOST_HOST_SEP_GPR0_REG_ADDR, 0x2);
end_function:
return error;
}
/**
* sep_crypto_dma -
* @sep: pointer to struct sep_device
* @sg: pointer to struct scatterlist
* @direction:
* @dma_maps: pointer to place a pointer to array of dma maps
* This is filled in; anything previous there will be lost
* The structure for dma maps is sep_dma_map
* @returns number of dma maps on success; negative on error
*
* This creates the dma table from the scatterlist.
* It is used only for kernel crypto as it works with the scatterlist
* representation of data buffers
*
*/
static int sep_crypto_dma(
struct sep_device *sep,
struct scatterlist *sg,
struct sep_dma_map **dma_maps,
enum dma_data_direction direction)
{
struct scatterlist *temp_sg;
u32 count_segment;
u32 count_mapped;
struct sep_dma_map *sep_dma;
int ct1;
if (sg->length == 0)
return 0;
/* Count the segments */
temp_sg = sg;
count_segment = 0;
while (temp_sg) {
count_segment += 1;
temp_sg = scatterwalk_sg_next(temp_sg);
}
dev_dbg(&sep->pdev->dev,
"There are (hex) %x segments in sg\n", count_segment);
/* DMA map segments */
count_mapped = dma_map_sg(&sep->pdev->dev, sg,
count_segment, direction);
dev_dbg(&sep->pdev->dev,
"There are (hex) %x maps in sg\n", count_mapped);
if (count_mapped == 0) {
dev_dbg(&sep->pdev->dev, "Cannot dma_map_sg\n");
return -ENOMEM;
}
sep_dma = kmalloc(sizeof(struct sep_dma_map) *
count_mapped, GFP_ATOMIC);
if (sep_dma == NULL) {
dev_dbg(&sep->pdev->dev, "Cannot allocate dma_maps\n");
return -ENOMEM;
}
for_each_sg(sg, temp_sg, count_mapped, ct1) {
sep_dma[ct1].dma_addr = sg_dma_address(temp_sg);
sep_dma[ct1].size = sg_dma_len(temp_sg);
dev_dbg(&sep->pdev->dev, "(all hex) map %x dma %lx len %lx\n",
ct1, (unsigned long)sep_dma[ct1].dma_addr,
(unsigned long)sep_dma[ct1].size);
}
*dma_maps = sep_dma;
return count_mapped;
}
/**
* sep_crypto_lli -
* @sep: pointer to struct sep_device
* @sg: pointer to struct scatterlist
* @data_size: total data size
* @direction:
* @dma_maps: pointer to place a pointer to array of dma maps
* This is filled in; anything previous there will be lost
* The structure for dma maps is sep_dma_map
* @lli_maps: pointer to place a pointer to array of lli maps
* This is filled in; anything previous there will be lost
* The structure for dma maps is sep_dma_map
* @returns number of dma maps on success; negative on error
*
* This creates the LLI table from the scatterlist.
* It is only used for kernel crypto as it works exclusively
* with scatterlists (struct scatterlist) representation of
* data buffers
*/
static int sep_crypto_lli(
struct sep_device *sep,
struct scatterlist *sg,
struct sep_dma_map **maps,
struct sep_lli_entry **llis,
u32 data_size,
enum dma_data_direction direction)
{
int ct1;
struct sep_lli_entry *sep_lli;
struct sep_dma_map *sep_map;
int nbr_ents;
nbr_ents = sep_crypto_dma(sep, sg, maps, direction);
if (nbr_ents <= 0) {
dev_dbg(&sep->pdev->dev, "crypto_dma failed %x\n",
nbr_ents);
return nbr_ents;
}
sep_map = *maps;
sep_lli = kmalloc(sizeof(struct sep_lli_entry) * nbr_ents, GFP_ATOMIC);
if (sep_lli == NULL) {
dev_dbg(&sep->pdev->dev, "Cannot allocate lli_maps\n");
kfree(*maps);
*maps = NULL;
return -ENOMEM;
}
for (ct1 = 0; ct1 < nbr_ents; ct1 += 1) {
sep_lli[ct1].bus_address = (u32)sep_map[ct1].dma_addr;
/* Maximum for page is total data size */
if (sep_map[ct1].size > data_size)
sep_map[ct1].size = data_size;
sep_lli[ct1].block_size = (u32)sep_map[ct1].size;
}
*llis = sep_lli;
return nbr_ents;
}
/**
* sep_lock_kernel_pages - map kernel pages for DMA
* @sep: pointer to struct sep_device
* @kernel_virt_addr: address of data buffer in kernel
* @data_size: size of data
* @lli_array_ptr: lli array
* @in_out_flag: input into device or output from device
*
* This function locks all the physical pages of the kernel virtual buffer
* and constructs a basic lli array, where each entry holds the physical
* page address and the size that application data holds in this page.
* This function is used only during kernel crypto mod calls from within
* the kernel (when ioctl is not used)
*
* This is used only for kernel crypto. Kernel pages
* are handled differently as they are done via
* scatter gather lists (struct scatterlist)
*/
static int sep_lock_kernel_pages(struct sep_device *sep,
unsigned long kernel_virt_addr,
u32 data_size,
struct sep_lli_entry **lli_array_ptr,
int in_out_flag,
struct sep_dma_context *dma_ctx)
{
u32 num_pages;
struct scatterlist *sg;
/* Array of lli */
struct sep_lli_entry *lli_array;
/* Map array */
struct sep_dma_map *map_array;
enum dma_data_direction direction;
lli_array = NULL;
map_array = NULL;
if (in_out_flag == SEP_DRIVER_IN_FLAG) {
direction = DMA_TO_DEVICE;
sg = dma_ctx->src_sg;
} else {
direction = DMA_FROM_DEVICE;
sg = dma_ctx->dst_sg;
}
num_pages = sep_crypto_lli(sep, sg, &map_array, &lli_array,
data_size, direction);
if (num_pages <= 0) {
dev_dbg(&sep->pdev->dev, "sep_crypto_lli returned error %x\n",
num_pages);
return -ENOMEM;
}
/* Put mapped kernel sg into kernel resource array */
/* Set output params according to the in_out flag */
if (in_out_flag == SEP_DRIVER_IN_FLAG) {
*lli_array_ptr = lli_array;
dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_num_pages =
num_pages;
dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_page_array =
NULL;
dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_map_array =
map_array;
dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_map_num_entries =
num_pages;
dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].src_sg =
dma_ctx->src_sg;
} else {
*lli_array_ptr = lli_array;
dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].out_num_pages =
num_pages;
dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].out_page_array =
NULL;
dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].out_map_array =
map_array;
dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].
out_map_num_entries = num_pages;
dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].dst_sg =
dma_ctx->dst_sg;
}
return 0;
}
/**
* sep_lock_user_pages - lock and map user pages for DMA
* @sep: pointer to struct sep_device
* @app_virt_addr: user memory data buffer
* @data_size: size of data buffer
* @lli_array_ptr: lli array
* @in_out_flag: input or output to device
*
* This function locks all the physical pages of the application
* virtual buffer and constructs a basic lli array, where each entry
* holds the physical page address and the size that application
* data holds in these physical pages
*/
static int sep_lock_user_pages(struct sep_device *sep,
u32 app_virt_addr,
u32 data_size,
struct sep_lli_entry **lli_array_ptr,
int in_out_flag,
struct sep_dma_context *dma_ctx)
{
int error = 0;
u32 count;
int result;
/* The page of the end address of the user space buffer */
u32 end_page;
/* The page of the start address of the user space buffer */
u32 start_page;
/* The range in pages */
u32 num_pages;
/* Array of pointers to page */
struct page **page_array;
/* Array of lli */
struct sep_lli_entry *lli_array;
/* Map array */
struct sep_dma_map *map_array;
/* Set start and end pages and num pages */
end_page = (app_virt_addr + data_size - 1) >> PAGE_SHIFT;
start_page = app_virt_addr >> PAGE_SHIFT;
num_pages = end_page - start_page + 1;
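/*
 * Worked example (illustrative, 4 KB pages): a 6000-byte buffer at
 * 0x1000f00 ends at 0x100266f, so start_page = 0x1000,
 * end_page = 0x1002 and num_pages = 3.
 */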
dev_dbg(&sep->pdev->dev,
"[PID%d] lock user pages app_virt_addr is %x\n",
current->pid, app_virt_addr);
dev_dbg(&sep->pdev->dev, "[PID%d] data_size is (hex) %x\n",
current->pid, data_size);
dev_dbg(&sep->pdev->dev, "[PID%d] start_page is (hex) %x\n",
current->pid, start_page);
dev_dbg(&sep->pdev->dev, "[PID%d] end_page is (hex) %x\n",
current->pid, end_page);
dev_dbg(&sep->pdev->dev, "[PID%d] num_pages is (hex) %x\n",
current->pid, num_pages);
/* Allocate array of pages structure pointers */
page_array = kmalloc(sizeof(struct page *) * num_pages, GFP_ATOMIC);
if (!page_array) {
error = -ENOMEM;
goto end_function;
}
map_array = kmalloc(sizeof(struct sep_dma_map) * num_pages, GFP_ATOMIC);
if (!map_array) {
dev_warn(&sep->pdev->dev,
"[PID%d] kmalloc for map_array failed\n",
current->pid);
error = -ENOMEM;
goto end_function_with_error1;
}
lli_array = kmalloc(sizeof(struct sep_lli_entry) * num_pages,
GFP_ATOMIC);
if (!lli_array) {
dev_warn(&sep->pdev->dev,
"[PID%d] kmalloc for lli_array failed\n",
current->pid);
error = -ENOMEM;
goto end_function_with_error2;
}
/* Convert the application virtual address into a set of physical pages */
down_read(¤t->mm->mmap_sem);
result = get_user_pages(current, current->mm, app_virt_addr,
num_pages,
((in_out_flag == SEP_DRIVER_IN_FLAG) ? 0 : 1),
0, page_array, NULL);
up_read(¤t->mm->mmap_sem);
/* Check the number of pages locked - if not all then exit with error */
if (result != num_pages) {
dev_warn(&sep->pdev->dev,
"[PID%d] not all pages locked by get_user_pages, "
"result 0x%X, num_pages 0x%X\n",
current->pid, result, num_pages);
error = -ENOMEM;
goto end_function_with_error3;
}
dev_dbg(&sep->pdev->dev, "[PID%d] get_user_pages succeeded\n",
current->pid);
/*
* Fill the array using page array data and
* map the pages - this action will also flush the cache as needed
*/
for (count = 0; count < num_pages; count++) {
/* Fill the map array */
map_array[count].dma_addr =
dma_map_page(&sep->pdev->dev, page_array[count],
0, PAGE_SIZE, DMA_BIDIRECTIONAL);
map_array[count].size = PAGE_SIZE;
/* Fill the lli array entry */
lli_array[count].bus_address = (u32)map_array[count].dma_addr;
lli_array[count].block_size = PAGE_SIZE;
dev_dbg(&sep->pdev->dev,
"[PID%d] lli_array[%x].bus_address is %08lx, "
"lli_array[%x].block_size is (hex) %x\n", current->pid,
count, (unsigned long)lli_array[count].bus_address,
count, lli_array[count].block_size);
}
/* Check the offset for the first page */
lli_array[0].bus_address =
lli_array[0].bus_address + (app_virt_addr & (~PAGE_MASK));
/* Check that not all the data is in the first page only */
if ((PAGE_SIZE - (app_virt_addr & (~PAGE_MASK))) >= data_size)
lli_array[0].block_size = data_size;
else
lli_array[0].block_size =
PAGE_SIZE - (app_virt_addr & (~PAGE_MASK));
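/*
 * Continuing the worked example above: page 0 then carries
 * 0x1000 - 0xf00 = 256 bytes, the middle page 4096 bytes and, after
 * the last-page adjustment below, the final page 0x670 = 1648 bytes;
 * 256 + 4096 + 1648 covers all 6000 bytes.
 */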
dev_dbg(&sep->pdev->dev,
"[PID%d] After check if page 0 has all data\n",
current->pid);
dev_dbg(&sep->pdev->dev,
"[PID%d] lli_array[0].bus_address is (hex) %08lx, "
"lli_array[0].block_size is (hex) %x\n",
current->pid,
(unsigned long)lli_array[0].bus_address,
lli_array[0].block_size);
/* Check the size of the last page */
if (num_pages > 1) {
lli_array[num_pages - 1].block_size =
(app_virt_addr + data_size) & (~PAGE_MASK);
if (lli_array[num_pages - 1].block_size == 0)
lli_array[num_pages - 1].block_size = PAGE_SIZE;
dev_dbg(&sep->pdev->dev,
"[PID%d] After last page size adjustment\n",
current->pid);
dev_dbg(&sep->pdev->dev,
"[PID%d] lli_array[%x].bus_address is (hex) %08lx, "
"lli_array[%x].block_size is (hex) %x\n",
current->pid,
num_pages - 1,
(unsigned long)lli_array[num_pages - 1].bus_address,
num_pages - 1,
lli_array[num_pages - 1].block_size);
}
/* Set output params according to the in_out flag */
if (in_out_flag == SEP_DRIVER_IN_FLAG) {
*lli_array_ptr = lli_array;
dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_num_pages =
num_pages;
dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_page_array =
page_array;
dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_map_array =
map_array;
dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_map_num_entries =
num_pages;
dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].src_sg = NULL;
} else {
*lli_array_ptr = lli_array;
dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].out_num_pages =
num_pages;
dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].out_page_array =
page_array;
dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].out_map_array =
map_array;
dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].
out_map_num_entries = num_pages;
dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].dst_sg = NULL;
}
goto end_function;
end_function_with_error3:
/* Free lli array */
kfree(lli_array);
end_function_with_error2:
kfree(map_array);
end_function_with_error1:
/* Free page array */
kfree(page_array);
end_function:
return error;
}
/**
* sep_lli_table_secure_dma - get lli array for IMR addresses
* @sep: pointer to struct sep_device
* @app_virt_addr: user memory data buffer
* @data_size: size of data buffer
* @lli_array_ptr: lli array
* @in_out_flag: not used
* @dma_ctx: pointer to struct sep_dma_context
*
* This function creates lli tables for outputting data to
* IMR memory, which is memory that cannot be accessed by the
* x86 processor.
*/
static int sep_lli_table_secure_dma(struct sep_device *sep,
u32 app_virt_addr,
u32 data_size,
struct sep_lli_entry **lli_array_ptr,
int in_out_flag,
struct sep_dma_context *dma_ctx)
{
int error = 0;
u32 count;
/* The page of the end address of the user space buffer */
u32 end_page;
/* The page of the start address of the user space buffer */
u32 start_page;
/* The range in pages */
u32 num_pages;
/* Array of lli */
struct sep_lli_entry *lli_array;
/* Set start and end pages and num pages */
end_page = (app_virt_addr + data_size - 1) >> PAGE_SHIFT;
start_page = app_virt_addr >> PAGE_SHIFT;
num_pages = end_page - start_page + 1;
dev_dbg(&sep->pdev->dev, "[PID%d] lock user pages"
" app_virt_addr is %x\n", current->pid, app_virt_addr);
dev_dbg(&sep->pdev->dev, "[PID%d] data_size is (hex) %x\n",
current->pid, data_size);
dev_dbg(&sep->pdev->dev, "[PID%d] start_page is (hex) %x\n",
current->pid, start_page);
dev_dbg(&sep->pdev->dev, "[PID%d] end_page is (hex) %x\n",
current->pid, end_page);
dev_dbg(&sep->pdev->dev, "[PID%d] num_pages is (hex) %x\n",
current->pid, num_pages);
lli_array = kmalloc(sizeof(struct sep_lli_entry) * num_pages,
GFP_ATOMIC);
if (!lli_array) {
dev_warn(&sep->pdev->dev,
"[PID%d] kmalloc for lli_array failed\n",
current->pid);
return -ENOMEM;
}
/*
* Fill the lli_array
*/
start_page = start_page << PAGE_SHIFT;
for (count = 0; count < num_pages; count++) {
/* Fill the lli array entry */
lli_array[count].bus_address = start_page;
lli_array[count].block_size = PAGE_SIZE;
start_page += PAGE_SIZE;
dev_dbg(&sep->pdev->dev,
"[PID%d] lli_array[%x].bus_address is %08lx, "
"lli_array[%x].block_size is (hex) %x\n",
current->pid,
count, (unsigned long)lli_array[count].bus_address,
count, lli_array[count].block_size);
}
/* Check the offset for the first page */
lli_array[0].bus_address =
lli_array[0].bus_address + (app_virt_addr & (~PAGE_MASK));
/* Check that not all the data is in the first page only */
if ((PAGE_SIZE - (app_virt_addr & (~PAGE_MASK))) >= data_size)
lli_array[0].block_size = data_size;
else
lli_array[0].block_size =
PAGE_SIZE - (app_virt_addr & (~PAGE_MASK));
dev_dbg(&sep->pdev->dev,
"[PID%d] After check if page 0 has all data\n"
"lli_array[0].bus_address is (hex) %08lx, "
"lli_array[0].block_size is (hex) %x\n",
current->pid,
(unsigned long)lli_array[0].bus_address,
lli_array[0].block_size);
/* Check the size of the last page */
if (num_pages > 1) {
lli_array[num_pages - 1].block_size =
(app_virt_addr + data_size) & (~PAGE_MASK);
if (lli_array[num_pages - 1].block_size == 0)
lli_array[num_pages - 1].block_size = PAGE_SIZE;
dev_dbg(&sep->pdev->dev,
"[PID%d] After last page size adjustment\n"
"lli_array[%x].bus_address is (hex) %08lx, "
"lli_array[%x].block_size is (hex) %x\n",
current->pid, num_pages - 1,
(unsigned long)lli_array[num_pages - 1].bus_address,
num_pages - 1,
lli_array[num_pages - 1].block_size);
}
*lli_array_ptr = lli_array;
dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].out_num_pages = num_pages;
dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].out_page_array = NULL;
dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].out_map_array = NULL;
dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].out_map_num_entries = 0;
return error;
}
/**
* sep_calculate_lli_table_max_size - size the LLI table
* @sep: pointer to struct sep_device
* @lli_in_array_ptr
* @num_array_entries
* @last_table_flag
*
* This function calculates the size of data that can be inserted into
* the lli table from this array, such that either the table is full
* (all entries are entered), or there are no more entries in the
* lli array
*/
static u32 sep_calculate_lli_table_max_size(struct sep_device *sep,
struct sep_lli_entry *lli_in_array_ptr,
u32 num_array_entries,
u32 *last_table_flag)
{
u32 counter;
/* Table data size */
u32 table_data_size = 0;
/* Data size for the next table */
u32 next_table_data_size;
*last_table_flag = 0;
/*
* Calculate the data in the out lli table till we fill the whole
* table or till the data has ended
*/
for (counter = 0;
(counter < (SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP - 1)) &&
(counter < num_array_entries); counter++)
table_data_size += lli_in_array_ptr[counter].block_size;
/*
* Check if we reached the last entry,
* meaning this is the last table to build,
* and no need to check the block alignment
*/
if (counter == num_array_entries) {
/* Set the last table flag */
*last_table_flag = 1;
goto end_function;
}
/*
* Calculate the data size of the next table.
* Stop if no entries are left or if the data size exceeds the DMA restriction
*/
next_table_data_size = 0;
for (; counter < num_array_entries; counter++) {
next_table_data_size += lli_in_array_ptr[counter].block_size;
if (next_table_data_size >= SEP_DRIVER_MIN_DATA_SIZE_PER_TABLE)
break;
}
/*
* Check if the next table data size is less than the DMA restriction.
* If it is, recalculate the current table size so that the next
* table data size will be adequate for DMA
*/
if (next_table_data_size &&
next_table_data_size < SEP_DRIVER_MIN_DATA_SIZE_PER_TABLE)
table_data_size -= (SEP_DRIVER_MIN_DATA_SIZE_PER_TABLE -
next_table_data_size);
end_function:
return table_data_size;
}
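/*
 * Worked example (illustrative, assuming 32 entries per table of which
 * 31 can carry data): for an lli array of nine 4 KB blocks, the first
 * loop sums all nine blocks, counter reaches num_array_entries, so
 * *last_table_flag is set and 36 KB is returned as the table size.
 */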
/**
* sep_build_lli_table - build an lli array for the given table
* @sep: pointer to struct sep_device
* @lli_array_ptr: pointer to lli array
* @lli_table_ptr: pointer to lli table
* @num_processed_entries_ptr: pointer to number of lli array entries consumed
* @num_table_entries_ptr: pointer to number of entries in the built table
* @table_data_size: total data size
*
* Builds an lli table from the lli_array according to
* the given size of data
*/
static void sep_build_lli_table(struct sep_device *sep,
struct sep_lli_entry *lli_array_ptr,
struct sep_lli_entry *lli_table_ptr,
u32 *num_processed_entries_ptr,
u32 *num_table_entries_ptr,
u32 table_data_size)
{
/* Current table data size */
u32 curr_table_data_size;
/* Counter of lli array entry */
u32 array_counter;
/* Init current table data size and lli array entry counter */
curr_table_data_size = 0;
array_counter = 0;
*num_table_entries_ptr = 1;
dev_dbg(&sep->pdev->dev,
"[PID%d] build lli table table_data_size: (hex) %x\n",
current->pid, table_data_size);
/* Fill the table till table size reaches the needed amount */
while (curr_table_data_size < table_data_size) {
/* Update the number of entries in table */
(*num_table_entries_ptr)++;
lli_table_ptr->bus_address =
cpu_to_le32(lli_array_ptr[array_counter].bus_address);
lli_table_ptr->block_size =
cpu_to_le32(lli_array_ptr[array_counter].block_size);
curr_table_data_size += lli_array_ptr[array_counter].block_size;
dev_dbg(&sep->pdev->dev,
"[PID%d] lli_table_ptr is %p\n",
current->pid, lli_table_ptr);
dev_dbg(&sep->pdev->dev,
"[PID%d] lli_table_ptr->bus_address: %08lx\n",
current->pid,
(unsigned long)lli_table_ptr->bus_address);
dev_dbg(&sep->pdev->dev,
"[PID%d] lli_table_ptr->block_size is (hex) %x\n",
current->pid, lli_table_ptr->block_size);
/* Check for overflow of the table data */
if (curr_table_data_size > table_data_size) {
dev_dbg(&sep->pdev->dev,
"[PID%d] curr_table_data_size too large\n",
current->pid);
/* Update the size of block in the table */
lli_table_ptr->block_size =
cpu_to_le32(lli_table_ptr->block_size) -
(curr_table_data_size - table_data_size);
/* Update the physical address in the lli array */
lli_array_ptr[array_counter].bus_address +=
cpu_to_le32(lli_table_ptr->block_size);
/* Update the block size left in the lli array */
lli_array_ptr[array_counter].block_size =
(curr_table_data_size - table_data_size);
} else
/* Advance to the next entry in the lli_array */
array_counter++;
dev_dbg(&sep->pdev->dev,
"[PID%d] lli_table_ptr->bus_address is %08lx\n",
current->pid,
(unsigned long)lli_table_ptr->bus_address);
dev_dbg(&sep->pdev->dev,
"[PID%d] lli_table_ptr->block_size is (hex) %x\n",
current->pid,
lli_table_ptr->block_size);
/* Move to the next entry in table */
lli_table_ptr++;
}
/* Set the info entry to default */
lli_table_ptr->bus_address = 0xffffffff;
lli_table_ptr->block_size = 0;
/* Set the output parameter */
*num_processed_entries_ptr += array_counter;
}
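/*
 * Worked example (illustrative): with table_data_size = 6 KB and lli
 * array blocks of 4 KB each, the first block is taken whole, the
 * second is trimmed to 2 KB, and the remaining 2 KB of that entry stay
 * in the lli array to seed the next table.
 */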
/**
* sep_shared_area_virt_to_bus - map shared area to bus address
* @sep: pointer to struct sep_device
* @virt_address: virtual address to convert
*
* This function returns the physical address inside the shared area
* according to the virtual address. It can be either on the external
* RAM device (ioremapped), or on the system RAM.
* This implementation is for the external RAM
*/
static dma_addr_t sep_shared_area_virt_to_bus(struct sep_device *sep,
void *virt_address)
{
dev_dbg(&sep->pdev->dev, "[PID%d] sh virt to phys v %p\n",
current->pid, virt_address);
dev_dbg(&sep->pdev->dev, "[PID%d] sh virt to phys p %08lx\n",
current->pid,
(unsigned long)
sep->shared_bus + (virt_address - sep->shared_addr));
return sep->shared_bus + (size_t)(virt_address - sep->shared_addr);
}
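/*
 * Worked example (illustrative): if shared_addr is 0xffffc000 and
 * shared_bus is 0x10000000, virtual address 0xffffc040 maps to bus
 * address 0x10000040; sep_shared_area_bus_to_virt() below performs
 * the inverse computation.
 */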
/**
* sep_shared_area_bus_to_virt - map shared area bus address to kernel
* @sep: pointer to struct sep_device
* @bus_address: bus address to convert
*
* This function returns the virtual address inside the shared area
* according to the physical address. It can be either on the
* external RAM device (ioremapped), or on the system RAM.
* This implementation is for the external RAM
*/
static void *sep_shared_area_bus_to_virt(struct sep_device *sep,
dma_addr_t bus_address)
{
dev_dbg(&sep->pdev->dev, "[PID%d] shared bus to virt b=%lx v=%lx\n",
current->pid,
(unsigned long)bus_address, (unsigned long)(sep->shared_addr +
(size_t)(bus_address - sep->shared_bus)));
return sep->shared_addr + (size_t)(bus_address - sep->shared_bus);
}
/**
* sep_debug_print_lli_tables - dump LLI table
* @sep: pointer to struct sep_device
* @lli_table_ptr: pointer to sep_lli_entry
* @num_table_entries: number of entries
* @table_data_size: total data size
*
* Walk the list of the created tables and print all the data
*/
static void sep_debug_print_lli_tables(struct sep_device *sep,
struct sep_lli_entry *lli_table_ptr,
unsigned long num_table_entries,
unsigned long table_data_size)
{
#ifdef DEBUG
unsigned long table_count = 1;
unsigned long entries_count = 0;
dev_dbg(&sep->pdev->dev, "[PID%d] sep_debug_print_lli_tables start\n",
current->pid);
if (num_table_entries == 0) {
dev_dbg(&sep->pdev->dev, "[PID%d] no table to print\n",
current->pid);
return;
}
while ((unsigned long) lli_table_ptr->bus_address != 0xffffffff) {
dev_dbg(&sep->pdev->dev,
"[PID%d] lli table %08lx, "
"table_data_size is (hex) %lx\n",
current->pid, table_count, table_data_size);
dev_dbg(&sep->pdev->dev,
"[PID%d] num_table_entries is (hex) %lx\n",
current->pid, num_table_entries);
/* Print entries of the table (without info entry) */
for (entries_count = 0; entries_count < num_table_entries;
entries_count++, lli_table_ptr++) {
dev_dbg(&sep->pdev->dev,
"[PID%d] lli_table_ptr address is %08lx\n",
current->pid,
(unsigned long) lli_table_ptr);
dev_dbg(&sep->pdev->dev,
"[PID%d] phys address is %08lx "
"block size is (hex) %x\n", current->pid,
(unsigned long)lli_table_ptr->bus_address,
lli_table_ptr->block_size);
}
/* Point to the info entry */
lli_table_ptr--;
dev_dbg(&sep->pdev->dev,
"[PID%d] phys lli_table_ptr->block_size "
"is (hex) %x\n",
current->pid,
lli_table_ptr->block_size);
dev_dbg(&sep->pdev->dev,
"[PID%d] phys lli_table_ptr->physical_address "
"is %08lx\n",
current->pid,
(unsigned long)lli_table_ptr->bus_address);
table_data_size = lli_table_ptr->block_size & 0xffffff;
num_table_entries = (lli_table_ptr->block_size >> 24) & 0xff;
dev_dbg(&sep->pdev->dev,
"[PID%d] phys table_data_size is "
"(hex) %lx num_table_entries is"
" %lx bus_address is%lx\n",
current->pid,
table_data_size,
num_table_entries,
(unsigned long)lli_table_ptr->bus_address);
if ((unsigned long)lli_table_ptr->bus_address != 0xffffffff)
lli_table_ptr = (struct sep_lli_entry *)
sep_shared_bus_to_virt(sep,
(unsigned long)lli_table_ptr->bus_address);
table_count++;
}
dev_dbg(&sep->pdev->dev, "[PID%d] sep_debug_print_lli_tables end\n",
current->pid);
#endif
}
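/*
* Note on the encoding decoded above (matching the shifts used in this
* file): the last ("info") entry of each table packs the next table's
* entry count into the top byte of block_size and its data size into the
* low 24 bits. A minimal sketch:
*
*	u32 packed  = (num_entries << 24) | (data_size & 0x00ffffff);
*	u32 entries = (packed >> 24) & 0xff;    (num_entries again)
*	u32 size    = packed & 0x00ffffff;      (data_size again)
*/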
/**
* sep_prepare_empty_lli_table - create a blank LLI table
* @sep: pointer to struct sep_device
* @lli_table_addr_ptr: pointer to lli table
* @num_entries_ptr: pointer to number of entries
* @table_data_size_ptr: pointer to table data size
* @dmatables_region: Optional buffer for DMA tables
* @dma_ctx: DMA context
*
* This function creates empty lli tables when there is no data
*/
static void sep_prepare_empty_lli_table(struct sep_device *sep,
dma_addr_t *lli_table_addr_ptr,
u32 *num_entries_ptr,
u32 *table_data_size_ptr,
void **dmatables_region,
struct sep_dma_context *dma_ctx)
{
struct sep_lli_entry *lli_table_ptr;
/* Find the area for new table */
lli_table_ptr =
(struct sep_lli_entry *)(sep->shared_addr +
SYNCHRONIC_DMA_TABLES_AREA_OFFSET_BYTES +
dma_ctx->num_lli_tables_created * sizeof(struct sep_lli_entry) *
SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP);
if (dmatables_region && *dmatables_region)
lli_table_ptr = *dmatables_region;
lli_table_ptr->bus_address = 0;
lli_table_ptr->block_size = 0;
lli_table_ptr++;
lli_table_ptr->bus_address = 0xFFFFFFFF;
lli_table_ptr->block_size = 0;
/* Set the output parameter value */
*lli_table_addr_ptr = sep->shared_bus +
SYNCHRONIC_DMA_TABLES_AREA_OFFSET_BYTES +
dma_ctx->num_lli_tables_created *
sizeof(struct sep_lli_entry) *
SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP;
/* Set the num of entries and table data size for empty table */
*num_entries_ptr = 2;
*table_data_size_ptr = 0;
/* Update the number of created tables */
dma_ctx->num_lli_tables_created++;
}
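/*
* Shape of the empty table built above (both fields are u32, values taken
* from the assignments in the function):
*
*	entry 0: bus_address = 0x00000000, block_size = 0  (no data)
*	entry 1: bus_address = 0xFFFFFFFF, block_size = 0  (info entry)
*
* which is why *num_entries_ptr is set to 2 and *table_data_size_ptr to 0.
*/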
/**
* sep_prepare_input_dma_table - prepare input DMA mappings
* @sep: pointer to struct sep_device
* @app_virt_addr: virtual address of the input buffer
* @data_size: size of data to map
* @block_size: block size of the operation
* @lli_table_ptr: returned bus address of the first input LLI table
* @num_entries_ptr: returned number of entries in the first table
* @table_data_size_ptr: returned data size of the first table
* @is_kva: set for kernel data (kernel crypto call)
* @dmatables_region: optional buffer for DMA tables
* @dma_ctx: DMA context
*
* This function prepares only the input DMA table for synchronous
* symmetric operations (HASH)
* Note that all bus addresses that are passed to the SEP
* are in 32 bit format; the SEP is a 32 bit device
*/
static int sep_prepare_input_dma_table(struct sep_device *sep,
unsigned long app_virt_addr,
u32 data_size,
u32 block_size,
dma_addr_t *lli_table_ptr,
u32 *num_entries_ptr,
u32 *table_data_size_ptr,
bool is_kva,
void **dmatables_region,
struct sep_dma_context *dma_ctx
)
{
int error = 0;
/* Pointer to the info entry of the table - the last entry */
struct sep_lli_entry *info_entry_ptr;
/* Array of pointers to page */
struct sep_lli_entry *lli_array_ptr;
/* Points to the first entry to be processed in the lli_in_array */
u32 current_entry = 0;
/* Num entries in the virtual buffer */
u32 sep_lli_entries = 0;
/* Lli table pointer */
struct sep_lli_entry *in_lli_table_ptr;
/* The total data in one table */
u32 table_data_size = 0;
/* Flag for last table */
u32 last_table_flag = 0;
/* Number of entries in lli table */
u32 num_entries_in_table = 0;
/* Next table address */
void *lli_table_alloc_addr = NULL;
void *dma_lli_table_alloc_addr = NULL;
void *dma_in_lli_table_ptr = NULL;
dev_dbg(&sep->pdev->dev, "[PID%d] prepare intput dma "
"tbl data size: (hex) %x\n",
current->pid, data_size);
dev_dbg(&sep->pdev->dev, "[PID%d] block_size is (hex) %x\n",
current->pid, block_size);
/* Initialize the pages pointers */
dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_page_array = NULL;
dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_num_pages = 0;
/* Set the kernel address for first table to be allocated */
lli_table_alloc_addr = (void *)(sep->shared_addr +
SYNCHRONIC_DMA_TABLES_AREA_OFFSET_BYTES +
dma_ctx->num_lli_tables_created * sizeof(struct sep_lli_entry) *
SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP);
if (data_size == 0) {
if (dmatables_region) {
error = sep_allocate_dmatables_region(sep,
dmatables_region,
dma_ctx,
1);
if (error)
return error;
}
/* Special case - create empty table - 2 entries, zero data */
sep_prepare_empty_lli_table(sep, lli_table_ptr,
num_entries_ptr, table_data_size_ptr,
dmatables_region, dma_ctx);
goto update_dcb_counter;
}
/* Check if the pages are in Kernel Virtual Address layout */
if (is_kva == true)
error = sep_lock_kernel_pages(sep, app_virt_addr,
data_size, &lli_array_ptr, SEP_DRIVER_IN_FLAG,
dma_ctx);
else
/*
* Lock the pages of the user buffer
* and translate them to pages
*/
error = sep_lock_user_pages(sep, app_virt_addr,
data_size, &lli_array_ptr, SEP_DRIVER_IN_FLAG,
dma_ctx);
if (error)
goto end_function;
dev_dbg(&sep->pdev->dev,
"[PID%d] output sep_in_num_pages is (hex) %x\n",
current->pid,
dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_num_pages);
current_entry = 0;
info_entry_ptr = NULL;
sep_lli_entries =
dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_num_pages;
dma_lli_table_alloc_addr = lli_table_alloc_addr;
if (dmatables_region) {
error = sep_allocate_dmatables_region(sep,
dmatables_region,
dma_ctx,
sep_lli_entries);
if (error)
return error;
lli_table_alloc_addr = *dmatables_region;
}
/* Loop until all the entries in the input array are processed */
while (current_entry < sep_lli_entries) {
/* Set the new input and output tables */
in_lli_table_ptr =
(struct sep_lli_entry *)lli_table_alloc_addr;
dma_in_lli_table_ptr =
(struct sep_lli_entry *)dma_lli_table_alloc_addr;
lli_table_alloc_addr += sizeof(struct sep_lli_entry) *
SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP;
dma_lli_table_alloc_addr += sizeof(struct sep_lli_entry) *
SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP;
if (dma_lli_table_alloc_addr >
((void *)sep->shared_addr +
SYNCHRONIC_DMA_TABLES_AREA_OFFSET_BYTES +
SYNCHRONIC_DMA_TABLES_AREA_SIZE_BYTES)) {
error = -ENOMEM;
goto end_function_error;
}
/* Update the number of created tables */
dma_ctx->num_lli_tables_created++;
/* Calculate the maximum size of data for input table */
table_data_size = sep_calculate_lli_table_max_size(sep,
&lli_array_ptr[current_entry],
(sep_lli_entries - current_entry),
&last_table_flag);
/*
* If this is not the last table -
* then align it to the block size
*/
if (!last_table_flag)
table_data_size =
(table_data_size / block_size) * block_size;
dev_dbg(&sep->pdev->dev,
"[PID%d] output table_data_size is (hex) %x\n",
current->pid,
table_data_size);
/* Construct input lli table */
sep_build_lli_table(sep, &lli_array_ptr[current_entry],
in_lli_table_ptr,
&current_entry, &num_entries_in_table, table_data_size);
if (info_entry_ptr == NULL) {
/* Set the output parameters to physical addresses */
*lli_table_ptr = sep_shared_area_virt_to_bus(sep,
dma_in_lli_table_ptr);
*num_entries_ptr = num_entries_in_table;
*table_data_size_ptr = table_data_size;
dev_dbg(&sep->pdev->dev,
"[PID%d] output lli_table_in_ptr is %08lx\n",
current->pid,
(unsigned long)*lli_table_ptr);
} else {
/* Update the info entry of the previous in table */
info_entry_ptr->bus_address =
sep_shared_area_virt_to_bus(sep,
dma_in_lli_table_ptr);
info_entry_ptr->block_size =
((num_entries_in_table) << 24) |
(table_data_size);
}
/* Save the pointer to the info entry of the current tables */
info_entry_ptr = in_lli_table_ptr + num_entries_in_table - 1;
}
/* Print input tables */
if (!dmatables_region) {
sep_debug_print_lli_tables(sep, (struct sep_lli_entry *)
sep_shared_area_bus_to_virt(sep, *lli_table_ptr),
*num_entries_ptr, *table_data_size_ptr);
}
/* The array of the pages */
kfree(lli_array_ptr);
update_dcb_counter:
/* Update DCB counter */
dma_ctx->nr_dcb_creat++;
goto end_function;
end_function_error:
/* Free all the allocated resources */
kfree(dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_map_array);
dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_map_array = NULL;
kfree(lli_array_ptr);
kfree(dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_page_array);
dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_page_array = NULL;
end_function:
return error;
}
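/*
* Illustrative chain produced by the loop above (layout only, addresses
* invented):
*
*	table 0: [ data entries ... ][ info -> table 1, count/size packed ]
*	table 1: [ data entries ... ][ info -> table 2, count/size packed ]
*	table 2: [ data entries ... ][ info entry of the final table      ]
*
* Only table 0 is reported back through *lli_table_ptr, *num_entries_ptr
* and *table_data_size_ptr; the SEP follows the info entries to walk the
* rest of the chain.
*/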
/**
* sep_construct_dma_tables_from_lli - prepare AES/DES mappings
* @sep: pointer to struct sep_device
* @lli_in_array: LLI array describing the input buffer
* @sep_in_lli_entries: number of entries in the input LLI array
* @lli_out_array: LLI array describing the output buffer
* @sep_out_lli_entries: number of entries in the output LLI array
* @block_size: block size of the operation
* @lli_table_in_ptr: returned bus address of the first input LLI table
* @lli_table_out_ptr: returned bus address of the first output LLI table
* @in_num_entries_ptr: returned number of entries in the first input table
* @out_num_entries_ptr: returned number of entries in the first output table
* @table_data_size_ptr: returned data size of the first table pair
* @dmatables_region: optional buffer for DMA tables
* @dma_ctx: DMA context
*
* This function creates the input and output DMA tables for
* symmetric operations (AES/DES) according to the block
* size from LLI arrays
* Note that all bus addresses that are passed to the SEP
* are in 32 bit format; the SEP is a 32 bit device
*/
static int sep_construct_dma_tables_from_lli(
struct sep_device *sep,
struct sep_lli_entry *lli_in_array,
u32 sep_in_lli_entries,
struct sep_lli_entry *lli_out_array,
u32 sep_out_lli_entries,
u32 block_size,
dma_addr_t *lli_table_in_ptr,
dma_addr_t *lli_table_out_ptr,
u32 *in_num_entries_ptr,
u32 *out_num_entries_ptr,
u32 *table_data_size_ptr,
void **dmatables_region,
struct sep_dma_context *dma_ctx)
{
/* Points to the area where next lli table can be allocated */
void *lli_table_alloc_addr = NULL;
/*
* Points to the area in shared region where next lli table
* can be allocated
*/
void *dma_lli_table_alloc_addr = NULL;
/* Input lli table in dmatables_region or shared region */
struct sep_lli_entry *in_lli_table_ptr = NULL;
/* Input lli table location in the shared region */
struct sep_lli_entry *dma_in_lli_table_ptr = NULL;
/* Output lli table in dmatables_region or shared region */
struct sep_lli_entry *out_lli_table_ptr = NULL;
/* Output lli table location in the shared region */
struct sep_lli_entry *dma_out_lli_table_ptr = NULL;
/* Pointer to the info entry of the table - the last entry */
struct sep_lli_entry *info_in_entry_ptr = NULL;
/* Pointer to the info entry of the table - the last entry */
struct sep_lli_entry *info_out_entry_ptr = NULL;
/* Points to the first entry to be processed in the lli_in_array */
u32 current_in_entry = 0;
/* Points to the first entry to be processed in the lli_out_array */
u32 current_out_entry = 0;
/* Max size of the input table */
u32 in_table_data_size = 0;
/* Max size of the output table */
u32 out_table_data_size = 0;
/* Flag that signifies if this is the last table built */
u32 last_table_flag = 0;
/* The data size that should be in table */
u32 table_data_size = 0;
/* Number of entries in the input table */
u32 num_entries_in_table = 0;
/* Number of entries in the output table */
u32 num_entries_out_table = 0;
if (!dma_ctx) {
dev_warn(&sep->pdev->dev, "DMA context uninitialized\n");
return -EINVAL;
}
/* Initialize to point after the message area */
lli_table_alloc_addr = (void *)(sep->shared_addr +
SYNCHRONIC_DMA_TABLES_AREA_OFFSET_BYTES +
(dma_ctx->num_lli_tables_created *
(sizeof(struct sep_lli_entry) *
SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP)));
dma_lli_table_alloc_addr = lli_table_alloc_addr;
if (dmatables_region) {
/* 2 for both in+out table */
if (sep_allocate_dmatables_region(sep,
dmatables_region,
dma_ctx,
2*sep_in_lli_entries))
return -ENOMEM;
lli_table_alloc_addr = *dmatables_region;
}
/* Loop until all the entries in the input array are processed */
while (current_in_entry < sep_in_lli_entries) {
/* Set the new input and output tables */
in_lli_table_ptr =
(struct sep_lli_entry *)lli_table_alloc_addr;
dma_in_lli_table_ptr =
(struct sep_lli_entry *)dma_lli_table_alloc_addr;
lli_table_alloc_addr += sizeof(struct sep_lli_entry) *
SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP;
dma_lli_table_alloc_addr += sizeof(struct sep_lli_entry) *
SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP;
/* Set the first output tables */
out_lli_table_ptr =
(struct sep_lli_entry *)lli_table_alloc_addr;
dma_out_lli_table_ptr =
(struct sep_lli_entry *)dma_lli_table_alloc_addr;
/* Check if the DMA table area limit was overrun */
if ((dma_lli_table_alloc_addr + sizeof(struct sep_lli_entry) *
SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP) >
((void *)sep->shared_addr +
SYNCHRONIC_DMA_TABLES_AREA_OFFSET_BYTES +
SYNCHRONIC_DMA_TABLES_AREA_SIZE_BYTES)) {
dev_warn(&sep->pdev->dev, "dma table limit overrun\n");
return -ENOMEM;
}
/* Update the number of the lli tables created */
dma_ctx->num_lli_tables_created += 2;
lli_table_alloc_addr += sizeof(struct sep_lli_entry) *
SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP;
dma_lli_table_alloc_addr += sizeof(struct sep_lli_entry) *
SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP;
/* Calculate the maximum size of data for input table */
in_table_data_size =
sep_calculate_lli_table_max_size(sep,
&lli_in_array[current_in_entry],
(sep_in_lli_entries - current_in_entry),
&last_table_flag);
/* Calculate the maximum size of data for output table */
out_table_data_size =
sep_calculate_lli_table_max_size(sep,
&lli_out_array[current_out_entry],
(sep_out_lli_entries - current_out_entry),
&last_table_flag);
if (!last_table_flag) {
in_table_data_size = (in_table_data_size /
block_size) * block_size;
out_table_data_size = (out_table_data_size /
block_size) * block_size;
}
table_data_size = in_table_data_size;
if (table_data_size > out_table_data_size)
table_data_size = out_table_data_size;
dev_dbg(&sep->pdev->dev,
"[PID%d] construct tables from lli"
" in_table_data_size is (hex) %x\n", current->pid,
in_table_data_size);
dev_dbg(&sep->pdev->dev,
"[PID%d] construct tables from lli"
"out_table_data_size is (hex) %x\n", current->pid,
out_table_data_size);
/* Construct input lli table */
sep_build_lli_table(sep, &lli_in_array[current_in_entry],
in_lli_table_ptr,
&current_in_entry,
&num_entries_in_table,
table_data_size);
/* Construct output lli table */
sep_build_lli_table(sep, &lli_out_array[current_out_entry],
out_lli_table_ptr,
&current_out_entry,
&num_entries_out_table,
table_data_size);
/* If info entry is null - this is the first table built */
if (info_in_entry_ptr == NULL) {
/* Set the output parameters to physical addresses */
*lli_table_in_ptr =
sep_shared_area_virt_to_bus(sep, dma_in_lli_table_ptr);
*in_num_entries_ptr = num_entries_in_table;
*lli_table_out_ptr =
sep_shared_area_virt_to_bus(sep,
dma_out_lli_table_ptr);
*out_num_entries_ptr = num_entries_out_table;
*table_data_size_ptr = table_data_size;
dev_dbg(&sep->pdev->dev,
"[PID%d] output lli_table_in_ptr is %08lx\n",
current->pid,
(unsigned long)*lli_table_in_ptr);
dev_dbg(&sep->pdev->dev,
"[PID%d] output lli_table_out_ptr is %08lx\n",
current->pid,
(unsigned long)*lli_table_out_ptr);
} else {
/* Update the info entry of the previous in table */
info_in_entry_ptr->bus_address =
sep_shared_area_virt_to_bus(sep,
dma_in_lli_table_ptr);
info_in_entry_ptr->block_size =
((num_entries_in_table) << 24) |
(table_data_size);
/* Update the info entry of the previous out table */
info_out_entry_ptr->bus_address =
sep_shared_area_virt_to_bus(sep,
dma_out_lli_table_ptr);
info_out_entry_ptr->block_size =
((num_entries_out_table) << 24) |
(table_data_size);
dev_dbg(&sep->pdev->dev,
"[PID%d] output lli_table_in_ptr:%08lx %08x\n",
current->pid,
(unsigned long)info_in_entry_ptr->bus_address,
info_in_entry_ptr->block_size);
dev_dbg(&sep->pdev->dev,
"[PID%d] output lli_table_out_ptr:"
"%08lx %08x\n",
current->pid,
(unsigned long)info_out_entry_ptr->bus_address,
info_out_entry_ptr->block_size);
}
/* Save the pointer to the info entry of the current tables */
info_in_entry_ptr = in_lli_table_ptr +
num_entries_in_table - 1;
info_out_entry_ptr = out_lli_table_ptr +
num_entries_out_table - 1;
dev_dbg(&sep->pdev->dev,
"[PID%d] output num_entries_out_table is %x\n",
current->pid,
(u32)num_entries_out_table);
dev_dbg(&sep->pdev->dev,
"[PID%d] output info_in_entry_ptr is %lx\n",
current->pid,
(unsigned long)info_in_entry_ptr);
dev_dbg(&sep->pdev->dev,
"[PID%d] output info_out_entry_ptr is %lx\n",
current->pid,
(unsigned long)info_out_entry_ptr);
}
/* Print input tables */
if (!dmatables_region) {
sep_debug_print_lli_tables(
sep,
(struct sep_lli_entry *)
sep_shared_area_bus_to_virt(sep, *lli_table_in_ptr),
*in_num_entries_ptr,
*table_data_size_ptr);
}
/* Print output tables */
if (!dmatables_region) {
sep_debug_print_lli_tables(
sep,
(struct sep_lli_entry *)
sep_shared_area_bus_to_virt(sep, *lli_table_out_ptr),
*out_num_entries_ptr,
*table_data_size_ptr);
}
return 0;
}
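/*
* Worked example for the size negotiation above (numbers invented): with
* block_size = 16, in_table_data_size = 100 and out_table_data_size = 90,
* a non-last table is trimmed to (100 / 16) * 16 = 96 and
* (90 / 16) * 16 = 80, and the common table_data_size becomes
* min(96, 80) = 80, so both sides of the AES/DES operation always advance
* by the same block-aligned amount.
*/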
/**
* sep_prepare_input_output_dma_table - prepare DMA I/O table
* @sep: pointer to struct sep_device
* @app_virt_in_addr: virtual address of the input buffer
* @app_virt_out_addr: virtual address of the output buffer
* @data_size: size of data to map
* @block_size: block size of the operation
* @lli_table_in_ptr: returned bus address of the first input LLI table
* @lli_table_out_ptr: returned bus address of the first output LLI table
* @in_num_entries_ptr: returned number of entries in the first input table
* @out_num_entries_ptr: returned number of entries in the first output table
* @table_data_size_ptr: returned data size of the first table pair
* @is_kva: set for kernel data; used only for kernel crypto module
* @dmatables_region: optional buffer for DMA tables
* @dma_ctx: DMA context
*
* This function builds input and output DMA tables for synchronous
* symmetric operations (AES, DES, HASH). It also checks that each table
* is of the modular block size
* Note that all bus addresses that are passed to the SEP
* are in 32 bit format; the SEP is a 32 bit device
*/
static int sep_prepare_input_output_dma_table(struct sep_device *sep,
unsigned long app_virt_in_addr,
unsigned long app_virt_out_addr,
u32 data_size,
u32 block_size,
dma_addr_t *lli_table_in_ptr,
dma_addr_t *lli_table_out_ptr,
u32 *in_num_entries_ptr,
u32 *out_num_entries_ptr,
u32 *table_data_size_ptr,
bool is_kva,
void **dmatables_region,
struct sep_dma_context *dma_ctx)
{
int error = 0;
/* Array of pointers of page */
struct sep_lli_entry *lli_in_array;
/* Array of pointers of page */
struct sep_lli_entry *lli_out_array;
if (!dma_ctx) {
error = -EINVAL;
goto end_function;
}
if (data_size == 0) {
/* Prepare empty table for input and output */
if (dmatables_region) {
error = sep_allocate_dmatables_region(
sep,
dmatables_region,
dma_ctx,
2);
if (error)
goto end_function;
}
sep_prepare_empty_lli_table(sep, lli_table_in_ptr,
in_num_entries_ptr, table_data_size_ptr,
dmatables_region, dma_ctx);
sep_prepare_empty_lli_table(sep, lli_table_out_ptr,
out_num_entries_ptr, table_data_size_ptr,
dmatables_region, dma_ctx);
goto update_dcb_counter;
}
/* Initialize the pages pointers */
dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_page_array = NULL;
dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].out_page_array = NULL;
/* Lock the pages of the buffer and translate them to pages */
if (is_kva == true) {
dev_dbg(&sep->pdev->dev, "[PID%d] Locking kernel input pages\n",
current->pid);
error = sep_lock_kernel_pages(sep, app_virt_in_addr,
data_size, &lli_in_array, SEP_DRIVER_IN_FLAG,
dma_ctx);
if (error) {
dev_warn(&sep->pdev->dev,
"[PID%d] sep_lock_kernel_pages for input "
"virtual buffer failed\n", current->pid);
goto end_function;
}
dev_dbg(&sep->pdev->dev, "[PID%d] Locking kernel output pages\n",
current->pid);
error = sep_lock_kernel_pages(sep, app_virt_out_addr,
data_size, &lli_out_array, SEP_DRIVER_OUT_FLAG,
dma_ctx);
if (error) {
dev_warn(&sep->pdev->dev,
"[PID%d] sep_lock_kernel_pages for output "
"virtual buffer failed\n", current->pid);
goto end_function_free_lli_in;
}
} else {
dev_dbg(&sep->pdev->dev, "[PID%d] Locking user input pages\n",
current->pid);
error = sep_lock_user_pages(sep, app_virt_in_addr,
data_size, &lli_in_array, SEP_DRIVER_IN_FLAG,
dma_ctx);
if (error) {
dev_warn(&sep->pdev->dev,
"[PID%d] sep_lock_user_pages for input "
"virtual buffer failed\n", current->pid);
goto end_function;
}
if (dma_ctx->secure_dma == true) {
/* secure_dma requires use of non accessible memory */
dev_dbg(&sep->pdev->dev, "[PID%d] in secure_dma\n",
current->pid);
error = sep_lli_table_secure_dma(sep,
app_virt_out_addr, data_size, &lli_out_array,
SEP_DRIVER_OUT_FLAG, dma_ctx);
if (error) {
dev_warn(&sep->pdev->dev,
"[PID%d] secure dma table setup "
" for output virtual buffer failed\n",
current->pid);
goto end_function_free_lli_in;
}
} else {
/* For normal, non-secure dma */
dev_dbg(&sep->pdev->dev, "[PID%d] not in secure_dma\n",
current->pid);
dev_dbg(&sep->pdev->dev,
"[PID%d] Locking user output pages\n",
current->pid);
error = sep_lock_user_pages(sep, app_virt_out_addr,
data_size, &lli_out_array, SEP_DRIVER_OUT_FLAG,
dma_ctx);
if (error) {
dev_warn(&sep->pdev->dev,
"[PID%d] sep_lock_user_pages"
" for output virtual buffer failed\n",
current->pid);
goto end_function_free_lli_in;
}
}
}
dev_dbg(&sep->pdev->dev, "[PID%d] After lock; prep input output dma "
"table sep_in_num_pages is (hex) %x\n", current->pid,
dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_num_pages);
dev_dbg(&sep->pdev->dev, "[PID%d] sep_out_num_pages is (hex) %x\n",
current->pid,
dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].out_num_pages);
dev_dbg(&sep->pdev->dev, "[PID%d] SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP"
" is (hex) %x\n", current->pid,
SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP);
/* Call the function that creates the tables from the lli arrays */
dev_dbg(&sep->pdev->dev, "[PID%d] calling create table from lli\n",
current->pid);
error = sep_construct_dma_tables_from_lli(
sep, lli_in_array,
dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].
in_num_pages,
lli_out_array,
dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].
out_num_pages,
block_size, lli_table_in_ptr, lli_table_out_ptr,
in_num_entries_ptr, out_num_entries_ptr,
table_data_size_ptr, dmatables_region, dma_ctx);
if (error) {
dev_warn(&sep->pdev->dev,
"[PID%d] sep_construct_dma_tables_from_lli failed\n",
current->pid);
goto end_function_with_error;
}
kfree(lli_out_array);
kfree(lli_in_array);
update_dcb_counter:
/* Update DCB counter */
dma_ctx->nr_dcb_creat++;
goto end_function;
end_function_with_error:
kfree(dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].out_map_array);
dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].out_map_array = NULL;
kfree(dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].out_page_array);
dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].out_page_array = NULL;
kfree(lli_out_array);
end_function_free_lli_in:
kfree(dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_map_array);
dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_map_array = NULL;
kfree(dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_page_array);
dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_page_array = NULL;
kfree(lli_in_array);
end_function:
return error;
}
/**
* sep_prepare_input_output_dma_table_in_dcb - prepare control blocks
* @app_in_address: unsigned long; for data buffer in (user space)
* @app_out_address: unsigned long; for data buffer out (user space)
* @data_in_size: u32; for size of data
* @block_size: u32; for block size
* @tail_block_size: u32; for size of tail block
* @isapplet: bool; to indicate external app
* @is_kva: bool; kernel buffer; only used for kernel crypto module
* @secure_dma: indicates whether this is secure_dma using IMR
* @dcb_region: DCB region to use, or NULL for the shared area DCB
* @dmatables_region: optional buffer for DMA tables
* @dma_ctx: DMA context
* @src_sg: source scatterlist (kernel crypto only)
* @dst_sg: destination scatterlist (kernel crypto only)
*
* This function prepares the linked DMA tables and puts the
* address for the linked list of tables into a DCB (data control
* block), the address of which is known by the SEP hardware
* Note that all bus addresses that are passed to the SEP
* are in 32 bit format; the SEP is a 32 bit device
*/
int sep_prepare_input_output_dma_table_in_dcb(struct sep_device *sep,
unsigned long app_in_address,
unsigned long app_out_address,
u32 data_in_size,
u32 block_size,
u32 tail_block_size,
bool isapplet,
bool is_kva,
bool secure_dma,
struct sep_dcblock *dcb_region,
void **dmatables_region,
struct sep_dma_context **dma_ctx,
struct scatterlist *src_sg,
struct scatterlist *dst_sg)
{
int error = 0;
/* Size of tail */
u32 tail_size = 0;
/* Address of the created DCB table */
struct sep_dcblock *dcb_table_ptr = NULL;
/* The physical address of the first input DMA table */
dma_addr_t in_first_mlli_address = 0;
/* Number of entries in the first input DMA table */
u32 in_first_num_entries = 0;
/* The physical address of the first output DMA table */
dma_addr_t out_first_mlli_address = 0;
/* Number of entries in the first output DMA table */
u32 out_first_num_entries = 0;
/* Data in the first input/output table */
u32 first_data_size = 0;
dev_dbg(&sep->pdev->dev, "[PID%d] app_in_address %lx\n",
current->pid, app_in_address);
dev_dbg(&sep->pdev->dev, "[PID%d] app_out_address %lx\n",
current->pid, app_out_address);
dev_dbg(&sep->pdev->dev, "[PID%d] data_in_size %x\n",
current->pid, data_in_size);
dev_dbg(&sep->pdev->dev, "[PID%d] block_size %x\n",
current->pid, block_size);
dev_dbg(&sep->pdev->dev, "[PID%d] tail_block_size %x\n",
current->pid, tail_block_size);
dev_dbg(&sep->pdev->dev, "[PID%d] isapplet %x\n",
current->pid, isapplet);
dev_dbg(&sep->pdev->dev, "[PID%d] is_kva %x\n",
current->pid, is_kva);
dev_dbg(&sep->pdev->dev, "[PID%d] src_sg %p\n",
current->pid, src_sg);
dev_dbg(&sep->pdev->dev, "[PID%d] dst_sg %p\n",
current->pid, dst_sg);
if (!dma_ctx) {
dev_warn(&sep->pdev->dev, "[PID%d] no DMA context pointer\n",
current->pid);
error = -EINVAL;
goto end_function;
}
if (*dma_ctx) {
/* In case there are multiple DCBs for this transaction */
dev_dbg(&sep->pdev->dev, "[PID%d] DMA context already set\n",
current->pid);
} else {
*dma_ctx = kzalloc(sizeof(**dma_ctx), GFP_KERNEL);
if (!(*dma_ctx)) {
dev_dbg(&sep->pdev->dev,
"[PID%d] Not enough memory for DMA context\n",
current->pid);
error = -ENOMEM;
goto end_function;
}
dev_dbg(&sep->pdev->dev,
"[PID%d] Created DMA context addr at 0x%p\n",
current->pid, *dma_ctx);
}
(*dma_ctx)->secure_dma = secure_dma;
/* these are for kernel crypto only */
(*dma_ctx)->src_sg = src_sg;
(*dma_ctx)->dst_sg = dst_sg;
if ((*dma_ctx)->nr_dcb_creat == SEP_MAX_NUM_SYNC_DMA_OPS) {
/* No more DCBs to allocate */
dev_dbg(&sep->pdev->dev, "[PID%d] no more DCBs available\n",
current->pid);
error = -ENOSPC;
goto end_function_error;
}
/* Allocate new DCB */
if (dcb_region) {
dcb_table_ptr = dcb_region;
} else {
dcb_table_ptr = (struct sep_dcblock *)(sep->shared_addr +
SEP_DRIVER_SYSTEM_DCB_MEMORY_OFFSET_IN_BYTES +
((*dma_ctx)->nr_dcb_creat *
sizeof(struct sep_dcblock)));
}
/* Set the default values in the DCB */
dcb_table_ptr->input_mlli_address = 0;
dcb_table_ptr->input_mlli_num_entries = 0;
dcb_table_ptr->input_mlli_data_size = 0;
dcb_table_ptr->output_mlli_address = 0;
dcb_table_ptr->output_mlli_num_entries = 0;
dcb_table_ptr->output_mlli_data_size = 0;
dcb_table_ptr->tail_data_size = 0;
dcb_table_ptr->out_vr_tail_pt = 0;
if (isapplet == true) {
/* Check if there is enough data for DMA operation */
if (data_in_size < SEP_DRIVER_MIN_DATA_SIZE_PER_TABLE) {
if (is_kva == true) {
error = -ENODEV;
goto end_function_error;
} else {
if (copy_from_user(dcb_table_ptr->tail_data,
(void __user *)app_in_address,
data_in_size)) {
error = -EFAULT;
goto end_function_error;
}
}
dcb_table_ptr->tail_data_size = data_in_size;
/* Set the output user-space address for mem2mem op */
if (app_out_address)
dcb_table_ptr->out_vr_tail_pt =
(aligned_u64)app_out_address;
/*
* Update both data length parameters in order to avoid
* second data copy and allow building of empty mlli
* tables
*/
tail_size = 0x0;
data_in_size = 0x0;
} else {
if (!app_out_address) {
tail_size = data_in_size % block_size;
if (!tail_size) {
if (tail_block_size == block_size)
tail_size = block_size;
}
} else {
tail_size = 0;
}
}
if (tail_size) {
if (tail_size > sizeof(dcb_table_ptr->tail_data)) {
/* Do not leak the DMA context on this error path */
error = -EINVAL;
goto end_function_error;
}
if (is_kva == true) {
error = -ENODEV;
goto end_function_error;
} else {
/* We have tail data - copy it to DCB */
if (copy_from_user(dcb_table_ptr->tail_data,
(void __user *)(app_in_address +
data_in_size - tail_size), tail_size)) {
error = -EFAULT;
goto end_function_error;
}
}
if (app_out_address)
/*
* Calculate the output address
* according to tail data size
*/
dcb_table_ptr->out_vr_tail_pt =
(aligned_u64)app_out_address +
data_in_size - tail_size;
/* Save the real tail data size */
dcb_table_ptr->tail_data_size = tail_size;
/*
* Update the data size without the tail
* data size AKA data for the dma
*/
data_in_size = (data_in_size - tail_size);
}
}
/* Check if we need to build only input table or input/output */
if (app_out_address) {
/* Prepare input/output tables */
error = sep_prepare_input_output_dma_table(sep,
app_in_address,
app_out_address,
data_in_size,
block_size,
&in_first_mlli_address,
&out_first_mlli_address,
&in_first_num_entries,
&out_first_num_entries,
&first_data_size,
is_kva,
dmatables_region,
*dma_ctx);
} else {
/* Prepare input tables */
error = sep_prepare_input_dma_table(sep,
app_in_address,
data_in_size,
block_size,
&in_first_mlli_address,
&in_first_num_entries,
&first_data_size,
is_kva,
dmatables_region,
*dma_ctx);
}
if (error) {
dev_warn(&sep->pdev->dev,
"prepare DMA table call failed "
"from prepare DCB call\n");
goto end_function_error;
}
/* Set the DCB values */
dcb_table_ptr->input_mlli_address = in_first_mlli_address;
dcb_table_ptr->input_mlli_num_entries = in_first_num_entries;
dcb_table_ptr->input_mlli_data_size = first_data_size;
dcb_table_ptr->output_mlli_address = out_first_mlli_address;
dcb_table_ptr->output_mlli_num_entries = out_first_num_entries;
dcb_table_ptr->output_mlli_data_size = first_data_size;
goto end_function;
end_function_error:
kfree(*dma_ctx);
*dma_ctx = NULL;
end_function:
return error;
}
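/*
* Tail-size example for the DCB preparation above (values invented): for
* an applet with data_in_size = 100, block_size = 16 and no output
* address, tail_size = 100 % 16 = 4, so 96 bytes go through the DMA
* tables and the trailing 4 bytes are copied into
* dcb_table_ptr->tail_data (and copied back to the user's output buffer
* when the DCB is freed). When the size is already block-aligned, a full
* block is treated as tail only if tail_block_size equals block_size.
*/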
/**
* sep_free_dma_tables_and_dcb - free DMA tables and DCBs
* @sep: pointer to struct sep_device
* @isapplet: indicates external application (used for kernel access)
* @is_kva: indicates kernel addresses (only used for kernel crypto)
* @dma_ctx: DMA context
*
* This function frees the DMA tables and DCB
*/
static int sep_free_dma_tables_and_dcb(struct sep_device *sep, bool isapplet,
bool is_kva, struct sep_dma_context **dma_ctx)
{
struct sep_dcblock *dcb_table_ptr;
unsigned long pt_hold;
void *tail_pt;
int i = 0;
int error = 0;
int error_temp = 0;
dev_dbg(&sep->pdev->dev, "[PID%d] sep_free_dma_tables_and_dcb\n",
current->pid);
if (((*dma_ctx)->secure_dma == false) && (isapplet == true)) {
dev_dbg(&sep->pdev->dev, "[PID%d] handling applet\n",
current->pid);
/* Tail stuff is only for non secure_dma */
/* Set pointer to first DCB table */
dcb_table_ptr = (struct sep_dcblock *)
(sep->shared_addr +
SEP_DRIVER_SYSTEM_DCB_MEMORY_OFFSET_IN_BYTES);
/*
* Go over each DCB and see if
* tail pointer must be updated
*/
for (i = 0; dma_ctx && *dma_ctx &&
i < (*dma_ctx)->nr_dcb_creat; i++, dcb_table_ptr++) {
if (dcb_table_ptr->out_vr_tail_pt) {
pt_hold = (unsigned long)dcb_table_ptr->
out_vr_tail_pt;
tail_pt = (void *)pt_hold;
if (is_kva == true) {
error = -ENODEV;
break;
} else {
error_temp = copy_to_user(
(void __user *)tail_pt,
dcb_table_ptr->tail_data,
dcb_table_ptr->tail_data_size);
}
if (error_temp) {
/* Release the DMA resource */
error = -EFAULT;
break;
}
}
}
}
/* Free the output pages, if any */
sep_free_dma_table_data_handler(sep, dma_ctx);
dev_dbg(&sep->pdev->dev, "[PID%d] sep_free_dma_tables_and_dcb end\n",
current->pid);
return error;
}
/**
* sep_prepare_dcb_handler - prepare a control block
* @sep: pointer to struct sep_device
* @arg: pointer to user parameters
* @secure_dma: indicate whether we are using secure_dma on IMR
* @dma_ctx: DMA context
*
* This function copies the build_dcb_struct arguments from user
* space and prepares the DMA tables and DCB for the transaction.
*/
static int sep_prepare_dcb_handler(struct sep_device *sep, unsigned long arg,
bool secure_dma,
struct sep_dma_context **dma_ctx)
{
int error;
/* Command arguments (on stack; a static here would race between callers) */
struct build_dcb_struct command_args;
/* Get the command arguments */
if (copy_from_user(&command_args, (void __user *)arg,
sizeof(struct build_dcb_struct))) {
error = -EFAULT;
goto end_function;
}
dev_dbg(&sep->pdev->dev,
"[PID%d] prep dcb handler app_in_address is %08llx\n",
current->pid, command_args.app_in_address);
dev_dbg(&sep->pdev->dev,
"[PID%d] app_out_address is %08llx\n",
current->pid, command_args.app_out_address);
dev_dbg(&sep->pdev->dev,
"[PID%d] data_size is %x\n",
current->pid, command_args.data_in_size);
dev_dbg(&sep->pdev->dev,
"[PID%d] block_size is %x\n",
current->pid, command_args.block_size);
dev_dbg(&sep->pdev->dev,
"[PID%d] tail block_size is %x\n",
current->pid, command_args.tail_block_size);
dev_dbg(&sep->pdev->dev,
"[PID%d] is_applet is %x\n",
current->pid, command_args.is_applet);
if (!command_args.app_in_address) {
dev_warn(&sep->pdev->dev,
"[PID%d] null app_in_address\n", current->pid);
error = -EINVAL;
goto end_function;
}
error = sep_prepare_input_output_dma_table_in_dcb(sep,
(unsigned long)command_args.app_in_address,
(unsigned long)command_args.app_out_address,
command_args.data_in_size, command_args.block_size,
command_args.tail_block_size,
command_args.is_applet, false,
secure_dma, NULL, NULL, dma_ctx, NULL, NULL);
end_function:
return error;
}
/**
* sep_free_dcb_handler - free control block resources
* @sep: pointer to struct sep_device
* @dma_ctx: DMA context
*
* This function frees the DCB resources and updates the needed
* user-space buffers.
*/
static int sep_free_dcb_handler(struct sep_device *sep,
struct sep_dma_context **dma_ctx)
{
if (!dma_ctx || !(*dma_ctx)) {
dev_dbg(&sep->pdev->dev,
"[PID%d] no dma context defined, nothing to free\n",
current->pid);
return -EINVAL;
}
dev_dbg(&sep->pdev->dev, "[PID%d] free dcbs num of DCBs %x\n",
current->pid,
(*dma_ctx)->nr_dcb_creat);
return sep_free_dma_tables_and_dcb(sep, false, false, dma_ctx);
}
/**
* sep_ioctl - ioctl handler for sep device
* @filp: pointer to struct file
* @cmd: command
* @arg: pointer to argument structure
*
* Implement the ioctl methods available on the SEP device.
*/
static long sep_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
struct sep_private_data * const private_data = filp->private_data;
struct sep_call_status *call_status = &private_data->call_status;
struct sep_device *sep = private_data->device;
struct sep_dma_context **dma_ctx = &private_data->dma_ctx;
struct sep_queue_info **my_queue_elem = &private_data->my_queue_elem;
int error = 0;
dev_dbg(&sep->pdev->dev, "[PID%d] ioctl cmd 0x%x\n",
current->pid, cmd);
dev_dbg(&sep->pdev->dev, "[PID%d] dma context addr 0x%p\n",
current->pid, *dma_ctx);
/* Make sure we own this device */
error = sep_check_transaction_owner(sep);
if (error) {
dev_dbg(&sep->pdev->dev, "[PID%d] ioctl pid is not owner\n",
current->pid);
goto end_function;
}
/* Check that sep_mmap has been called before */
if (0 == test_bit(SEP_LEGACY_MMAP_DONE_OFFSET,
&call_status->status)) {
dev_dbg(&sep->pdev->dev,
"[PID%d] mmap not called\n", current->pid);
error = -EPROTO;
goto end_function;
}
/* Check that the command is for SEP device */
if (_IOC_TYPE(cmd) != SEP_IOC_MAGIC_NUMBER) {
error = -ENOTTY;
goto end_function;
}
switch (cmd) {
case SEP_IOCSENDSEPCOMMAND:
dev_dbg(&sep->pdev->dev,
"[PID%d] SEP_IOCSENDSEPCOMMAND start\n",
current->pid);
if (1 == test_bit(SEP_LEGACY_SENDMSG_DONE_OFFSET,
&call_status->status)) {
dev_warn(&sep->pdev->dev,
"[PID%d] send msg already done\n",
current->pid);
error = -EPROTO;
goto end_function;
}
/* Send command to SEP */
error = sep_send_command_handler(sep);
if (!error)
set_bit(SEP_LEGACY_SENDMSG_DONE_OFFSET,
&call_status->status);
dev_dbg(&sep->pdev->dev,
"[PID%d] SEP_IOCSENDSEPCOMMAND end\n",
current->pid);
break;
case SEP_IOCENDTRANSACTION:
dev_dbg(&sep->pdev->dev,
"[PID%d] SEP_IOCENDTRANSACTION start\n",
current->pid);
error = sep_end_transaction_handler(sep, dma_ctx, call_status,
my_queue_elem);
dev_dbg(&sep->pdev->dev,
"[PID%d] SEP_IOCENDTRANSACTION end\n",
current->pid);
break;
case SEP_IOCPREPAREDCB:
dev_dbg(&sep->pdev->dev,
"[PID%d] SEP_IOCPREPAREDCB start\n",
current->pid);
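/* fall through; the shared handler below re-checks cmd for secure_dma */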
case SEP_IOCPREPAREDCB_SECURE_DMA:
dev_dbg(&sep->pdev->dev,
"[PID%d] SEP_IOCPREPAREDCB_SECURE_DMA start\n",
current->pid);
if (1 == test_bit(SEP_LEGACY_SENDMSG_DONE_OFFSET,
&call_status->status)) {
dev_dbg(&sep->pdev->dev,
"[PID%d] dcb prep needed before send msg\n",
current->pid);
error = -EPROTO;
goto end_function;
}
if (!arg) {
dev_dbg(&sep->pdev->dev,
"[PID%d] dcb null arg\n", current->pid);
error = -EINVAL;
goto end_function;
}
if (cmd == SEP_IOCPREPAREDCB) {
/* No secure dma */
dev_dbg(&sep->pdev->dev,
"[PID%d] SEP_IOCPREPAREDCB (no secure_dma)\n",
current->pid);
error = sep_prepare_dcb_handler(sep, arg, false,
dma_ctx);
} else {
/* Secure dma */
dev_dbg(&sep->pdev->dev,
"[PID%d] SEP_IOC_POC (with secure_dma)\n",
current->pid);
error = sep_prepare_dcb_handler(sep, arg, true,
dma_ctx);
}
dev_dbg(&sep->pdev->dev, "[PID%d] dcb's end\n",
current->pid);
break;
case SEP_IOCFREEDCB:
dev_dbg(&sep->pdev->dev, "[PID%d] SEP_IOCFREEDCB start\n",
current->pid);
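/* fall through; both commands free the DCBs the same way */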
case SEP_IOCFREEDCB_SECURE_DMA:
dev_dbg(&sep->pdev->dev,
"[PID%d] SEP_IOCFREEDCB_SECURE_DMA start\n",
current->pid);
error = sep_free_dcb_handler(sep, dma_ctx);
dev_dbg(&sep->pdev->dev, "[PID%d] SEP_IOCFREEDCB end\n",
current->pid);
break;
default:
error = -ENOTTY;
dev_dbg(&sep->pdev->dev, "[PID%d] default end\n",
current->pid);
break;
}
end_function:
dev_dbg(&sep->pdev->dev, "[PID%d] ioctl end\n", current->pid);
return error;
}
/**
* sep_inthandler - interrupt handler for sep device
* @irq: interrupt
* @dev_id: device id
*/
static irqreturn_t sep_inthandler(int irq, void *dev_id)
{
unsigned long lock_irq_flag;
u32 reg_val, reg_val2 = 0;
struct sep_device *sep = dev_id;
irqreturn_t int_error = IRQ_HANDLED;
/* Are we in power save? */
#if defined(CONFIG_PM_RUNTIME) && defined(SEP_ENABLE_RUNTIME_PM)
if (sep->pdev->dev.power.runtime_status != RPM_ACTIVE) {
dev_dbg(&sep->pdev->dev, "interrupt during pwr save\n");
return IRQ_NONE;
}
#endif
if (test_bit(SEP_WORKING_LOCK_BIT, &sep->in_use_flags) == 0) {
dev_dbg(&sep->pdev->dev, "interrupt while nobody using sep\n");
return IRQ_NONE;
}
/* Read the IRR register to check if this is SEP interrupt */
reg_val = sep_read_reg(sep, HW_HOST_IRR_REG_ADDR);
dev_dbg(&sep->pdev->dev, "sep int: IRR REG val: %x\n", reg_val);
if (reg_val & (0x1 << 13)) {
/* Lock and update the counter of reply messages */
spin_lock_irqsave(&sep->snd_rply_lck, lock_irq_flag);
sep->reply_ct++;
spin_unlock_irqrestore(&sep->snd_rply_lck, lock_irq_flag);
dev_dbg(&sep->pdev->dev, "sep int: send_ct %lx reply_ct %lx\n",
sep->send_ct, sep->reply_ct);
/* Is this a kernel client request */
if (sep->in_kernel) {
tasklet_schedule(&sep->finish_tasklet);
goto finished_interrupt;
}
/* Is this printf or daemon request? */
reg_val2 = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR2_REG_ADDR);
dev_dbg(&sep->pdev->dev,
"SEP Interrupt - GPR2 is %08x\n", reg_val2);
clear_bit(SEP_WORKING_LOCK_BIT, &sep->in_use_flags);
if ((reg_val2 >> 30) & 0x1) {
dev_dbg(&sep->pdev->dev, "int: printf request\n");
} else if (reg_val2 >> 31) {
dev_dbg(&sep->pdev->dev, "int: daemon request\n");
} else {
dev_dbg(&sep->pdev->dev, "int: SEP reply\n");
wake_up(&sep->event_interrupt);
}
} else {
dev_dbg(&sep->pdev->dev, "int: not SEP interrupt\n");
int_error = IRQ_NONE;
}
finished_interrupt:
if (int_error == IRQ_HANDLED)
sep_write_reg(sep, HW_HOST_ICR_REG_ADDR, reg_val);
return int_error;
}
/**
* sep_reconfig_shared_area - reconfigure shared area
* @sep: pointer to struct sep_device
*
* Reconfig the shared area between HOST and SEP - needed in case
* the DX_CC_Init function was called before OS loading.
*/
static int sep_reconfig_shared_area(struct sep_device *sep)
{
int ret_val;
/* used to limit waiting for SEP */
unsigned long end_time;
/* Send the new SHARED MESSAGE AREA to the SEP */
dev_dbg(&sep->pdev->dev, "reconfig shared; sending %08llx to sep\n",
(unsigned long long)sep->shared_bus);
sep_write_reg(sep, HW_HOST_HOST_SEP_GPR1_REG_ADDR, sep->shared_bus);
/* Poll for SEP response */
ret_val = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR1_REG_ADDR);
end_time = jiffies + (WAIT_TIME * HZ);
while ((time_before(jiffies, end_time)) && (ret_val != 0xffffffff) &&
(ret_val != sep->shared_bus))
ret_val = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR1_REG_ADDR);
/* Check the return value (register) */
if (ret_val != sep->shared_bus) {
dev_warn(&sep->pdev->dev, "could not reconfig shared area\n");
dev_warn(&sep->pdev->dev, "result was %x\n", ret_val);
ret_val = -ENOMEM;
} else
ret_val = 0;
dev_dbg(&sep->pdev->dev, "reconfig shared area end\n");
return ret_val;
}
/**
* sep_activate_dcb_dmatables_context - Takes DCB & DMA tables
* contexts into use
* @sep: SEP device
* @dcb_region: DCB region copy
* @dmatables_region: MLLI/DMA tables copy
* @dma_ctx: DMA context for current transaction
*/
ssize_t sep_activate_dcb_dmatables_context(struct sep_device *sep,
struct sep_dcblock **dcb_region,
void **dmatables_region,
struct sep_dma_context *dma_ctx)
{
void *dmaregion_free_start = NULL;
void *dmaregion_free_end = NULL;
void *dcbregion_free_start = NULL;
void *dcbregion_free_end = NULL;
ssize_t error = 0;
dev_dbg(&sep->pdev->dev, "[PID%d] activating dcb/dma region\n",
current->pid);
if (1 > dma_ctx->nr_dcb_creat) {
dev_warn(&sep->pdev->dev,
"[PID%d] invalid number of dcbs to activate 0x%08X\n",
current->pid, dma_ctx->nr_dcb_creat);
error = -EINVAL;
goto end_function;
}
dmaregion_free_start = sep->shared_addr
+ SYNCHRONIC_DMA_TABLES_AREA_OFFSET_BYTES;
dmaregion_free_end = dmaregion_free_start
+ SYNCHRONIC_DMA_TABLES_AREA_SIZE_BYTES - 1;
if (dmaregion_free_start
+ dma_ctx->dmatables_len > dmaregion_free_end) {
error = -ENOMEM;
goto end_function;
}
memcpy(dmaregion_free_start,
*dmatables_region,
dma_ctx->dmatables_len);
/* Free MLLI table copy */
kfree(*dmatables_region);
*dmatables_region = NULL;
/* Copy thread's DCB table copy to DCB table region */
dcbregion_free_start = sep->shared_addr +
SEP_DRIVER_SYSTEM_DCB_MEMORY_OFFSET_IN_BYTES;
dcbregion_free_end = dcbregion_free_start +
(SEP_MAX_NUM_SYNC_DMA_OPS *
sizeof(struct sep_dcblock)) - 1;
if (dcbregion_free_start
+ (dma_ctx->nr_dcb_creat * sizeof(struct sep_dcblock))
> dcbregion_free_end) {
error = -ENOMEM;
goto end_function;
}
memcpy(dcbregion_free_start,
*dcb_region,
dma_ctx->nr_dcb_creat * sizeof(struct sep_dcblock));
/* Print the tables */
dev_dbg(&sep->pdev->dev, "activate: input table\n");
sep_debug_print_lli_tables(sep,
(struct sep_lli_entry *)sep_shared_area_bus_to_virt(sep,
(*dcb_region)->input_mlli_address),
(*dcb_region)->input_mlli_num_entries,
(*dcb_region)->input_mlli_data_size);
dev_dbg(&sep->pdev->dev, "activate: output table\n");
sep_debug_print_lli_tables(sep,
(struct sep_lli_entry *)sep_shared_area_bus_to_virt(sep,
(*dcb_region)->output_mlli_address),
(*dcb_region)->output_mlli_num_entries,
(*dcb_region)->output_mlli_data_size);
dev_dbg(&sep->pdev->dev,
"[PID%d] printing activated tables\n", current->pid);
end_function:
kfree(*dmatables_region);
*dmatables_region = NULL;
kfree(*dcb_region);
*dcb_region = NULL;
return error;
}
/**
* sep_create_dcb_dmatables_context - Creates DCB & MLLI/DMA table context
* @sep: SEP device
* @dcb_region: DCB region buf to create for current transaction
* @dmatables_region: MLLI/DMA tables buf to create for current transaction
* @dma_ctx: DMA context buf to create for current transaction
* @user_dcb_args: User arguments for DCB/MLLI creation
* @num_dcbs: Number of DCBs to create
* @secure_dma: Indicate use of IMR restricted memory secure dma
*/
static ssize_t sep_create_dcb_dmatables_context(struct sep_device *sep,
struct sep_dcblock **dcb_region,
void **dmatables_region,
struct sep_dma_context **dma_ctx,
const struct build_dcb_struct __user *user_dcb_args,
const u32 num_dcbs, bool secure_dma)
{
int error = 0;
int i = 0;
struct build_dcb_struct *dcb_args = NULL;
dev_dbg(&sep->pdev->dev, "[PID%d] creating dcb/dma region\n",
current->pid);
if (!dcb_region || !dma_ctx || !dmatables_region || !user_dcb_args) {
error = -EINVAL;
goto end_function;
}
if (SEP_MAX_NUM_SYNC_DMA_OPS < num_dcbs) {
dev_warn(&sep->pdev->dev,
"[PID%d] invalid number of dcbs 0x%08X\n",
current->pid, num_dcbs);
error = -EINVAL;
goto end_function;
}
dcb_args = kzalloc(num_dcbs * sizeof(struct build_dcb_struct),
GFP_KERNEL);
if (!dcb_args) {
dev_warn(&sep->pdev->dev, "[PID%d] no memory for dcb args\n",
current->pid);
error = -ENOMEM;
goto end_function;
}
if (copy_from_user(dcb_args,
user_dcb_args,
num_dcbs * sizeof(struct build_dcb_struct))) {
error = -EINVAL;
goto end_function;
}
/* Allocate thread-specific memory for DCB */
*dcb_region = kzalloc(num_dcbs * sizeof(struct sep_dcblock),
GFP_KERNEL);
if (!(*dcb_region)) {
error = -ENOMEM;
goto end_function;
}
/* Prepare DCB and MLLI table into the allocated regions */
for (i = 0; i < num_dcbs; i++) {
error = sep_prepare_input_output_dma_table_in_dcb(sep,
(unsigned long)dcb_args[i].app_in_address,
(unsigned long)dcb_args[i].app_out_address,
dcb_args[i].data_in_size,
dcb_args[i].block_size,
dcb_args[i].tail_block_size,
dcb_args[i].is_applet,
false, secure_dma,
*dcb_region, dmatables_region,
dma_ctx,
NULL,
NULL);
if (error) {
dev_warn(&sep->pdev->dev,
"[PID%d] dma table creation failed\n",
current->pid);
goto end_function;
}
if (dcb_args[i].app_in_address != 0)
(*dma_ctx)->input_data_len += dcb_args[i].data_in_size;
}
end_function:
kfree(dcb_args);
return error;
}
/**
* sep_create_dcb_dmatables_context_kernel - Creates DCB & MLLI/DMA table context
* for kernel crypto
* @sep: SEP device
* @dcb_region: DCB region buf to create for current transaction
* @dmatables_region: MLLI/DMA tables buf to create for current transaction
* @dma_ctx: DMA context buf to create for current transaction
* @dcb_data: Kernel arguments for DCB/MLLI creation
* @num_dcbs: Number of DCBs to create
*
* This does the same thing as sep_create_dcb_dmatables_context
* except that it is used only for the kernel crypto operation. It is
* separate because there is no user data involved; the dcb data structure
* is specific for kernel crypto (build_dcb_struct_kernel)
*/
int sep_create_dcb_dmatables_context_kernel(struct sep_device *sep,
struct sep_dcblock **dcb_region,
void **dmatables_region,
struct sep_dma_context **dma_ctx,
const struct build_dcb_struct_kernel *dcb_data,
const u32 num_dcbs)
{
int error = 0;
int i = 0;
dev_dbg(&sep->pdev->dev, "[PID%d] creating dcb/dma region\n",
current->pid);
if (!dcb_region || !dma_ctx || !dmatables_region || !dcb_data) {
error = -EINVAL;
goto end_function;
}
if (SEP_MAX_NUM_SYNC_DMA_OPS < num_dcbs) {
dev_warn(&sep->pdev->dev,
"[PID%d] invalid number of dcbs 0x%08X\n",
current->pid, num_dcbs);
error = -EINVAL;
goto end_function;
}
dev_dbg(&sep->pdev->dev, "[PID%d] num_dcbs is %d\n",
current->pid, num_dcbs);
/* Allocate thread-specific memory for DCB */
*dcb_region = kzalloc(num_dcbs * sizeof(struct sep_dcblock),
GFP_KERNEL);
if (!(*dcb_region)) {
error = -ENOMEM;
goto end_function;
}
/* Prepare DCB and MLLI table into the allocated regions */
for (i = 0; i < num_dcbs; i++) {
error = sep_prepare_input_output_dma_table_in_dcb(sep,
(unsigned long)dcb_data->app_in_address,
(unsigned long)dcb_data->app_out_address,
dcb_data->data_in_size,
dcb_data->block_size,
dcb_data->tail_block_size,
dcb_data->is_applet,
true,
false,
*dcb_region, dmatables_region,
dma_ctx,
dcb_data->src_sg,
dcb_data->dst_sg);
if (error) {
dev_warn(&sep->pdev->dev,
"[PID%d] dma table creation failed\n",
current->pid);
goto end_function;
}
}
end_function:
return error;
}
/**
* sep_activate_msgarea_context - Takes the message area context into use
* @sep: SEP device
* @msg_region: Message area context buf
* @msg_len: Message area context buffer size
*/
static ssize_t sep_activate_msgarea_context(struct sep_device *sep,
void **msg_region,
const size_t msg_len)
{
dev_dbg(&sep->pdev->dev, "[PID%d] activating msg region\n",
current->pid);
if (!msg_region || !(*msg_region) ||
SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES < msg_len) {
dev_warn(&sep->pdev->dev,
"[PID%d] invalid act msgarea len 0x%08zX\n",
current->pid, msg_len);
return -EINVAL;
}
memcpy(sep->shared_addr, *msg_region, msg_len);
return 0;
}
/**
* sep_create_msgarea_context - Creates message area context
* @sep: SEP device
* @msg_region: Msg area region buf to create for current transaction
* @msg_user: Content for msg area region from user
* @msg_len: Message area size
*/
static ssize_t sep_create_msgarea_context(struct sep_device *sep,
void **msg_region,
const void __user *msg_user,
const size_t msg_len)
{
int error = 0;
dev_dbg(&sep->pdev->dev, "[PID%d] creating msg region\n",
current->pid);
if (!msg_region ||
!msg_user ||
SEP_DRIVER_MAX_MESSAGE_SIZE_IN_BYTES < msg_len ||
SEP_DRIVER_MIN_MESSAGE_SIZE_IN_BYTES > msg_len) {
dev_warn(&sep->pdev->dev,
"[PID%d] invalid creat msgarea len 0x%08zX\n",
current->pid, msg_len);
error = -EINVAL;
goto end_function;
}
/* Allocate thread-specific memory for message buffer */
*msg_region = kzalloc(msg_len, GFP_KERNEL);
if (!(*msg_region)) {
dev_warn(&sep->pdev->dev,
"[PID%d] no mem for msgarea context\n",
current->pid);
error = -ENOMEM;
goto end_function;
}
/* Copy input data to write() to allocated message buffer */
if (copy_from_user(*msg_region, msg_user, msg_len)) {
error = -EINVAL;
goto end_function;
}
end_function:
if (error && msg_region) {
kfree(*msg_region);
*msg_region = NULL;
}
return error;
}
/**
* sep_read - Returns results of an operation for fastcall interface
* @filp: File pointer
* @buf_user: User buffer for storing results
* @count_user: User buffer size
* @offset: File offset, not supported
*
* The implementation does not support reading in chunks, all data must be
* consumed during a single read system call.
*/
static ssize_t sep_read(struct file *filp,
char __user *buf_user, size_t count_user,
loff_t *offset)
{
struct sep_private_data * const private_data = filp->private_data;
struct sep_call_status *call_status = &private_data->call_status;
struct sep_device *sep = private_data->device;
struct sep_dma_context **dma_ctx = &private_data->dma_ctx;
struct sep_queue_info **my_queue_elem = &private_data->my_queue_elem;
ssize_t error = 0, error_tmp = 0;
/* Am I the process that owns the transaction? */
error = sep_check_transaction_owner(sep);
if (error) {
dev_dbg(&sep->pdev->dev, "[PID%d] read pid is not owner\n",
current->pid);
goto end_function;
}
/* Check that the user has called the necessary APIs */
if (0 == test_bit(SEP_FASTCALL_WRITE_DONE_OFFSET,
&call_status->status)) {
dev_warn(&sep->pdev->dev,
"[PID%d] fastcall write not called\n",
current->pid);
error = -EPROTO;
goto end_function_error;
}
if (!buf_user) {
dev_warn(&sep->pdev->dev,
"[PID%d] null user buffer\n",
current->pid);
error = -EINVAL;
goto end_function_error;
}
/* Wait for SEP to finish */
wait_event(sep->event_interrupt,
test_bit(SEP_WORKING_LOCK_BIT,
&sep->in_use_flags) == 0);
sep_dump_message(sep);
dev_dbg(&sep->pdev->dev, "[PID%d] count_user = 0x%08zX\n",
current->pid, count_user);
/* In case user has allocated bigger buffer */
if (count_user > SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES)
count_user = SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES;
if (copy_to_user(buf_user, sep->shared_addr, count_user)) {
error = -EFAULT;
goto end_function_error;
}
dev_dbg(&sep->pdev->dev, "[PID%d] read succeeded\n", current->pid);
error = count_user;
end_function_error:
/* Copy possible tail data to user and free DCB and MLLIs */
error_tmp = sep_free_dcb_handler(sep, dma_ctx);
if (error_tmp)
dev_warn(&sep->pdev->dev, "[PID%d] dcb free failed\n",
current->pid);
/* End the transaction, wakeup pending ones */
error_tmp = sep_end_transaction_handler(sep, dma_ctx, call_status,
my_queue_elem);
if (error_tmp)
dev_warn(&sep->pdev->dev,
"[PID%d] ending transaction failed\n",
current->pid);
end_function:
return error;
}
/**
* sep_fastcall_args_get - Gets fastcall params from user
* @sep: SEP device
* @args: Parameters buffer
* @buf_user: User buffer for operation parameters
* @count_user: User buffer size
*/
static inline ssize_t sep_fastcall_args_get(struct sep_device *sep,
struct sep_fastcall_hdr *args,
const char __user *buf_user,
const size_t count_user)
{
ssize_t error = 0;
size_t actual_count = 0;
if (!buf_user) {
dev_warn(&sep->pdev->dev,
"[PID%d] null user buffer\n",
current->pid);
error = -EINVAL;
goto end_function;
}
if (count_user < sizeof(struct sep_fastcall_hdr)) {
dev_warn(&sep->pdev->dev,
"[PID%d] too small message size 0x%08zX\n",
current->pid, count_user);
error = -EINVAL;
goto end_function;
}
if (copy_from_user(args, buf_user, sizeof(struct sep_fastcall_hdr))) {
error = -EFAULT;
goto end_function;
}
if (SEP_FC_MAGIC != args->magic) {
dev_warn(&sep->pdev->dev,
"[PID%d] invalid fastcall magic 0x%08X\n",
current->pid, args->magic);
error = -EINVAL;
goto end_function;
}
dev_dbg(&sep->pdev->dev, "[PID%d] fastcall hdr num of DCBs 0x%08X\n",
current->pid, args->num_dcbs);
dev_dbg(&sep->pdev->dev, "[PID%d] fastcall hdr msg len 0x%08X\n",
current->pid, args->msg_len);
if (SEP_DRIVER_MAX_MESSAGE_SIZE_IN_BYTES < args->msg_len ||
SEP_DRIVER_MIN_MESSAGE_SIZE_IN_BYTES > args->msg_len) {
dev_warn(&sep->pdev->dev,
"[PID%d] invalid message length\n",
current->pid);
error = -EINVAL;
goto end_function;
}
actual_count = sizeof(struct sep_fastcall_hdr)
+ args->msg_len
+ (args->num_dcbs * sizeof(struct build_dcb_struct));
if (actual_count != count_user) {
dev_warn(&sep->pdev->dev,
"[PID%d] inconsistent message "
"sizes 0x%08zX vs 0x%08zX\n",
current->pid, actual_count, count_user);
error = -EMSGSIZE;
goto end_function;
}
end_function:
return error;
}
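/*
* Layout of a valid fastcall write buffer, as checked above (field order
* follows how sep_write() consumes it):
*
*	[ struct sep_fastcall_hdr            ]  magic, msg_len, num_dcbs
*	[ num_dcbs * struct build_dcb_struct ]  DCB build arguments
*	[ msg_len bytes of message content   ]
*
* count_user must equal the sum of the three parts exactly; chunked
* writes are rejected with -EMSGSIZE.
*/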
/**
* sep_write - Starts an operation for fastcall interface
* @filp: File pointer
* @buf_user: User buffer for operation parameters
* @count_user: User buffer size
* @offset: File offset, not supported
*
* The implementation does not support writing in chunks,
* all data must be given during a single write system call.
*/
static ssize_t sep_write(struct file *filp,
const char __user *buf_user, size_t count_user,
loff_t *offset)
{
struct sep_private_data * const private_data = filp->private_data;
struct sep_call_status *call_status = &private_data->call_status;
struct sep_device *sep = private_data->device;
struct sep_dma_context *dma_ctx = NULL;
struct sep_fastcall_hdr call_hdr = {0};
void *msg_region = NULL;
void *dmatables_region = NULL;
struct sep_dcblock *dcb_region = NULL;
ssize_t error = 0;
struct sep_queue_info *my_queue_elem = NULL;
bool my_secure_dma; /* are we using secure_dma (IMR)? */
dev_dbg(&sep->pdev->dev, "[PID%d] sep dev is 0x%p\n",
current->pid, sep);
dev_dbg(&sep->pdev->dev, "[PID%d] private_data is 0x%p\n",
current->pid, private_data);
error = sep_fastcall_args_get(sep, &call_hdr, buf_user, count_user);
if (error)
goto end_function;
buf_user += sizeof(struct sep_fastcall_hdr);
if (call_hdr.secure_dma == 0)
my_secure_dma = false;
else
my_secure_dma = true;
/*
* Controlling driver memory usage by limiting amount of
* buffers created. Only SEP_DOUBLEBUF_USERS_LIMIT number
* of threads can progress further at a time
*/
dev_dbg(&sep->pdev->dev, "[PID%d] waiting for double buffering "
"region access\n", current->pid);
error = down_interruptible(&sep->sep_doublebuf);
dev_dbg(&sep->pdev->dev, "[PID%d] double buffering region start\n",
current->pid);
if (error) {
/* Signal received */
goto end_function_error;
}
/*
* Prepare contents of the shared area regions for
* the operation into temporary buffers
*/
if (0 < call_hdr.num_dcbs) {
error = sep_create_dcb_dmatables_context(sep,
&dcb_region,
&dmatables_region,
&dma_ctx,
(const struct build_dcb_struct __user *)
buf_user,
call_hdr.num_dcbs, my_secure_dma);
if (error)
goto end_function_error_doublebuf;
buf_user += call_hdr.num_dcbs * sizeof(struct build_dcb_struct);
}
error = sep_create_msgarea_context(sep,
&msg_region,
buf_user,
call_hdr.msg_len);
if (error)
goto end_function_error_doublebuf;
dev_dbg(&sep->pdev->dev, "[PID%d] updating queue status\n",
current->pid);
my_queue_elem = sep_queue_status_add(sep,
((struct sep_msgarea_hdr *)msg_region)->opcode,
(dma_ctx) ? dma_ctx->input_data_len : 0,
current->pid,
current->comm, sizeof(current->comm));
if (!my_queue_elem) {
dev_dbg(&sep->pdev->dev, "[PID%d] updating queue"
"status error\n", current->pid);
error = -ENOMEM;
goto end_function_error_doublebuf;
}
/* Wait until current process gets the transaction */
error = sep_wait_transaction(sep);
if (error) {
/* Interrupted by signal, don't clear transaction */
dev_dbg(&sep->pdev->dev, "[PID%d] interrupted by signal\n",
current->pid);
sep_queue_status_remove(sep, &my_queue_elem);
goto end_function_error_doublebuf;
}
dev_dbg(&sep->pdev->dev, "[PID%d] saving queue element\n",
current->pid);
private_data->my_queue_elem = my_queue_elem;
/* Activate shared area regions for the transaction */
error = sep_activate_msgarea_context(sep, &msg_region,
call_hdr.msg_len);
if (error)
goto end_function_error_clear_transact;
sep_dump_message(sep);
if (0 < call_hdr.num_dcbs) {
error = sep_activate_dcb_dmatables_context(sep,
&dcb_region,
&dmatables_region,
dma_ctx);
if (error)
goto end_function_error_clear_transact;
}
/* Send command to SEP */
error = sep_send_command_handler(sep);
if (error)
goto end_function_error_clear_transact;
/* Store DMA context for the transaction */
private_data->dma_ctx = dma_ctx;
/* Update call status */
set_bit(SEP_FASTCALL_WRITE_DONE_OFFSET, &call_status->status);
error = count_user;
up(&sep->sep_doublebuf);
dev_dbg(&sep->pdev->dev, "[PID%d] double buffering region end\n",
current->pid);
goto end_function;
end_function_error_clear_transact:
sep_end_transaction_handler(sep, &dma_ctx, call_status,
&private_data->my_queue_elem);
end_function_error_doublebuf:
up(&sep->sep_doublebuf);
dev_dbg(&sep->pdev->dev, "[PID%d] double buffering region end\n",
current->pid);
end_function_error:
if (dma_ctx)
sep_free_dma_table_data_handler(sep, &dma_ctx);
end_function:
kfree(dcb_region);
kfree(dmatables_region);
kfree(msg_region);
return error;
}
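/*
* Illustrative only: a minimal user-space sketch of the fastcall write
* protocol handled above, assuming struct sep_fastcall_hdr and
* struct build_dcb_struct are exported by a driver header. The helper
* name and the 4 KB staging buffer are assumptions, not taken from
* this driver.
*/
#if 0 /* user-space example, not built with the driver */
#include <string.h>
#include <unistd.h>
static ssize_t sep_fastcall_send(int fd, const struct sep_fastcall_hdr *hdr,
const void *dcbs, size_t dcbs_len, const void *msg, size_t msg_len)
{
unsigned char buf[4096];
size_t off = 0;
/* Wire layout must match the handler: header, DCBs, then message */
if (sizeof(*hdr) + dcbs_len + msg_len > sizeof(buf))
return -1;
memcpy(buf + off, hdr, sizeof(*hdr));
off += sizeof(*hdr);
memcpy(buf + off, dcbs, dcbs_len);
off += dcbs_len;
memcpy(buf + off, msg, msg_len);
off += msg_len;
/* On success the write handler returns the full byte count */
return write(fd, buf, off);
}
#endif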
/**
* sep_seek - Handler for seek system call
* @filp: File pointer
* @offset: File offset
* @origin: Options for offset
*
* Fastcall interface does not support seeking, all reads
* and writes are from/to offset zero
*/
static loff_t sep_seek(struct file *filp, loff_t offset, int origin)
{
return -ENOSYS;
}
/**
* sep_file_operations - file operation on sep device
* @sep_ioctl: ioctl handler from user space call
* @sep_poll: poll handler
* @sep_open: handles sep device open request
* @sep_release: handles sep device release request
* @sep_mmap: handles memory mapping requests
* @sep_read: handles read request on sep device
* @sep_write: handles write request on sep device
* @sep_seek: handles seek request on sep device
*/
static const struct file_operations sep_file_operations = {
.owner = THIS_MODULE,
.unlocked_ioctl = sep_ioctl,
.poll = sep_poll,
.open = sep_open,
.release = sep_release,
.mmap = sep_mmap,
.read = sep_read,
.write = sep_write,
.llseek = sep_seek,
};
/**
* sep_sysfs_read - read sysfs entry per given arguments
* @filp: file pointer
* @kobj: kobject pointer
* @attr: binary file attributes
* @buf: read to this buffer
* @pos: offset to read
* @count: amount of data to read
*
* Reads the sep driver's sysfs entries according to the given arguments.
*/
static ssize_t
sep_sysfs_read(struct file *filp, struct kobject *kobj,
struct bin_attribute *attr,
char *buf, loff_t pos, size_t count)
{
unsigned long lck_flags;
size_t nleft = count;
struct sep_device *sep = sep_dev;
struct sep_queue_info *queue_elem = NULL;
u32 queue_num = 0;
u32 i = 1;
spin_lock_irqsave(&sep->sep_queue_lock, lck_flags);
queue_num = sep->sep_queue_num;
if (queue_num > SEP_DOUBLEBUF_USERS_LIMIT)
queue_num = SEP_DOUBLEBUF_USERS_LIMIT;
if (count < sizeof(queue_num)
+ (queue_num * sizeof(struct sep_queue_data))) {
spin_unlock_irqrestore(&sep->sep_queue_lock, lck_flags);
return -EINVAL;
}
memcpy(buf, &queue_num, sizeof(queue_num));
buf += sizeof(queue_num);
nleft -= sizeof(queue_num);
list_for_each_entry(queue_elem, &sep->sep_queue_status, list) {
if (i++ > queue_num)
break;
memcpy(buf, &queue_elem->data, sizeof(queue_elem->data));
nleft -= sizeof(queue_elem->data);
buf += sizeof(queue_elem->data);
}
spin_unlock_irqrestore(&sep->sep_queue_lock, lck_flags);
return count - nleft;
}
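/*
* Illustrative only: a user-space sketch for consuming the queue_status
* binary attribute produced above. struct sep_queue_data is treated as
* opaque here, so only the leading element count is decoded; the sysfs
* path passed in is the caller's problem and any concrete path would be
* an assumption derived from the misc device registration.
*/
#if 0 /* user-space example, not built with the driver */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
static int sep_print_queue_depth(const char *path)
{
unsigned char buf[4096];
uint32_t queue_num;
ssize_t n;
int fd = open(path, O_RDONLY);
if (fd < 0)
return -1;
n = read(fd, buf, sizeof(buf));
close(fd);
if (n < (ssize_t)sizeof(queue_num))
return -1;
/* The first u32 is the number of queue records that follow */
memcpy(&queue_num, buf, sizeof(queue_num));
printf("sep queue depth: %u\n", queue_num);
return 0;
}
#endif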
/**
* queue_status - binary sysfs attribute for queue status
* @attr: attributes (name & permissions)
* @read: function pointer to read this file
* @size: maximum size of binary attribute
*/
static const struct bin_attribute queue_status = {
.attr = {.name = "queue_status", .mode = 0444},
.read = sep_sysfs_read,
.size = sizeof(u32)
+ (SEP_DOUBLEBUF_USERS_LIMIT * sizeof(struct sep_queue_data)),
};
/**
* sep_register_driver_with_fs - register misc devices
* @sep: pointer to struct sep_device
*
* This function registers the driver with the file system
*/
static int sep_register_driver_with_fs(struct sep_device *sep)
{
int ret_val;
sep->miscdev_sep.minor = MISC_DYNAMIC_MINOR;
sep->miscdev_sep.name = SEP_DEV_NAME;
sep->miscdev_sep.fops = &sep_file_operations;
ret_val = misc_register(&sep->miscdev_sep);
if (ret_val) {
dev_warn(&sep->pdev->dev, "misc reg fails for SEP %x\n",
ret_val);
return ret_val;
}
ret_val = device_create_bin_file(sep->miscdev_sep.this_device,
&queue_status);
if (ret_val) {
dev_warn(&sep->pdev->dev, "sysfs attribute1 fails for SEP %x\n",
ret_val);
misc_deregister(&sep->miscdev_sep);
return ret_val;
}
return ret_val;
}
/**
* sep_probe - probe a matching PCI device
* @pdev: pci_device
* @ent: pci_device_id
*
* Attempt to set up and configure a SEP device that has been
* discovered by the PCI layer. Allocates all required resources.
*/
static int __devinit sep_probe(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
int error = 0;
struct sep_device *sep = NULL;
if (sep_dev != NULL) {
dev_dbg(&pdev->dev, "only one SEP supported.\n");
return -EBUSY;
}
/* Enable the device */
error = pci_enable_device(pdev);
if (error) {
dev_warn(&pdev->dev, "error enabling pci device\n");
goto end_function;
}
/* Allocate the sep_device structure for this device */
sep_dev = kzalloc(sizeof(struct sep_device), GFP_ATOMIC);
if (sep_dev == NULL) {
dev_warn(&pdev->dev,
"can't kmalloc the sep_device structure\n");
error = -ENOMEM;
goto end_function_disable_device;
}
/*
* We're going to use another variable for actually
* working with the device; this way, if we have
* multiple devices in the future, it would be easier
* to make appropriate changes
*/
sep = sep_dev;
sep->pdev = pci_dev_get(pdev);
init_waitqueue_head(&sep->event_transactions);
init_waitqueue_head(&sep->event_interrupt);
spin_lock_init(&sep->snd_rply_lck);
spin_lock_init(&sep->sep_queue_lock);
sema_init(&sep->sep_doublebuf, SEP_DOUBLEBUF_USERS_LIMIT);
INIT_LIST_HEAD(&sep->sep_queue_status);
dev_dbg(&sep->pdev->dev, "sep probe: PCI obtained, "
"device being prepared\n");
/* Set up our register area */
sep->reg_physical_addr = pci_resource_start(sep->pdev, 0);
if (!sep->reg_physical_addr) {
dev_warn(&sep->pdev->dev, "Error getting register start\n");
error = -ENODEV;
goto end_function_free_sep_dev;
}
sep->reg_physical_end = pci_resource_end(sep->pdev, 0);
if (!sep->reg_physical_end) {
dev_warn(&sep->pdev->dev, "Error getting register end\n");
error = -ENODEV;
goto end_function_free_sep_dev;
}
sep->reg_addr = ioremap_nocache(sep->reg_physical_addr,
(size_t)(sep->reg_physical_end - sep->reg_physical_addr + 1));
if (!sep->reg_addr) {
dev_warn(&sep->pdev->dev, "Error getting register virtual\n");
error = -ENODEV;
goto end_function_free_sep_dev;
}
dev_dbg(&sep->pdev->dev,
"Register area start %llx end %llx virtual %p\n",
(unsigned long long)sep->reg_physical_addr,
(unsigned long long)sep->reg_physical_end,
sep->reg_addr);
/* Allocate the shared area */
sep->shared_size = SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES +
SYNCHRONIC_DMA_TABLES_AREA_SIZE_BYTES +
SEP_DRIVER_DATA_POOL_SHARED_AREA_SIZE_IN_BYTES +
SEP_DRIVER_STATIC_AREA_SIZE_IN_BYTES +
SEP_DRIVER_SYSTEM_DATA_MEMORY_SIZE_IN_BYTES;
if (sep_map_and_alloc_shared_area(sep)) {
error = -ENOMEM;
/* Allocation failed */
goto end_function_error;
}
/* Clear ICR register */
sep_write_reg(sep, HW_HOST_ICR_REG_ADDR, 0xFFFFFFFF);
/* Set the IMR register - open only GPR 2 */
sep_write_reg(sep, HW_HOST_IMR_REG_ADDR, (~(0x1 << 13)));
/* Read send/receive counters from SEP */
sep->reply_ct = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR2_REG_ADDR);
sep->reply_ct &= 0x3FFFFFFF;
sep->send_ct = sep->reply_ct;
/* Get the interrupt line */
error = request_irq(pdev->irq, sep_inthandler, IRQF_SHARED,
"sep_driver", sep);
if (error)
goto end_function_deallocate_sep_shared_area;
/* The new chip requires a shared area reconfigure */
error = sep_reconfig_shared_area(sep);
if (error)
goto end_function_free_irq;
sep->in_use = 1;
/* Finally magic up the device nodes */
/* Register driver with the fs */
error = sep_register_driver_with_fs(sep);
if (error) {
dev_err(&sep->pdev->dev, "error registering dev file\n");
goto end_function_free_irq;
}
sep->in_use = 0; /* done touching the device */
#ifdef SEP_ENABLE_RUNTIME_PM
pm_runtime_put_noidle(&sep->pdev->dev);
pm_runtime_allow(&sep->pdev->dev);
pm_runtime_set_autosuspend_delay(&sep->pdev->dev,
SUSPEND_DELAY);
pm_runtime_use_autosuspend(&sep->pdev->dev);
pm_runtime_mark_last_busy(&sep->pdev->dev);
sep->power_save_setup = 1;
#endif
/* register kernel crypto driver */
#if defined(CONFIG_CRYPTO) || defined(CONFIG_CRYPTO_MODULE)
error = sep_crypto_setup();
if (error) {
dev_err(&sep->pdev->dev, "crypto setup failed\n");
goto end_function_free_irq;
}
#endif
goto end_function;
end_function_free_irq:
free_irq(pdev->irq, sep);
end_function_deallocate_sep_shared_area:
/* De-allocate shared area */
sep_unmap_and_free_shared_area(sep);
end_function_error:
iounmap(sep->reg_addr);
end_function_free_sep_dev:
pci_dev_put(sep_dev->pdev);
kfree(sep_dev);
sep_dev = NULL;
end_function_disable_device:
pci_disable_device(pdev);
end_function:
return error;
}
/**
* sep_remove - handles removing device from pci subsystem
* @pdev: pointer to pci device
*
* This function handles removing our sep device from the pci subsystem on
* exit or when unloading this module. It frees all used resources and
* unmaps any mapped memory regions.
*/
static void sep_remove(struct pci_dev *pdev)
{
struct sep_device *sep = sep_dev;
/* Unregister from fs */
misc_deregister(&sep->miscdev_sep);
/* Unregister from kernel crypto */
#if defined(CONFIG_CRYPTO) || defined(CONFIG_CRYPTO_MODULE)
sep_crypto_takedown();
#endif
/* Free the irq */
free_irq(sep->pdev->irq, sep);
/* Free the shared area */
sep_unmap_and_free_shared_area(sep_dev);
iounmap(sep_dev->reg_addr);
#ifdef SEP_ENABLE_RUNTIME_PM
if (sep->in_use) {
sep->in_use = 0;
pm_runtime_forbid(&sep->pdev->dev);
pm_runtime_get_noresume(&sep->pdev->dev);
}
#endif
pci_dev_put(sep_dev->pdev);
kfree(sep_dev);
sep_dev = NULL;
}
/* Initialize struct pci_device_id for our driver */
static DEFINE_PCI_DEVICE_TABLE(sep_pci_id_tbl) = {
{PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x0826)},
{PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x08e9)},
{0}
};
/* Export our pci_device_id structure to user space */
MODULE_DEVICE_TABLE(pci, sep_pci_id_tbl);
#ifdef SEP_ENABLE_RUNTIME_PM
/**
* sep_pci_resume - resume routine while waking up from S3 state
* @dev: pointer to sep device
*
* This function is used to wake up the sep driver while the system awakes
* from S3 state, i.e. suspend to RAM. RAM contents are intact.
* Notes - revisit with more understanding of pm, ICR/IMR & counters.
*/
static int sep_pci_resume(struct device *dev)
{
struct sep_device *sep = sep_dev;
dev_dbg(&sep->pdev->dev, "pci resume called\n");
if (sep->power_state == SEP_DRIVER_POWERON)
return 0;
/* Clear ICR register */
sep_write_reg(sep, HW_HOST_ICR_REG_ADDR, 0xFFFFFFFF);
/* Set the IMR register - open only GPR 2 */
sep_write_reg(sep, HW_HOST_IMR_REG_ADDR, (~(0x1 << 13)));
/* Read send/receive counters from SEP */
sep->reply_ct = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR2_REG_ADDR);
sep->reply_ct &= 0x3FFFFFFF;
sep->send_ct = sep->reply_ct;
sep->power_state = SEP_DRIVER_POWERON;
return 0;
}
/**
* sep_pci_suspend - suspend routine while going to S3 state
* @dev: pointer to sep device
*
* This function is used to suspend the sep driver while the system goes to
* S3 state, i.e. suspend to RAM. RAM contents are intact and powered
* during this suspend.
* Notes - revisit with more understanding of pm, ICR/IMR
*/
static int sep_pci_suspend(struct device *dev)
{
struct sep_device *sep = sep_dev;
dev_dbg(&sep->pdev->dev, "pci suspend called\n");
if (sep->in_use == 1)
return -EAGAIN;
sep->power_state = SEP_DRIVER_POWEROFF;
/* Clear ICR register */
sep_write_reg(sep, HW_HOST_ICR_REG_ADDR, 0xFFFFFFFF);
/* Set the IMR to block all */
sep_write_reg(sep, HW_HOST_IMR_REG_ADDR, 0xFFFFFFFF);
return 0;
}
/**
* sep_pm_runtime_resume - runtime resume routine
* @dev: pointer to sep device
*
* Notes - revisit with more understanding of pm, ICR/IMR & counters
*/
static int sep_pm_runtime_resume(struct device *dev)
{
u32 retval2;
u32 delay_count;
struct sep_device *sep = sep_dev;
dev_dbg(&sep->pdev->dev, "pm runtime resume called\n");
/*
* Wait until the SCU boot is ready.
* This is done by polling up to SCU_DELAY_MAX (50) times, waiting
* SCU_DELAY_ITERATION (10 microseconds) between reads. The bit can
* be set at a random time that is less than 500 microseconds after
* each power resume.
*/
retval2 = 0;
delay_count = 0;
while ((!retval2) && (delay_count < SCU_DELAY_MAX)) {
retval2 = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR3_REG_ADDR);
retval2 &= 0x00000008;
if (!retval2) {
udelay(SCU_DELAY_ITERATION);
delay_count += 1;
}
}
if (!retval2) {
dev_warn(&sep->pdev->dev, "scu boot bit not set at resume\n");
return -EINVAL;
}
/* Clear ICR register */
sep_write_reg(sep, HW_HOST_ICR_REG_ADDR, 0xFFFFFFFF);
/* Set the IMR register - open only GPR 2 */
sep_write_reg(sep, HW_HOST_IMR_REG_ADDR, (~(0x1 << 13)));
/* Read send/receive counters from SEP */
sep->reply_ct = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR2_REG_ADDR);
sep->reply_ct &= 0x3FFFFFFF;
sep->send_ct = sep->reply_ct;
return 0;
}
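/*
* The ICR clear, IMR setup and counter re-sync above repeat verbatim in
* sep_probe(), sep_pci_resume() and sep_pm_runtime_resume(). A possible
* consolidation, sketched here only and not wired into those handlers:
*/
static __maybe_unused void sep_reset_host_regs(struct sep_device *sep)
{
/* Clear ICR register */
sep_write_reg(sep, HW_HOST_ICR_REG_ADDR, 0xFFFFFFFF);
/* Set the IMR register - open only GPR 2 */
sep_write_reg(sep, HW_HOST_IMR_REG_ADDR, (~(0x1 << 13)));
/* Re-sync send/receive counters from SEP */
sep->reply_ct = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR2_REG_ADDR)
& 0x3FFFFFFF;
sep->send_ct = sep->reply_ct;
}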
/**
* sep_pm_runtime_suspend - runtime suspend routine
* @dev: pointer to sep device
*
* Notes - revisit with more understanding of pm
*/
static int sep_pm_runtime_suspend(struct device *dev)
{
struct sep_device *sep = sep_dev;
dev_dbg(&sep->pdev->dev, "pm runtime suspend called\n");
/* Clear ICR register */
sep_write_reg(sep, HW_HOST_ICR_REG_ADDR, 0xFFFFFFFF);
return 0;
}
/**
* sep_pm - power management for sep driver
* @sep_pm_runtime_resume: resume - no communication with cpu & main memory
* @sep_pm_runtime_suspend: suspend - no communication with cpu & main memory
* @sep_pci_suspend: suspend - main memory is still ON
* @sep_pci_resume: resume - main memory is still ON
*/
static const struct dev_pm_ops sep_pm = {
.runtime_resume = sep_pm_runtime_resume,
.runtime_suspend = sep_pm_runtime_suspend,
.resume = sep_pci_resume,
.suspend = sep_pci_suspend,
};
#endif /* SEP_ENABLE_RUNTIME_PM */
/**
* sep_pci_driver - registers this device with pci subsystem
* @name: name identifier for this driver
* @sep_pci_id_tbl: pointer to struct pci_device_id table
* @sep_probe: pointer to probe function in PCI driver
* @sep_remove: pointer to remove function in PCI driver
*/
static struct pci_driver sep_pci_driver = {
#ifdef SEP_ENABLE_RUNTIME_PM
.driver = {
.pm = &sep_pm,
},
#endif
.name = "sep_sec_driver",
.id_table = sep_pci_id_tbl,
.probe = sep_probe,
.remove = sep_remove
};
/**
* sep_init - init function
*
* Module load time. Register the PCI device driver.
*/
static int __init sep_init(void)
{
return pci_register_driver(&sep_pci_driver);
}
/**
* sep_exit - called to unload driver
*
* Unregister the driver. The device will perform all the cleanup required.
*/
static void __exit sep_exit(void)
{
pci_unregister_driver(&sep_pci_driver);
}
module_init(sep_init);
module_exit(sep_exit);
MODULE_LICENSE("GPL");
| gpl-2.0 |
TV-LP51-Devices/kernel_lge_g3 | sound/soc/imx/imx-pcm.c | 5034 | 2688 | /*
* Copyright 2009 Sascha Hauer <s.hauer@pengutronix.de>
*
* This code is based on code copyrighted by Freescale,
* Liam Girdwood, Javier Martin and probably others.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*/
#include <linux/dma-mapping.h>
#include <linux/module.h>
#include <sound/pcm.h>
#include <sound/soc.h>
#include "imx-pcm.h"
int snd_imx_pcm_mmap(struct snd_pcm_substream *substream,
struct vm_area_struct *vma)
{
struct snd_pcm_runtime *runtime = substream->runtime;
int ret;
ret = dma_mmap_writecombine(substream->pcm->card->dev, vma,
runtime->dma_area, runtime->dma_addr, runtime->dma_bytes);
pr_debug("%s: ret: %d %p 0x%08x 0x%08x\n", __func__, ret,
runtime->dma_area,
runtime->dma_addr,
runtime->dma_bytes);
return ret;
}
EXPORT_SYMBOL_GPL(snd_imx_pcm_mmap);
static int imx_pcm_preallocate_dma_buffer(struct snd_pcm *pcm, int stream)
{
struct snd_pcm_substream *substream = pcm->streams[stream].substream;
struct snd_dma_buffer *buf = &substream->dma_buffer;
size_t size = IMX_SSI_DMABUF_SIZE;
buf->dev.type = SNDRV_DMA_TYPE_DEV;
buf->dev.dev = pcm->card->dev;
buf->private_data = NULL;
buf->area = dma_alloc_writecombine(pcm->card->dev, size,
&buf->addr, GFP_KERNEL);
if (!buf->area)
return -ENOMEM;
buf->bytes = size;
return 0;
}
static u64 imx_pcm_dmamask = DMA_BIT_MASK(32);
int imx_pcm_new(struct snd_soc_pcm_runtime *rtd)
{
struct snd_card *card = rtd->card->snd_card;
struct snd_pcm *pcm = rtd->pcm;
int ret = 0;
if (!card->dev->dma_mask)
card->dev->dma_mask = &imx_pcm_dmamask;
if (!card->dev->coherent_dma_mask)
card->dev->coherent_dma_mask = DMA_BIT_MASK(32);
if (pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream) {
ret = imx_pcm_preallocate_dma_buffer(pcm,
SNDRV_PCM_STREAM_PLAYBACK);
if (ret)
goto out;
}
if (pcm->streams[SNDRV_PCM_STREAM_CAPTURE].substream) {
ret = imx_pcm_preallocate_dma_buffer(pcm,
SNDRV_PCM_STREAM_CAPTURE);
if (ret)
goto out;
}
out:
return ret;
}
EXPORT_SYMBOL_GPL(imx_pcm_new);
void imx_pcm_free(struct snd_pcm *pcm)
{
struct snd_pcm_substream *substream;
struct snd_dma_buffer *buf;
int stream;
for (stream = 0; stream < 2; stream++) {
substream = pcm->streams[stream].substream;
if (!substream)
continue;
buf = &substream->dma_buffer;
if (!buf->area)
continue;
dma_free_writecombine(pcm->card->dev, buf->bytes,
buf->area, buf->addr);
buf->area = NULL;
}
}
EXPORT_SYMBOL_GPL(imx_pcm_free);
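/*
* Illustrative only: how a platform driver of this vintage might wire
* the helpers exported above into its snd_soc_platform_driver. The DMA
* backend's open/hw_params/trigger/pointer callbacks are omitted, and
* the *_example identifiers are hypothetical names.
*/
#if 0 /* sketch, not part of this file */
static struct snd_pcm_ops imx_pcm_example_ops = {
.mmap = snd_imx_pcm_mmap,
/* .open/.hw_params/.trigger/.pointer come from the DMA backend */
};
static struct snd_soc_platform_driver imx_soc_platform_example = {
.ops = &imx_pcm_example_ops,
.pcm_new = imx_pcm_new,
.pcm_free = imx_pcm_free,
};
#endif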
| gpl-2.0 |
mifl/android_kernel_pantech_im-860s | tools/power/cpupower/utils/helpers/sysfs.c | 5290 | 8945 | /*
* (C) 2004-2009 Dominik Brodowski <linux@dominikbrodowski.de>
* (C) 2011 Thomas Renninger <trenn@novell.com> Novell Inc.
*
* Licensed under the terms of the GNU GPL License version 2.
*/
#include <stdio.h>
#include <errno.h>
#include <stdlib.h>
#include <string.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <unistd.h>
#include "helpers/sysfs.h"
unsigned int sysfs_read_file(const char *path, char *buf, size_t buflen)
{
int fd;
ssize_t numread;
fd = open(path, O_RDONLY);
if (fd == -1)
return 0;
numread = read(fd, buf, buflen - 1);
if (numread < 1) {
close(fd);
return 0;
}
buf[numread] = '\0';
close(fd);
return (unsigned int) numread;
}
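/*
* Illustrative only: typical use of sysfs_read_file(). The cpufreq
* scaling_governor attribute is assumed to exist on the running system;
* note that the returned buffer keeps its trailing newline.
*/
#if 0 /* example, not part of this file */
static void print_cpu0_governor(void)
{
char buf[MAX_LINE_LEN];
if (sysfs_read_file(PATH_TO_CPU "cpu0/cpufreq/scaling_governor",
buf, sizeof(buf)))
printf("cpu0 governor: %s", buf);
}
#endif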
static unsigned int sysfs_write_file(const char *path,
const char *value, size_t len)
{
int fd;
ssize_t numwrite;
fd = open(path, O_WRONLY);
if (fd == -1)
return 0;
numwrite = write(fd, value, len);
if (numwrite < 1) {
close(fd);
return 0;
}
close(fd);
return (unsigned int) numwrite;
}
/*
* Detect whether a CPU is online
*
* Returns:
* 1 -> if CPU is online
* 0 -> if CPU is offline
* negative errno values in error case
*/
int sysfs_is_cpu_online(unsigned int cpu)
{
char path[SYSFS_PATH_MAX];
int fd;
ssize_t numread;
unsigned long long value;
char linebuf[MAX_LINE_LEN];
char *endp;
struct stat statbuf;
snprintf(path, sizeof(path), PATH_TO_CPU "cpu%u", cpu);
if (stat(path, &statbuf) != 0)
return 0;
/*
* kernel without CONFIG_HOTPLUG_CPU
* -> cpuX directory exists, but not cpuX/online file
*/
snprintf(path, sizeof(path), PATH_TO_CPU "cpu%u/online", cpu);
if (stat(path, &statbuf) != 0)
return 1;
fd = open(path, O_RDONLY);
if (fd == -1)
return -errno;
numread = read(fd, linebuf, MAX_LINE_LEN - 1);
if (numread < 1) {
close(fd);
return -EIO;
}
linebuf[numread] = '\0';
close(fd);
value = strtoull(linebuf, &endp, 0);
if (value > 1)
return -EINVAL;
return value;
}
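/*
* Illustrative only: walking all configured CPUs with
* sysfs_is_cpu_online(). sysconf(_SC_NPROCESSORS_CONF) is used as the
* CPU count; <unistd.h> is already included by this file.
*/
#if 0 /* example, not part of this file */
static void print_online_cpus(void)
{
long cpu, ncpus = sysconf(_SC_NPROCESSORS_CONF);
for (cpu = 0; cpu < ncpus; cpu++) {
int state = sysfs_is_cpu_online((unsigned int)cpu);
if (state == 1)
printf("cpu%ld: online\n", cpu);
else if (state == 0)
printf("cpu%ld: offline\n", cpu);
else
printf("cpu%ld: error %d\n", cpu, state);
}
}
#endif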
/* CPUidle idlestate specific /sys/devices/system/cpu/cpuX/cpuidle/ access */
/*
* helper function to read a file from /sys into the given buffer
* fname is a relative path under the "cpuX/cpuidle/stateX/" dir
* cstates start at 0; C0 is not counted as a cstate.
* This means if you want C1 info, pass 0 as the idlestate param
*/
unsigned int sysfs_idlestate_read_file(unsigned int cpu, unsigned int idlestate,
const char *fname, char *buf, size_t buflen)
{
char path[SYSFS_PATH_MAX];
int fd;
ssize_t numread;
snprintf(path, sizeof(path), PATH_TO_CPU "cpu%u/cpuidle/state%u/%s",
cpu, idlestate, fname);
fd = open(path, O_RDONLY);
if (fd == -1)
return 0;
numread = read(fd, buf, buflen - 1);
if (numread < 1) {
close(fd);
return 0;
}
buf[numread] = '\0';
close(fd);
return (unsigned int) numread;
}
/* read access to files which contain one numeric value */
enum idlestate_value {
IDLESTATE_USAGE,
IDLESTATE_POWER,
IDLESTATE_LATENCY,
IDLESTATE_TIME,
MAX_IDLESTATE_VALUE_FILES
};
static const char *idlestate_value_files[MAX_IDLESTATE_VALUE_FILES] = {
[IDLESTATE_USAGE] = "usage",
[IDLESTATE_POWER] = "power",
[IDLESTATE_LATENCY] = "latency",
[IDLESTATE_TIME] = "time",
};
static unsigned long long sysfs_idlestate_get_one_value(unsigned int cpu,
unsigned int idlestate,
enum idlestate_value which)
{
unsigned long long value;
unsigned int len;
char linebuf[MAX_LINE_LEN];
char *endp;
if (which >= MAX_IDLESTATE_VALUE_FILES)
return 0;
len = sysfs_idlestate_read_file(cpu, idlestate,
idlestate_value_files[which],
linebuf, sizeof(linebuf));
if (len == 0)
return 0;
value = strtoull(linebuf, &endp, 0);
if (endp == linebuf || errno == ERANGE)
return 0;
return value;
}
/* read access to files which contain one string */
enum idlestate_string {
IDLESTATE_DESC,
IDLESTATE_NAME,
MAX_IDLESTATE_STRING_FILES
};
static const char *idlestate_string_files[MAX_IDLESTATE_STRING_FILES] = {
[IDLESTATE_DESC] = "desc",
[IDLESTATE_NAME] = "name",
};
static char *sysfs_idlestate_get_one_string(unsigned int cpu,
unsigned int idlestate,
enum idlestate_string which)
{
char linebuf[MAX_LINE_LEN];
char *result;
unsigned int len;
if (which >= MAX_IDLESTATE_STRING_FILES)
return NULL;
len = sysfs_idlestate_read_file(cpu, idlestate,
idlestate_string_files[which],
linebuf, sizeof(linebuf));
if (len == 0)
return NULL;
result = strdup(linebuf);
if (result == NULL)
return NULL;
if (result[strlen(result) - 1] == '\n')
result[strlen(result) - 1] = '\0';
return result;
}
unsigned long sysfs_get_idlestate_latency(unsigned int cpu,
unsigned int idlestate)
{
return sysfs_idlestate_get_one_value(cpu, idlestate, IDLESTATE_LATENCY);
}
unsigned long sysfs_get_idlestate_usage(unsigned int cpu,
unsigned int idlestate)
{
return sysfs_idlestate_get_one_value(cpu, idlestate, IDLESTATE_USAGE);
}
unsigned long long sysfs_get_idlestate_time(unsigned int cpu,
unsigned int idlestate)
{
return sysfs_idlestate_get_one_value(cpu, idlestate, IDLESTATE_TIME);
}
char *sysfs_get_idlestate_name(unsigned int cpu, unsigned int idlestate)
{
return sysfs_idlestate_get_one_string(cpu, idlestate, IDLESTATE_NAME);
}
char *sysfs_get_idlestate_desc(unsigned int cpu, unsigned int idlestate)
{
return sysfs_idlestate_get_one_string(cpu, idlestate, IDLESTATE_DESC);
}
/*
* Returns number of supported C-states of CPU core cpu
* Negative in error case
* Zero if cpuidle does not export any C-states
*/
int sysfs_get_idlestate_count(unsigned int cpu)
{
char file[SYSFS_PATH_MAX];
struct stat statbuf;
int idlestates = 1;
snprintf(file, SYSFS_PATH_MAX, PATH_TO_CPU "cpuidle");
if (stat(file, &statbuf) != 0 || !S_ISDIR(statbuf.st_mode))
return -ENODEV;
snprintf(file, SYSFS_PATH_MAX, PATH_TO_CPU "cpu%u/cpuidle/state0", cpu);
if (stat(file, &statbuf) != 0 || !S_ISDIR(statbuf.st_mode))
return 0;
while (stat(file, &statbuf) == 0 && S_ISDIR(statbuf.st_mode)) {
snprintf(file, SYSFS_PATH_MAX, PATH_TO_CPU
"cpu%u/cpuidle/state%d", cpu, idlestates);
idlestates++;
}
idlestates--;
return idlestates;
}
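/*
* Illustrative only: combining the accessors above to dump every
* exported C-state of one CPU. The *_name/*_desc helpers return
* strdup()ed strings (or NULL), which the caller must free.
*/
#if 0 /* example, not part of this file */
static void dump_idlestates(unsigned int cpu)
{
int i, count = sysfs_get_idlestate_count(cpu);
for (i = 0; i < count; i++) {
char *name = sysfs_get_idlestate_name(cpu, i);
char *desc = sysfs_get_idlestate_desc(cpu, i);
printf("state%d: %s (%s), latency %lu us, used %lu times\n",
i, name ? name : "?", desc ? desc : "?",
sysfs_get_idlestate_latency(cpu, i),
sysfs_get_idlestate_usage(cpu, i));
free(name);
free(desc);
}
}
#endif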
/* CPUidle general /sys/devices/system/cpu/cpuidle/ sysfs access ********/
/*
* helper function to read file from /sys into given buffer
* fname is a relative path under "cpu/cpuidle/" dir
*/
static unsigned int sysfs_cpuidle_read_file(const char *fname, char *buf,
size_t buflen)
{
char path[SYSFS_PATH_MAX];
snprintf(path, sizeof(path), PATH_TO_CPU "cpuidle/%s", fname);
return sysfs_read_file(path, buf, buflen);
}
/* read access to files which contain one string */
enum cpuidle_string {
CPUIDLE_GOVERNOR,
CPUIDLE_GOVERNOR_RO,
CPUIDLE_DRIVER,
MAX_CPUIDLE_STRING_FILES
};
static const char *cpuidle_string_files[MAX_CPUIDLE_STRING_FILES] = {
[CPUIDLE_GOVERNOR] = "current_governor",
[CPUIDLE_GOVERNOR_RO] = "current_governor_ro",
[CPUIDLE_DRIVER] = "current_driver",
};
static char *sysfs_cpuidle_get_one_string(enum cpuidle_string which)
{
char linebuf[MAX_LINE_LEN];
char *result;
unsigned int len;
if (which >= MAX_CPUIDLE_STRING_FILES)
return NULL;
len = sysfs_cpuidle_read_file(cpuidle_string_files[which],
linebuf, sizeof(linebuf));
if (len == 0)
return NULL;
result = strdup(linebuf);
if (result == NULL)
return NULL;
if (result[strlen(result) - 1] == '\n')
result[strlen(result) - 1] = '\0';
return result;
}
char *sysfs_get_cpuidle_governor(void)
{
char *tmp = sysfs_cpuidle_get_one_string(CPUIDLE_GOVERNOR_RO);
if (!tmp)
return sysfs_cpuidle_get_one_string(CPUIDLE_GOVERNOR);
else
return tmp;
}
char *sysfs_get_cpuidle_driver(void)
{
return sysfs_cpuidle_get_one_string(CPUIDLE_DRIVER);
}
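/*
* Illustrative only: both accessors above return strdup()ed strings or
* NULL, so the caller owns and frees them.
*/
#if 0 /* example, not part of this file */
static void print_cpuidle_setup(void)
{
char *gov = sysfs_get_cpuidle_governor();
char *drv = sysfs_get_cpuidle_driver();
printf("cpuidle governor: %s, driver: %s\n",
gov ? gov : "unknown", drv ? drv : "unknown");
free(gov);
free(drv);
}
#endif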
/* CPUidle idlestate specific /sys/devices/system/cpu/cpuX/cpuidle/ access */
/*
* Get sched_mc or sched_smt settings
* Pass "mc" or "smt" as argument
*
* Returns negative value on failure
*/
int sysfs_get_sched(const char *smt_mc)
{
unsigned long value;
char linebuf[MAX_LINE_LEN];
char *endp;
char path[SYSFS_PATH_MAX];
if (strcmp("mc", smt_mc) && strcmp("smt", smt_mc))
return -EINVAL;
snprintf(path, sizeof(path),
PATH_TO_CPU "sched_%s_power_savings", smt_mc);
if (sysfs_read_file(path, linebuf, MAX_LINE_LEN) == 0)
return -1;
value = strtoul(linebuf, &endp, 0);
if (endp == linebuf || errno == ERANGE)
return -1;
return value;
}
/*
* Set sched_mc or sched_smt settings
* Pass "mc" or "smt" as argument
*
* Returns negative value on failure
*/
int sysfs_set_sched(const char *smt_mc, int val)
{
char linebuf[MAX_LINE_LEN];
char path[SYSFS_PATH_MAX];
struct stat statbuf;
if (strcmp("mc", smt_mc) && strcmp("smt", smt_mc))
return -EINVAL;
snprintf(path, sizeof(path),
PATH_TO_CPU "sched_%s_power_savings", smt_mc);
sprintf(linebuf, "%d", val);
if (stat(path, &statbuf) != 0)
return -ENODEV;
if (sysfs_write_file(path, linebuf, MAX_LINE_LEN) == 0)
return -1;
return 0;
}
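/*
* Illustrative only: round-tripping a power-savings knob through the
* two helpers above. Writing usually requires root, and kernels built
* without the corresponding sched_*_power_savings file fail with a
* negative return value.
*/
#if 0 /* example, not part of this file */
static int bump_sched_mc(void)
{
int val = sysfs_get_sched("mc");
if (val < 0)
return val;
return sysfs_set_sched("mc", val ? val : 1);
}
#endif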
| gpl-2.0 |
davidmueller13/kushan_kernel | net/wireless/wext-compat.c | 5290 | 39722 | /*
* cfg80211 - wext compat code
*
* This is temporary code until all wireless functionality is migrated
* into cfg80211, when that happens all the exports here go away and
* we directly assign the wireless handlers of wireless interfaces.
*
* Copyright 2008-2009 Johannes Berg <johannes@sipsolutions.net>
*/
#include <linux/export.h>
#include <linux/wireless.h>
#include <linux/nl80211.h>
#include <linux/if_arp.h>
#include <linux/etherdevice.h>
#include <linux/slab.h>
#include <net/iw_handler.h>
#include <net/cfg80211.h>
#include <net/cfg80211-wext.h>
#include "wext-compat.h"
#include "core.h"
int cfg80211_wext_giwname(struct net_device *dev,
struct iw_request_info *info,
char *name, char *extra)
{
struct wireless_dev *wdev = dev->ieee80211_ptr;
struct ieee80211_supported_band *sband;
bool is_ht = false, is_a = false, is_b = false, is_g = false;
if (!wdev)
return -EOPNOTSUPP;
sband = wdev->wiphy->bands[IEEE80211_BAND_5GHZ];
if (sband) {
is_a = true;
is_ht |= sband->ht_cap.ht_supported;
}
sband = wdev->wiphy->bands[IEEE80211_BAND_2GHZ];
if (sband) {
int i;
/* Check for mandatory rates */
for (i = 0; i < sband->n_bitrates; i++) {
if (sband->bitrates[i].bitrate == 10)
is_b = true;
if (sband->bitrates[i].bitrate == 60)
is_g = true;
}
is_ht |= sband->ht_cap.ht_supported;
}
strcpy(name, "IEEE 802.11");
if (is_a)
strcat(name, "a");
if (is_b)
strcat(name, "b");
if (is_g)
strcat(name, "g");
if (is_ht)
strcat(name, "n");
return 0;
}
EXPORT_SYMBOL_GPL(cfg80211_wext_giwname);
int cfg80211_wext_siwmode(struct net_device *dev, struct iw_request_info *info,
u32 *mode, char *extra)
{
struct wireless_dev *wdev = dev->ieee80211_ptr;
struct cfg80211_registered_device *rdev;
struct vif_params vifparams;
enum nl80211_iftype type;
int ret;
rdev = wiphy_to_dev(wdev->wiphy);
switch (*mode) {
case IW_MODE_INFRA:
type = NL80211_IFTYPE_STATION;
break;
case IW_MODE_ADHOC:
type = NL80211_IFTYPE_ADHOC;
break;
case IW_MODE_REPEAT:
type = NL80211_IFTYPE_WDS;
break;
case IW_MODE_MONITOR:
type = NL80211_IFTYPE_MONITOR;
break;
default:
return -EINVAL;
}
if (type == wdev->iftype)
return 0;
memset(&vifparams, 0, sizeof(vifparams));
cfg80211_lock_rdev(rdev);
ret = cfg80211_change_iface(rdev, dev, type, NULL, &vifparams);
cfg80211_unlock_rdev(rdev);
return ret;
}
EXPORT_SYMBOL_GPL(cfg80211_wext_siwmode);
int cfg80211_wext_giwmode(struct net_device *dev, struct iw_request_info *info,
u32 *mode, char *extra)
{
struct wireless_dev *wdev = dev->ieee80211_ptr;
if (!wdev)
return -EOPNOTSUPP;
switch (wdev->iftype) {
case NL80211_IFTYPE_AP:
*mode = IW_MODE_MASTER;
break;
case NL80211_IFTYPE_STATION:
*mode = IW_MODE_INFRA;
break;
case NL80211_IFTYPE_ADHOC:
*mode = IW_MODE_ADHOC;
break;
case NL80211_IFTYPE_MONITOR:
*mode = IW_MODE_MONITOR;
break;
case NL80211_IFTYPE_WDS:
*mode = IW_MODE_REPEAT;
break;
case NL80211_IFTYPE_AP_VLAN:
*mode = IW_MODE_SECOND; /* FIXME */
break;
default:
*mode = IW_MODE_AUTO;
break;
}
return 0;
}
EXPORT_SYMBOL_GPL(cfg80211_wext_giwmode);
int cfg80211_wext_giwrange(struct net_device *dev,
struct iw_request_info *info,
struct iw_point *data, char *extra)
{
struct wireless_dev *wdev = dev->ieee80211_ptr;
struct iw_range *range = (struct iw_range *) extra;
enum ieee80211_band band;
int i, c = 0;
if (!wdev)
return -EOPNOTSUPP;
data->length = sizeof(struct iw_range);
memset(range, 0, sizeof(struct iw_range));
range->we_version_compiled = WIRELESS_EXT;
range->we_version_source = 21;
range->retry_capa = IW_RETRY_LIMIT;
range->retry_flags = IW_RETRY_LIMIT;
range->min_retry = 0;
range->max_retry = 255;
range->min_rts = 0;
range->max_rts = 2347;
range->min_frag = 256;
range->max_frag = 2346;
range->max_encoding_tokens = 4;
range->max_qual.updated = IW_QUAL_NOISE_INVALID;
switch (wdev->wiphy->signal_type) {
case CFG80211_SIGNAL_TYPE_NONE:
break;
case CFG80211_SIGNAL_TYPE_MBM:
range->max_qual.level = -110;
range->max_qual.qual = 70;
range->avg_qual.qual = 35;
range->max_qual.updated |= IW_QUAL_DBM;
range->max_qual.updated |= IW_QUAL_QUAL_UPDATED;
range->max_qual.updated |= IW_QUAL_LEVEL_UPDATED;
break;
case CFG80211_SIGNAL_TYPE_UNSPEC:
range->max_qual.level = 100;
range->max_qual.qual = 100;
range->avg_qual.qual = 50;
range->max_qual.updated |= IW_QUAL_QUAL_UPDATED;
range->max_qual.updated |= IW_QUAL_LEVEL_UPDATED;
break;
}
range->avg_qual.level = range->max_qual.level / 2;
range->avg_qual.noise = range->max_qual.noise / 2;
range->avg_qual.updated = range->max_qual.updated;
for (i = 0; i < wdev->wiphy->n_cipher_suites; i++) {
switch (wdev->wiphy->cipher_suites[i]) {
case WLAN_CIPHER_SUITE_TKIP:
range->enc_capa |= (IW_ENC_CAPA_CIPHER_TKIP |
IW_ENC_CAPA_WPA);
break;
case WLAN_CIPHER_SUITE_CCMP:
range->enc_capa |= (IW_ENC_CAPA_CIPHER_CCMP |
IW_ENC_CAPA_WPA2);
break;
case WLAN_CIPHER_SUITE_WEP40:
range->encoding_size[range->num_encoding_sizes++] =
WLAN_KEY_LEN_WEP40;
break;
case WLAN_CIPHER_SUITE_WEP104:
range->encoding_size[range->num_encoding_sizes++] =
WLAN_KEY_LEN_WEP104;
break;
}
}
for (band = 0; band < IEEE80211_NUM_BANDS; band++) {
struct ieee80211_supported_band *sband;
sband = wdev->wiphy->bands[band];
if (!sband)
continue;
for (i = 0; i < sband->n_channels && c < IW_MAX_FREQUENCIES; i++) {
struct ieee80211_channel *chan = &sband->channels[i];
if (!(chan->flags & IEEE80211_CHAN_DISABLED)) {
range->freq[c].i =
ieee80211_frequency_to_channel(
chan->center_freq);
range->freq[c].m = chan->center_freq;
range->freq[c].e = 6;
c++;
}
}
}
range->num_channels = c;
range->num_frequency = c;
IW_EVENT_CAPA_SET_KERNEL(range->event_capa);
IW_EVENT_CAPA_SET(range->event_capa, SIOCGIWAP);
IW_EVENT_CAPA_SET(range->event_capa, SIOCGIWSCAN);
if (wdev->wiphy->max_scan_ssids > 0)
range->scan_capa |= IW_SCAN_CAPA_ESSID;
return 0;
}
EXPORT_SYMBOL_GPL(cfg80211_wext_giwrange);
/**
* cfg80211_wext_freq - get wext frequency for non-"auto"
* @wiphy: the wiphy
* @freq: the wext freq encoding
*
* Returns a frequency, or a negative error code, or 0 for auto.
*/
int cfg80211_wext_freq(struct wiphy *wiphy, struct iw_freq *freq)
{
/*
* Parse frequency - return 0 for auto and
* -EINVAL for impossible things.
*/
if (freq->e == 0) {
enum ieee80211_band band = IEEE80211_BAND_2GHZ;
if (freq->m < 0)
return 0;
if (freq->m > 14)
band = IEEE80211_BAND_5GHZ;
return ieee80211_channel_to_frequency(freq->m, band);
} else {
int i, div = 1000000;
for (i = 0; i < freq->e; i++)
div /= 10;
if (div <= 0)
return -EINVAL;
return freq->m / div;
}
}
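/*
* Illustrative only: the iw_freq encoding accepted above. With e == 0
* the value is a channel number; otherwise it encodes m * 10^e Hz,
* which the helper reduces to MHz. Both initializers below therefore
* resolve to 2437 MHz (2.4 GHz channel 6).
*/
#if 0 /* example, not part of this file */
static const struct iw_freq example_as_channel = { .m = 6, .e = 0 };
static const struct iw_freq example_as_hz = { .m = 2437, .e = 6 };
/*
* cfg80211_wext_freq(wiphy, &example_as_channel) == 2437
* cfg80211_wext_freq(wiphy, &example_as_hz) == 2437
*/
#endif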
int cfg80211_wext_siwrts(struct net_device *dev,
struct iw_request_info *info,
struct iw_param *rts, char *extra)
{
struct wireless_dev *wdev = dev->ieee80211_ptr;
struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
u32 orts = wdev->wiphy->rts_threshold;
int err;
if (rts->disabled || !rts->fixed)
wdev->wiphy->rts_threshold = (u32) -1;
else if (rts->value < 0)
return -EINVAL;
else
wdev->wiphy->rts_threshold = rts->value;
err = rdev->ops->set_wiphy_params(wdev->wiphy,
WIPHY_PARAM_RTS_THRESHOLD);
if (err)
wdev->wiphy->rts_threshold = orts;
return err;
}
EXPORT_SYMBOL_GPL(cfg80211_wext_siwrts);
int cfg80211_wext_giwrts(struct net_device *dev,
struct iw_request_info *info,
struct iw_param *rts, char *extra)
{
struct wireless_dev *wdev = dev->ieee80211_ptr;
rts->value = wdev->wiphy->rts_threshold;
rts->disabled = rts->value == (u32) -1;
rts->fixed = 1;
return 0;
}
EXPORT_SYMBOL_GPL(cfg80211_wext_giwrts);
int cfg80211_wext_siwfrag(struct net_device *dev,
struct iw_request_info *info,
struct iw_param *frag, char *extra)
{
struct wireless_dev *wdev = dev->ieee80211_ptr;
struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
u32 ofrag = wdev->wiphy->frag_threshold;
int err;
if (frag->disabled || !frag->fixed)
wdev->wiphy->frag_threshold = (u32) -1;
else if (frag->value < 256)
return -EINVAL;
else {
/* Fragment length must be even, so strip LSB. */
wdev->wiphy->frag_threshold = frag->value & ~0x1;
}
err = rdev->ops->set_wiphy_params(wdev->wiphy,
WIPHY_PARAM_FRAG_THRESHOLD);
if (err)
wdev->wiphy->frag_threshold = ofrag;
return err;
}
EXPORT_SYMBOL_GPL(cfg80211_wext_siwfrag);
int cfg80211_wext_giwfrag(struct net_device *dev,
struct iw_request_info *info,
struct iw_param *frag, char *extra)
{
struct wireless_dev *wdev = dev->ieee80211_ptr;
frag->value = wdev->wiphy->frag_threshold;
frag->disabled = frag->value == (u32) -1;
frag->fixed = 1;
return 0;
}
EXPORT_SYMBOL_GPL(cfg80211_wext_giwfrag);
static int cfg80211_wext_siwretry(struct net_device *dev,
struct iw_request_info *info,
struct iw_param *retry, char *extra)
{
struct wireless_dev *wdev = dev->ieee80211_ptr;
struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
u32 changed = 0;
u8 olong = wdev->wiphy->retry_long;
u8 oshort = wdev->wiphy->retry_short;
int err;
if (retry->disabled ||
(retry->flags & IW_RETRY_TYPE) != IW_RETRY_LIMIT)
return -EINVAL;
if (retry->flags & IW_RETRY_LONG) {
wdev->wiphy->retry_long = retry->value;
changed |= WIPHY_PARAM_RETRY_LONG;
} else if (retry->flags & IW_RETRY_SHORT) {
wdev->wiphy->retry_short = retry->value;
changed |= WIPHY_PARAM_RETRY_SHORT;
} else {
wdev->wiphy->retry_short = retry->value;
wdev->wiphy->retry_long = retry->value;
changed |= WIPHY_PARAM_RETRY_LONG;
changed |= WIPHY_PARAM_RETRY_SHORT;
}
if (!changed)
return 0;
err = rdev->ops->set_wiphy_params(wdev->wiphy, changed);
if (err) {
wdev->wiphy->retry_short = oshort;
wdev->wiphy->retry_long = olong;
}
return err;
}
int cfg80211_wext_giwretry(struct net_device *dev,
struct iw_request_info *info,
struct iw_param *retry, char *extra)
{
struct wireless_dev *wdev = dev->ieee80211_ptr;
retry->disabled = 0;
if (retry->flags == 0 || (retry->flags & IW_RETRY_SHORT)) {
/*
* First return short value, iwconfig will ask long value
* later if needed
*/
retry->flags |= IW_RETRY_LIMIT;
retry->value = wdev->wiphy->retry_short;
if (wdev->wiphy->retry_long != wdev->wiphy->retry_short)
retry->flags |= IW_RETRY_LONG;
return 0;
}
if (retry->flags & IW_RETRY_LONG) {
retry->flags = IW_RETRY_LIMIT | IW_RETRY_LONG;
retry->value = wdev->wiphy->retry_long;
}
return 0;
}
EXPORT_SYMBOL_GPL(cfg80211_wext_giwretry);
static int __cfg80211_set_encryption(struct cfg80211_registered_device *rdev,
struct net_device *dev, bool pairwise,
const u8 *addr, bool remove, bool tx_key,
int idx, struct key_params *params)
{
struct wireless_dev *wdev = dev->ieee80211_ptr;
int err, i;
bool rejoin = false;
if (pairwise && !addr)
return -EINVAL;
if (!wdev->wext.keys) {
wdev->wext.keys = kzalloc(sizeof(*wdev->wext.keys),
GFP_KERNEL);
if (!wdev->wext.keys)
return -ENOMEM;
for (i = 0; i < 6; i++)
wdev->wext.keys->params[i].key =
wdev->wext.keys->data[i];
}
if (wdev->iftype != NL80211_IFTYPE_ADHOC &&
wdev->iftype != NL80211_IFTYPE_STATION)
return -EOPNOTSUPP;
if (params->cipher == WLAN_CIPHER_SUITE_AES_CMAC) {
if (!wdev->current_bss)
return -ENOLINK;
if (!rdev->ops->set_default_mgmt_key)
return -EOPNOTSUPP;
if (idx < 4 || idx > 5)
return -EINVAL;
} else if (idx < 0 || idx > 3)
return -EINVAL;
if (remove) {
err = 0;
if (wdev->current_bss) {
/*
* If removing the current TX key, we will need to
* join a new IBSS without the privacy bit clear.
*/
if (idx == wdev->wext.default_key &&
wdev->iftype == NL80211_IFTYPE_ADHOC) {
__cfg80211_leave_ibss(rdev, wdev->netdev, true);
rejoin = true;
}
if (!pairwise && addr &&
!(rdev->wiphy.flags & WIPHY_FLAG_IBSS_RSN))
err = -ENOENT;
else
err = rdev->ops->del_key(&rdev->wiphy, dev, idx,
pairwise, addr);
}
wdev->wext.connect.privacy = false;
/*
* Applications using wireless extensions expect to be
* able to delete keys that don't exist, so allow that.
*/
if (err == -ENOENT)
err = 0;
if (!err) {
if (!addr) {
wdev->wext.keys->params[idx].key_len = 0;
wdev->wext.keys->params[idx].cipher = 0;
}
if (idx == wdev->wext.default_key)
wdev->wext.default_key = -1;
else if (idx == wdev->wext.default_mgmt_key)
wdev->wext.default_mgmt_key = -1;
}
if (!err && rejoin)
err = cfg80211_ibss_wext_join(rdev, wdev);
return err;
}
if (addr)
tx_key = false;
if (cfg80211_validate_key_settings(rdev, params, idx, pairwise, addr))
return -EINVAL;
err = 0;
if (wdev->current_bss)
err = rdev->ops->add_key(&rdev->wiphy, dev, idx,
pairwise, addr, params);
if (err)
return err;
if (!addr) {
wdev->wext.keys->params[idx] = *params;
memcpy(wdev->wext.keys->data[idx],
params->key, params->key_len);
wdev->wext.keys->params[idx].key =
wdev->wext.keys->data[idx];
}
if ((params->cipher == WLAN_CIPHER_SUITE_WEP40 ||
params->cipher == WLAN_CIPHER_SUITE_WEP104) &&
(tx_key || (!addr && wdev->wext.default_key == -1))) {
if (wdev->current_bss) {
/*
* If we are getting a new TX key from not having
* had one before we need to join a new IBSS with
* the privacy bit set.
*/
if (wdev->iftype == NL80211_IFTYPE_ADHOC &&
wdev->wext.default_key == -1) {
__cfg80211_leave_ibss(rdev, wdev->netdev, true);
rejoin = true;
}
err = rdev->ops->set_default_key(&rdev->wiphy, dev,
idx, true, true);
}
if (!err) {
wdev->wext.default_key = idx;
if (rejoin)
err = cfg80211_ibss_wext_join(rdev, wdev);
}
return err;
}
if (params->cipher == WLAN_CIPHER_SUITE_AES_CMAC &&
(tx_key || (!addr && wdev->wext.default_mgmt_key == -1))) {
if (wdev->current_bss)
err = rdev->ops->set_default_mgmt_key(&rdev->wiphy,
dev, idx);
if (!err)
wdev->wext.default_mgmt_key = idx;
return err;
}
return 0;
}
static int cfg80211_set_encryption(struct cfg80211_registered_device *rdev,
struct net_device *dev, bool pairwise,
const u8 *addr, bool remove, bool tx_key,
int idx, struct key_params *params)
{
int err;
/* devlist mutex needed for possible IBSS re-join */
mutex_lock(&rdev->devlist_mtx);
wdev_lock(dev->ieee80211_ptr);
err = __cfg80211_set_encryption(rdev, dev, pairwise, addr,
remove, tx_key, idx, params);
wdev_unlock(dev->ieee80211_ptr);
mutex_unlock(&rdev->devlist_mtx);
return err;
}
static int cfg80211_wext_siwencode(struct net_device *dev,
struct iw_request_info *info,
struct iw_point *erq, char *keybuf)
{
struct wireless_dev *wdev = dev->ieee80211_ptr;
struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
int idx, err;
bool remove = false;
struct key_params params;
if (wdev->iftype != NL80211_IFTYPE_STATION &&
wdev->iftype != NL80211_IFTYPE_ADHOC)
return -EOPNOTSUPP;
/* no use -- only MFP (set_default_mgmt_key) is optional */
if (!rdev->ops->del_key ||
!rdev->ops->add_key ||
!rdev->ops->set_default_key)
return -EOPNOTSUPP;
idx = erq->flags & IW_ENCODE_INDEX;
if (idx == 0) {
idx = wdev->wext.default_key;
if (idx < 0)
idx = 0;
} else if (idx < 1 || idx > 4)
return -EINVAL;
else
idx--;
if (erq->flags & IW_ENCODE_DISABLED)
remove = true;
else if (erq->length == 0) {
/* No key data - just set the default TX key index */
err = 0;
wdev_lock(wdev);
if (wdev->current_bss)
err = rdev->ops->set_default_key(&rdev->wiphy, dev,
idx, true, true);
if (!err)
wdev->wext.default_key = idx;
wdev_unlock(wdev);
return err;
}
memset(&params, 0, sizeof(params));
params.key = keybuf;
params.key_len = erq->length;
if (erq->length == 5)
params.cipher = WLAN_CIPHER_SUITE_WEP40;
else if (erq->length == 13)
params.cipher = WLAN_CIPHER_SUITE_WEP104;
else if (!remove)
return -EINVAL;
return cfg80211_set_encryption(rdev, dev, false, NULL, remove,
wdev->wext.default_key == -1,
idx, &params);
}
static int cfg80211_wext_siwencodeext(struct net_device *dev,
struct iw_request_info *info,
struct iw_point *erq, char *extra)
{
struct wireless_dev *wdev = dev->ieee80211_ptr;
struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
struct iw_encode_ext *ext = (struct iw_encode_ext *) extra;
const u8 *addr;
int idx;
bool remove = false;
struct key_params params;
u32 cipher;
if (wdev->iftype != NL80211_IFTYPE_STATION &&
wdev->iftype != NL80211_IFTYPE_ADHOC)
return -EOPNOTSUPP;
/* no use -- only MFP (set_default_mgmt_key) is optional */
if (!rdev->ops->del_key ||
!rdev->ops->add_key ||
!rdev->ops->set_default_key)
return -EOPNOTSUPP;
switch (ext->alg) {
case IW_ENCODE_ALG_NONE:
remove = true;
cipher = 0;
break;
case IW_ENCODE_ALG_WEP:
if (ext->key_len == 5)
cipher = WLAN_CIPHER_SUITE_WEP40;
else if (ext->key_len == 13)
cipher = WLAN_CIPHER_SUITE_WEP104;
else
return -EINVAL;
break;
case IW_ENCODE_ALG_TKIP:
cipher = WLAN_CIPHER_SUITE_TKIP;
break;
case IW_ENCODE_ALG_CCMP:
cipher = WLAN_CIPHER_SUITE_CCMP;
break;
case IW_ENCODE_ALG_AES_CMAC:
cipher = WLAN_CIPHER_SUITE_AES_CMAC;
break;
default:
return -EOPNOTSUPP;
}
if (erq->flags & IW_ENCODE_DISABLED)
remove = true;
idx = erq->flags & IW_ENCODE_INDEX;
if (cipher == WLAN_CIPHER_SUITE_AES_CMAC) {
if (idx < 4 || idx > 5) {
idx = wdev->wext.default_mgmt_key;
if (idx < 0)
return -EINVAL;
} else
idx--;
} else {
if (idx < 1 || idx > 4) {
idx = wdev->wext.default_key;
if (idx < 0)
return -EINVAL;
} else
idx--;
}
addr = ext->addr.sa_data;
if (is_broadcast_ether_addr(addr))
addr = NULL;
memset(&params, 0, sizeof(params));
params.key = ext->key;
params.key_len = ext->key_len;
params.cipher = cipher;
if (ext->ext_flags & IW_ENCODE_EXT_RX_SEQ_VALID) {
params.seq = ext->rx_seq;
params.seq_len = 6;
}
return cfg80211_set_encryption(
rdev, dev,
!(ext->ext_flags & IW_ENCODE_EXT_GROUP_KEY),
addr, remove,
ext->ext_flags & IW_ENCODE_EXT_SET_TX_KEY,
idx, &params);
}
static int cfg80211_wext_giwencode(struct net_device *dev,
struct iw_request_info *info,
struct iw_point *erq, char *keybuf)
{
struct wireless_dev *wdev = dev->ieee80211_ptr;
int idx;
if (wdev->iftype != NL80211_IFTYPE_STATION &&
wdev->iftype != NL80211_IFTYPE_ADHOC)
return -EOPNOTSUPP;
idx = erq->flags & IW_ENCODE_INDEX;
if (idx == 0) {
idx = wdev->wext.default_key;
if (idx < 0)
idx = 0;
} else if (idx < 1 || idx > 4)
return -EINVAL;
else
idx--;
erq->flags = idx + 1;
if (!wdev->wext.keys || !wdev->wext.keys->params[idx].cipher) {
erq->flags |= IW_ENCODE_DISABLED;
erq->length = 0;
return 0;
}
erq->length = min_t(size_t, erq->length,
wdev->wext.keys->params[idx].key_len);
memcpy(keybuf, wdev->wext.keys->params[idx].key, erq->length);
erq->flags |= IW_ENCODE_ENABLED;
return 0;
}
static int cfg80211_wext_siwfreq(struct net_device *dev,
struct iw_request_info *info,
struct iw_freq *wextfreq, char *extra)
{
struct wireless_dev *wdev = dev->ieee80211_ptr;
struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
int freq, err;
switch (wdev->iftype) {
case NL80211_IFTYPE_STATION:
return cfg80211_mgd_wext_siwfreq(dev, info, wextfreq, extra);
case NL80211_IFTYPE_ADHOC:
return cfg80211_ibss_wext_siwfreq(dev, info, wextfreq, extra);
case NL80211_IFTYPE_MONITOR:
case NL80211_IFTYPE_WDS:
case NL80211_IFTYPE_MESH_POINT:
freq = cfg80211_wext_freq(wdev->wiphy, wextfreq);
if (freq < 0)
return freq;
if (freq == 0)
return -EINVAL;
mutex_lock(&rdev->devlist_mtx);
wdev_lock(wdev);
err = cfg80211_set_freq(rdev, wdev, freq, NL80211_CHAN_NO_HT);
wdev_unlock(wdev);
mutex_unlock(&rdev->devlist_mtx);
return err;
default:
return -EOPNOTSUPP;
}
}
static int cfg80211_wext_giwfreq(struct net_device *dev,
struct iw_request_info *info,
struct iw_freq *freq, char *extra)
{
struct wireless_dev *wdev = dev->ieee80211_ptr;
struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
struct ieee80211_channel *chan;
switch (wdev->iftype) {
case NL80211_IFTYPE_STATION:
return cfg80211_mgd_wext_giwfreq(dev, info, freq, extra);
case NL80211_IFTYPE_ADHOC:
return cfg80211_ibss_wext_giwfreq(dev, info, freq, extra);
case NL80211_IFTYPE_MONITOR:
if (!rdev->ops->get_channel)
return -EINVAL;
chan = rdev->ops->get_channel(wdev->wiphy);
if (!chan)
return -EINVAL;
freq->m = chan->center_freq;
freq->e = 6;
return 0;
default:
if (!wdev->channel)
return -EINVAL;
freq->m = wdev->channel->center_freq;
freq->e = 6;
return 0;
}
}
static int cfg80211_wext_siwtxpower(struct net_device *dev,
struct iw_request_info *info,
union iwreq_data *data, char *extra)
{
struct wireless_dev *wdev = dev->ieee80211_ptr;
struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
enum nl80211_tx_power_setting type;
int dbm = 0;
if ((data->txpower.flags & IW_TXPOW_TYPE) != IW_TXPOW_DBM)
return -EINVAL;
if (data->txpower.flags & IW_TXPOW_RANGE)
return -EINVAL;
if (!rdev->ops->set_tx_power)
return -EOPNOTSUPP;
/* only change when not disabling */
if (!data->txpower.disabled) {
rfkill_set_sw_state(rdev->rfkill, false);
if (data->txpower.fixed) {
/*
* wext doesn't support negative values here;
* a negative value means automatic, handled below
*/
if (data->txpower.value < 0)
return -EINVAL;
dbm = data->txpower.value;
type = NL80211_TX_POWER_FIXED;
/* TODO: do regulatory check! */
} else {
/*
* Automatic power level setting, max being the value
* passed in from userland.
*/
if (data->txpower.value < 0) {
type = NL80211_TX_POWER_AUTOMATIC;
} else {
dbm = data->txpower.value;
type = NL80211_TX_POWER_LIMITED;
}
}
} else {
rfkill_set_sw_state(rdev->rfkill, true);
schedule_work(&rdev->rfkill_sync);
return 0;
}
return rdev->ops->set_tx_power(wdev->wiphy, type, DBM_TO_MBM(dbm));
}
static int cfg80211_wext_giwtxpower(struct net_device *dev,
struct iw_request_info *info,
union iwreq_data *data, char *extra)
{
struct wireless_dev *wdev = dev->ieee80211_ptr;
struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
int err, val;
if ((data->txpower.flags & IW_TXPOW_TYPE) != IW_TXPOW_DBM)
return -EINVAL;
if (data->txpower.flags & IW_TXPOW_RANGE)
return -EINVAL;
if (!rdev->ops->get_tx_power)
return -EOPNOTSUPP;
err = rdev->ops->get_tx_power(wdev->wiphy, &val);
if (err)
return err;
/* well... oh well */
data->txpower.fixed = 1;
data->txpower.disabled = rfkill_blocked(rdev->rfkill);
data->txpower.value = val;
data->txpower.flags = IW_TXPOW_DBM;
return 0;
}
static int cfg80211_set_auth_alg(struct wireless_dev *wdev,
s32 auth_alg)
{
int nr_alg = 0;
if (!auth_alg)
return -EINVAL;
if (auth_alg & ~(IW_AUTH_ALG_OPEN_SYSTEM |
IW_AUTH_ALG_SHARED_KEY |
IW_AUTH_ALG_LEAP))
return -EINVAL;
if (auth_alg & IW_AUTH_ALG_OPEN_SYSTEM) {
nr_alg++;
wdev->wext.connect.auth_type = NL80211_AUTHTYPE_OPEN_SYSTEM;
}
if (auth_alg & IW_AUTH_ALG_SHARED_KEY) {
nr_alg++;
wdev->wext.connect.auth_type = NL80211_AUTHTYPE_SHARED_KEY;
}
if (auth_alg & IW_AUTH_ALG_LEAP) {
nr_alg++;
wdev->wext.connect.auth_type = NL80211_AUTHTYPE_NETWORK_EAP;
}
if (nr_alg > 1)
wdev->wext.connect.auth_type = NL80211_AUTHTYPE_AUTOMATIC;
return 0;
}
static int cfg80211_set_wpa_version(struct wireless_dev *wdev, u32 wpa_versions)
{
if (wpa_versions & ~(IW_AUTH_WPA_VERSION_WPA |
IW_AUTH_WPA_VERSION_WPA2|
IW_AUTH_WPA_VERSION_DISABLED))
return -EINVAL;
if ((wpa_versions & IW_AUTH_WPA_VERSION_DISABLED) &&
(wpa_versions & (IW_AUTH_WPA_VERSION_WPA|
IW_AUTH_WPA_VERSION_WPA2)))
return -EINVAL;
if (wpa_versions & IW_AUTH_WPA_VERSION_DISABLED)
wdev->wext.connect.crypto.wpa_versions &=
~(NL80211_WPA_VERSION_1|NL80211_WPA_VERSION_2);
if (wpa_versions & IW_AUTH_WPA_VERSION_WPA)
wdev->wext.connect.crypto.wpa_versions |=
NL80211_WPA_VERSION_1;
if (wpa_versions & IW_AUTH_WPA_VERSION_WPA2)
wdev->wext.connect.crypto.wpa_versions |=
NL80211_WPA_VERSION_2;
return 0;
}
static int cfg80211_set_cipher_group(struct wireless_dev *wdev, u32 cipher)
{
if (cipher & IW_AUTH_CIPHER_WEP40)
wdev->wext.connect.crypto.cipher_group =
WLAN_CIPHER_SUITE_WEP40;
else if (cipher & IW_AUTH_CIPHER_WEP104)
wdev->wext.connect.crypto.cipher_group =
WLAN_CIPHER_SUITE_WEP104;
else if (cipher & IW_AUTH_CIPHER_TKIP)
wdev->wext.connect.crypto.cipher_group =
WLAN_CIPHER_SUITE_TKIP;
else if (cipher & IW_AUTH_CIPHER_CCMP)
wdev->wext.connect.crypto.cipher_group =
WLAN_CIPHER_SUITE_CCMP;
else if (cipher & IW_AUTH_CIPHER_AES_CMAC)
wdev->wext.connect.crypto.cipher_group =
WLAN_CIPHER_SUITE_AES_CMAC;
else if (cipher & IW_AUTH_CIPHER_NONE)
wdev->wext.connect.crypto.cipher_group = 0;
else
return -EINVAL;
return 0;
}
static int cfg80211_set_cipher_pairwise(struct wireless_dev *wdev, u32 cipher)
{
int nr_ciphers = 0;
u32 *ciphers_pairwise = wdev->wext.connect.crypto.ciphers_pairwise;
if (cipher & IW_AUTH_CIPHER_WEP40) {
ciphers_pairwise[nr_ciphers] = WLAN_CIPHER_SUITE_WEP40;
nr_ciphers++;
}
if (cipher & IW_AUTH_CIPHER_WEP104) {
ciphers_pairwise[nr_ciphers] = WLAN_CIPHER_SUITE_WEP104;
nr_ciphers++;
}
if (cipher & IW_AUTH_CIPHER_TKIP) {
ciphers_pairwise[nr_ciphers] = WLAN_CIPHER_SUITE_TKIP;
nr_ciphers++;
}
if (cipher & IW_AUTH_CIPHER_CCMP) {
ciphers_pairwise[nr_ciphers] = WLAN_CIPHER_SUITE_CCMP;
nr_ciphers++;
}
if (cipher & IW_AUTH_CIPHER_AES_CMAC) {
ciphers_pairwise[nr_ciphers] = WLAN_CIPHER_SUITE_AES_CMAC;
nr_ciphers++;
}
BUILD_BUG_ON(NL80211_MAX_NR_CIPHER_SUITES < 5);
wdev->wext.connect.crypto.n_ciphers_pairwise = nr_ciphers;
return 0;
}
static int cfg80211_set_key_mgt(struct wireless_dev *wdev, u32 key_mgt)
{
int nr_akm_suites = 0;
if (key_mgt & ~(IW_AUTH_KEY_MGMT_802_1X |
IW_AUTH_KEY_MGMT_PSK))
return -EINVAL;
if (key_mgt & IW_AUTH_KEY_MGMT_802_1X) {
wdev->wext.connect.crypto.akm_suites[nr_akm_suites] =
WLAN_AKM_SUITE_8021X;
nr_akm_suites++;
}
if (key_mgt & IW_AUTH_KEY_MGMT_PSK) {
wdev->wext.connect.crypto.akm_suites[nr_akm_suites] =
WLAN_AKM_SUITE_PSK;
nr_akm_suites++;
}
wdev->wext.connect.crypto.n_akm_suites = nr_akm_suites;
return 0;
}
static int cfg80211_wext_siwauth(struct net_device *dev,
struct iw_request_info *info,
struct iw_param *data, char *extra)
{
struct wireless_dev *wdev = dev->ieee80211_ptr;
if (wdev->iftype != NL80211_IFTYPE_STATION)
return -EOPNOTSUPP;
switch (data->flags & IW_AUTH_INDEX) {
case IW_AUTH_PRIVACY_INVOKED:
wdev->wext.connect.privacy = data->value;
return 0;
case IW_AUTH_WPA_VERSION:
return cfg80211_set_wpa_version(wdev, data->value);
case IW_AUTH_CIPHER_GROUP:
return cfg80211_set_cipher_group(wdev, data->value);
case IW_AUTH_KEY_MGMT:
return cfg80211_set_key_mgt(wdev, data->value);
case IW_AUTH_CIPHER_PAIRWISE:
return cfg80211_set_cipher_pairwise(wdev, data->value);
case IW_AUTH_80211_AUTH_ALG:
return cfg80211_set_auth_alg(wdev, data->value);
case IW_AUTH_WPA_ENABLED:
case IW_AUTH_RX_UNENCRYPTED_EAPOL:
case IW_AUTH_DROP_UNENCRYPTED:
case IW_AUTH_MFP:
return 0;
default:
return -EOPNOTSUPP;
}
}
static int cfg80211_wext_giwauth(struct net_device *dev,
struct iw_request_info *info,
struct iw_param *data, char *extra)
{
/* XXX: what do we need? */
return -EOPNOTSUPP;
}
static int cfg80211_wext_siwpower(struct net_device *dev,
struct iw_request_info *info,
struct iw_param *wrq, char *extra)
{
struct wireless_dev *wdev = dev->ieee80211_ptr;
struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
bool ps = wdev->ps;
int timeout = wdev->ps_timeout;
int err;
if (wdev->iftype != NL80211_IFTYPE_STATION)
return -EINVAL;
if (!rdev->ops->set_power_mgmt)
return -EOPNOTSUPP;
if (wrq->disabled) {
ps = false;
} else {
switch (wrq->flags & IW_POWER_MODE) {
case IW_POWER_ON: /* If not specified */
case IW_POWER_MODE: /* If set all mask */
case IW_POWER_ALL_R: /* If explicitly stated all */
ps = true;
break;
default: /* Anything else is invalid */
return -EINVAL;
}
if (wrq->flags & ~(IW_POWER_MODE | IW_POWER_TIMEOUT))
return -EINVAL;
if (wrq->flags & IW_POWER_TIMEOUT)
timeout = wrq->value / 1000;
}
err = rdev->ops->set_power_mgmt(wdev->wiphy, dev, ps, timeout);
if (err)
return err;
wdev->ps = ps;
wdev->ps_timeout = timeout;
return 0;
}
static int cfg80211_wext_giwpower(struct net_device *dev,
struct iw_request_info *info,
struct iw_param *wrq, char *extra)
{
struct wireless_dev *wdev = dev->ieee80211_ptr;
wrq->disabled = !wdev->ps;
return 0;
}
static int cfg80211_wds_wext_siwap(struct net_device *dev,
struct iw_request_info *info,
struct sockaddr *addr, char *extra)
{
struct wireless_dev *wdev = dev->ieee80211_ptr;
struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
int err;
if (WARN_ON(wdev->iftype != NL80211_IFTYPE_WDS))
return -EINVAL;
if (addr->sa_family != ARPHRD_ETHER)
return -EINVAL;
if (netif_running(dev))
return -EBUSY;
if (!rdev->ops->set_wds_peer)
return -EOPNOTSUPP;
err = rdev->ops->set_wds_peer(wdev->wiphy, dev, (u8 *) &addr->sa_data);
if (err)
return err;
memcpy(&wdev->wext.bssid, (u8 *) &addr->sa_data, ETH_ALEN);
return 0;
}
static int cfg80211_wds_wext_giwap(struct net_device *dev,
struct iw_request_info *info,
struct sockaddr *addr, char *extra)
{
struct wireless_dev *wdev = dev->ieee80211_ptr;
if (WARN_ON(wdev->iftype != NL80211_IFTYPE_WDS))
return -EINVAL;
addr->sa_family = ARPHRD_ETHER;
memcpy(&addr->sa_data, wdev->wext.bssid, ETH_ALEN);
return 0;
}
static int cfg80211_wext_siwrate(struct net_device *dev,
struct iw_request_info *info,
struct iw_param *rate, char *extra)
{
struct wireless_dev *wdev = dev->ieee80211_ptr;
struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
struct cfg80211_bitrate_mask mask;
u32 fixed, maxrate;
struct ieee80211_supported_band *sband;
int band, ridx;
bool match = false;
if (!rdev->ops->set_bitrate_mask)
return -EOPNOTSUPP;
memset(&mask, 0, sizeof(mask));
fixed = 0;
maxrate = (u32)-1;
if (rate->value < 0) {
/* nothing */
} else if (rate->fixed) {
fixed = rate->value / 100000;
} else {
maxrate = rate->value / 100000;
}
for (band = 0; band < IEEE80211_NUM_BANDS; band++) {
sband = wdev->wiphy->bands[band];
if (sband == NULL)
continue;
for (ridx = 0; ridx < sband->n_bitrates; ridx++) {
struct ieee80211_rate *srate = &sband->bitrates[ridx];
if (fixed == srate->bitrate) {
mask.control[band].legacy = 1 << ridx;
match = true;
break;
}
if (srate->bitrate <= maxrate) {
mask.control[band].legacy |= 1 << ridx;
match = true;
}
}
}
if (!match)
return -EINVAL;
return rdev->ops->set_bitrate_mask(wdev->wiphy, dev, NULL, &mask);
}
static int cfg80211_wext_giwrate(struct net_device *dev,
struct iw_request_info *info,
struct iw_param *rate, char *extra)
{
struct wireless_dev *wdev = dev->ieee80211_ptr;
struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
/* we are under RTNL - globally locked - so can use a static struct */
static struct station_info sinfo;
u8 addr[ETH_ALEN];
int err;
if (wdev->iftype != NL80211_IFTYPE_STATION)
return -EOPNOTSUPP;
if (!rdev->ops->get_station)
return -EOPNOTSUPP;
err = 0;
wdev_lock(wdev);
if (wdev->current_bss)
memcpy(addr, wdev->current_bss->pub.bssid, ETH_ALEN);
else
err = -EOPNOTSUPP;
wdev_unlock(wdev);
if (err)
return err;
err = rdev->ops->get_station(&rdev->wiphy, dev, addr, &sinfo);
if (err)
return err;
if (!(sinfo.filled & STATION_INFO_TX_BITRATE))
return -EOPNOTSUPP;
rate->value = 100000 * cfg80211_calculate_bitrate(&sinfo.txrate);
return 0;
}
/* Get wireless statistics. Called by /proc/net/wireless and by SIOCGIWSTATS */
static struct iw_statistics *cfg80211_wireless_stats(struct net_device *dev)
{
struct wireless_dev *wdev = dev->ieee80211_ptr;
struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
/* we are under RTNL - globally locked - so can use static structs */
static struct iw_statistics wstats;
static struct station_info sinfo;
u8 bssid[ETH_ALEN];
if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_STATION)
return NULL;
if (!rdev->ops->get_station)
return NULL;
/* Grab BSSID of current BSS, if any */
wdev_lock(wdev);
if (!wdev->current_bss) {
wdev_unlock(wdev);
return NULL;
}
memcpy(bssid, wdev->current_bss->pub.bssid, ETH_ALEN);
wdev_unlock(wdev);
if (rdev->ops->get_station(&rdev->wiphy, dev, bssid, &sinfo))
return NULL;
memset(&wstats, 0, sizeof(wstats));
switch (rdev->wiphy.signal_type) {
case CFG80211_SIGNAL_TYPE_MBM:
if (sinfo.filled & STATION_INFO_SIGNAL) {
int sig = sinfo.signal;
wstats.qual.updated |= IW_QUAL_LEVEL_UPDATED;
wstats.qual.updated |= IW_QUAL_QUAL_UPDATED;
wstats.qual.updated |= IW_QUAL_DBM;
wstats.qual.level = sig;
if (sig < -110)
sig = -110;
else if (sig > -40)
sig = -40;
wstats.qual.qual = sig + 110;
break;
}
case CFG80211_SIGNAL_TYPE_UNSPEC:
if (sinfo.filled & STATION_INFO_SIGNAL) {
wstats.qual.updated |= IW_QUAL_LEVEL_UPDATED;
wstats.qual.updated |= IW_QUAL_QUAL_UPDATED;
wstats.qual.level = sinfo.signal;
wstats.qual.qual = sinfo.signal;
break;
}
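/* fall through */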
default:
wstats.qual.updated |= IW_QUAL_LEVEL_INVALID;
wstats.qual.updated |= IW_QUAL_QUAL_INVALID;
}
wstats.qual.updated |= IW_QUAL_NOISE_INVALID;
if (sinfo.filled & STATION_INFO_RX_DROP_MISC)
wstats.discard.misc = sinfo.rx_dropped_misc;
if (sinfo.filled & STATION_INFO_TX_FAILED)
wstats.discard.retries = sinfo.tx_failed;
return &wstats;
}
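/*
 * Standalone illustration (not part of this driver) of the dBm-to-quality
 * mapping used for CFG80211_SIGNAL_TYPE_MBM above: the signal is clamped
 * to [-110, -40] dBm and shifted by +110, yielding a quality value in the
 * range 0..70.
 */
#include <stdio.h>

static int dbm_to_qual(int sig)
{
	if (sig < -110)
		sig = -110;
	else if (sig > -40)
		sig = -40;
	return sig + 110;	/* 0 (weak) .. 70 (strong) */
}

int main(void)
{
	printf("%d %d %d\n", dbm_to_qual(-120), dbm_to_qual(-65),
	       dbm_to_qual(-30));	/* prints: 0 45 70 */
	return 0;
}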
static int cfg80211_wext_siwap(struct net_device *dev,
struct iw_request_info *info,
struct sockaddr *ap_addr, char *extra)
{
struct wireless_dev *wdev = dev->ieee80211_ptr;
switch (wdev->iftype) {
case NL80211_IFTYPE_ADHOC:
return cfg80211_ibss_wext_siwap(dev, info, ap_addr, extra);
case NL80211_IFTYPE_STATION:
return cfg80211_mgd_wext_siwap(dev, info, ap_addr, extra);
case NL80211_IFTYPE_WDS:
return cfg80211_wds_wext_siwap(dev, info, ap_addr, extra);
default:
return -EOPNOTSUPP;
}
}
static int cfg80211_wext_giwap(struct net_device *dev,
struct iw_request_info *info,
struct sockaddr *ap_addr, char *extra)
{
struct wireless_dev *wdev = dev->ieee80211_ptr;
switch (wdev->iftype) {
case NL80211_IFTYPE_ADHOC:
return cfg80211_ibss_wext_giwap(dev, info, ap_addr, extra);
case NL80211_IFTYPE_STATION:
return cfg80211_mgd_wext_giwap(dev, info, ap_addr, extra);
case NL80211_IFTYPE_WDS:
return cfg80211_wds_wext_giwap(dev, info, ap_addr, extra);
default:
return -EOPNOTSUPP;
}
}
static int cfg80211_wext_siwessid(struct net_device *dev,
struct iw_request_info *info,
struct iw_point *data, char *ssid)
{
struct wireless_dev *wdev = dev->ieee80211_ptr;
switch (wdev->iftype) {
case NL80211_IFTYPE_ADHOC:
return cfg80211_ibss_wext_siwessid(dev, info, data, ssid);
case NL80211_IFTYPE_STATION:
return cfg80211_mgd_wext_siwessid(dev, info, data, ssid);
default:
return -EOPNOTSUPP;
}
}
static int cfg80211_wext_giwessid(struct net_device *dev,
struct iw_request_info *info,
struct iw_point *data, char *ssid)
{
struct wireless_dev *wdev = dev->ieee80211_ptr;
data->flags = 0;
data->length = 0;
switch (wdev->iftype) {
case NL80211_IFTYPE_ADHOC:
return cfg80211_ibss_wext_giwessid(dev, info, data, ssid);
case NL80211_IFTYPE_STATION:
return cfg80211_mgd_wext_giwessid(dev, info, data, ssid);
default:
return -EOPNOTSUPP;
}
}
static int cfg80211_wext_siwpmksa(struct net_device *dev,
struct iw_request_info *info,
struct iw_point *data, char *extra)
{
struct wireless_dev *wdev = dev->ieee80211_ptr;
struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
struct cfg80211_pmksa cfg_pmksa;
struct iw_pmksa *pmksa = (struct iw_pmksa *)extra;
memset(&cfg_pmksa, 0, sizeof(struct cfg80211_pmksa));
if (wdev->iftype != NL80211_IFTYPE_STATION)
return -EINVAL;
cfg_pmksa.bssid = pmksa->bssid.sa_data;
cfg_pmksa.pmkid = pmksa->pmkid;
switch (pmksa->cmd) {
case IW_PMKSA_ADD:
if (!rdev->ops->set_pmksa)
return -EOPNOTSUPP;
return rdev->ops->set_pmksa(&rdev->wiphy, dev, &cfg_pmksa);
case IW_PMKSA_REMOVE:
if (!rdev->ops->del_pmksa)
return -EOPNOTSUPP;
return rdev->ops->del_pmksa(&rdev->wiphy, dev, &cfg_pmksa);
case IW_PMKSA_FLUSH:
if (!rdev->ops->flush_pmksa)
return -EOPNOTSUPP;
return rdev->ops->flush_pmksa(&rdev->wiphy, dev);
default:
return -EOPNOTSUPP;
}
}
static const iw_handler cfg80211_handlers[] = {
[IW_IOCTL_IDX(SIOCGIWNAME)] = (iw_handler) cfg80211_wext_giwname,
[IW_IOCTL_IDX(SIOCSIWFREQ)] = (iw_handler) cfg80211_wext_siwfreq,
[IW_IOCTL_IDX(SIOCGIWFREQ)] = (iw_handler) cfg80211_wext_giwfreq,
[IW_IOCTL_IDX(SIOCSIWMODE)] = (iw_handler) cfg80211_wext_siwmode,
[IW_IOCTL_IDX(SIOCGIWMODE)] = (iw_handler) cfg80211_wext_giwmode,
[IW_IOCTL_IDX(SIOCGIWRANGE)] = (iw_handler) cfg80211_wext_giwrange,
[IW_IOCTL_IDX(SIOCSIWAP)] = (iw_handler) cfg80211_wext_siwap,
[IW_IOCTL_IDX(SIOCGIWAP)] = (iw_handler) cfg80211_wext_giwap,
[IW_IOCTL_IDX(SIOCSIWMLME)] = (iw_handler) cfg80211_wext_siwmlme,
[IW_IOCTL_IDX(SIOCSIWSCAN)] = (iw_handler) cfg80211_wext_siwscan,
[IW_IOCTL_IDX(SIOCGIWSCAN)] = (iw_handler) cfg80211_wext_giwscan,
[IW_IOCTL_IDX(SIOCSIWESSID)] = (iw_handler) cfg80211_wext_siwessid,
[IW_IOCTL_IDX(SIOCGIWESSID)] = (iw_handler) cfg80211_wext_giwessid,
[IW_IOCTL_IDX(SIOCSIWRATE)] = (iw_handler) cfg80211_wext_siwrate,
[IW_IOCTL_IDX(SIOCGIWRATE)] = (iw_handler) cfg80211_wext_giwrate,
[IW_IOCTL_IDX(SIOCSIWRTS)] = (iw_handler) cfg80211_wext_siwrts,
[IW_IOCTL_IDX(SIOCGIWRTS)] = (iw_handler) cfg80211_wext_giwrts,
[IW_IOCTL_IDX(SIOCSIWFRAG)] = (iw_handler) cfg80211_wext_siwfrag,
[IW_IOCTL_IDX(SIOCGIWFRAG)] = (iw_handler) cfg80211_wext_giwfrag,
[IW_IOCTL_IDX(SIOCSIWTXPOW)] = (iw_handler) cfg80211_wext_siwtxpower,
[IW_IOCTL_IDX(SIOCGIWTXPOW)] = (iw_handler) cfg80211_wext_giwtxpower,
[IW_IOCTL_IDX(SIOCSIWRETRY)] = (iw_handler) cfg80211_wext_siwretry,
[IW_IOCTL_IDX(SIOCGIWRETRY)] = (iw_handler) cfg80211_wext_giwretry,
[IW_IOCTL_IDX(SIOCSIWENCODE)] = (iw_handler) cfg80211_wext_siwencode,
[IW_IOCTL_IDX(SIOCGIWENCODE)] = (iw_handler) cfg80211_wext_giwencode,
[IW_IOCTL_IDX(SIOCSIWPOWER)] = (iw_handler) cfg80211_wext_siwpower,
[IW_IOCTL_IDX(SIOCGIWPOWER)] = (iw_handler) cfg80211_wext_giwpower,
[IW_IOCTL_IDX(SIOCSIWGENIE)] = (iw_handler) cfg80211_wext_siwgenie,
[IW_IOCTL_IDX(SIOCSIWAUTH)] = (iw_handler) cfg80211_wext_siwauth,
[IW_IOCTL_IDX(SIOCGIWAUTH)] = (iw_handler) cfg80211_wext_giwauth,
[IW_IOCTL_IDX(SIOCSIWENCODEEXT)]= (iw_handler) cfg80211_wext_siwencodeext,
[IW_IOCTL_IDX(SIOCSIWPMKSA)] = (iw_handler) cfg80211_wext_siwpmksa,
};
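/*
 * The table above is indexed by IW_IOCTL_IDX(), which maps a standard
 * SIOCxIW* ioctl number to a zero-based slot (cmd - SIOCIWFIRST); slots
 * left out stay NULL and the corresponding requests are rejected by the
 * wext core.
 */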
const struct iw_handler_def cfg80211_wext_handler = {
.num_standard = ARRAY_SIZE(cfg80211_handlers),
.standard = cfg80211_handlers,
.get_wireless_stats = cfg80211_wireless_stats,
};
| gpl-2.0 |
Zuli/kernel_sony_lt28 | drivers/media/common/tuners/tda8290.c | 8106 | 24382 | /*
i2c tv tuner chip device driver
controls the philips tda8290+75 tuner chip combo.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
This "tda8290" module was split apart from the original "tuner" module.
*/
#include <linux/i2c.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/videodev2.h>
#include "tuner-i2c.h"
#include "tda8290.h"
#include "tda827x.h"
#include "tda18271.h"
static int debug;
module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, "enable verbose debug messages");
static int deemphasis_50;
module_param(deemphasis_50, int, 0644);
MODULE_PARM_DESC(deemphasis_50, "0 - 75us deemphasis; 1 - 50us deemphasis");
/* ---------------------------------------------------------------------- */
struct tda8290_priv {
struct tuner_i2c_props i2c_props;
unsigned char tda8290_easy_mode;
unsigned char tda827x_addr;
unsigned char ver;
#define TDA8290 1
#define TDA8295 2
#define TDA8275 4
#define TDA8275A 8
#define TDA18271 16
struct tda827x_config cfg;
};
/*---------------------------------------------------------------------*/
static int tda8290_i2c_bridge(struct dvb_frontend *fe, int close)
{
struct tda8290_priv *priv = fe->analog_demod_priv;
unsigned char enable[2] = { 0x21, 0xC0 };
unsigned char disable[2] = { 0x21, 0x00 };
unsigned char *msg;
if (close) {
msg = enable;
tuner_i2c_xfer_send(&priv->i2c_props, msg, 2);
/* let the bridge stabilize */
msleep(20);
} else {
msg = disable;
tuner_i2c_xfer_send(&priv->i2c_props, msg, 2);
}
return 0;
}
static int tda8295_i2c_bridge(struct dvb_frontend *fe, int close)
{
struct tda8290_priv *priv = fe->analog_demod_priv;
unsigned char enable[2] = { 0x45, 0xc1 };
unsigned char disable[2] = { 0x46, 0x00 };
unsigned char buf[3] = { 0x45, 0x01, 0x00 };
unsigned char *msg;
if (close) {
msg = enable;
tuner_i2c_xfer_send(&priv->i2c_props, msg, 2);
/* let the bridge stabilize */
msleep(20);
} else {
msg = disable;
tuner_i2c_xfer_send_recv(&priv->i2c_props, msg, 1, &msg[1], 1);
buf[2] = msg[1];
buf[2] &= ~0x04;
tuner_i2c_xfer_send(&priv->i2c_props, buf, 3);
msleep(5);
msg[1] |= 0x04;
tuner_i2c_xfer_send(&priv->i2c_props, msg, 2);
}
return 0;
}
/*---------------------------------------------------------------------*/
static void set_audio(struct dvb_frontend *fe,
struct analog_parameters *params)
{
struct tda8290_priv *priv = fe->analog_demod_priv;
char *mode;
if (params->std & V4L2_STD_MN) {
priv->tda8290_easy_mode = 0x01;
mode = "MN";
} else if (params->std & V4L2_STD_B) {
priv->tda8290_easy_mode = 0x02;
mode = "B";
} else if (params->std & V4L2_STD_GH) {
priv->tda8290_easy_mode = 0x04;
mode = "GH";
} else if (params->std & V4L2_STD_PAL_I) {
priv->tda8290_easy_mode = 0x08;
mode = "I";
} else if (params->std & V4L2_STD_DK) {
priv->tda8290_easy_mode = 0x10;
mode = "DK";
} else if (params->std & V4L2_STD_SECAM_L) {
priv->tda8290_easy_mode = 0x20;
mode = "L";
} else if (params->std & V4L2_STD_SECAM_LC) {
priv->tda8290_easy_mode = 0x40;
mode = "LC";
} else {
priv->tda8290_easy_mode = 0x10;
mode = "xx";
}
if (params->mode == V4L2_TUNER_RADIO) {
/* Set TDA8295 to FM radio; Start TDA8290 with MN values */
priv->tda8290_easy_mode = (priv->ver & TDA8295) ? 0x80 : 0x01;
tuner_dbg("setting to radio FM\n");
} else {
tuner_dbg("setting tda829x to system %s\n", mode);
}
}
static struct {
unsigned char seq[2];
} fm_mode[] = {
{ { 0x01, 0x81} }, /* Put device into expert mode */
{ { 0x03, 0x48} }, /* Disable NOTCH and VIDEO filters */
{ { 0x04, 0x04} }, /* Disable color carrier filter (SSIF) */
{ { 0x05, 0x04} }, /* ADC headroom */
{ { 0x06, 0x10} }, /* group delay flat */
{ { 0x07, 0x00} }, /* use the same radio DTO values as a tda8295 */
{ { 0x08, 0x00} },
{ { 0x09, 0x80} },
{ { 0x0a, 0xda} },
{ { 0x0b, 0x4b} },
{ { 0x0c, 0x68} },
{ { 0x0d, 0x00} }, /* PLL off, no video carrier detect */
{ { 0x14, 0x00} }, /* disable auto mute if no video */
};
static void tda8290_set_params(struct dvb_frontend *fe,
struct analog_parameters *params)
{
struct tda8290_priv *priv = fe->analog_demod_priv;
unsigned char soft_reset[] = { 0x00, 0x00 };
unsigned char easy_mode[] = { 0x01, priv->tda8290_easy_mode };
unsigned char expert_mode[] = { 0x01, 0x80 };
unsigned char agc_out_on[] = { 0x02, 0x00 };
unsigned char gainset_off[] = { 0x28, 0x14 };
unsigned char if_agc_spd[] = { 0x0f, 0x88 };
unsigned char adc_head_6[] = { 0x05, 0x04 };
unsigned char adc_head_9[] = { 0x05, 0x02 };
unsigned char adc_head_12[] = { 0x05, 0x01 };
unsigned char pll_bw_nom[] = { 0x0d, 0x47 };
unsigned char pll_bw_low[] = { 0x0d, 0x27 };
unsigned char gainset_2[] = { 0x28, 0x64 };
unsigned char agc_rst_on[] = { 0x0e, 0x0b };
unsigned char agc_rst_off[] = { 0x0e, 0x09 };
unsigned char if_agc_set[] = { 0x0f, 0x81 };
unsigned char addr_adc_sat = 0x1a;
unsigned char addr_agc_stat = 0x1d;
unsigned char addr_pll_stat = 0x1b;
unsigned char adc_sat, agc_stat,
pll_stat;
int i;
set_audio(fe, params);
if (priv->cfg.config)
tuner_dbg("tda827xa config is 0x%02x\n", priv->cfg.config);
tuner_i2c_xfer_send(&priv->i2c_props, easy_mode, 2);
tuner_i2c_xfer_send(&priv->i2c_props, agc_out_on, 2);
tuner_i2c_xfer_send(&priv->i2c_props, soft_reset, 2);
msleep(1);
if (params->mode == V4L2_TUNER_RADIO) {
unsigned char deemphasis[] = { 0x13, 1 };
/* FIXME: allow using a different deemphasis */
if (deemphasis_50)
deemphasis[1] = 2;
for (i = 0; i < ARRAY_SIZE(fm_mode); i++)
tuner_i2c_xfer_send(&priv->i2c_props, fm_mode[i].seq, 2);
tuner_i2c_xfer_send(&priv->i2c_props, deemphasis, 2);
} else {
expert_mode[1] = priv->tda8290_easy_mode + 0x80;
tuner_i2c_xfer_send(&priv->i2c_props, expert_mode, 2);
tuner_i2c_xfer_send(&priv->i2c_props, gainset_off, 2);
tuner_i2c_xfer_send(&priv->i2c_props, if_agc_spd, 2);
if (priv->tda8290_easy_mode & 0x60)
tuner_i2c_xfer_send(&priv->i2c_props, adc_head_9, 2);
else
tuner_i2c_xfer_send(&priv->i2c_props, adc_head_6, 2);
tuner_i2c_xfer_send(&priv->i2c_props, pll_bw_nom, 2);
}
tda8290_i2c_bridge(fe, 1);
if (fe->ops.tuner_ops.set_analog_params)
fe->ops.tuner_ops.set_analog_params(fe, params);
for (i = 0; i < 3; i++) {
tuner_i2c_xfer_send_recv(&priv->i2c_props,
&addr_pll_stat, 1, &pll_stat, 1);
if (pll_stat & 0x80) {
tuner_i2c_xfer_send_recv(&priv->i2c_props,
&addr_adc_sat, 1,
&adc_sat, 1);
tuner_i2c_xfer_send_recv(&priv->i2c_props,
&addr_agc_stat, 1,
&agc_stat, 1);
tuner_dbg("tda8290 is locked, AGC: %d\n", agc_stat);
break;
} else {
tuner_dbg("tda8290 not locked, no signal?\n");
msleep(100);
}
}
/* adjust ADC headroom and/or gain */
if ((agc_stat > 115) || (!(pll_stat & 0x80) && (adc_sat < 20))) {
tuner_dbg("adjust gain, step 1. Agc: %d, ADC stat: %d, lock: %d\n",
agc_stat, adc_sat, pll_stat & 0x80);
tuner_i2c_xfer_send(&priv->i2c_props, gainset_2, 2);
msleep(100);
tuner_i2c_xfer_send_recv(&priv->i2c_props,
&addr_agc_stat, 1, &agc_stat, 1);
tuner_i2c_xfer_send_recv(&priv->i2c_props,
&addr_pll_stat, 1, &pll_stat, 1);
if ((agc_stat > 115) || !(pll_stat & 0x80)) {
tuner_dbg("adjust gain, step 2. Agc: %d, lock: %d\n",
agc_stat, pll_stat & 0x80);
if (priv->cfg.agcf)
priv->cfg.agcf(fe);
msleep(100);
tuner_i2c_xfer_send_recv(&priv->i2c_props,
&addr_agc_stat, 1,
&agc_stat, 1);
tuner_i2c_xfer_send_recv(&priv->i2c_props,
&addr_pll_stat, 1,
&pll_stat, 1);
if ((agc_stat > 115) || !(pll_stat & 0x80)) {
tuner_dbg("adjust gain, step 3. Agc: %d\n", agc_stat);
tuner_i2c_xfer_send(&priv->i2c_props, adc_head_12, 2);
tuner_i2c_xfer_send(&priv->i2c_props, pll_bw_low, 2);
msleep(100);
}
}
}
/* SECAM L/L' deadlock? */
if (priv->tda8290_easy_mode & 0x60) {
tuner_i2c_xfer_send_recv(&priv->i2c_props,
&addr_adc_sat, 1,
&adc_sat, 1);
tuner_i2c_xfer_send_recv(&priv->i2c_props,
&addr_pll_stat, 1,
&pll_stat, 1);
if ((adc_sat > 20) || !(pll_stat & 0x80)) {
tuner_dbg("trying to resolve SECAM L deadlock\n");
tuner_i2c_xfer_send(&priv->i2c_props, agc_rst_on, 2);
msleep(40);
tuner_i2c_xfer_send(&priv->i2c_props, agc_rst_off, 2);
}
}
tda8290_i2c_bridge(fe, 0);
tuner_i2c_xfer_send(&priv->i2c_props, if_agc_set, 2);
}
/*---------------------------------------------------------------------*/
static void tda8295_power(struct dvb_frontend *fe, int enable)
{
struct tda8290_priv *priv = fe->analog_demod_priv;
unsigned char buf[] = { 0x30, 0x00 }; /* clb_stdbt */
tuner_i2c_xfer_send_recv(&priv->i2c_props, &buf[0], 1, &buf[1], 1);
if (enable)
buf[1] = 0x01;
else
buf[1] = 0x03;
tuner_i2c_xfer_send(&priv->i2c_props, buf, 2);
}
static void tda8295_set_easy_mode(struct dvb_frontend *fe, int enable)
{
struct tda8290_priv *priv = fe->analog_demod_priv;
unsigned char buf[] = { 0x01, 0x00 };
tuner_i2c_xfer_send_recv(&priv->i2c_props, &buf[0], 1, &buf[1], 1);
if (enable)
buf[1] = 0x01; /* rising edge sets regs 0x02 - 0x23 */
else
buf[1] = 0x00; /* reset active bit */
tuner_i2c_xfer_send(&priv->i2c_props, buf, 2);
}
static void tda8295_set_video_std(struct dvb_frontend *fe)
{
struct tda8290_priv *priv = fe->analog_demod_priv;
unsigned char buf[] = { 0x00, priv->tda8290_easy_mode };
tuner_i2c_xfer_send(&priv->i2c_props, buf, 2);
tda8295_set_easy_mode(fe, 1);
msleep(20);
tda8295_set_easy_mode(fe, 0);
}
/*---------------------------------------------------------------------*/
static void tda8295_agc1_out(struct dvb_frontend *fe, int enable)
{
struct tda8290_priv *priv = fe->analog_demod_priv;
unsigned char buf[] = { 0x02, 0x00 }; /* DIV_FUNC */
tuner_i2c_xfer_send_recv(&priv->i2c_props, &buf[0], 1, &buf[1], 1);
if (enable)
buf[1] &= ~0x40;
else
buf[1] |= 0x40;
tuner_i2c_xfer_send(&priv->i2c_props, buf, 2);
}
static void tda8295_agc2_out(struct dvb_frontend *fe, int enable)
{
struct tda8290_priv *priv = fe->analog_demod_priv;
unsigned char set_gpio_cf[] = { 0x44, 0x00 };
unsigned char set_gpio_val[] = { 0x46, 0x00 };
tuner_i2c_xfer_send_recv(&priv->i2c_props,
&set_gpio_cf[0], 1, &set_gpio_cf[1], 1);
tuner_i2c_xfer_send_recv(&priv->i2c_props,
&set_gpio_val[0], 1, &set_gpio_val[1], 1);
set_gpio_cf[1] &= 0xf0; /* clear GPIO_0 bits 3-0 */
if (enable) {
set_gpio_cf[1] |= 0x01; /* config GPIO_0 as Open Drain Out */
set_gpio_val[1] &= 0xfe; /* set GPIO_0 pin low */
}
tuner_i2c_xfer_send(&priv->i2c_props, set_gpio_cf, 2);
tuner_i2c_xfer_send(&priv->i2c_props, set_gpio_val, 2);
}
static int tda8295_has_signal(struct dvb_frontend *fe)
{
struct tda8290_priv *priv = fe->analog_demod_priv;
unsigned char hvpll_stat = 0x26;
unsigned char ret;
tuner_i2c_xfer_send_recv(&priv->i2c_props, &hvpll_stat, 1, &ret, 1);
return (ret & 0x01) ? 65535 : 0;
}
/*---------------------------------------------------------------------*/
static void tda8295_set_params(struct dvb_frontend *fe,
struct analog_parameters *params)
{
struct tda8290_priv *priv = fe->analog_demod_priv;
unsigned char blanking_mode[] = { 0x1d, 0x00 };
set_audio(fe, params);
tuner_dbg("%s: freq = %d\n", __func__, params->frequency);
tda8295_power(fe, 1);
tda8295_agc1_out(fe, 1);
tuner_i2c_xfer_send_recv(&priv->i2c_props,
&blanking_mode[0], 1, &blanking_mode[1], 1);
tda8295_set_video_std(fe);
blanking_mode[1] = 0x03;
tuner_i2c_xfer_send(&priv->i2c_props, blanking_mode, 2);
msleep(20);
tda8295_i2c_bridge(fe, 1);
if (fe->ops.tuner_ops.set_analog_params)
fe->ops.tuner_ops.set_analog_params(fe, params);
if (priv->cfg.agcf)
priv->cfg.agcf(fe);
if (tda8295_has_signal(fe))
tuner_dbg("tda8295 is locked\n");
else
tuner_dbg("tda8295 not locked, no signal?\n");
tda8295_i2c_bridge(fe, 0);
}
/*---------------------------------------------------------------------*/
static int tda8290_has_signal(struct dvb_frontend *fe)
{
struct tda8290_priv *priv = fe->analog_demod_priv;
unsigned char i2c_get_afc[1] = { 0x1B };
unsigned char afc = 0;
tuner_i2c_xfer_send_recv(&priv->i2c_props,
i2c_get_afc, ARRAY_SIZE(i2c_get_afc), &afc, 1);
return (afc & 0x80) ? 65535 : 0;
}
/*---------------------------------------------------------------------*/
static void tda8290_standby(struct dvb_frontend *fe)
{
struct tda8290_priv *priv = fe->analog_demod_priv;
unsigned char cb1[] = { 0x30, 0xD0 };
unsigned char tda8290_standby[] = { 0x00, 0x02 };
unsigned char tda8290_agc_tri[] = { 0x02, 0x20 };
struct i2c_msg msg = { .addr = priv->tda827x_addr, .flags = 0, .buf = cb1, .len = 2 };
tda8290_i2c_bridge(fe, 1);
if (priv->ver & TDA8275A)
cb1[1] = 0x90;
i2c_transfer(priv->i2c_props.adap, &msg, 1);
tda8290_i2c_bridge(fe, 0);
tuner_i2c_xfer_send(&priv->i2c_props, tda8290_agc_tri, 2);
tuner_i2c_xfer_send(&priv->i2c_props, tda8290_standby, 2);
}
static void tda8295_standby(struct dvb_frontend *fe)
{
tda8295_agc1_out(fe, 0); /* Put AGC in tri-state */
tda8295_power(fe, 0);
}
static void tda8290_init_if(struct dvb_frontend *fe)
{
struct tda8290_priv *priv = fe->analog_demod_priv;
unsigned char set_VS[] = { 0x30, 0x6F };
unsigned char set_GP00_CF[] = { 0x20, 0x01 };
unsigned char set_GP01_CF[] = { 0x20, 0x0B };
if ((priv->cfg.config == 1) || (priv->cfg.config == 2))
tuner_i2c_xfer_send(&priv->i2c_props, set_GP00_CF, 2);
else
tuner_i2c_xfer_send(&priv->i2c_props, set_GP01_CF, 2);
tuner_i2c_xfer_send(&priv->i2c_props, set_VS, 2);
}
static void tda8295_init_if(struct dvb_frontend *fe)
{
struct tda8290_priv *priv = fe->analog_demod_priv;
static unsigned char set_adc_ctl[] = { 0x33, 0x14 };
static unsigned char set_adc_ctl2[] = { 0x34, 0x00 };
static unsigned char set_pll_reg6[] = { 0x3e, 0x63 };
static unsigned char set_pll_reg0[] = { 0x38, 0x23 };
static unsigned char set_pll_reg7[] = { 0x3f, 0x01 };
static unsigned char set_pll_reg10[] = { 0x42, 0x61 };
static unsigned char set_gpio_reg0[] = { 0x44, 0x0b };
tda8295_power(fe, 1);
tda8295_set_easy_mode(fe, 0);
tda8295_set_video_std(fe);
tuner_i2c_xfer_send(&priv->i2c_props, set_adc_ctl, 2);
tuner_i2c_xfer_send(&priv->i2c_props, set_adc_ctl2, 2);
tuner_i2c_xfer_send(&priv->i2c_props, set_pll_reg6, 2);
tuner_i2c_xfer_send(&priv->i2c_props, set_pll_reg0, 2);
tuner_i2c_xfer_send(&priv->i2c_props, set_pll_reg7, 2);
tuner_i2c_xfer_send(&priv->i2c_props, set_pll_reg10, 2);
tuner_i2c_xfer_send(&priv->i2c_props, set_gpio_reg0, 2);
tda8295_agc1_out(fe, 0);
tda8295_agc2_out(fe, 0);
}
static void tda8290_init_tuner(struct dvb_frontend *fe)
{
struct tda8290_priv *priv = fe->analog_demod_priv;
unsigned char tda8275_init[] = { 0x00, 0x00, 0x00, 0x40, 0xdC, 0x04, 0xAf,
0x3F, 0x2A, 0x04, 0xFF, 0x00, 0x00, 0x40 };
unsigned char tda8275a_init[] = { 0x00, 0x00, 0x00, 0x00, 0xdC, 0x05, 0x8b,
0x0c, 0x04, 0x20, 0xFF, 0x00, 0x00, 0x4b };
struct i2c_msg msg = { .addr = priv->tda827x_addr, .flags = 0,
.buf = tda8275_init, .len = 14 };
if (priv->ver & TDA8275A)
msg.buf = tda8275a_init;
tda8290_i2c_bridge(fe, 1);
i2c_transfer(priv->i2c_props.adap, &msg, 1);
tda8290_i2c_bridge(fe, 0);
}
/*---------------------------------------------------------------------*/
static void tda829x_release(struct dvb_frontend *fe)
{
struct tda8290_priv *priv = fe->analog_demod_priv;
/* only try to release the tuner if we've
* attached it from within this module */
if (priv->ver & (TDA18271 | TDA8275 | TDA8275A))
if (fe->ops.tuner_ops.release)
fe->ops.tuner_ops.release(fe);
kfree(fe->analog_demod_priv);
fe->analog_demod_priv = NULL;
}
static struct tda18271_config tda829x_tda18271_config = {
.gate = TDA18271_GATE_ANALOG,
};
static int tda829x_find_tuner(struct dvb_frontend *fe)
{
struct tda8290_priv *priv = fe->analog_demod_priv;
struct analog_demod_ops *analog_ops = &fe->ops.analog_ops;
int i, ret, tuners_found;
u32 tuner_addrs;
u8 data;
struct i2c_msg msg = { .flags = I2C_M_RD, .buf = &data, .len = 1 };
if (!analog_ops->i2c_gate_ctrl) {
printk(KERN_ERR "tda8290: no gate control were provided!\n");
return -EINVAL;
}
analog_ops->i2c_gate_ctrl(fe, 1);
/* probe for tuner chip */
tuners_found = 0;
tuner_addrs = 0;
for (i = 0x60; i <= 0x63; i++) {
msg.addr = i;
ret = i2c_transfer(priv->i2c_props.adap, &msg, 1);
if (ret == 1) {
tuners_found++;
tuner_addrs = (tuner_addrs << 8) + i;
}
}
/* If more than one tuner answered, we expect the right one to sit
* behind the I2C gate; with the gate closed we therefore pick the
* highest address that no longer responds.
*/
analog_ops->i2c_gate_ctrl(fe, 0);
if (tuners_found > 1)
for (i = 0; i < tuners_found; i++) {
msg.addr = tuner_addrs & 0xff;
ret = i2c_transfer(priv->i2c_props.adap, &msg, 1);
if (ret == 1)
tuner_addrs = tuner_addrs >> 8;
else
break;
}
if (tuner_addrs == 0) {
tuner_addrs = 0x60;
tuner_info("could not clearly identify tuner address, "
"defaulting to %x\n", tuner_addrs);
} else {
tuner_addrs = tuner_addrs & 0xff;
tuner_info("setting tuner address to %x\n", tuner_addrs);
}
priv->tda827x_addr = tuner_addrs;
msg.addr = tuner_addrs;
analog_ops->i2c_gate_ctrl(fe, 1);
ret = i2c_transfer(priv->i2c_props.adap, &msg, 1);
if (ret != 1) {
tuner_warn("tuner access failed!\n");
analog_ops->i2c_gate_ctrl(fe, 0);
return -EREMOTEIO;
}
if ((data == 0x83) || (data == 0x84)) {
priv->ver |= TDA18271;
tda829x_tda18271_config.config = priv->cfg.config;
dvb_attach(tda18271_attach, fe, priv->tda827x_addr,
priv->i2c_props.adap, &tda829x_tda18271_config);
} else {
if ((data & 0x3c) == 0)
priv->ver |= TDA8275;
else
priv->ver |= TDA8275A;
dvb_attach(tda827x_attach, fe, priv->tda827x_addr,
priv->i2c_props.adap, &priv->cfg);
priv->cfg.switch_addr = priv->i2c_props.addr;
}
if (fe->ops.tuner_ops.init)
fe->ops.tuner_ops.init(fe);
if (fe->ops.tuner_ops.sleep)
fe->ops.tuner_ops.sleep(fe);
analog_ops->i2c_gate_ctrl(fe, 0);
return 0;
}
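/*
 * Worked example (illustrative) of the probe above: if devices answer at
 * both 0x60 and 0x61 while the gate is open, tuner_addrs ends up 0x6061.
 * With the gate closed, 0x61 (the low byte) is tried first; if it still
 * answers it sits in front of the bridge and is shifted away, leaving
 * 0x60 as the tuner that is really behind the gate.
 */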
static int tda8290_probe(struct tuner_i2c_props *i2c_props)
{
#define TDA8290_ID 0x89
u8 reg = 0x1f, id;
struct i2c_msg msg_read[] = {
{ .addr = i2c_props->addr, .flags = 0, .len = 1, .buf = &reg },
{ .addr = i2c_props->addr, .flags = I2C_M_RD, .len = 1, .buf = &id },
};
/* detect tda8290 */
if (i2c_transfer(i2c_props->adap, msg_read, 2) != 2) {
printk(KERN_WARNING "%s: couldn't read register 0x%02x\n",
__func__, reg);
return -ENODEV;
}
if (id == TDA8290_ID) {
if (debug)
printk(KERN_DEBUG "%s: tda8290 detected @ %d-%04x\n",
__func__, i2c_adapter_id(i2c_props->adap),
i2c_props->addr);
return 0;
}
return -ENODEV;
}
static int tda8295_probe(struct tuner_i2c_props *i2c_props)
{
#define TDA8295_ID 0x8a
#define TDA8295C2_ID 0x8b
u8 reg = 0x2f, id;
struct i2c_msg msg_read[] = {
{ .addr = i2c_props->addr, .flags = 0, .len = 1, .buf = &reg },
{ .addr = i2c_props->addr, .flags = I2C_M_RD, .len = 1, .buf = &id },
};
/* detect tda8295 */
if (i2c_transfer(i2c_props->adap, msg_read, 2) != 2) {
printk(KERN_WARNING "%s: couldn't read register 0x%02x\n",
__func__, reg);
return -ENODEV;
}
if ((id & 0xfe) == TDA8295_ID) {
if (debug)
printk(KERN_DEBUG "%s: %s detected @ %d-%04x\n",
__func__, (id == TDA8295_ID) ?
"tda8295c1" : "tda8295c2",
i2c_adapter_id(i2c_props->adap),
i2c_props->addr);
return 0;
}
return -ENODEV;
}
static struct analog_demod_ops tda8290_ops = {
.set_params = tda8290_set_params,
.has_signal = tda8290_has_signal,
.standby = tda8290_standby,
.release = tda829x_release,
.i2c_gate_ctrl = tda8290_i2c_bridge,
};
static struct analog_demod_ops tda8295_ops = {
.set_params = tda8295_set_params,
.has_signal = tda8295_has_signal,
.standby = tda8295_standby,
.release = tda829x_release,
.i2c_gate_ctrl = tda8295_i2c_bridge,
};
struct dvb_frontend *tda829x_attach(struct dvb_frontend *fe,
struct i2c_adapter *i2c_adap, u8 i2c_addr,
struct tda829x_config *cfg)
{
struct tda8290_priv *priv = NULL;
char *name;
priv = kzalloc(sizeof(struct tda8290_priv), GFP_KERNEL);
if (priv == NULL)
return NULL;
fe->analog_demod_priv = priv;
priv->i2c_props.addr = i2c_addr;
priv->i2c_props.adap = i2c_adap;
priv->i2c_props.name = "tda829x";
if (cfg)
priv->cfg.config = cfg->lna_cfg;
if (tda8290_probe(&priv->i2c_props) == 0) {
priv->ver = TDA8290;
memcpy(&fe->ops.analog_ops, &tda8290_ops,
sizeof(struct analog_demod_ops));
}
if (tda8295_probe(&priv->i2c_props) == 0) {
priv->ver = TDA8295;
memcpy(&fe->ops.analog_ops, &tda8295_ops,
sizeof(struct analog_demod_ops));
}
if (!(cfg) || (TDA829X_PROBE_TUNER == cfg->probe_tuner)) {
tda8295_power(fe, 1);
if (tda829x_find_tuner(fe) < 0)
goto fail;
}
switch (priv->ver) {
case TDA8290:
name = "tda8290";
break;
case TDA8295:
name = "tda8295";
break;
case TDA8290 | TDA8275:
name = "tda8290+75";
break;
case TDA8295 | TDA8275:
name = "tda8295+75";
break;
case TDA8290 | TDA8275A:
name = "tda8290+75a";
break;
case TDA8295 | TDA8275A:
name = "tda8295+75a";
break;
case TDA8290 | TDA18271:
name = "tda8290+18271";
break;
case TDA8295 | TDA18271:
name = "tda8295+18271";
break;
default:
goto fail;
}
tuner_info("type set to %s\n", name);
fe->ops.analog_ops.info.name = name;
if (priv->ver & TDA8290) {
if (priv->ver & (TDA8275 | TDA8275A))
tda8290_init_tuner(fe);
tda8290_init_if(fe);
} else if (priv->ver & TDA8295)
tda8295_init_if(fe);
return fe;
fail:
memset(&fe->ops.analog_ops, 0, sizeof(struct analog_demod_ops));
tda829x_release(fe);
return NULL;
}
EXPORT_SYMBOL_GPL(tda829x_attach);
int tda829x_probe(struct i2c_adapter *i2c_adap, u8 i2c_addr)
{
struct tuner_i2c_props i2c_props = {
.adap = i2c_adap,
.addr = i2c_addr,
};
unsigned char soft_reset[] = { 0x00, 0x00 };
unsigned char easy_mode_b[] = { 0x01, 0x02 };
unsigned char easy_mode_g[] = { 0x01, 0x04 };
unsigned char restore_9886[] = { 0x00, 0xd6, 0x30 };
unsigned char addr_dto_lsb = 0x07;
unsigned char data;
#define PROBE_BUFFER_SIZE 8
unsigned char buf[PROBE_BUFFER_SIZE];
int i;
/* rule out tda9887, which would return the same byte repeatedly */
tuner_i2c_xfer_send_recv(&i2c_props,
soft_reset, 1, buf, PROBE_BUFFER_SIZE);
for (i = 1; i < PROBE_BUFFER_SIZE; i++) {
if (buf[i] != buf[0])
break;
}
/* all bytes are equal, not a tda829x - probably a tda9887 */
if (i == PROBE_BUFFER_SIZE)
return -ENODEV;
if ((tda8290_probe(&i2c_props) == 0) ||
(tda8295_probe(&i2c_props) == 0))
return 0;
/* fall back to old probing method */
tuner_i2c_xfer_send(&i2c_props, easy_mode_b, 2);
tuner_i2c_xfer_send(&i2c_props, soft_reset, 2);
tuner_i2c_xfer_send_recv(&i2c_props, &addr_dto_lsb, 1, &data, 1);
if (data == 0) {
tuner_i2c_xfer_send(&i2c_props, easy_mode_g, 2);
tuner_i2c_xfer_send(&i2c_props, soft_reset, 2);
tuner_i2c_xfer_send_recv(&i2c_props,
&addr_dto_lsb, 1, &data, 1);
if (data == 0x7b) {
return 0;
}
}
tuner_i2c_xfer_send(&i2c_props, restore_9886, 3);
return -ENODEV;
}
EXPORT_SYMBOL_GPL(tda829x_probe);
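/*
 * Standalone sketch (not from the driver) of the tda9887 rule-out used in
 * tda829x_probe() above: a tda9887 returns the same status byte for every
 * read, so a probe buffer whose bytes are all identical means "not a
 * tda829x".  Buffer contents below are illustrative.
 */
#include <stdio.h>

static int looks_like_tda9887(const unsigned char *buf, int len)
{
	int i;

	for (i = 1; i < len; i++)
		if (buf[i] != buf[0])
			return 0;	/* bytes differ: a real register window */
	return 1;			/* all identical: likely a tda9887 */
}

int main(void)
{
	unsigned char same[8] = { 0xd6, 0xd6, 0xd6, 0xd6, 0xd6, 0xd6, 0xd6, 0xd6 };
	unsigned char diff[8] = { 0x00, 0x02, 0x7b, 0x10, 0x00, 0x00, 0x01, 0x00 };

	printf("%d %d\n", looks_like_tda9887(same, 8),
	       looks_like_tda9887(diff, 8));	/* prints: 1 0 */
	return 0;
}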
MODULE_DESCRIPTION("Philips/NXP TDA8290/TDA8295 analog IF demodulator driver");
MODULE_AUTHOR("Gerd Knorr, Hartmut Hackmann, Michael Krufky");
MODULE_LICENSE("GPL");
/*
* Overrides for Emacs so that we follow Linus's tabbing style.
* ---------------------------------------------------------------------------
* Local variables:
* c-basic-offset: 8
* End:
*/
| gpl-2.0 |
CyanogenMod/android_kernel_goldfish | drivers/input/keyboard/amikbd.c | 9386 | 6680 | /*
* Copyright (c) 2000-2001 Vojtech Pavlik
*
* Based on the work of:
* Hamish Macdonald
*/
/*
* Amiga keyboard driver for Linux/m68k
*/
/*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
* Should you need to contact me, the author, you can do so either by
* e-mail - mail your message to <vojtech@ucw.cz>, or by paper mail:
* Vojtech Pavlik, Simunkova 1594, Prague 8, 182 00 Czech Republic
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/input.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/keyboard.h>
#include <linux/platform_device.h>
#include <asm/amigaints.h>
#include <asm/amigahw.h>
#include <asm/irq.h>
MODULE_AUTHOR("Vojtech Pavlik <vojtech@ucw.cz>");
MODULE_DESCRIPTION("Amiga keyboard driver");
MODULE_LICENSE("GPL");
static unsigned char amikbd_keycode[0x78] __initdata = {
[0] = KEY_GRAVE,
[1] = KEY_1,
[2] = KEY_2,
[3] = KEY_3,
[4] = KEY_4,
[5] = KEY_5,
[6] = KEY_6,
[7] = KEY_7,
[8] = KEY_8,
[9] = KEY_9,
[10] = KEY_0,
[11] = KEY_MINUS,
[12] = KEY_EQUAL,
[13] = KEY_BACKSLASH,
[15] = KEY_KP0,
[16] = KEY_Q,
[17] = KEY_W,
[18] = KEY_E,
[19] = KEY_R,
[20] = KEY_T,
[21] = KEY_Y,
[22] = KEY_U,
[23] = KEY_I,
[24] = KEY_O,
[25] = KEY_P,
[26] = KEY_LEFTBRACE,
[27] = KEY_RIGHTBRACE,
[29] = KEY_KP1,
[30] = KEY_KP2,
[31] = KEY_KP3,
[32] = KEY_A,
[33] = KEY_S,
[34] = KEY_D,
[35] = KEY_F,
[36] = KEY_G,
[37] = KEY_H,
[38] = KEY_J,
[39] = KEY_K,
[40] = KEY_L,
[41] = KEY_SEMICOLON,
[42] = KEY_APOSTROPHE,
[43] = KEY_BACKSLASH,
[45] = KEY_KP4,
[46] = KEY_KP5,
[47] = KEY_KP6,
[48] = KEY_102ND,
[49] = KEY_Z,
[50] = KEY_X,
[51] = KEY_C,
[52] = KEY_V,
[53] = KEY_B,
[54] = KEY_N,
[55] = KEY_M,
[56] = KEY_COMMA,
[57] = KEY_DOT,
[58] = KEY_SLASH,
[60] = KEY_KPDOT,
[61] = KEY_KP7,
[62] = KEY_KP8,
[63] = KEY_KP9,
[64] = KEY_SPACE,
[65] = KEY_BACKSPACE,
[66] = KEY_TAB,
[67] = KEY_KPENTER,
[68] = KEY_ENTER,
[69] = KEY_ESC,
[70] = KEY_DELETE,
[74] = KEY_KPMINUS,
[76] = KEY_UP,
[77] = KEY_DOWN,
[78] = KEY_RIGHT,
[79] = KEY_LEFT,
[80] = KEY_F1,
[81] = KEY_F2,
[82] = KEY_F3,
[83] = KEY_F4,
[84] = KEY_F5,
[85] = KEY_F6,
[86] = KEY_F7,
[87] = KEY_F8,
[88] = KEY_F9,
[89] = KEY_F10,
[90] = KEY_KPLEFTPAREN,
[91] = KEY_KPRIGHTPAREN,
[92] = KEY_KPSLASH,
[93] = KEY_KPASTERISK,
[94] = KEY_KPPLUS,
[95] = KEY_HELP,
[96] = KEY_LEFTSHIFT,
[97] = KEY_RIGHTSHIFT,
[98] = KEY_CAPSLOCK,
[99] = KEY_LEFTCTRL,
[100] = KEY_LEFTALT,
[101] = KEY_RIGHTALT,
[102] = KEY_LEFTMETA,
[103] = KEY_RIGHTMETA
};
static const char *amikbd_messages[8] = {
[0] = KERN_ALERT "amikbd: Ctrl-Amiga-Amiga reset warning!!\n",
[1] = KERN_WARNING "amikbd: keyboard lost sync\n",
[2] = KERN_WARNING "amikbd: keyboard buffer overflow\n",
[3] = KERN_WARNING "amikbd: keyboard controller failure\n",
[4] = KERN_ERR "amikbd: keyboard selftest failure\n",
[5] = KERN_INFO "amikbd: initiate power-up key stream\n",
[6] = KERN_INFO "amikbd: terminate power-up key stream\n",
[7] = KERN_WARNING "amikbd: keyboard interrupt\n"
};
static irqreturn_t amikbd_interrupt(int irq, void *data)
{
struct input_dev *dev = data;
unsigned char scancode, down;
scancode = ~ciaa.sdr; /* get and invert scancode (keyboard is active low) */
ciaa.cra |= 0x40; /* switch SP pin to output for handshake */
udelay(85); /* wait until 85 us have expired */
ciaa.cra &= ~0x40; /* switch CIA serial port to input mode */
down = !(scancode & 1); /* lowest bit is release bit */
scancode >>= 1;
if (scancode < 0x78) { /* scancodes < 0x78 are keys */
if (scancode == 98) { /* CapsLock is a toggle switch key on Amiga */
input_report_key(dev, scancode, 1);
input_report_key(dev, scancode, 0);
} else {
input_report_key(dev, scancode, down);
}
input_sync(dev);
} else /* scancodes >= 0x78 are error codes */
printk(amikbd_messages[scancode - 0x78]);
return IRQ_HANDLED;
}
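/*
 * Standalone sketch (not from the driver) of the scancode decode in
 * amikbd_interrupt() above: the byte read from the CIA is active low, its
 * lowest bit is the release flag, and the remaining seven bits are the
 * key number.
 */
#include <stdio.h>

int main(void)
{
	unsigned char raw = 0x7d;	/* example wire byte from ciaa.sdr */
	unsigned char scancode = (unsigned char)~raw;	/* invert: active low */
	int down = !(scancode & 1);	/* bit 0 set means "released" */

	scancode >>= 1;			/* 7-bit key number */
	printf("key 0x%02x %s\n", scancode, down ? "down" : "up");
	return 0;			/* prints: key 0x41 down */
}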
static int __init amikbd_probe(struct platform_device *pdev)
{
struct input_dev *dev;
int i, j, err;
dev = input_allocate_device();
if (!dev) {
dev_err(&pdev->dev, "Not enough memory for input device\n");
return -ENOMEM;
}
dev->name = pdev->name;
dev->phys = "amikbd/input0";
dev->id.bustype = BUS_AMIGA;
dev->id.vendor = 0x0001;
dev->id.product = 0x0001;
dev->id.version = 0x0100;
dev->dev.parent = &pdev->dev;
dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_REP);
for (i = 0; i < 0x78; i++)
set_bit(i, dev->keybit);
for (i = 0; i < MAX_NR_KEYMAPS; i++) {
static u_short temp_map[NR_KEYS] __initdata;
if (!key_maps[i])
continue;
memset(temp_map, 0, sizeof(temp_map));
for (j = 0; j < 0x78; j++) {
if (!amikbd_keycode[j])
continue;
temp_map[j] = key_maps[i][amikbd_keycode[j]];
}
for (j = 0; j < NR_KEYS; j++) {
if (!temp_map[j])
temp_map[j] = 0xf200;
}
memcpy(key_maps[i], temp_map, sizeof(temp_map));
}
ciaa.cra &= ~0x41; /* serial data in, turn off TA */
err = request_irq(IRQ_AMIGA_CIAA_SP, amikbd_interrupt, 0, "amikbd",
dev);
if (err)
goto fail2;
err = input_register_device(dev);
if (err)
goto fail3;
platform_set_drvdata(pdev, dev);
return 0;
fail3: free_irq(IRQ_AMIGA_CIAA_SP, dev);
fail2: input_free_device(dev);
return err;
}
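/*
 * Standalone sketch (not from the driver) of the keymap rewrite performed
 * in amikbd_probe() above: each console keymap, originally indexed by
 * Linux keycode, is rearranged so it can be indexed directly by Amiga
 * scancode; empty slots are filled with 0xf200 ("void", no action).  The
 * mapping table mirrors the first entries of amikbd_keycode (KEY_GRAVE=41,
 * KEY_1=2, KEY_2=3); keymap contents are illustrative.
 */
#include <stdio.h>
#include <string.h>

#define NKEYS 4

int main(void)
{
	unsigned char amiga_to_linux[NKEYS] = { 41, 2, 3, 0 };	/* 0 = unmapped */
	unsigned short keymap[64];	/* indexed by Linux keycode */
	unsigned short temp[NKEYS];
	int j;

	memset(keymap, 0, sizeof(keymap));
	keymap[41] = '`';
	keymap[2] = '1';
	keymap[3] = '2';

	memset(temp, 0, sizeof(temp));
	for (j = 0; j < NKEYS; j++)
		if (amiga_to_linux[j])
			temp[j] = keymap[amiga_to_linux[j]];
	for (j = 0; j < NKEYS; j++)
		if (!temp[j])
			temp[j] = 0xf200;	/* no action for this slot */

	for (j = 0; j < NKEYS; j++)
		printf("scancode %d -> 0x%04x\n", j, temp[j]);
	return 0;
}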
static int __exit amikbd_remove(struct platform_device *pdev)
{
struct input_dev *dev = platform_get_drvdata(pdev);
platform_set_drvdata(pdev, NULL);
free_irq(IRQ_AMIGA_CIAA_SP, dev);
input_unregister_device(dev);
return 0;
}
static struct platform_driver amikbd_driver = {
.remove = __exit_p(amikbd_remove),
.driver = {
.name = "amiga-keyboard",
.owner = THIS_MODULE,
},
};
static int __init amikbd_init(void)
{
return platform_driver_probe(&amikbd_driver, amikbd_probe);
}
module_init(amikbd_init);
static void __exit amikbd_exit(void)
{
platform_driver_unregister(&amikbd_driver);
}
module_exit(amikbd_exit);
MODULE_ALIAS("platform:amiga-keyboard");
| gpl-2.0 |