repo_name string | path string | copies string | size string | content string | license string |
|---|---|---|---|---|---|
aapav01/kernel_ms013g_3-4-x | drivers/staging/speakup/speakup_apollo.c | 2988 | 6850 | /*
* originally written by: Kirk Reiser <kirk@braille.uwo.ca>
* this version considerably modified by David Borowski, david575@rogers.com
*
* Copyright (C) 1998-99 Kirk Reiser.
* Copyright (C) 2003 David Borowski.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
* this code is specifically written as a driver for the speakup screen review
* package and is not a general device driver.
*/
#include <linux/jiffies.h>
#include <linux/sched.h>
#include <linux/timer.h>
#include <linux/kthread.h>
#include "spk_priv.h"
#include "serialio.h"
#include "speakup.h"
#define DRV_VERSION "2.21"
#define SYNTH_CLEAR 0x18
#define PROCSPEECH '\r'
static void do_catch_up(struct spk_synth *synth);
static struct var_t vars[] = {
{ CAPS_START, .u.s = {"cap, " } },
{ CAPS_STOP, .u.s = {"" } },
{ RATE, .u.n = {"@W%d", 6, 1, 9, 0, 0, NULL } },
{ PITCH, .u.n = {"@F%x", 10, 0, 15, 0, 0, NULL } },
{ VOL, .u.n = {"@A%x", 10, 0, 15, 0, 0, NULL } },
{ VOICE, .u.n = {"@V%d", 1, 1, 6, 0, 0, NULL } },
{ LANG, .u.n = {"@=%d,", 1, 1, 4, 0, 0, NULL } },
{ DIRECT, .u.n = {NULL, 0, 0, 1, 0, 0, NULL } },
V_LAST_VAR
};
/*
* These attributes will appear in /sys/accessibility/speakup/apollo.
*/
static struct kobj_attribute caps_start_attribute =
__ATTR(caps_start, USER_RW, spk_var_show, spk_var_store);
static struct kobj_attribute caps_stop_attribute =
__ATTR(caps_stop, USER_RW, spk_var_show, spk_var_store);
static struct kobj_attribute lang_attribute =
__ATTR(lang, USER_RW, spk_var_show, spk_var_store);
static struct kobj_attribute pitch_attribute =
__ATTR(pitch, USER_RW, spk_var_show, spk_var_store);
static struct kobj_attribute rate_attribute =
__ATTR(rate, USER_RW, spk_var_show, spk_var_store);
static struct kobj_attribute voice_attribute =
__ATTR(voice, USER_RW, spk_var_show, spk_var_store);
static struct kobj_attribute vol_attribute =
__ATTR(vol, USER_RW, spk_var_show, spk_var_store);
static struct kobj_attribute delay_time_attribute =
__ATTR(delay_time, ROOT_W, spk_var_show, spk_var_store);
static struct kobj_attribute direct_attribute =
__ATTR(direct, USER_RW, spk_var_show, spk_var_store);
static struct kobj_attribute full_time_attribute =
__ATTR(full_time, ROOT_W, spk_var_show, spk_var_store);
static struct kobj_attribute jiffy_delta_attribute =
__ATTR(jiffy_delta, ROOT_W, spk_var_show, spk_var_store);
static struct kobj_attribute trigger_time_attribute =
__ATTR(trigger_time, ROOT_W, spk_var_show, spk_var_store);
/*
* Create a group of attributes so that we can create and destroy them all
* at once.
*/
static struct attribute *synth_attrs[] = {
&caps_start_attribute.attr,
&caps_stop_attribute.attr,
&lang_attribute.attr,
&pitch_attribute.attr,
&rate_attribute.attr,
&voice_attribute.attr,
&vol_attribute.attr,
&delay_time_attribute.attr,
&direct_attribute.attr,
&full_time_attribute.attr,
&jiffy_delta_attribute.attr,
&trigger_time_attribute.attr,
NULL, /* need to NULL terminate the list of attributes */
};
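/*
 * Illustrative userspace sketch (not part of this driver): adjusting the
 * synthesizer rate through the sysfs attribute created above.  The path
 * follows the comment at the top of this block, and the value "9" is the
 * maximum rate allowed by the vars[] table; error handling is minimal and
 * the snippet only demonstrates how the attributes are meant to be used.
 *
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		int fd = open("/sys/accessibility/speakup/apollo/rate",
 *			      O_WRONLY);
 *
 *		if (fd < 0)
 *			return 1;
 *		write(fd, "9", 1);
 *		close(fd);
 *		return 0;
 *	}
 */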
static struct spk_synth synth_apollo = {
.name = "apollo",
.version = DRV_VERSION,
.long_name = "Apollo",
.init = "@R3@D0@K1\r",
.procspeech = PROCSPEECH,
.clear = SYNTH_CLEAR,
.delay = 500,
.trigger = 50,
.jiffies = 50,
.full = 40000,
.startup = SYNTH_START,
.checkval = SYNTH_CHECK,
.vars = vars,
.probe = spk_serial_synth_probe,
.release = spk_serial_release,
.synth_immediate = spk_synth_immediate,
.catch_up = do_catch_up,
.flush = spk_synth_flush,
.is_alive = spk_synth_is_alive_restart,
.synth_adjust = NULL,
.read_buff_add = NULL,
.get_index = NULL,
.indexing = {
.command = NULL,
.lowindex = 0,
.highindex = 0,
.currindex = 0,
},
.attributes = {
.attrs = synth_attrs,
.name = "apollo",
},
};
static void do_catch_up(struct spk_synth *synth)
{
u_char ch;
unsigned long flags;
unsigned long jiff_max;
struct var_t *jiffy_delta;
struct var_t *delay_time;
struct var_t *full_time;
int full_time_val = 0;
int delay_time_val = 0;
int jiffy_delta_val = 0;
jiffy_delta = spk_get_var(JIFFY);
delay_time = spk_get_var(DELAY);
full_time = spk_get_var(FULL);
spk_lock(flags);
jiffy_delta_val = jiffy_delta->u.n.value;
spk_unlock(flags);
jiff_max = jiffies + jiffy_delta_val;
while (!kthread_should_stop()) {
spk_lock(flags);
jiffy_delta_val = jiffy_delta->u.n.value;
full_time_val = full_time->u.n.value;
delay_time_val = delay_time->u.n.value;
if (speakup_info.flushing) {
speakup_info.flushing = 0;
spk_unlock(flags);
synth->flush(synth);
continue;
}
if (synth_buffer_empty()) {
spk_unlock(flags);
break;
}
ch = synth_buffer_peek();
set_current_state(TASK_INTERRUPTIBLE);
full_time_val = full_time->u.n.value;
spk_unlock(flags);
if (!spk_serial_out(ch)) {
outb(UART_MCR_DTR, speakup_info.port_tts + UART_MCR);
outb(UART_MCR_DTR | UART_MCR_RTS,
speakup_info.port_tts + UART_MCR);
schedule_timeout(msecs_to_jiffies(full_time_val));
continue;
}
if ((jiffies >= jiff_max) && (ch == SPACE)) {
spk_lock(flags);
jiffy_delta_val = jiffy_delta->u.n.value;
full_time_val = full_time->u.n.value;
delay_time_val = delay_time->u.n.value;
spk_unlock(flags);
if (spk_serial_out(synth->procspeech))
schedule_timeout(msecs_to_jiffies
(delay_time_val));
else
schedule_timeout(msecs_to_jiffies
(full_time_val));
jiff_max = jiffies + jiffy_delta_val;
}
set_current_state(TASK_RUNNING);
spk_lock(flags);
synth_buffer_getc();
spk_unlock(flags);
}
spk_serial_out(PROCSPEECH);
}
module_param_named(ser, synth_apollo.ser, int, S_IRUGO);
module_param_named(start, synth_apollo.startup, short, S_IRUGO);
MODULE_PARM_DESC(ser, "Set the serial port for the synthesizer (0-based).");
MODULE_PARM_DESC(start, "Start the synthesizer once it is loaded.");
static int __init apollo_init(void)
{
return synth_add(&synth_apollo);
}
static void __exit apollo_exit(void)
{
synth_remove(&synth_apollo);
}
module_init(apollo_init);
module_exit(apollo_exit);
MODULE_AUTHOR("Kirk Reiser <kirk@braille.uwo.ca>");
MODULE_AUTHOR("David Borowski");
MODULE_DESCRIPTION("Speakup support for Apollo II synthesizer");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
| gpl-2.0 |
mdeejay/android_kernel_ville | arch/arm/mach-pxa/zylonite_pxa300.c | 2988 | 6325 | /*
* linux/arch/arm/mach-pxa/zylonite_pxa300.c
*
* PXA300/PXA310 specific support code for the
* PXA3xx Development Platform (aka Zylonite)
*
* Copyright (C) 2007 Marvell International Ltd.
* 2007-08-21: eric miao <eric.miao@marvell.com>
* initial version
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/i2c.h>
#include <linux/i2c/pxa-i2c.h>
#include <linux/i2c/pca953x.h>
#include <linux/gpio.h>
#include <mach/pxa300.h>
#include <mach/zylonite.h>
#include "generic.h"
/* PXA300/PXA310 common configurations */
static mfp_cfg_t common_mfp_cfg[] __initdata = {
/* LCD */
GPIO54_LCD_LDD_0,
GPIO55_LCD_LDD_1,
GPIO56_LCD_LDD_2,
GPIO57_LCD_LDD_3,
GPIO58_LCD_LDD_4,
GPIO59_LCD_LDD_5,
GPIO60_LCD_LDD_6,
GPIO61_LCD_LDD_7,
GPIO62_LCD_LDD_8,
GPIO63_LCD_LDD_9,
GPIO64_LCD_LDD_10,
GPIO65_LCD_LDD_11,
GPIO66_LCD_LDD_12,
GPIO67_LCD_LDD_13,
GPIO68_LCD_LDD_14,
GPIO69_LCD_LDD_15,
GPIO70_LCD_LDD_16,
GPIO71_LCD_LDD_17,
GPIO72_LCD_FCLK,
GPIO73_LCD_LCLK,
GPIO74_LCD_PCLK,
GPIO75_LCD_BIAS,
GPIO76_LCD_VSYNC,
GPIO127_LCD_CS_N,
GPIO20_PWM3_OUT, /* backlight */
/* BTUART */
GPIO111_UART2_RTS,
GPIO112_UART2_RXD | MFP_LPM_EDGE_FALL,
GPIO113_UART2_TXD,
GPIO114_UART2_CTS | MFP_LPM_EDGE_BOTH,
/* STUART */
GPIO109_UART3_TXD,
GPIO110_UART3_RXD | MFP_LPM_EDGE_FALL,
/* AC97 */
GPIO23_AC97_nACRESET,
GPIO24_AC97_SYSCLK,
GPIO29_AC97_BITCLK,
GPIO25_AC97_SDATA_IN_0,
GPIO27_AC97_SDATA_OUT,
GPIO28_AC97_SYNC,
GPIO17_GPIO, /* SDATA_IN_1 but unused - configure to GPIO */
/* SSP3 */
GPIO91_SSP3_SCLK,
GPIO92_SSP3_FRM,
GPIO93_SSP3_TXD,
GPIO94_SSP3_RXD,
/* WM9713 IRQ */
GPIO26_GPIO,
/* Keypad */
GPIO107_KP_DKIN_0 | MFP_LPM_EDGE_BOTH,
GPIO108_KP_DKIN_1 | MFP_LPM_EDGE_BOTH,
GPIO115_KP_MKIN_0 | MFP_LPM_EDGE_BOTH,
GPIO116_KP_MKIN_1 | MFP_LPM_EDGE_BOTH,
GPIO117_KP_MKIN_2 | MFP_LPM_EDGE_BOTH,
GPIO118_KP_MKIN_3 | MFP_LPM_EDGE_BOTH,
GPIO119_KP_MKIN_4 | MFP_LPM_EDGE_BOTH,
GPIO120_KP_MKIN_5 | MFP_LPM_EDGE_BOTH,
GPIO2_2_KP_MKIN_6 | MFP_LPM_EDGE_BOTH,
GPIO3_2_KP_MKIN_7 | MFP_LPM_EDGE_BOTH,
GPIO121_KP_MKOUT_0,
GPIO122_KP_MKOUT_1,
GPIO123_KP_MKOUT_2,
GPIO124_KP_MKOUT_3,
GPIO125_KP_MKOUT_4,
GPIO4_2_KP_MKOUT_5,
GPIO5_2_KP_MKOUT_6,
GPIO6_2_KP_MKOUT_7,
/* MMC1 */
GPIO3_MMC1_DAT0,
GPIO4_MMC1_DAT1 | MFP_LPM_EDGE_BOTH,
GPIO5_MMC1_DAT2,
GPIO6_MMC1_DAT3,
GPIO7_MMC1_CLK,
GPIO8_MMC1_CMD, /* CMD0 for slot 0 */
GPIO15_GPIO, /* CMD1 default as GPIO for slot 0 */
/* MMC2 */
GPIO9_MMC2_DAT0,
GPIO10_MMC2_DAT1 | MFP_LPM_EDGE_BOTH,
GPIO11_MMC2_DAT2,
GPIO12_MMC2_DAT3,
GPIO13_MMC2_CLK,
GPIO14_MMC2_CMD,
/* USB Host */
GPIO0_2_USBH_PEN,
GPIO1_2_USBH_PWR,
/* Standard I2C */
GPIO21_I2C_SCL,
GPIO22_I2C_SDA,
/* GPIO */
GPIO18_GPIO | MFP_PULL_HIGH, /* GPIO Expander #0 INT_N */
GPIO19_GPIO | MFP_PULL_HIGH, /* GPIO Expander #1 INT_N */
};
static mfp_cfg_t pxa300_mfp_cfg[] __initdata = {
/* FFUART */
GPIO30_UART1_RXD | MFP_LPM_EDGE_FALL,
GPIO31_UART1_TXD,
GPIO32_UART1_CTS,
GPIO37_UART1_RTS,
GPIO33_UART1_DCD,
GPIO34_UART1_DSR | MFP_LPM_EDGE_FALL,
GPIO35_UART1_RI,
GPIO36_UART1_DTR,
/* Ethernet */
GPIO2_nCS3,
GPIO99_GPIO,
};
static mfp_cfg_t pxa310_mfp_cfg[] __initdata = {
/* FFUART */
GPIO99_UART1_RXD | MFP_LPM_EDGE_FALL,
GPIO100_UART1_TXD,
GPIO101_UART1_CTS,
GPIO106_UART1_RTS,
/* Ethernet */
GPIO2_nCS3,
GPIO102_GPIO,
/* MMC3 */
GPIO7_2_MMC3_DAT0,
GPIO8_2_MMC3_DAT1 | MFP_LPM_EDGE_BOTH,
GPIO9_2_MMC3_DAT2,
GPIO10_2_MMC3_DAT3,
GPIO103_MMC3_CLK,
GPIO105_MMC3_CMD,
};
#define NUM_LCD_DETECT_PINS 7
static int lcd_detect_pins[] __initdata = {
MFP_PIN_GPIO71, /* LCD_LDD_17 - ORIENT */
MFP_PIN_GPIO70, /* LCD_LDD_16 - LCDID[5] */
MFP_PIN_GPIO75, /* LCD_BIAS - LCDID[4] */
MFP_PIN_GPIO73, /* LCD_LCLK - LCDID[3] */
MFP_PIN_GPIO72, /* LCD_FCLK - LCDID[2] */
MFP_PIN_GPIO127,/* LCD_CS_N - LCDID[1] */
MFP_PIN_GPIO76, /* LCD_VSYNC - LCDID[0] */
};
static void __init zylonite_detect_lcd_panel(void)
{
unsigned long mfpr_save[NUM_LCD_DETECT_PINS];
int i, gpio, id = 0;
/* save the original MFP settings of these pins and configure
* them as GPIO Input, DS01X, Pull Neither, Edge Clear
*/
for (i = 0; i < NUM_LCD_DETECT_PINS; i++) {
mfpr_save[i] = pxa3xx_mfp_read(lcd_detect_pins[i]);
pxa3xx_mfp_write(lcd_detect_pins[i], 0x8440);
}
for (i = 0; i < NUM_LCD_DETECT_PINS; i++) {
id = id << 1;
gpio = mfp_to_gpio(lcd_detect_pins[i]);
gpio_request(gpio, "LCD_ID_PINS");
gpio_direction_input(gpio);
if (gpio_get_value(gpio))
id = id | 0x1;
gpio_free(gpio);
}
/* lcd id: keep the LCDID bits, masking off bit 1 and the orientation bit */
lcd_id = id & 0x3d;
/* lcd orientation, portrait or landscape */
lcd_orientation = (id >> 6) & 0x1;
/* restore the original MFP settings */
for (i = 0; i < NUM_LCD_DETECT_PINS; i++)
pxa3xx_mfp_write(lcd_detect_pins[i], mfpr_save[i]);
}
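/*
 * Illustrative sketch (not part of the original file): how a raw 7-bit
 * reading assembled above splits into lcd_id and lcd_orientation.  The
 * pins are sampled MSB first, so bit 6 is the ORIENT pin and bits 5..0
 * are LCDID[5..0]; bit 1 is masked off exactly as in
 * zylonite_detect_lcd_panel().  The sample value 0x5a and this helper
 * are made up purely for illustration.
 */
static void __init __maybe_unused zylonite_lcd_id_example(void)
{
	int raw = 0x5a;				/* hypothetical reading */
	int example_id = raw & 0x3d;		/* drop bit 1 and ORIENT */
	int example_orientation = (raw >> 6) & 0x1;	/* ORIENT pin */

	pr_debug("example lcd id %#x, orientation %d\n",
		 example_id, example_orientation);
}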
#if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE)
static struct pca953x_platform_data gpio_exp[] = {
[0] = {
.gpio_base = 128,
},
[1] = {
.gpio_base = 144,
},
};
static struct i2c_board_info zylonite_i2c_board_info[] = {
{
.type = "pca9539",
.addr = 0x74,
.platform_data = &gpio_exp[0],
.irq = IRQ_GPIO(18),
}, {
.type = "pca9539",
.addr = 0x75,
.platform_data = &gpio_exp[1],
.irq = IRQ_GPIO(19),
},
};
static void __init zylonite_init_i2c(void)
{
pxa_set_i2c_info(NULL);
i2c_register_board_info(0, ARRAY_AND_SIZE(zylonite_i2c_board_info));
}
#else
static inline void zylonite_init_i2c(void) {}
#endif
void __init zylonite_pxa300_init(void)
{
if (cpu_is_pxa300() || cpu_is_pxa310()) {
/* initialize MFP */
pxa3xx_mfp_config(ARRAY_AND_SIZE(common_mfp_cfg));
/* detect LCD panel */
zylonite_detect_lcd_panel();
/* WM9713 IRQ */
wm9713_irq = mfp_to_gpio(MFP_PIN_GPIO26);
zylonite_init_i2c();
}
if (cpu_is_pxa300()) {
pxa3xx_mfp_config(ARRAY_AND_SIZE(pxa300_mfp_cfg));
gpio_eth_irq = mfp_to_gpio(MFP_PIN_GPIO99);
}
if (cpu_is_pxa310()) {
pxa3xx_mfp_config(ARRAY_AND_SIZE(pxa310_mfp_cfg));
gpio_eth_irq = mfp_to_gpio(MFP_PIN_GPIO102);
}
/* GPIOs for Debug LEDs */
gpio_debug_led1 = EXT_GPIO(25);
gpio_debug_led2 = EXT_GPIO(26);
}
| gpl-2.0 |
cm-a7lte/device_samsung_a7lte | drivers/staging/speakup/speakup_dectlk.c | 2988 | 8885 | /*
* originally written by: Kirk Reiser <kirk@braille.uwo.ca>
* this version considerably modified by David Borowski, david575@rogers.com
*
* Copyright (C) 1998-99 Kirk Reiser.
* Copyright (C) 2003 David Borowski.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
* this code is specifically written as a driver for the speakup screen
* review package and is not a general device driver.
*/
#include <linux/unistd.h>
#include <linux/proc_fs.h>
#include <linux/jiffies.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/timer.h>
#include <linux/kthread.h>
#include "speakup.h"
#include "spk_priv.h"
#include "serialio.h"
#define DRV_VERSION "2.20"
#define SYNTH_CLEAR 0x03
#define PROCSPEECH 0x0b
static int xoff;
static inline int synth_full(void)
{
return xoff;
}
static void do_catch_up(struct spk_synth *synth);
static void synth_flush(struct spk_synth *synth);
static void read_buff_add(u_char c);
static unsigned char get_index(void);
static int in_escape;
static int is_flushing;
static spinlock_t flush_lock;
static DECLARE_WAIT_QUEUE_HEAD(flush);
static struct var_t vars[] = {
{ CAPS_START, .u.s = {"[:dv ap 160] " } },
{ CAPS_STOP, .u.s = {"[:dv ap 100 ] " } },
{ RATE, .u.n = {"[:ra %d] ", 180, 75, 650, 0, 0, NULL } },
{ PITCH, .u.n = {"[:dv ap %d] ", 122, 50, 350, 0, 0, NULL } },
{ VOL, .u.n = {"[:dv g5 %d] ", 86, 60, 86, 0, 0, NULL } },
{ PUNCT, .u.n = {"[:pu %c] ", 0, 0, 2, 0, 0, "nsa" } },
{ VOICE, .u.n = {"[:n%c] ", 0, 0, 9, 0, 0, "phfdburwkv" } },
{ DIRECT, .u.n = {NULL, 0, 0, 1, 0, 0, NULL } },
V_LAST_VAR
};
/*
* These attributes will appear in /sys/accessibility/speakup/dectlk.
*/
static struct kobj_attribute caps_start_attribute =
__ATTR(caps_start, USER_RW, spk_var_show, spk_var_store);
static struct kobj_attribute caps_stop_attribute =
__ATTR(caps_stop, USER_RW, spk_var_show, spk_var_store);
static struct kobj_attribute pitch_attribute =
__ATTR(pitch, USER_RW, spk_var_show, spk_var_store);
static struct kobj_attribute punct_attribute =
__ATTR(punct, USER_RW, spk_var_show, spk_var_store);
static struct kobj_attribute rate_attribute =
__ATTR(rate, USER_RW, spk_var_show, spk_var_store);
static struct kobj_attribute voice_attribute =
__ATTR(voice, USER_RW, spk_var_show, spk_var_store);
static struct kobj_attribute vol_attribute =
__ATTR(vol, USER_RW, spk_var_show, spk_var_store);
static struct kobj_attribute delay_time_attribute =
__ATTR(delay_time, ROOT_W, spk_var_show, spk_var_store);
static struct kobj_attribute direct_attribute =
__ATTR(direct, USER_RW, spk_var_show, spk_var_store);
static struct kobj_attribute full_time_attribute =
__ATTR(full_time, ROOT_W, spk_var_show, spk_var_store);
static struct kobj_attribute jiffy_delta_attribute =
__ATTR(jiffy_delta, ROOT_W, spk_var_show, spk_var_store);
static struct kobj_attribute trigger_time_attribute =
__ATTR(trigger_time, ROOT_W, spk_var_show, spk_var_store);
/*
* Create a group of attributes so that we can create and destroy them all
* at once.
*/
static struct attribute *synth_attrs[] = {
&caps_start_attribute.attr,
&caps_stop_attribute.attr,
&pitch_attribute.attr,
&punct_attribute.attr,
&rate_attribute.attr,
&voice_attribute.attr,
&vol_attribute.attr,
&delay_time_attribute.attr,
&direct_attribute.attr,
&full_time_attribute.attr,
&jiffy_delta_attribute.attr,
&trigger_time_attribute.attr,
NULL, /* need to NULL terminate the list of attributes */
};
static int ap_defaults[] = {122, 89, 155, 110, 208, 240, 200, 106, 306};
static int g5_defaults[] = {86, 81, 86, 84, 81, 80, 83, 83, 73};
static struct spk_synth synth_dectlk = {
.name = "dectlk",
.version = DRV_VERSION,
.long_name = "Dectalk Express",
.init = "[:error sp :name paul :rate 180 :tsr off] ",
.procspeech = PROCSPEECH,
.clear = SYNTH_CLEAR,
.delay = 500,
.trigger = 50,
.jiffies = 50,
.full = 40000,
.startup = SYNTH_START,
.checkval = SYNTH_CHECK,
.vars = vars,
.default_pitch = ap_defaults,
.default_vol = g5_defaults,
.probe = spk_serial_synth_probe,
.release = spk_serial_release,
.synth_immediate = spk_synth_immediate,
.catch_up = do_catch_up,
.flush = synth_flush,
.is_alive = spk_synth_is_alive_restart,
.synth_adjust = NULL,
.read_buff_add = read_buff_add,
.get_index = get_index,
.indexing = {
.command = "[:in re %d ] ",
.lowindex = 1,
.highindex = 8,
.currindex = 1,
},
.attributes = {
.attrs = synth_attrs,
.name = "dectlk",
},
};
static int is_indnum(u_char *ch)
{
if ((*ch >= '0') && (*ch <= '9')) {
*ch = *ch - '0';
return 1;
}
return 0;
}
static u_char lastind;
static unsigned char get_index(void)
{
u_char rv;
rv = lastind;
lastind = 0;
return rv;
}
static void read_buff_add(u_char c)
{
static int ind = -1;
if (c == 0x01) {
unsigned long flags;
spin_lock_irqsave(&flush_lock, flags);
is_flushing = 0;
wake_up_interruptible(&flush);
spin_unlock_irqrestore(&flush_lock, flags);
} else if (c == 0x13) {
xoff = 1;
} else if (c == 0x11) {
xoff = 0;
} else if (is_indnum(&c)) {
if (ind == -1)
ind = c;
else
ind = ind * 10 + c;
} else if ((c > 31) && (c < 127)) {
if (ind != -1)
lastind = (u_char)ind;
ind = -1;
}
}
static void do_catch_up(struct spk_synth *synth)
{
int synth_full_val = 0;
static u_char ch;
static u_char last = '\0';
unsigned long flags;
unsigned long jiff_max;
unsigned long timeout = msecs_to_jiffies(4000);
DEFINE_WAIT(wait);
struct var_t *jiffy_delta;
struct var_t *delay_time;
int jiffy_delta_val;
int delay_time_val;
jiffy_delta = spk_get_var(JIFFY);
delay_time = spk_get_var(DELAY);
spk_lock(flags);
jiffy_delta_val = jiffy_delta->u.n.value;
spk_unlock(flags);
jiff_max = jiffies + jiffy_delta_val;
while (!kthread_should_stop()) {
/* if no ctl-a in 4 seconds, send data anyway */
spin_lock_irqsave(&flush_lock, flags);
while (is_flushing && timeout) {
prepare_to_wait(&flush, &wait, TASK_INTERRUPTIBLE);
spin_unlock_irqrestore(&flush_lock, flags);
timeout = schedule_timeout(timeout);
spin_lock_irqsave(&flush_lock, flags);
}
finish_wait(&flush, &wait);
is_flushing = 0;
spin_unlock_irqrestore(&flush_lock, flags);
spk_lock(flags);
if (speakup_info.flushing) {
speakup_info.flushing = 0;
spk_unlock(flags);
synth->flush(synth);
continue;
}
if (synth_buffer_empty()) {
spk_unlock(flags);
break;
}
ch = synth_buffer_peek();
set_current_state(TASK_INTERRUPTIBLE);
delay_time_val = delay_time->u.n.value;
synth_full_val = synth_full();
spk_unlock(flags);
if (ch == '\n')
ch = 0x0D;
if (synth_full_val || !spk_serial_out(ch)) {
schedule_timeout(msecs_to_jiffies(delay_time_val));
continue;
}
set_current_state(TASK_RUNNING);
spk_lock(flags);
synth_buffer_getc();
spk_unlock(flags);
if (ch == '[')
in_escape = 1;
else if (ch == ']')
in_escape = 0;
else if (ch <= SPACE) {
if (!in_escape && strchr(",.!?;:", last))
spk_serial_out(PROCSPEECH);
if (jiffies >= jiff_max) {
if (!in_escape)
spk_serial_out(PROCSPEECH);
spk_lock(flags);
jiffy_delta_val = jiffy_delta->u.n.value;
delay_time_val = delay_time->u.n.value;
spk_unlock(flags);
schedule_timeout(msecs_to_jiffies
(delay_time_val));
jiff_max = jiffies + jiffy_delta_val;
}
}
last = ch;
}
if (!in_escape)
spk_serial_out(PROCSPEECH);
}
static void synth_flush(struct spk_synth *synth)
{
if (in_escape) {
/* if in command, output ']' so we don't get an error */
spk_serial_out(']');
}
in_escape = 0;
is_flushing = 1;
spk_serial_out(SYNTH_CLEAR);
}
module_param_named(ser, synth_dectlk.ser, int, S_IRUGO);
module_param_named(start, synth_dectlk.startup, short, S_IRUGO);
MODULE_PARM_DESC(ser, "Set the serial port for the synthesizer (0-based).");
MODULE_PARM_DESC(start, "Start the synthesizer once it is loaded.");
static int __init dectlk_init(void)
{
return synth_add(&synth_dectlk);
}
static void __exit dectlk_exit(void)
{
synth_remove(&synth_dectlk);
}
module_init(dectlk_init);
module_exit(dectlk_exit);
MODULE_AUTHOR("Kirk Reiser <kirk@braille.uwo.ca>");
MODULE_AUTHOR("David Borowski");
MODULE_DESCRIPTION("Speakup support for DECtalk Express synthesizers");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
| gpl-2.0 |
cloudlinux/cl7-kernel | arch/mips/dec/wbflush.c | 4268 | 2095 | /*
* Setup the right wbflush routine for the different DECstations.
*
* Created with information from:
* DECstation 3100 Desktop Workstation Functional Specification
* DECstation 5000/200 KN02 System Module Functional Specification
* mipsel-linux-objdump --disassemble vmunix | grep "wbflush" :-)
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 1998 Harald Koerfgen
* Copyright (C) 2002 Maciej W. Rozycki
*/
#include <linux/init.h>
#include <asm/bootinfo.h>
#include <asm/wbflush.h>
#include <asm/barrier.h>
static void wbflush_kn01(void);
static void wbflush_kn210(void);
static void wbflush_mips(void);
void (*__wbflush) (void);
void __init wbflush_setup(void)
{
switch (mips_machtype) {
case MACH_DS23100:
case MACH_DS5000_200: /* DS5000 3max */
__wbflush = wbflush_kn01;
break;
case MACH_DS5100: /* DS5100 MIPSMATE */
__wbflush = wbflush_kn210;
break;
case MACH_DS5000_1XX: /* DS5000/100 3min */
case MACH_DS5000_XX: /* Personal DS5000/2x */
case MACH_DS5000_2X0: /* DS5000/240 3max+ */
case MACH_DS5900: /* DS5900 bigmax */
default:
__wbflush = wbflush_mips;
break;
}
}
/*
* For the DS3100 and DS5000/200 the R2020/R3220 writeback buffer functions
* as part of Coprocessor 0.
*/
static void wbflush_kn01(void)
{
asm(".set\tpush\n\t"
".set\tnoreorder\n\t"
"1:\tbc0f\t1b\n\t"
"nop\n\t"
".set\tpop");
}
/*
* For the DS5100 the writeback buffer seems to be a part of Coprocessor 3.
* But CP3 has to be enabled first.
*/
static void wbflush_kn210(void)
{
asm(".set\tpush\n\t"
".set\tnoreorder\n\t"
"mfc0\t$2,$12\n\t"
"lui\t$3,0x8000\n\t"
"or\t$3,$2,$3\n\t"
"mtc0\t$3,$12\n\t"
"nop\n"
"1:\tbc3f\t1b\n\t"
"nop\n\t"
"mtc0\t$2,$12\n\t"
"nop\n\t"
".set\tpop"
: : : "$2", "$3");
}
/*
* I/O ASIC systems use a standard writeback buffer that gets flushed
* upon an uncached read.
*/
static void wbflush_mips(void)
{
__fast_iob();
}
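/*
 * Illustrative sketch (not part of this file): a DECstation driver that
 * has just stored to a memory-mapped device register and must be sure the
 * store has drained from the CPU's writeback buffer would call wbflush()
 * from <asm/wbflush.h> (already included above), which dispatches to
 * whichever routine wbflush_setup() installed.  The register pointer and
 * this helper are hypothetical and exist only for illustration.
 */
static void __maybe_unused wbflush_usage_example(volatile unsigned int *reg)
{
	*reg = 1;	/* hypothetical "go" bit in a device register */
	wbflush();	/* make sure the store has reached the bus */
}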
#include <linux/module.h>
EXPORT_SYMBOL(__wbflush);
| gpl-2.0 |
davidmueller13/Fulgor_Kernel_Lollipop | arch/arm/mach-pxa/trizeps4.c | 4780 | 14447 | /*
* linux/arch/arm/mach-pxa/trizeps4.c
*
* Support for the Keith und Koep Trizeps4 Module Platform.
*
* Author: Jürgen Schindele
* Created: 20 02, 2006
* Copyright: Jürgen Schindele
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/interrupt.h>
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/bitops.h>
#include <linux/fb.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/gpio.h>
#include <linux/dm9000.h>
#include <linux/mtd/physmap.h>
#include <linux/mtd/partitions.h>
#include <linux/i2c/pxa-i2c.h>
#include <asm/types.h>
#include <asm/setup.h>
#include <asm/memory.h>
#include <asm/mach-types.h>
#include <asm/irq.h>
#include <asm/sizes.h>
#include <asm/mach/arch.h>
#include <asm/mach/map.h>
#include <asm/mach/irq.h>
#include <asm/mach/flash.h>
#include <mach/pxa27x.h>
#include <mach/trizeps4.h>
#include <mach/audio.h>
#include <mach/pxafb.h>
#include <mach/mmc.h>
#include <mach/irda.h>
#include <mach/ohci.h>
#include <mach/smemc.h>
#include "generic.h"
#include "devices.h"
/* Comment out the following line if you want to use the
* standard UART from the PXA for serial / IrDA transmission,
* and activate it if you have status LEDs connected. */
#define STATUS_LEDS_ON_STUART_PINS 1
/*****************************************************************************
* MultiFunctionPins of CPU
*****************************************************************************/
static unsigned long trizeps4_pin_config[] __initdata = {
/* Chip Selects */
GPIO15_nCS_1, /* DiskOnChip CS */
GPIO93_GPIO, /* TRIZEPS4_DOC_IRQ */
GPIO94_GPIO, /* DOC lock */
GPIO78_nCS_2, /* DM9000 CS */
GPIO101_GPIO, /* TRIZEPS4_ETH_IRQ */
GPIO79_nCS_3, /* Logic CS */
GPIO0_GPIO | WAKEUP_ON_EDGE_RISE, /* Logic irq */
/* AC97 */
GPIO28_AC97_BITCLK,
GPIO29_AC97_SDATA_IN_0,
GPIO30_AC97_SDATA_OUT,
GPIO31_AC97_SYNC,
/* LCD - 16bpp Active TFT */
GPIOxx_LCD_TFT_16BPP,
/* UART */
GPIO9_FFUART_CTS,
GPIO10_FFUART_DCD,
GPIO16_FFUART_TXD,
GPIO33_FFUART_DSR,
GPIO38_FFUART_RI,
GPIO82_FFUART_DTR,
GPIO83_FFUART_RTS,
GPIO96_FFUART_RXD,
GPIO42_BTUART_RXD,
GPIO43_BTUART_TXD,
GPIO44_BTUART_CTS,
GPIO45_BTUART_RTS,
#ifdef STATUS_LEDS_ON_STUART_PINS
GPIO46_GPIO,
GPIO47_GPIO,
#else
GPIO46_STUART_RXD,
GPIO47_STUART_TXD,
#endif
/* PCMCIA */
GPIO11_GPIO, /* TRIZEPS4_CD_IRQ */
GPIO13_GPIO, /* TRIZEPS4_READY_NINT */
GPIO48_nPOE,
GPIO49_nPWE,
GPIO50_nPIOR,
GPIO51_nPIOW,
GPIO54_nPCE_2,
GPIO55_nPREG,
GPIO56_nPWAIT,
GPIO57_nIOIS16,
GPIO102_nPCE_1,
GPIO104_PSKTSEL,
/* MultiMediaCard */
GPIO32_MMC_CLK,
GPIO92_MMC_DAT_0,
GPIO109_MMC_DAT_1,
GPIO110_MMC_DAT_2,
GPIO111_MMC_DAT_3,
GPIO112_MMC_CMD,
GPIO12_GPIO, /* TRIZEPS4_MMC_IRQ */
/* USB OHCI */
GPIO88_USBH1_PWR, /* USBHPWR1 */
GPIO89_USBH1_PEN, /* USBHPEN1 */
/* I2C */
GPIO117_I2C_SCL,
GPIO118_I2C_SDA,
};
static unsigned long trizeps4wl_pin_config[] __initdata = {
/* SSP 2 */
GPIO14_SSP2_SFRM,
GPIO19_SSP2_SCLK,
GPIO53_GPIO, /* TRIZEPS4_SPI_IRQ */
GPIO86_SSP2_RXD,
GPIO87_SSP2_TXD,
};
/****************************************************************************
* ONBOARD FLASH
****************************************************************************/
static struct mtd_partition trizeps4_partitions[] = {
{
.name = "Bootloader",
.offset = 0x00000000,
.size = 0x00040000,
.mask_flags = MTD_WRITEABLE /* force read-only */
}, {
.name = "Backup",
.offset = 0x00040000,
.size = 0x00040000,
}, {
.name = "Image",
.offset = 0x00080000,
.size = 0x01080000,
}, {
.name = "IPSM",
.offset = 0x01100000,
.size = 0x00e00000,
}, {
.name = "Registry",
.offset = 0x01f00000,
.size = MTDPART_SIZ_FULL,
}
};
static struct physmap_flash_data trizeps4_flash_data[] = {
{
.width = 4, /* bankwidth in bytes */
.parts = trizeps4_partitions,
.nr_parts = ARRAY_SIZE(trizeps4_partitions)
}
};
static struct resource flash_resource = {
.start = PXA_CS0_PHYS,
.end = PXA_CS0_PHYS + SZ_32M - 1,
.flags = IORESOURCE_MEM,
};
static struct platform_device flash_device = {
.name = "physmap-flash",
.id = 0,
.dev = {
.platform_data = trizeps4_flash_data,
},
.resource = &flash_resource,
.num_resources = 1,
};
/****************************************************************************
* DAVICOM DM9000 Ethernet
****************************************************************************/
static struct resource dm9000_resources[] = {
[0] = {
.start = TRIZEPS4_ETH_PHYS+0x300,
.end = TRIZEPS4_ETH_PHYS+0x400-1,
.flags = IORESOURCE_MEM,
},
[1] = {
.start = TRIZEPS4_ETH_PHYS+0x8300,
.end = TRIZEPS4_ETH_PHYS+0x8400-1,
.flags = IORESOURCE_MEM,
},
[2] = {
.start = TRIZEPS4_ETH_IRQ,
.end = TRIZEPS4_ETH_IRQ,
.flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHEDGE,
},
};
static struct dm9000_plat_data tri_dm9000_platdata = {
.flags = DM9000_PLATF_32BITONLY,
};
static struct platform_device dm9000_device = {
.name = "dm9000",
.id = -1,
.num_resources = ARRAY_SIZE(dm9000_resources),
.resource = dm9000_resources,
.dev = {
.platform_data = &tri_dm9000_platdata,
}
};
/****************************************************************************
* LEDs on GPIO pins of the PXA
****************************************************************************/
static struct gpio_led trizeps4_led[] = {
#ifdef STATUS_LEDS_ON_STUART_PINS
{
.name = "led0:orange:heartbeat", /* */
.default_trigger = "heartbeat",
.gpio = GPIO_HEARTBEAT_LED,
.active_low = 1,
},
{
.name = "led1:yellow:cpubusy", /* */
.default_trigger = "cpu-busy",
.gpio = GPIO_SYS_BUSY_LED,
.active_low = 1,
},
#endif
};
static struct gpio_led_platform_data trizeps4_led_data = {
.leds = trizeps4_led,
.num_leds = ARRAY_SIZE(trizeps4_led),
};
static struct platform_device leds_devices = {
.name = "leds-gpio",
.id = -1,
.dev = {
.platform_data = &trizeps4_led_data,
},
};
static struct platform_device *trizeps4_devices[] __initdata = {
&flash_device,
&dm9000_device,
&leds_devices,
};
static struct platform_device *trizeps4wl_devices[] __initdata = {
&flash_device,
&leds_devices,
};
static short trizeps_conxs_bcr;
/* PCCARD power switching supports only 3.3V */
void board_pcmcia_power(int power)
{
if (power) {
/* switch power on, put in reset and enable buffers */
trizeps_conxs_bcr |= power;
trizeps_conxs_bcr |= ConXS_BCR_CF_RESET;
trizeps_conxs_bcr &= ~ConXS_BCR_CF_BUF_EN;
BCR_writew(trizeps_conxs_bcr);
/* wait a little */
udelay(2000);
/* take reset away */
trizeps_conxs_bcr &= ~ConXS_BCR_CF_RESET;
BCR_writew(trizeps_conxs_bcr);
udelay(2000);
} else {
/* put in reset */
trizeps_conxs_bcr |= ConXS_BCR_CF_RESET;
BCR_writew(trizeps_conxs_bcr);
udelay(1000);
/* switch power off */
trizeps_conxs_bcr &= ~0xf;
BCR_writew(trizeps_conxs_bcr);
}
pr_debug("%s: o%s 0x%x\n", __func__, power ? "n" : "ff",
trizeps_conxs_bcr);
}
EXPORT_SYMBOL(board_pcmcia_power);
/* backlight power switching for LCD panel */
static void board_backlight_power(int on)
{
if (on)
trizeps_conxs_bcr |= ConXS_BCR_L_DISP;
else
trizeps_conxs_bcr &= ~ConXS_BCR_L_DISP;
pr_debug("%s: o%s 0x%x\n", __func__, on ? "n" : "ff",
trizeps_conxs_bcr);
BCR_writew(trizeps_conxs_bcr);
}
/* an I2C-based RTC is known to be on the ConXS board */
static struct i2c_board_info trizeps4_i2c_devices[] __initdata = {
{ I2C_BOARD_INFO("rtc-pcf8593", 0x51) }
};
/****************************************************************************
* MMC card slot external to module
****************************************************************************/
static int trizeps4_mci_init(struct device *dev, irq_handler_t mci_detect_int,
void *data)
{
int err;
err = request_irq(TRIZEPS4_MMC_IRQ, mci_detect_int,
IRQF_DISABLED | IRQF_TRIGGER_RISING | IRQF_SAMPLE_RANDOM,
"MMC card detect", data);
if (err) {
printk(KERN_ERR "trizeps4_mci_init: MMC/SD: can't request"
"MMC card detect IRQ\n");
return -1;
}
return 0;
}
static void trizeps4_mci_exit(struct device *dev, void *data)
{
free_irq(TRIZEPS4_MMC_IRQ, data);
}
static struct pxamci_platform_data trizeps4_mci_platform_data = {
.ocr_mask = MMC_VDD_32_33|MMC_VDD_33_34,
.detect_delay_ms= 10,
.init = trizeps4_mci_init,
.exit = trizeps4_mci_exit,
.get_ro = NULL, /* write-protection not supported */
.setpower = NULL, /* power-switching not supported */
.gpio_card_detect = -1,
.gpio_card_ro = -1,
.gpio_power = -1,
};
/****************************************************************************
* IRDA mode switching on stuart
****************************************************************************/
#ifndef STATUS_LEDS_ON_STUART_PINS
static short trizeps_conxs_ircr;
static int trizeps4_irda_startup(struct device *dev)
{
trizeps_conxs_ircr &= ~ConXS_IRCR_SD;
IRCR_writew(trizeps_conxs_ircr);
return 0;
}
static void trizeps4_irda_shutdown(struct device *dev)
{
trizeps_conxs_ircr |= ConXS_IRCR_SD;
IRCR_writew(trizeps_conxs_ircr);
}
static void trizeps4_irda_transceiver_mode(struct device *dev, int mode)
{
unsigned long flags;
local_irq_save(flags);
/* Switch mode */
if (mode & IR_SIRMODE)
trizeps_conxs_ircr &= ~ConXS_IRCR_MODE; /* Slow mode */
else if (mode & IR_FIRMODE)
trizeps_conxs_ircr |= ConXS_IRCR_MODE; /* Fast mode */
/* Switch power */
if (mode & IR_OFF)
trizeps_conxs_ircr |= ConXS_IRCR_SD;
else
trizeps_conxs_ircr &= ~ConXS_IRCR_SD;
IRCR_writew(trizeps_conxs_ircr);
local_irq_restore(flags);
pxa2xx_transceiver_mode(dev, mode);
}
static struct pxaficp_platform_data trizeps4_ficp_platform_data = {
.gpio_pwdown = -1,
.transceiver_cap = IR_SIRMODE | IR_FIRMODE | IR_OFF,
.transceiver_mode = trizeps4_irda_transceiver_mode,
.startup = trizeps4_irda_startup,
.shutdown = trizeps4_irda_shutdown,
};
#endif
/****************************************************************************
* OHCI USB port
****************************************************************************/
static struct pxaohci_platform_data trizeps4_ohci_platform_data = {
.port_mode = PMM_PERPORT_MODE,
.flags = ENABLE_PORT_ALL | POWER_CONTROL_LOW | POWER_SENSE_LOW,
};
static struct map_desc trizeps4_io_desc[] __initdata = {
{ /* ConXS CFSR */
.virtual = TRIZEPS4_CFSR_VIRT,
.pfn = __phys_to_pfn(TRIZEPS4_CFSR_PHYS),
.length = 0x00001000,
.type = MT_DEVICE
},
{ /* ConXS BCR */
.virtual = TRIZEPS4_BOCR_VIRT,
.pfn = __phys_to_pfn(TRIZEPS4_BOCR_PHYS),
.length = 0x00001000,
.type = MT_DEVICE
},
{ /* ConXS IRCR */
.virtual = TRIZEPS4_IRCR_VIRT,
.pfn = __phys_to_pfn(TRIZEPS4_IRCR_PHYS),
.length = 0x00001000,
.type = MT_DEVICE
},
{ /* ConXS DCR */
.virtual = TRIZEPS4_DICR_VIRT,
.pfn = __phys_to_pfn(TRIZEPS4_DICR_PHYS),
.length = 0x00001000,
.type = MT_DEVICE
},
{ /* ConXS UPSR */
.virtual = TRIZEPS4_UPSR_VIRT,
.pfn = __phys_to_pfn(TRIZEPS4_UPSR_PHYS),
.length = 0x00001000,
.type = MT_DEVICE
}
};
static struct pxafb_mode_info sharp_lcd_mode = {
.pixclock = 78000,
.xres = 640,
.yres = 480,
.bpp = 8,
.hsync_len = 4,
.left_margin = 4,
.right_margin = 4,
.vsync_len = 2,
.upper_margin = 0,
.lower_margin = 0,
.sync = FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
.cmap_greyscale = 0,
};
static struct pxafb_mach_info sharp_lcd = {
.modes = &sharp_lcd_mode,
.num_modes = 1,
.lcd_conn = LCD_COLOR_DSTN_16BPP | LCD_PCLK_EDGE_FALL,
.cmap_inverse = 0,
.cmap_static = 0,
.pxafb_backlight_power = board_backlight_power,
};
static struct pxafb_mode_info toshiba_lcd_mode = {
.pixclock = 39720,
.xres = 640,
.yres = 480,
.bpp = 8,
.hsync_len = 63,
.left_margin = 12,
.right_margin = 12,
.vsync_len = 4,
.upper_margin = 32,
.lower_margin = 10,
.sync = 0,
.cmap_greyscale = 0,
};
static struct pxafb_mach_info toshiba_lcd = {
.modes = &toshiba_lcd_mode,
.num_modes = 1,
.lcd_conn = (LCD_COLOR_TFT_16BPP | LCD_PCLK_EDGE_FALL),
.cmap_inverse = 0,
.cmap_static = 0,
.pxafb_backlight_power = board_backlight_power,
};
static void __init trizeps4_init(void)
{
pxa2xx_mfp_config(ARRAY_AND_SIZE(trizeps4_pin_config));
if (machine_is_trizeps4wl()) {
pxa2xx_mfp_config(ARRAY_AND_SIZE(trizeps4wl_pin_config));
platform_add_devices(trizeps4wl_devices,
ARRAY_SIZE(trizeps4wl_devices));
} else {
platform_add_devices(trizeps4_devices,
ARRAY_SIZE(trizeps4_devices));
}
pxa_set_ffuart_info(NULL);
pxa_set_btuart_info(NULL);
pxa_set_stuart_info(NULL);
if (0) /* don't know how to determine LCD */
pxa_set_fb_info(NULL, &sharp_lcd);
else
pxa_set_fb_info(NULL, &toshiba_lcd);
pxa_set_mci_info(&trizeps4_mci_platform_data);
#ifndef STATUS_LEDS_ON_STUART_PINS
pxa_set_ficp_info(&trizeps4_ficp_platform_data);
#endif
pxa_set_ohci_info(&trizeps4_ohci_platform_data);
pxa_set_ac97_info(NULL);
pxa_set_i2c_info(NULL);
i2c_register_board_info(0, trizeps4_i2c_devices,
ARRAY_SIZE(trizeps4_i2c_devices));
/* this is the reset value */
trizeps_conxs_bcr = 0x00A0;
BCR_writew(trizeps_conxs_bcr);
board_backlight_power(1);
}
static void __init trizeps4_map_io(void)
{
pxa27x_map_io();
iotable_init(trizeps4_io_desc, ARRAY_SIZE(trizeps4_io_desc));
if ((__raw_readl(MSC0) & 0x8) && (__raw_readl(BOOT_DEF) & 0x1)) {
/* if flash is 16 bit wide it's a Trizeps4 WL */
__machine_arch_type = MACH_TYPE_TRIZEPS4WL;
trizeps4_flash_data[0].width = 2;
} else {
/* if flash is 32 bit wide it's a Trizeps4 */
__machine_arch_type = MACH_TYPE_TRIZEPS4;
trizeps4_flash_data[0].width = 4;
}
}
MACHINE_START(TRIZEPS4, "Keith und Koep Trizeps IV module")
/* MAINTAINER("Jürgen Schindele") */
.atag_offset = 0x100,
.init_machine = trizeps4_init,
.map_io = trizeps4_map_io,
.nr_irqs = PXA_NR_IRQS,
.init_irq = pxa27x_init_irq,
.handle_irq = pxa27x_handle_irq,
.timer = &pxa_timer,
.restart = pxa_restart,
MACHINE_END
MACHINE_START(TRIZEPS4WL, "Keith und Koep Trizeps IV-WL module")
/* MAINTAINER("Jürgen Schindele") */
.atag_offset = 0x100,
.init_machine = trizeps4_init,
.map_io = trizeps4_map_io,
.nr_irqs = PXA_NR_IRQS,
.init_irq = pxa27x_init_irq,
.handle_irq = pxa27x_handle_irq,
.timer = &pxa_timer,
.restart = pxa_restart,
MACHINE_END
| gpl-2.0 |
blaskewitts/android_kernel_samsung_klte | drivers/scsi/arm/cumana_1.c | 5036 | 7936 | /*
* Generic NCR5380 driver
*
* Copyright 1995-2002, Russell King
*/
#include <linux/module.h>
#include <linux/signal.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/blkdev.h>
#include <linux/init.h>
#include <asm/ecard.h>
#include <asm/io.h>
#include "../scsi.h"
#include <scsi/scsi_host.h>
#include <scsi/scsicam.h>
#define AUTOSENSE
#define PSEUDO_DMA
#define CUMANASCSI_PUBLIC_RELEASE 1
#define priv(host) ((struct NCR5380_hostdata *)(host)->hostdata)
#define NCR5380_local_declare() struct Scsi_Host *_instance
#define NCR5380_setup(instance) _instance = instance
#define NCR5380_read(reg) cumanascsi_read(_instance, reg)
#define NCR5380_write(reg, value) cumanascsi_write(_instance, reg, value)
#define NCR5380_intr cumanascsi_intr
#define NCR5380_queue_command cumanascsi_queue_command
#define NCR5380_proc_info cumanascsi_proc_info
#define NCR5380_implementation_fields \
unsigned ctrl; \
void __iomem *base; \
void __iomem *dma
#define BOARD_NORMAL 0
#define BOARD_NCR53C400 1
#include "../NCR5380.h"
void cumanascsi_setup(char *str, int *ints)
{
}
const char *cumanascsi_info(struct Scsi_Host *spnt)
{
return "";
}
#define CTRL 0x16fc
#define STAT 0x2004
#define L(v) (((v)<<16)|((v) & 0x0000ffff))
#define H(v) (((v)>>16)|((v) & 0xffff0000))
static inline int
NCR5380_pwrite(struct Scsi_Host *host, unsigned char *addr, int len)
{
unsigned long *laddr;
void __iomem *dma = priv(host)->dma + 0x2000;
if(!len) return 0;
writeb(0x02, priv(host)->base + CTRL);
laddr = (unsigned long *)addr;
while(len >= 32)
{
unsigned int status;
unsigned long v;
status = readb(priv(host)->base + STAT);
if(status & 0x80)
goto end;
if(!(status & 0x40))
continue;
v=*laddr++; writew(L(v), dma); writew(H(v), dma);
v=*laddr++; writew(L(v), dma); writew(H(v), dma);
v=*laddr++; writew(L(v), dma); writew(H(v), dma);
v=*laddr++; writew(L(v), dma); writew(H(v), dma);
v=*laddr++; writew(L(v), dma); writew(H(v), dma);
v=*laddr++; writew(L(v), dma); writew(H(v), dma);
v=*laddr++; writew(L(v), dma); writew(H(v), dma);
v=*laddr++; writew(L(v), dma); writew(H(v), dma);
len -= 32;
if(len == 0)
break;
}
addr = (unsigned char *)laddr;
writeb(0x12, priv(host)->base + CTRL);
while(len > 0)
{
unsigned int status;
status = readb(priv(host)->base + STAT);
if(status & 0x80)
goto end;
if(status & 0x40)
{
writeb(*addr++, dma);
if(--len == 0)
break;
}
status = readb(priv(host)->base + STAT);
if(status & 0x80)
goto end;
if(status & 0x40)
{
writeb(*addr++, dma);
if(--len == 0)
break;
}
}
end:
writeb(priv(host)->ctrl | 0x40, priv(host)->base + CTRL);
return len;
}
static inline int
NCR5380_pread(struct Scsi_Host *host, unsigned char *addr, int len)
{
unsigned long *laddr;
void __iomem *dma = priv(host)->dma + 0x2000;
if(!len) return 0;
writeb(0x00, priv(host)->base + CTRL);
laddr = (unsigned long *)addr;
while(len >= 32)
{
unsigned int status;
status = readb(priv(host)->base + STAT);
if(status & 0x80)
goto end;
if(!(status & 0x40))
continue;
*laddr++ = readw(dma) | (readw(dma) << 16);
*laddr++ = readw(dma) | (readw(dma) << 16);
*laddr++ = readw(dma) | (readw(dma) << 16);
*laddr++ = readw(dma) | (readw(dma) << 16);
*laddr++ = readw(dma) | (readw(dma) << 16);
*laddr++ = readw(dma) | (readw(dma) << 16);
*laddr++ = readw(dma) | (readw(dma) << 16);
*laddr++ = readw(dma) | (readw(dma) << 16);
len -= 32;
if(len == 0)
break;
}
addr = (unsigned char *)laddr;
writeb(0x10, priv(host)->base + CTRL);
while(len > 0)
{
unsigned int status;
status = readb(priv(host)->base + STAT);
if(status & 0x80)
goto end;
if(status & 0x40)
{
*addr++ = readb(dma);
if(--len == 0)
break;
}
status = readb(priv(host)->base + STAT);
if(status & 0x80)
goto end;
if(status & 0x40)
{
*addr++ = readb(dma);
if(--len == 0)
break;
}
}
end:
writeb(priv(host)->ctrl | 0x40, priv(host)->base + CTRL);
return len;
}
static unsigned char cumanascsi_read(struct Scsi_Host *host, unsigned int reg)
{
void __iomem *base = priv(host)->base;
unsigned char val;
writeb(0, base + CTRL);
val = readb(base + 0x2100 + (reg << 2));
priv(host)->ctrl = 0x40;
writeb(0x40, base + CTRL);
return val;
}
static void cumanascsi_write(struct Scsi_Host *host, unsigned int reg, unsigned int value)
{
void __iomem *base = priv(host)->base;
writeb(0, base + CTRL);
writeb(value, base + 0x2100 + (reg << 2));
priv(host)->ctrl = 0x40;
writeb(0x40, base + CTRL);
}
#include "../NCR5380.c"
static struct scsi_host_template cumanascsi_template = {
.module = THIS_MODULE,
.name = "Cumana 16-bit SCSI",
.info = cumanascsi_info,
.queuecommand = cumanascsi_queue_command,
.eh_abort_handler = NCR5380_abort,
.eh_bus_reset_handler = NCR5380_bus_reset,
.can_queue = 16,
.this_id = 7,
.sg_tablesize = SG_ALL,
.cmd_per_lun = 2,
.use_clustering = DISABLE_CLUSTERING,
.proc_name = "CumanaSCSI-1",
};
static int __devinit
cumanascsi1_probe(struct expansion_card *ec, const struct ecard_id *id)
{
struct Scsi_Host *host;
int ret;
ret = ecard_request_resources(ec);
if (ret)
goto out;
host = scsi_host_alloc(&cumanascsi_template, sizeof(struct NCR5380_hostdata));
if (!host) {
ret = -ENOMEM;
goto out_release;
}
priv(host)->base = ioremap(ecard_resource_start(ec, ECARD_RES_IOCSLOW),
ecard_resource_len(ec, ECARD_RES_IOCSLOW));
priv(host)->dma = ioremap(ecard_resource_start(ec, ECARD_RES_MEMC),
ecard_resource_len(ec, ECARD_RES_MEMC));
if (!priv(host)->base || !priv(host)->dma) {
ret = -ENOMEM;
goto out_unmap;
}
host->irq = ec->irq;
NCR5380_init(host, 0);
priv(host)->ctrl = 0;
writeb(0, priv(host)->base + CTRL);
host->n_io_port = 255;
if (!(request_region(host->io_port, host->n_io_port, "CumanaSCSI-1"))) {
ret = -EBUSY;
goto out_unmap;
}
ret = request_irq(host->irq, cumanascsi_intr, IRQF_DISABLED,
"CumanaSCSI-1", host);
if (ret) {
printk("scsi%d: IRQ%d not free: %d\n",
host->host_no, host->irq, ret);
goto out_unmap;
}
printk("scsi%d: at port 0x%08lx irq %d",
host->host_no, host->io_port, host->irq);
printk(" options CAN_QUEUE=%d CMD_PER_LUN=%d release=%d",
host->can_queue, host->cmd_per_lun, CUMANASCSI_PUBLIC_RELEASE);
printk("\nscsi%d:", host->host_no);
NCR5380_print_options(host);
printk("\n");
ret = scsi_add_host(host, &ec->dev);
if (ret)
goto out_free_irq;
scsi_scan_host(host);
goto out;
out_free_irq:
free_irq(host->irq, host);
out_unmap:
iounmap(priv(host)->base);
iounmap(priv(host)->dma);
scsi_host_put(host);
out_release:
ecard_release_resources(ec);
out:
return ret;
}
static void __devexit cumanascsi1_remove(struct expansion_card *ec)
{
struct Scsi_Host *host = ecard_get_drvdata(ec);
ecard_set_drvdata(ec, NULL);
scsi_remove_host(host);
free_irq(host->irq, host);
NCR5380_exit(host);
iounmap(priv(host)->base);
iounmap(priv(host)->dma);
scsi_host_put(host);
ecard_release_resources(ec);
}
static const struct ecard_id cumanascsi1_cids[] = {
{ MANU_CUMANA, PROD_CUMANA_SCSI_1 },
{ 0xffff, 0xffff }
};
static struct ecard_driver cumanascsi1_driver = {
.probe = cumanascsi1_probe,
.remove = __devexit_p(cumanascsi1_remove),
.id_table = cumanascsi1_cids,
.drv = {
.name = "cumanascsi1",
},
};
static int __init cumanascsi_init(void)
{
return ecard_register_driver(&cumanascsi1_driver);
}
static void __exit cumanascsi_exit(void)
{
ecard_remove_driver(&cumanascsi1_driver);
}
module_init(cumanascsi_init);
module_exit(cumanascsi_exit);
MODULE_DESCRIPTION("Cumana SCSI-1 driver for Acorn machines");
MODULE_LICENSE("GPL");
| gpl-2.0 |
Fagulhas/android_kernel_huawei_u8815 | arch/unicore32/kernel/hibernate.c | 6828 | 3524 | /*
* linux/arch/unicore32/kernel/hibernate.c
*
* Code specific to PKUnity SoC and UniCore ISA
*
* Maintained by GUAN Xue-tao <gxt@mprc.pku.edu.cn>
* Copyright (C) 2001-2010 Guan Xuetao
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/gfp.h>
#include <linux/suspend.h>
#include <linux/bootmem.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/suspend.h>
#include "mach/pm.h"
/* Pointer to the temporary resume page tables */
pgd_t *resume_pg_dir;
struct swsusp_arch_regs swsusp_arch_regs_cpu0;
/*
* Create a middle page table on a resume-safe page and put a pointer to it in
* the given global directory entry. This only returns the gd entry
* in non-PAE compilation mode, since the middle layer is folded.
*/
static pmd_t *resume_one_md_table_init(pgd_t *pgd)
{
pud_t *pud;
pmd_t *pmd_table;
pud = pud_offset(pgd, 0);
pmd_table = pmd_offset(pud, 0);
return pmd_table;
}
/*
* Create a page table on a resume-safe page and place a pointer to it in
* a middle page directory entry.
*/
static pte_t *resume_one_page_table_init(pmd_t *pmd)
{
if (pmd_none(*pmd)) {
pte_t *page_table = (pte_t *)get_safe_page(GFP_ATOMIC);
if (!page_table)
return NULL;
set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_KERNEL_TABLE));
BUG_ON(page_table != pte_offset_kernel(pmd, 0));
return page_table;
}
return pte_offset_kernel(pmd, 0);
}
/*
* This maps the physical memory to kernel virtual address space, a total
* of max_low_pfn pages, by creating page tables starting from address
* PAGE_OFFSET. The page tables are allocated out of resume-safe pages.
*/
static int resume_physical_mapping_init(pgd_t *pgd_base)
{
unsigned long pfn;
pgd_t *pgd;
pmd_t *pmd;
pte_t *pte;
int pgd_idx, pmd_idx;
pgd_idx = pgd_index(PAGE_OFFSET);
pgd = pgd_base + pgd_idx;
pfn = 0;
for (; pgd_idx < PTRS_PER_PGD; pgd++, pgd_idx++) {
pmd = resume_one_md_table_init(pgd);
if (!pmd)
return -ENOMEM;
if (pfn >= max_low_pfn)
continue;
for (pmd_idx = 0; pmd_idx < PTRS_PER_PMD; pmd++, pmd_idx++) {
pte_t *max_pte;
if (pfn >= max_low_pfn)
break;
/* Map with normal page tables.
* NOTE: We can mark everything as executable here
*/
pte = resume_one_page_table_init(pmd);
if (!pte)
return -ENOMEM;
max_pte = pte + PTRS_PER_PTE;
for (; pte < max_pte; pte++, pfn++) {
if (pfn >= max_low_pfn)
break;
set_pte(pte, pfn_pte(pfn, PAGE_KERNEL_EXEC));
}
}
}
return 0;
}
static inline void resume_init_first_level_page_table(pgd_t *pg_dir)
{
}
int swsusp_arch_resume(void)
{
int error;
resume_pg_dir = (pgd_t *)get_safe_page(GFP_ATOMIC);
if (!resume_pg_dir)
return -ENOMEM;
resume_init_first_level_page_table(resume_pg_dir);
error = resume_physical_mapping_init(resume_pg_dir);
if (error)
return error;
/* We have got enough memory and from now on we cannot recover */
restore_image(resume_pg_dir, restore_pblist);
return 0;
}
/*
* pfn_is_nosave - check if given pfn is in the 'nosave' section
*/
int pfn_is_nosave(unsigned long pfn)
{
unsigned long begin_pfn = __pa(&__nosave_begin) >> PAGE_SHIFT;
unsigned long end_pfn = PAGE_ALIGN(__pa(&__nosave_end)) >> PAGE_SHIFT;
return (pfn >= begin_pfn) && (pfn < end_pfn);
}
void save_processor_state(void)
{
}
void restore_processor_state(void)
{
local_flush_tlb_all();
}
| gpl-2.0 |
Split-Screen/android_kernel_lge_g3 | drivers/staging/vt6656/tcrc.c | 9132 | 6673 | /*
* Copyright (c) 2003 VIA Networking, Inc. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*
*
* File: tcrc.c
*
* Purpose: Implement functions to calculate CRC
*
* Author: Tevin Chen
*
* Date: May 21, 1996
*
* Functions:
* CRCdwCrc32 -
* CRCdwGetCrc32 -
* CRCdwGetCrc32Ex -
*
* Revision History:
*
*/
#include "tcrc.h"
/*--------------------- Static Definitions -------------------------*/
/*--------------------- Static Classes ----------------------------*/
/*--------------------- Static Variables --------------------------*/
/* 32-bit CRC table */
static const DWORD s_adwCrc32Table[256] = {
0x00000000L, 0x77073096L, 0xEE0E612CL, 0x990951BAL,
0x076DC419L, 0x706AF48FL, 0xE963A535L, 0x9E6495A3L,
0x0EDB8832L, 0x79DCB8A4L, 0xE0D5E91EL, 0x97D2D988L,
0x09B64C2BL, 0x7EB17CBDL, 0xE7B82D07L, 0x90BF1D91L,
0x1DB71064L, 0x6AB020F2L, 0xF3B97148L, 0x84BE41DEL,
0x1ADAD47DL, 0x6DDDE4EBL, 0xF4D4B551L, 0x83D385C7L,
0x136C9856L, 0x646BA8C0L, 0xFD62F97AL, 0x8A65C9ECL,
0x14015C4FL, 0x63066CD9L, 0xFA0F3D63L, 0x8D080DF5L,
0x3B6E20C8L, 0x4C69105EL, 0xD56041E4L, 0xA2677172L,
0x3C03E4D1L, 0x4B04D447L, 0xD20D85FDL, 0xA50AB56BL,
0x35B5A8FAL, 0x42B2986CL, 0xDBBBC9D6L, 0xACBCF940L,
0x32D86CE3L, 0x45DF5C75L, 0xDCD60DCFL, 0xABD13D59L,
0x26D930ACL, 0x51DE003AL, 0xC8D75180L, 0xBFD06116L,
0x21B4F4B5L, 0x56B3C423L, 0xCFBA9599L, 0xB8BDA50FL,
0x2802B89EL, 0x5F058808L, 0xC60CD9B2L, 0xB10BE924L,
0x2F6F7C87L, 0x58684C11L, 0xC1611DABL, 0xB6662D3DL,
0x76DC4190L, 0x01DB7106L, 0x98D220BCL, 0xEFD5102AL,
0x71B18589L, 0x06B6B51FL, 0x9FBFE4A5L, 0xE8B8D433L,
0x7807C9A2L, 0x0F00F934L, 0x9609A88EL, 0xE10E9818L,
0x7F6A0DBBL, 0x086D3D2DL, 0x91646C97L, 0xE6635C01L,
0x6B6B51F4L, 0x1C6C6162L, 0x856530D8L, 0xF262004EL,
0x6C0695EDL, 0x1B01A57BL, 0x8208F4C1L, 0xF50FC457L,
0x65B0D9C6L, 0x12B7E950L, 0x8BBEB8EAL, 0xFCB9887CL,
0x62DD1DDFL, 0x15DA2D49L, 0x8CD37CF3L, 0xFBD44C65L,
0x4DB26158L, 0x3AB551CEL, 0xA3BC0074L, 0xD4BB30E2L,
0x4ADFA541L, 0x3DD895D7L, 0xA4D1C46DL, 0xD3D6F4FBL,
0x4369E96AL, 0x346ED9FCL, 0xAD678846L, 0xDA60B8D0L,
0x44042D73L, 0x33031DE5L, 0xAA0A4C5FL, 0xDD0D7CC9L,
0x5005713CL, 0x270241AAL, 0xBE0B1010L, 0xC90C2086L,
0x5768B525L, 0x206F85B3L, 0xB966D409L, 0xCE61E49FL,
0x5EDEF90EL, 0x29D9C998L, 0xB0D09822L, 0xC7D7A8B4L,
0x59B33D17L, 0x2EB40D81L, 0xB7BD5C3BL, 0xC0BA6CADL,
0xEDB88320L, 0x9ABFB3B6L, 0x03B6E20CL, 0x74B1D29AL,
0xEAD54739L, 0x9DD277AFL, 0x04DB2615L, 0x73DC1683L,
0xE3630B12L, 0x94643B84L, 0x0D6D6A3EL, 0x7A6A5AA8L,
0xE40ECF0BL, 0x9309FF9DL, 0x0A00AE27L, 0x7D079EB1L,
0xF00F9344L, 0x8708A3D2L, 0x1E01F268L, 0x6906C2FEL,
0xF762575DL, 0x806567CBL, 0x196C3671L, 0x6E6B06E7L,
0xFED41B76L, 0x89D32BE0L, 0x10DA7A5AL, 0x67DD4ACCL,
0xF9B9DF6FL, 0x8EBEEFF9L, 0x17B7BE43L, 0x60B08ED5L,
0xD6D6A3E8L, 0xA1D1937EL, 0x38D8C2C4L, 0x4FDFF252L,
0xD1BB67F1L, 0xA6BC5767L, 0x3FB506DDL, 0x48B2364BL,
0xD80D2BDAL, 0xAF0A1B4CL, 0x36034AF6L, 0x41047A60L,
0xDF60EFC3L, 0xA867DF55L, 0x316E8EEFL, 0x4669BE79L,
0xCB61B38CL, 0xBC66831AL, 0x256FD2A0L, 0x5268E236L,
0xCC0C7795L, 0xBB0B4703L, 0x220216B9L, 0x5505262FL,
0xC5BA3BBEL, 0xB2BD0B28L, 0x2BB45A92L, 0x5CB36A04L,
0xC2D7FFA7L, 0xB5D0CF31L, 0x2CD99E8BL, 0x5BDEAE1DL,
0x9B64C2B0L, 0xEC63F226L, 0x756AA39CL, 0x026D930AL,
0x9C0906A9L, 0xEB0E363FL, 0x72076785L, 0x05005713L,
0x95BF4A82L, 0xE2B87A14L, 0x7BB12BAEL, 0x0CB61B38L,
0x92D28E9BL, 0xE5D5BE0DL, 0x7CDCEFB7L, 0x0BDBDF21L,
0x86D3D2D4L, 0xF1D4E242L, 0x68DDB3F8L, 0x1FDA836EL,
0x81BE16CDL, 0xF6B9265BL, 0x6FB077E1L, 0x18B74777L,
0x88085AE6L, 0xFF0F6A70L, 0x66063BCAL, 0x11010B5CL,
0x8F659EFFL, 0xF862AE69L, 0x616BFFD3L, 0x166CCF45L,
0xA00AE278L, 0xD70DD2EEL, 0x4E048354L, 0x3903B3C2L,
0xA7672661L, 0xD06016F7L, 0x4969474DL, 0x3E6E77DBL,
0xAED16A4AL, 0xD9D65ADCL, 0x40DF0B66L, 0x37D83BF0L,
0xA9BCAE53L, 0xDEBB9EC5L, 0x47B2CF7FL, 0x30B5FFE9L,
0xBDBDF21CL, 0xCABAC28AL, 0x53B39330L, 0x24B4A3A6L,
0xBAD03605L, 0xCDD70693L, 0x54DE5729L, 0x23D967BFL,
0xB3667A2EL, 0xC4614AB8L, 0x5D681B02L, 0x2A6F2B94L,
0xB40BBE37L, 0xC30C8EA1L, 0x5A05DF1BL, 0x2D02EF8DL
};
/*--------------------- Static Functions --------------------------*/
/*--------------------- Export Variables --------------------------*/
/*+
*
* Description:
* Generate a CRC-32 from the data stream
*
* Parameters:
* In:
* pbyData - the data stream
* cbByte - the length of the stream
* dwCrcSeed - Seed for CRC32
* Out:
* none
*
* Return Value: CRC-32
*
-*/
DWORD CRCdwCrc32(PBYTE pbyData, unsigned int cbByte, DWORD dwCrcSeed)
{
DWORD dwCrc;
dwCrc = dwCrcSeed;
while (cbByte--) {
dwCrc = s_adwCrc32Table[(BYTE)((dwCrc ^ (*pbyData)) & 0xFF)] ^
(dwCrc >> 8);
pbyData++;
}
return dwCrc;
}
/*+
*
* Description:
* To test the CRC generator, input an 8-byte packet
* -- 0xff 0xff 0xff 0xff 0x00 0x00 0x00 0x00
* the generated CRC should be
* -- 0xff 0xff 0xff 0xff
*
* Parameters:
* In:
* pbyData - the data stream
* cbByte - the length of the stream
* Out:
* none
*
* Return Value: CRC-32
*
-*/
DWORD CRCdwGetCrc32(PBYTE pbyData, unsigned int cbByte)
{
return ~CRCdwCrc32(pbyData, cbByte, 0xFFFFFFFFL);
}
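/*
 * Illustrative self-test sketch (not part of the original file): feeds the
 * 8-byte packet described in the comment above through CRCdwGetCrc32() and
 * reports whether the result matches the documented value 0xFFFFFFFF.  The
 * helper is hypothetical and only shows how the routine is called.
 */
static int __maybe_unused CRCSelfTestExample(void)
{
	BYTE abyPacket[8] = {
		0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00
	};

	return CRCdwGetCrc32(abyPacket, sizeof(abyPacket)) == 0xFFFFFFFFUL;
}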
/*+
*
* Description:
*
* NOTE: Because CRCdwGetCrc32Ex() is an iterative function, the
* output of one CRCdwGetCrc32Ex() call is used as the seed argument
* for the next CRCdwGetCrc32Ex() call. Thus, the final result must
* be inverted to obtain the correct answer.
*
* Parameters:
* In:
* pbyData - the data stream
* cbByte - the length of the stream
* Out:
* none
*
* Return Value: CRC-32
*
-*/
DWORD CRCdwGetCrc32Ex(PBYTE pbyData, unsigned int cbByte, DWORD dwPreCRC)
{
return CRCdwCrc32(pbyData, cbByte, dwPreCRC);
}
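/*
 * Illustrative sketch (not part of the original file): computing the CRC
 * of a buffer in two pieces with CRCdwGetCrc32Ex().  Per the NOTE above,
 * the running value starts at 0xFFFFFFFF and the result is inverted once
 * at the very end, so it matches what CRCdwGetCrc32() would return for
 * the whole buffer.  The helper is hypothetical and only demonstrates the
 * calling convention.
 */
static DWORD __maybe_unused CRCChainedExample(PBYTE pbyData,
					      unsigned int cbFirst,
					      unsigned int cbTotal)
{
	DWORD dwCrc = 0xFFFFFFFFL;

	dwCrc = CRCdwGetCrc32Ex(pbyData, cbFirst, dwCrc);
	dwCrc = CRCdwGetCrc32Ex(pbyData + cbFirst, cbTotal - cbFirst, dwCrc);
	return ~dwCrc;	/* invert once at the very end */
}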
| gpl-2.0 |
tpmullan/android_kernel_asus_tf700 | drivers/media/video/pvrusb2/pvrusb2-ctrl.c | 11692 | 14185 | /*
*
*
* Copyright (C) 2005 Mike Isely <isely@pobox.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
*/
#include "pvrusb2-ctrl.h"
#include "pvrusb2-hdw-internal.h"
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/mutex.h>
static int pvr2_ctrl_range_check(struct pvr2_ctrl *cptr,int val)
{
if (cptr->info->check_value) {
if (!cptr->info->check_value(cptr,val)) return -ERANGE;
} else if (cptr->info->type == pvr2_ctl_enum) {
if (val < 0) return -ERANGE;
if (val >= cptr->info->def.type_enum.count) return -ERANGE;
} else {
int lim;
lim = cptr->info->def.type_int.min_value;
if (cptr->info->get_min_value) {
cptr->info->get_min_value(cptr,&lim);
}
if (val < lim) return -ERANGE;
lim = cptr->info->def.type_int.max_value;
if (cptr->info->get_max_value) {
cptr->info->get_max_value(cptr,&lim);
}
if (val > lim) return -ERANGE;
}
return 0;
}
/* Set the given control. */
int pvr2_ctrl_set_value(struct pvr2_ctrl *cptr,int val)
{
return pvr2_ctrl_set_mask_value(cptr,~0,val);
}
/* Set/clear specific bits of the given control. */
int pvr2_ctrl_set_mask_value(struct pvr2_ctrl *cptr,int mask,int val)
{
int ret = 0;
if (!cptr) return -EINVAL;
LOCK_TAKE(cptr->hdw->big_lock); do {
if (cptr->info->set_value) {
if (cptr->info->type == pvr2_ctl_bitmask) {
mask &= cptr->info->def.type_bitmask.valid_bits;
} else if ((cptr->info->type == pvr2_ctl_int)||
(cptr->info->type == pvr2_ctl_enum)) {
ret = pvr2_ctrl_range_check(cptr,val);
if (ret < 0) break;
} else if (cptr->info->type != pvr2_ctl_bool) {
break;
}
ret = cptr->info->set_value(cptr,mask,val);
} else {
ret = -EPERM;
}
} while(0); LOCK_GIVE(cptr->hdw->big_lock);
return ret;
}
/* Get the current value of the given control. */
int pvr2_ctrl_get_value(struct pvr2_ctrl *cptr,int *valptr)
{
int ret = 0;
if (!cptr) return -EINVAL;
LOCK_TAKE(cptr->hdw->big_lock); do {
ret = cptr->info->get_value(cptr,valptr);
} while(0); LOCK_GIVE(cptr->hdw->big_lock);
return ret;
}
/* Retrieve control's type */
enum pvr2_ctl_type pvr2_ctrl_get_type(struct pvr2_ctrl *cptr)
{
if (!cptr) return pvr2_ctl_int;
return cptr->info->type;
}
/* Retrieve control's maximum value (int type) */
int pvr2_ctrl_get_max(struct pvr2_ctrl *cptr)
{
int ret = 0;
if (!cptr) return 0;
LOCK_TAKE(cptr->hdw->big_lock); do {
if (cptr->info->get_max_value) {
cptr->info->get_max_value(cptr,&ret);
} else if (cptr->info->type == pvr2_ctl_int) {
ret = cptr->info->def.type_int.max_value;
}
} while(0); LOCK_GIVE(cptr->hdw->big_lock);
return ret;
}
/* Retrieve control's minimum value (int type) */
int pvr2_ctrl_get_min(struct pvr2_ctrl *cptr)
{
int ret = 0;
if (!cptr) return 0;
LOCK_TAKE(cptr->hdw->big_lock); do {
if (cptr->info->get_min_value) {
cptr->info->get_min_value(cptr,&ret);
} else if (cptr->info->type == pvr2_ctl_int) {
ret = cptr->info->def.type_int.min_value;
}
} while(0); LOCK_GIVE(cptr->hdw->big_lock);
return ret;
}
/* Retrieve control's default value (any type) */
int pvr2_ctrl_get_def(struct pvr2_ctrl *cptr, int *valptr)
{
int ret = 0;
if (!cptr) return -EINVAL;
LOCK_TAKE(cptr->hdw->big_lock); do {
if (cptr->info->get_def_value) {
ret = cptr->info->get_def_value(cptr, valptr);
} else {
*valptr = cptr->info->default_value;
}
} while(0); LOCK_GIVE(cptr->hdw->big_lock);
return ret;
}
/* Retrieve control's enumeration count (enum only) */
int pvr2_ctrl_get_cnt(struct pvr2_ctrl *cptr)
{
int ret = 0;
if (!cptr) return 0;
LOCK_TAKE(cptr->hdw->big_lock); do {
if (cptr->info->type == pvr2_ctl_enum) {
ret = cptr->info->def.type_enum.count;
}
} while(0); LOCK_GIVE(cptr->hdw->big_lock);
return ret;
}
/* Retrieve control's valid mask bits (bit mask only) */
int pvr2_ctrl_get_mask(struct pvr2_ctrl *cptr)
{
int ret = 0;
if (!cptr) return 0;
LOCK_TAKE(cptr->hdw->big_lock); do {
if (cptr->info->type == pvr2_ctl_bitmask) {
ret = cptr->info->def.type_bitmask.valid_bits;
}
} while(0); LOCK_GIVE(cptr->hdw->big_lock);
return ret;
}
/* Retrieve the control's name */
const char *pvr2_ctrl_get_name(struct pvr2_ctrl *cptr)
{
if (!cptr) return NULL;
return cptr->info->name;
}
/* Retrieve the control's desc */
const char *pvr2_ctrl_get_desc(struct pvr2_ctrl *cptr)
{
if (!cptr) return NULL;
return cptr->info->desc;
}
/* Retrieve a control enumeration or bit mask value */
int pvr2_ctrl_get_valname(struct pvr2_ctrl *cptr,int val,
char *bptr,unsigned int bmax,
unsigned int *blen)
{
int ret = -EINVAL;
if (!cptr) return 0;
*blen = 0;
LOCK_TAKE(cptr->hdw->big_lock); do {
if (cptr->info->type == pvr2_ctl_enum) {
const char * const *names;
names = cptr->info->def.type_enum.value_names;
if (pvr2_ctrl_range_check(cptr,val) == 0) {
if (names[val]) {
*blen = scnprintf(
bptr,bmax,"%s",
names[val]);
} else {
*blen = 0;
}
ret = 0;
}
} else if (cptr->info->type == pvr2_ctl_bitmask) {
const char **names;
unsigned int idx;
int msk;
names = cptr->info->def.type_bitmask.bit_names;
val &= cptr->info->def.type_bitmask.valid_bits;
for (idx = 0, msk = 1; val; idx++, msk <<= 1) {
if (val & msk) {
*blen = scnprintf(bptr,bmax,"%s",
names[idx]);
ret = 0;
break;
}
}
}
} while(0); LOCK_GIVE(cptr->hdw->big_lock);
return ret;
}
/* Return V4L ID for this control or zero if none */
int pvr2_ctrl_get_v4lid(struct pvr2_ctrl *cptr)
{
if (!cptr) return 0;
return cptr->info->v4l_id;
}
unsigned int pvr2_ctrl_get_v4lflags(struct pvr2_ctrl *cptr)
{
unsigned int flags = 0;
if (cptr->info->get_v4lflags) {
flags = cptr->info->get_v4lflags(cptr);
}
if (cptr->info->set_value) {
flags &= ~V4L2_CTRL_FLAG_READ_ONLY;
} else {
flags |= V4L2_CTRL_FLAG_READ_ONLY;
}
return flags;
}
/* Return true if control is writable */
int pvr2_ctrl_is_writable(struct pvr2_ctrl *cptr)
{
if (!cptr) return 0;
return cptr->info->set_value != NULL;
}
/* Return true if control has custom symbolic representation */
int pvr2_ctrl_has_custom_symbols(struct pvr2_ctrl *cptr)
{
if (!cptr) return 0;
if (!cptr->info->val_to_sym) return 0;
if (!cptr->info->sym_to_val) return 0;
return !0;
}
/* Convert a given mask/val to a custom symbolic value */
int pvr2_ctrl_custom_value_to_sym(struct pvr2_ctrl *cptr,
int mask,int val,
char *buf,unsigned int maxlen,
unsigned int *len)
{
if (!cptr) return -EINVAL;
if (!cptr->info->val_to_sym) return -EINVAL;
return cptr->info->val_to_sym(cptr,mask,val,buf,maxlen,len);
}
/* Convert a symbolic value to a mask/value pair */
int pvr2_ctrl_custom_sym_to_value(struct pvr2_ctrl *cptr,
const char *buf,unsigned int len,
int *maskptr,int *valptr)
{
if (!cptr) return -EINVAL;
if (!cptr->info->sym_to_val) return -EINVAL;
return cptr->info->sym_to_val(cptr,buf,len,maskptr,valptr);
}
static unsigned int gen_bitmask_string(int msk,int val,int msk_only,
const char **names,
char *ptr,unsigned int len)
{
unsigned int idx;
long sm,um;
int spcFl;
unsigned int uc,cnt;
const char *idStr;
spcFl = 0;
uc = 0;
um = 0;
for (idx = 0, sm = 1; msk; idx++, sm <<= 1) {
if (sm & msk) {
msk &= ~sm;
idStr = names[idx];
if (idStr) {
cnt = scnprintf(ptr,len,"%s%s%s",
(spcFl ? " " : ""),
(msk_only ? "" :
((val & sm) ? "+" : "-")),
idStr);
ptr += cnt; len -= cnt; uc += cnt;
spcFl = !0;
} else {
um |= sm;
}
}
}
if (um) {
if (msk_only) {
cnt = scnprintf(ptr,len,"%s0x%lx",
(spcFl ? " " : ""),
um);
ptr += cnt; len -= cnt; uc += cnt;
spcFl = !0;
} else if (um & val) {
cnt = scnprintf(ptr,len,"%s+0x%lx",
(spcFl ? " " : ""),
um & val);
ptr += cnt; len -= cnt; uc += cnt;
spcFl = !0;
} else if (um & ~val) {
cnt = scnprintf(ptr,len,"%s+0x%lx",
(spcFl ? " " : ""),
um & ~val);
ptr += cnt; len -= cnt; uc += cnt;
spcFl = !0;
}
}
return uc;
}
static const char *boolNames[] = {
"false",
"true",
"no",
"yes",
};
static int parse_token(const char *ptr,unsigned int len,
int *valptr,
const char * const *names, unsigned int namecnt)
{
char buf[33];
unsigned int slen;
unsigned int idx;
int negfl;
char *p2;
*valptr = 0;
if (!names) namecnt = 0;
for (idx = 0; idx < namecnt; idx++) {
if (!names[idx]) continue;
slen = strlen(names[idx]);
if (slen != len) continue;
if (memcmp(names[idx],ptr,slen)) continue;
*valptr = idx;
return 0;
}
negfl = 0;
if ((*ptr == '-') || (*ptr == '+')) {
negfl = (*ptr == '-');
ptr++; len--;
}
if (len >= sizeof(buf)) return -EINVAL;
memcpy(buf,ptr,len);
buf[len] = 0;
*valptr = simple_strtol(buf,&p2,0);
if (negfl) *valptr = -(*valptr);
if (*p2) return -EINVAL;
return 1;
}
static int parse_mtoken(const char *ptr,unsigned int len,
int *valptr,
const char **names,int valid_bits)
{
char buf[33];
unsigned int slen;
unsigned int idx;
char *p2;
int msk;
*valptr = 0;
for (idx = 0, msk = 1; valid_bits; idx++, msk <<= 1) {
if (!(msk & valid_bits)) continue;
valid_bits &= ~msk;
if (!names[idx]) continue;
slen = strlen(names[idx]);
if (slen != len) continue;
if (memcmp(names[idx],ptr,slen)) continue;
*valptr = msk;
return 0;
}
if (len >= sizeof(buf)) return -EINVAL;
memcpy(buf,ptr,len);
buf[len] = 0;
*valptr = simple_strtol(buf,&p2,0);
if (*p2) return -EINVAL;
return 0;
}
static int parse_tlist(const char *ptr,unsigned int len,
int *maskptr,int *valptr,
const char **names,int valid_bits)
{
unsigned int cnt;
int mask,val,kv,mode,ret;
mask = 0;
val = 0;
ret = 0;
while (len) {
cnt = 0;
while ((cnt < len) &&
((ptr[cnt] <= 32) ||
(ptr[cnt] >= 127))) cnt++;
ptr += cnt;
len -= cnt;
mode = 0;
if ((*ptr == '-') || (*ptr == '+')) {
mode = (*ptr == '-') ? -1 : 1;
ptr++;
len--;
}
cnt = 0;
while (cnt < len) {
if (ptr[cnt] <= 32) break;
if (ptr[cnt] >= 127) break;
cnt++;
}
if (!cnt) break;
if (parse_mtoken(ptr,cnt,&kv,names,valid_bits)) {
ret = -EINVAL;
break;
}
ptr += cnt;
len -= cnt;
switch (mode) {
case 0:
mask = valid_bits;
val |= kv;
break;
case -1:
mask |= kv;
val &= ~kv;
break;
case 1:
mask |= kv;
val |= kv;
break;
default:
break;
}
}
*maskptr = mask;
*valptr = val;
return ret;
}
/* Convert a symbolic value to a mask/value pair */
int pvr2_ctrl_sym_to_value(struct pvr2_ctrl *cptr,
const char *ptr,unsigned int len,
int *maskptr,int *valptr)
{
int ret = -EINVAL;
unsigned int cnt;
*maskptr = 0;
*valptr = 0;
cnt = 0;
while ((cnt < len) && ((ptr[cnt] <= 32) || (ptr[cnt] >= 127))) cnt++;
len -= cnt; ptr += cnt;
cnt = 0;
while ((cnt < len) && ((ptr[len-(cnt+1)] <= 32) ||
(ptr[len-(cnt+1)] >= 127))) cnt++;
len -= cnt;
if (!len) return -EINVAL;
LOCK_TAKE(cptr->hdw->big_lock); do {
if (cptr->info->type == pvr2_ctl_int) {
ret = parse_token(ptr,len,valptr,NULL,0);
if (ret >= 0) {
ret = pvr2_ctrl_range_check(cptr,*valptr);
}
*maskptr = ~0;
} else if (cptr->info->type == pvr2_ctl_bool) {
ret = parse_token(ptr,len,valptr,boolNames,
ARRAY_SIZE(boolNames));
if (ret == 1) {
*valptr = *valptr ? !0 : 0;
} else if (ret == 0) {
*valptr = (*valptr & 1) ? !0 : 0;
}
*maskptr = 1;
} else if (cptr->info->type == pvr2_ctl_enum) {
ret = parse_token(
ptr,len,valptr,
cptr->info->def.type_enum.value_names,
cptr->info->def.type_enum.count);
if (ret >= 0) {
ret = pvr2_ctrl_range_check(cptr,*valptr);
}
*maskptr = ~0;
} else if (cptr->info->type == pvr2_ctl_bitmask) {
ret = parse_tlist(
ptr,len,maskptr,valptr,
cptr->info->def.type_bitmask.bit_names,
cptr->info->def.type_bitmask.valid_bits);
}
} while(0); LOCK_GIVE(cptr->hdw->big_lock);
return ret;
}
/* Convert a given mask/val to a symbolic value */
int pvr2_ctrl_value_to_sym_internal(struct pvr2_ctrl *cptr,
int mask,int val,
char *buf,unsigned int maxlen,
unsigned int *len)
{
int ret = -EINVAL;
*len = 0;
if (cptr->info->type == pvr2_ctl_int) {
*len = scnprintf(buf,maxlen,"%d",val);
ret = 0;
} else if (cptr->info->type == pvr2_ctl_bool) {
*len = scnprintf(buf,maxlen,"%s",val ? "true" : "false");
ret = 0;
} else if (cptr->info->type == pvr2_ctl_enum) {
const char * const *names;
names = cptr->info->def.type_enum.value_names;
if ((val >= 0) &&
(val < cptr->info->def.type_enum.count)) {
if (names[val]) {
*len = scnprintf(
buf,maxlen,"%s",
names[val]);
} else {
*len = 0;
}
ret = 0;
}
} else if (cptr->info->type == pvr2_ctl_bitmask) {
*len = gen_bitmask_string(
val & mask & cptr->info->def.type_bitmask.valid_bits,
~0,!0,
cptr->info->def.type_bitmask.bit_names,
buf,maxlen);
}
return ret;
}
/* Convert a given mask/val to a symbolic value */
int pvr2_ctrl_value_to_sym(struct pvr2_ctrl *cptr,
int mask,int val,
char *buf,unsigned int maxlen,
unsigned int *len)
{
int ret;
LOCK_TAKE(cptr->hdw->big_lock); do {
ret = pvr2_ctrl_value_to_sym_internal(cptr,mask,val,
buf,maxlen,len);
} while(0); LOCK_GIVE(cptr->hdw->big_lock);
return ret;
}
/*
Stuff for Emacs to see, in order to encourage consistent editing style:
*** Local Variables: ***
*** mode: c ***
*** fill-column: 75 ***
*** tab-width: 8 ***
*** c-basic-offset: 8 ***
*** End: ***
*/
| gpl-2.0 |
andreturket/kernel_asus_moorefield_stock | net/ipv4/netfilter/ipt_ah.c | 12972 | 2333 | /* Kernel module to match AH parameters. */
/* (C) 1999-2000 Yon Uriarte <yon@astaro.de>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/in.h>
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/ip.h>
#include <linux/netfilter_ipv4/ipt_ah.h>
#include <linux/netfilter/x_tables.h>
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Yon Uriarte <yon@astaro.de>");
MODULE_DESCRIPTION("Xtables: IPv4 IPsec-AH SPI match");
/* Returns true if the SPI is matched by the range, false otherwise */
static inline bool
spi_match(u_int32_t min, u_int32_t max, u_int32_t spi, bool invert)
{
bool r;
pr_debug("spi_match:%c 0x%x <= 0x%x <= 0x%x\n",
invert ? '!' : ' ', min, spi, max);
r = (spi >= min && spi <= max) ^ invert;
pr_debug(" result %s\n", r ? "PASS" : "FAILED");
return r;
}
static bool ah_mt(const struct sk_buff *skb, struct xt_action_param *par)
{
struct ip_auth_hdr _ahdr;
const struct ip_auth_hdr *ah;
const struct ipt_ah *ahinfo = par->matchinfo;
/* Must not be a fragment. */
if (par->fragoff != 0)
return false;
ah = skb_header_pointer(skb, par->thoff, sizeof(_ahdr), &_ahdr);
if (ah == NULL) {
/* We've been asked to examine this packet, and we
* can't. Hence, no choice but to drop.
*/
pr_debug("Dropping evil AH tinygram.\n");
par->hotdrop = true;
return false;
}
return spi_match(ahinfo->spis[0], ahinfo->spis[1],
ntohl(ah->spi),
!!(ahinfo->invflags & IPT_AH_INV_SPI));
}
static int ah_mt_check(const struct xt_mtchk_param *par)
{
const struct ipt_ah *ahinfo = par->matchinfo;
/* Must specify no unknown invflags */
if (ahinfo->invflags & ~IPT_AH_INV_MASK) {
pr_debug("unknown flags %X\n", ahinfo->invflags);
return -EINVAL;
}
return 0;
}
static struct xt_match ah_mt_reg __read_mostly = {
.name = "ah",
.family = NFPROTO_IPV4,
.match = ah_mt,
.matchsize = sizeof(struct ipt_ah),
.proto = IPPROTO_AH,
.checkentry = ah_mt_check,
.me = THIS_MODULE,
};
static int __init ah_mt_init(void)
{
return xt_register_match(&ah_mt_reg);
}
static void __exit ah_mt_exit(void)
{
xt_unregister_match(&ah_mt_reg);
}
module_init(ah_mt_init);
module_exit(ah_mt_exit);
| gpl-2.0 |
n3ocort3x/kernel_oppo_msm8974 | arch/sh/mm/cache-sh3.c | 12972 | 2611 | /*
* arch/sh/mm/cache-sh3.c
*
* Copyright (C) 1999, 2000 Niibe Yutaka
* Copyright (C) 2002 Paul Mundt
*
* Released under the terms of the GNU GPL v2.0.
*/
#include <linux/init.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/threads.h>
#include <asm/addrspace.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/cache.h>
#include <asm/io.h>
#include <asm/uaccess.h>
#include <asm/pgalloc.h>
#include <asm/mmu_context.h>
#include <asm/cacheflush.h>
/*
* Write back the dirty D-caches, but not invalidate them.
*
* Is this really worth it, or should we just alias this routine
* to __flush_purge_region too?
*
* START: Virtual Address (U0, P1, or P3)
* SIZE: Size of the region.
*/
static void sh3__flush_wback_region(void *start, int size)
{
unsigned long v, j;
unsigned long begin, end;
unsigned long flags;
begin = (unsigned long)start & ~(L1_CACHE_BYTES-1);
end = ((unsigned long)start + size + L1_CACHE_BYTES-1)
& ~(L1_CACHE_BYTES-1);
for (v = begin; v < end; v+=L1_CACHE_BYTES) {
unsigned long addrstart = CACHE_OC_ADDRESS_ARRAY;
for (j = 0; j < current_cpu_data.dcache.ways; j++) {
unsigned long data, addr, p;
p = __pa(v);
addr = addrstart | (v & current_cpu_data.dcache.entry_mask);
local_irq_save(flags);
data = __raw_readl(addr);
if ((data & CACHE_PHYSADDR_MASK) ==
(p & CACHE_PHYSADDR_MASK)) {
data &= ~SH_CACHE_UPDATED;
__raw_writel(data, addr);
local_irq_restore(flags);
break;
}
local_irq_restore(flags);
addrstart += current_cpu_data.dcache.way_incr;
}
}
}
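/*
 * Hedged usage sketch (not part of the original file): once sh3_cache_init()
 * below installs the routine above as __flush_wback_region(), a caller that
 * has filled a buffer destined for a DMA device could write the dirty lines
 * back (without invalidating them) like this. The function name is made up.
 */
static inline void example_writeback_buffer(void *buf, int len)
{
	__flush_wback_region(buf, len);
}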
/*
* Write back the dirty D-caches and invalidate them.
*
* START: Virtual Address (U0, P1, or P3)
* SIZE: Size of the region.
*/
static void sh3__flush_purge_region(void *start, int size)
{
unsigned long v;
unsigned long begin, end;
begin = (unsigned long)start & ~(L1_CACHE_BYTES-1);
end = ((unsigned long)start + size + L1_CACHE_BYTES-1)
& ~(L1_CACHE_BYTES-1);
for (v = begin; v < end; v+=L1_CACHE_BYTES) {
unsigned long data, addr;
data = (v & 0xfffffc00); /* _Virtual_ address, ~U, ~V */
addr = CACHE_OC_ADDRESS_ARRAY |
(v & current_cpu_data.dcache.entry_mask) | SH_CACHE_ASSOC;
__raw_writel(data, addr);
}
}
void __init sh3_cache_init(void)
{
__flush_wback_region = sh3__flush_wback_region;
__flush_purge_region = sh3__flush_purge_region;
/*
* No write back please
*
* Except I don't think there's any way to avoid the writeback.
* So we just alias it to sh3__flush_purge_region(). dwmw2.
*/
__flush_invalidate_region = sh3__flush_purge_region;
}
| gpl-2.0 |
KutuSystems/linux | arch/x86/kernel/sysfb_efi.c | 429 | 9167 | /*
* Generic System Framebuffers on x86
* Copyright (c) 2012-2013 David Herrmann <dh.herrmann@gmail.com>
*
* EFI Quirks Copyright (c) 2006 Edgar Hucek <gimli@dark-green.com>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
* Software Foundation; either version 2 of the License, or (at your option)
* any later version.
*/
/*
* EFI Quirks
* Several EFI systems do not correctly advertise their boot framebuffers.
* Hence, we use this static table of known broken machines and fix up the
 * information so framebuffer drivers can load correctly.
*/
#include <linux/dmi.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/pci.h>
#include <linux/screen_info.h>
#include <video/vga.h>
#include <asm/sysfb.h>
enum {
OVERRIDE_NONE = 0x0,
OVERRIDE_BASE = 0x1,
OVERRIDE_STRIDE = 0x2,
OVERRIDE_HEIGHT = 0x4,
OVERRIDE_WIDTH = 0x8,
};
struct efifb_dmi_info efifb_dmi_list[] = {
[M_I17] = { "i17", 0x80010000, 1472 * 4, 1440, 900, OVERRIDE_NONE },
[M_I20] = { "i20", 0x80010000, 1728 * 4, 1680, 1050, OVERRIDE_NONE }, /* guess */
[M_I20_SR] = { "imac7", 0x40010000, 1728 * 4, 1680, 1050, OVERRIDE_NONE },
[M_I24] = { "i24", 0x80010000, 2048 * 4, 1920, 1200, OVERRIDE_NONE }, /* guess */
[M_I24_8_1] = { "imac8", 0xc0060000, 2048 * 4, 1920, 1200, OVERRIDE_NONE },
[M_I24_10_1] = { "imac10", 0xc0010000, 2048 * 4, 1920, 1080, OVERRIDE_NONE },
[M_I27_11_1] = { "imac11", 0xc0010000, 2560 * 4, 2560, 1440, OVERRIDE_NONE },
[M_MINI]= { "mini", 0x80000000, 2048 * 4, 1024, 768, OVERRIDE_NONE },
[M_MINI_3_1] = { "mini31", 0x40010000, 1024 * 4, 1024, 768, OVERRIDE_NONE },
[M_MINI_4_1] = { "mini41", 0xc0010000, 2048 * 4, 1920, 1200, OVERRIDE_NONE },
[M_MB] = { "macbook", 0x80000000, 2048 * 4, 1280, 800, OVERRIDE_NONE },
[M_MB_5_1] = { "macbook51", 0x80010000, 2048 * 4, 1280, 800, OVERRIDE_NONE },
[M_MB_6_1] = { "macbook61", 0x80010000, 2048 * 4, 1280, 800, OVERRIDE_NONE },
[M_MB_7_1] = { "macbook71", 0x80010000, 2048 * 4, 1280, 800, OVERRIDE_NONE },
[M_MBA] = { "mba", 0x80000000, 2048 * 4, 1280, 800, OVERRIDE_NONE },
/* 11" Macbook Air 3,1 passes the wrong stride */
[M_MBA_3] = { "mba3", 0, 2048 * 4, 0, 0, OVERRIDE_STRIDE },
[M_MBP] = { "mbp", 0x80010000, 1472 * 4, 1440, 900, OVERRIDE_NONE },
[M_MBP_2] = { "mbp2", 0, 0, 0, 0, OVERRIDE_NONE }, /* placeholder */
[M_MBP_2_2] = { "mbp22", 0x80010000, 1472 * 4, 1440, 900, OVERRIDE_NONE },
[M_MBP_SR] = { "mbp3", 0x80030000, 2048 * 4, 1440, 900, OVERRIDE_NONE },
[M_MBP_4] = { "mbp4", 0xc0060000, 2048 * 4, 1920, 1200, OVERRIDE_NONE },
[M_MBP_5_1] = { "mbp51", 0xc0010000, 2048 * 4, 1440, 900, OVERRIDE_NONE },
[M_MBP_5_2] = { "mbp52", 0xc0010000, 2048 * 4, 1920, 1200, OVERRIDE_NONE },
[M_MBP_5_3] = { "mbp53", 0xd0010000, 2048 * 4, 1440, 900, OVERRIDE_NONE },
[M_MBP_6_1] = { "mbp61", 0x90030000, 2048 * 4, 1920, 1200, OVERRIDE_NONE },
[M_MBP_6_2] = { "mbp62", 0x90030000, 2048 * 4, 1680, 1050, OVERRIDE_NONE },
[M_MBP_7_1] = { "mbp71", 0xc0010000, 2048 * 4, 1280, 800, OVERRIDE_NONE },
[M_MBP_8_2] = { "mbp82", 0x90010000, 1472 * 4, 1440, 900, OVERRIDE_NONE },
[M_UNKNOWN] = { NULL, 0, 0, 0, 0, OVERRIDE_NONE }
};
void efifb_setup_from_dmi(struct screen_info *si, const char *opt)
{
int i;
for (i = 0; i < M_UNKNOWN; i++) {
if (efifb_dmi_list[i].base != 0 &&
!strcmp(opt, efifb_dmi_list[i].optname)) {
si->lfb_base = efifb_dmi_list[i].base;
si->lfb_linelength = efifb_dmi_list[i].stride;
si->lfb_width = efifb_dmi_list[i].width;
si->lfb_height = efifb_dmi_list[i].height;
}
}
}
#define choose_value(dmivalue, fwvalue, field, flags) ({ \
typeof(fwvalue) _ret_ = fwvalue; \
if ((flags) & (field)) \
_ret_ = dmivalue; \
else if ((fwvalue) == 0) \
_ret_ = dmivalue; \
_ret_; \
})
static int __init efifb_set_system(const struct dmi_system_id *id)
{
struct efifb_dmi_info *info = id->driver_data;
if (info->base == 0 && info->height == 0 && info->width == 0 &&
info->stride == 0)
return 0;
/* Trust the bootloader over the DMI tables */
if (screen_info.lfb_base == 0) {
#if defined(CONFIG_PCI)
struct pci_dev *dev = NULL;
int found_bar = 0;
#endif
if (info->base) {
screen_info.lfb_base = choose_value(info->base,
screen_info.lfb_base, OVERRIDE_BASE,
info->flags);
#if defined(CONFIG_PCI)
/* make sure that the address in the table is actually
* on a VGA device's PCI BAR */
for_each_pci_dev(dev) {
int i;
if ((dev->class >> 8) != PCI_CLASS_DISPLAY_VGA)
continue;
for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
resource_size_t start, end;
unsigned long flags;
flags = pci_resource_flags(dev, i);
if (!(flags & IORESOURCE_MEM))
continue;
if (flags & IORESOURCE_UNSET)
continue;
if (pci_resource_len(dev, i) == 0)
continue;
start = pci_resource_start(dev, i);
end = pci_resource_end(dev, i);
if (screen_info.lfb_base >= start &&
screen_info.lfb_base < end) {
found_bar = 1;
break;
}
}
}
if (!found_bar)
screen_info.lfb_base = 0;
#endif
}
}
if (screen_info.lfb_base) {
screen_info.lfb_linelength = choose_value(info->stride,
screen_info.lfb_linelength, OVERRIDE_STRIDE,
info->flags);
screen_info.lfb_width = choose_value(info->width,
screen_info.lfb_width, OVERRIDE_WIDTH,
info->flags);
screen_info.lfb_height = choose_value(info->height,
screen_info.lfb_height, OVERRIDE_HEIGHT,
info->flags);
if (screen_info.orig_video_isVGA == 0)
screen_info.orig_video_isVGA = VIDEO_TYPE_EFI;
} else {
screen_info.lfb_linelength = 0;
screen_info.lfb_width = 0;
screen_info.lfb_height = 0;
screen_info.orig_video_isVGA = 0;
return 0;
}
printk(KERN_INFO "efifb: dmi detected %s - framebuffer at 0x%08x "
"(%dx%d, stride %d)\n", id->ident,
screen_info.lfb_base, screen_info.lfb_width,
screen_info.lfb_height, screen_info.lfb_linelength);
return 1;
}
#define EFIFB_DMI_SYSTEM_ID(vendor, name, enumid) \
{ \
efifb_set_system, \
name, \
{ \
DMI_MATCH(DMI_BIOS_VENDOR, vendor), \
DMI_MATCH(DMI_PRODUCT_NAME, name) \
}, \
&efifb_dmi_list[enumid] \
}
static const struct dmi_system_id efifb_dmi_system_table[] __initconst = {
EFIFB_DMI_SYSTEM_ID("Apple Computer, Inc.", "iMac4,1", M_I17),
/* At least one of these two will be right; maybe both? */
EFIFB_DMI_SYSTEM_ID("Apple Computer, Inc.", "iMac5,1", M_I20),
EFIFB_DMI_SYSTEM_ID("Apple Inc.", "iMac5,1", M_I20),
/* At least one of these two will be right; maybe both? */
EFIFB_DMI_SYSTEM_ID("Apple Computer, Inc.", "iMac6,1", M_I24),
EFIFB_DMI_SYSTEM_ID("Apple Inc.", "iMac6,1", M_I24),
EFIFB_DMI_SYSTEM_ID("Apple Inc.", "iMac7,1", M_I20_SR),
EFIFB_DMI_SYSTEM_ID("Apple Inc.", "iMac8,1", M_I24_8_1),
EFIFB_DMI_SYSTEM_ID("Apple Inc.", "iMac10,1", M_I24_10_1),
EFIFB_DMI_SYSTEM_ID("Apple Inc.", "iMac11,1", M_I27_11_1),
EFIFB_DMI_SYSTEM_ID("Apple Computer, Inc.", "Macmini1,1", M_MINI),
EFIFB_DMI_SYSTEM_ID("Apple Inc.", "Macmini3,1", M_MINI_3_1),
EFIFB_DMI_SYSTEM_ID("Apple Inc.", "Macmini4,1", M_MINI_4_1),
EFIFB_DMI_SYSTEM_ID("Apple Computer, Inc.", "MacBook1,1", M_MB),
/* At least one of these two will be right; maybe both? */
EFIFB_DMI_SYSTEM_ID("Apple Computer, Inc.", "MacBook2,1", M_MB),
EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBook2,1", M_MB),
/* At least one of these two will be right; maybe both? */
EFIFB_DMI_SYSTEM_ID("Apple Computer, Inc.", "MacBook3,1", M_MB),
EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBook3,1", M_MB),
EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBook4,1", M_MB),
EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBook5,1", M_MB_5_1),
EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBook6,1", M_MB_6_1),
EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBook7,1", M_MB_7_1),
EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBookAir1,1", M_MBA),
EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBookAir3,1", M_MBA_3),
EFIFB_DMI_SYSTEM_ID("Apple Computer, Inc.", "MacBookPro1,1", M_MBP),
EFIFB_DMI_SYSTEM_ID("Apple Computer, Inc.", "MacBookPro2,1", M_MBP_2),
EFIFB_DMI_SYSTEM_ID("Apple Computer, Inc.", "MacBookPro2,2", M_MBP_2_2),
EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBookPro2,1", M_MBP_2),
EFIFB_DMI_SYSTEM_ID("Apple Computer, Inc.", "MacBookPro3,1", M_MBP_SR),
EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBookPro3,1", M_MBP_SR),
EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBookPro4,1", M_MBP_4),
EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBookPro5,1", M_MBP_5_1),
EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBookPro5,2", M_MBP_5_2),
EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBookPro5,3", M_MBP_5_3),
EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBookPro6,1", M_MBP_6_1),
EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBookPro6,2", M_MBP_6_2),
EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBookPro7,1", M_MBP_7_1),
EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBookPro8,2", M_MBP_8_2),
{},
};
__init void sysfb_apply_efi_quirks(void)
{
if (screen_info.orig_video_isVGA != VIDEO_TYPE_EFI ||
!(screen_info.capabilities & VIDEO_CAPABILITY_SKIP_QUIRKS))
dmi_check_system(efifb_dmi_system_table);
}
| gpl-2.0 |
austinkelleher/linux | net/ipv4/inetpeer.c | 941 | 16577 | /*
* INETPEER - A storage for permanent information about peers
*
* This source is covered by the GNU GPL, the same as all kernel sources.
*
* Authors: Andrey V. Savochkin <saw@msu.ru>
*/
#include <linux/module.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/random.h>
#include <linux/timer.h>
#include <linux/time.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/net.h>
#include <linux/workqueue.h>
#include <net/ip.h>
#include <net/inetpeer.h>
#include <net/secure_seq.h>
/*
* Theory of operations.
 * We keep one entry for each peer IP address. Each node contains long-living
* information about the peer which doesn't depend on routes.
*
* Nodes are removed only when reference counter goes to 0.
 * When that happens the node may be removed once a sufficient amount of
 * time has passed since its last use. The less-recently-used entry can
 * also be removed if the pool is overloaded, i.e. if the total number of
 * entries is greater than or equal to the threshold.
*
* Node pool is organised as an AVL tree.
* Such an implementation has been chosen not just for fun. It's a way to
* prevent easy and efficient DoS attacks by creating hash collisions. A huge
* amount of long living nodes in a single hash slot would significantly delay
* lookups performed with disabled BHs.
*
* Serialisation issues.
* 1. Nodes may appear in the tree only with the pool lock held.
* 2. Nodes may disappear from the tree only with the pool lock held
* AND reference count being 0.
* 3. Global variable peer_total is modified under the pool lock.
* 4. struct inet_peer fields modification:
* avl_left, avl_right, avl_parent, avl_height: pool lock
* refcnt: atomically against modifications on other CPU;
* usually under some other lock to prevent node disappearing
* daddr: unchangeable
*/
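/*
 * Hedged usage sketch (not part of the original file): a caller builds an
 * inetpeer_addr, looks up (or, with create != 0, allocates) a node and later
 * drops its reference with inet_putpeer(). The a4/family layout is assumed
 * from <net/inetpeer.h>; the function name is made up for illustration.
 */
static inline struct inet_peer *example_getpeer_v4(struct inet_peer_base *base,
	__be32 ip, int create)
{
	struct inetpeer_addr daddr;

	daddr.addr.a4 = ip;
	daddr.family = AF_INET;
	return inet_getpeer(base, &daddr, create); /* NULL if not found */
}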
static struct kmem_cache *peer_cachep __read_mostly;
static LIST_HEAD(gc_list);
static const int gc_delay = 60 * HZ;
static struct delayed_work gc_work;
static DEFINE_SPINLOCK(gc_lock);
#define node_height(x) x->avl_height
#define peer_avl_empty ((struct inet_peer *)&peer_fake_node)
#define peer_avl_empty_rcu ((struct inet_peer __rcu __force *)&peer_fake_node)
static const struct inet_peer peer_fake_node = {
.avl_left = peer_avl_empty_rcu,
.avl_right = peer_avl_empty_rcu,
.avl_height = 0
};
void inet_peer_base_init(struct inet_peer_base *bp)
{
bp->root = peer_avl_empty_rcu;
seqlock_init(&bp->lock);
bp->total = 0;
}
EXPORT_SYMBOL_GPL(inet_peer_base_init);
#define PEER_MAXDEPTH 40 /* sufficient for about 2^27 nodes */
/* Exported for sysctl_net_ipv4. */
int inet_peer_threshold __read_mostly = 65536 + 128; /* start to throw entries more
* aggressively at this stage */
int inet_peer_minttl __read_mostly = 120 * HZ; /* TTL under high load: 120 sec */
int inet_peer_maxttl __read_mostly = 10 * 60 * HZ; /* usual time to live: 10 min */
static void inetpeer_gc_worker(struct work_struct *work)
{
struct inet_peer *p, *n, *c;
struct list_head list;
spin_lock_bh(&gc_lock);
list_replace_init(&gc_list, &list);
spin_unlock_bh(&gc_lock);
if (list_empty(&list))
return;
list_for_each_entry_safe(p, n, &list, gc_list) {
if (need_resched())
cond_resched();
c = rcu_dereference_protected(p->avl_left, 1);
if (c != peer_avl_empty) {
list_add_tail(&c->gc_list, &list);
p->avl_left = peer_avl_empty_rcu;
}
c = rcu_dereference_protected(p->avl_right, 1);
if (c != peer_avl_empty) {
list_add_tail(&c->gc_list, &list);
p->avl_right = peer_avl_empty_rcu;
}
n = list_entry(p->gc_list.next, struct inet_peer, gc_list);
if (!atomic_read(&p->refcnt)) {
list_del(&p->gc_list);
kmem_cache_free(peer_cachep, p);
}
}
if (list_empty(&list))
return;
spin_lock_bh(&gc_lock);
list_splice(&list, &gc_list);
spin_unlock_bh(&gc_lock);
schedule_delayed_work(&gc_work, gc_delay);
}
/* Called from ip_output.c:ip_init */
void __init inet_initpeers(void)
{
struct sysinfo si;
/* Use the straight interface to information about memory. */
si_meminfo(&si);
/* The values below were suggested by Alexey Kuznetsov
* <kuznet@ms2.inr.ac.ru>. I don't have any opinion about the values
* myself. --SAW
*/
if (si.totalram <= (32768*1024)/PAGE_SIZE)
inet_peer_threshold >>= 1; /* max pool size about 1MB on IA32 */
if (si.totalram <= (16384*1024)/PAGE_SIZE)
inet_peer_threshold >>= 1; /* about 512KB */
if (si.totalram <= (8192*1024)/PAGE_SIZE)
inet_peer_threshold >>= 2; /* about 128KB */
peer_cachep = kmem_cache_create("inet_peer_cache",
sizeof(struct inet_peer),
0, SLAB_HWCACHE_ALIGN | SLAB_PANIC,
NULL);
INIT_DEFERRABLE_WORK(&gc_work, inetpeer_gc_worker);
}
static int addr_compare(const struct inetpeer_addr *a,
const struct inetpeer_addr *b)
{
int i, n = (a->family == AF_INET ? 1 : 4);
for (i = 0; i < n; i++) {
if (a->addr.a6[i] == b->addr.a6[i])
continue;
if ((__force u32)a->addr.a6[i] < (__force u32)b->addr.a6[i])
return -1;
return 1;
}
return 0;
}
#define rcu_deref_locked(X, BASE) \
rcu_dereference_protected(X, lockdep_is_held(&(BASE)->lock.lock))
/*
* Called with local BH disabled and the pool lock held.
*/
#define lookup(_daddr, _stack, _base) \
({ \
struct inet_peer *u; \
struct inet_peer __rcu **v; \
\
stackptr = _stack; \
*stackptr++ = &_base->root; \
for (u = rcu_deref_locked(_base->root, _base); \
u != peer_avl_empty;) { \
int cmp = addr_compare(_daddr, &u->daddr); \
if (cmp == 0) \
break; \
if (cmp == -1) \
v = &u->avl_left; \
else \
v = &u->avl_right; \
*stackptr++ = v; \
u = rcu_deref_locked(*v, _base); \
} \
u; \
})
/*
* Called with rcu_read_lock()
 * Because we hold no lock against a writer, it's quite possible we could
 * fall into an endless loop.
* But every pointer we follow is guaranteed to be valid thanks to RCU.
* We exit from this function if number of links exceeds PEER_MAXDEPTH
*/
static struct inet_peer *lookup_rcu(const struct inetpeer_addr *daddr,
struct inet_peer_base *base)
{
struct inet_peer *u = rcu_dereference(base->root);
int count = 0;
while (u != peer_avl_empty) {
int cmp = addr_compare(daddr, &u->daddr);
if (cmp == 0) {
/* Before taking a reference, check if this entry was
* deleted (refcnt=-1)
*/
if (!atomic_add_unless(&u->refcnt, 1, -1))
u = NULL;
return u;
}
if (cmp == -1)
u = rcu_dereference(u->avl_left);
else
u = rcu_dereference(u->avl_right);
if (unlikely(++count == PEER_MAXDEPTH))
break;
}
return NULL;
}
/* Called with local BH disabled and the pool lock held. */
#define lookup_rightempty(start, base) \
({ \
struct inet_peer *u; \
struct inet_peer __rcu **v; \
*stackptr++ = &start->avl_left; \
v = &start->avl_left; \
for (u = rcu_deref_locked(*v, base); \
u->avl_right != peer_avl_empty_rcu;) { \
v = &u->avl_right; \
*stackptr++ = v; \
u = rcu_deref_locked(*v, base); \
} \
u; \
})
/* Called with local BH disabled and the pool lock held.
* Variable names are the proof of operation correctness.
 * Look into mm/map_avl.c for a more detailed description of the ideas.
*/
static void peer_avl_rebalance(struct inet_peer __rcu **stack[],
struct inet_peer __rcu ***stackend,
struct inet_peer_base *base)
{
struct inet_peer __rcu **nodep;
struct inet_peer *node, *l, *r;
int lh, rh;
while (stackend > stack) {
nodep = *--stackend;
node = rcu_deref_locked(*nodep, base);
l = rcu_deref_locked(node->avl_left, base);
r = rcu_deref_locked(node->avl_right, base);
lh = node_height(l);
rh = node_height(r);
if (lh > rh + 1) { /* l: RH+2 */
struct inet_peer *ll, *lr, *lrl, *lrr;
int lrh;
ll = rcu_deref_locked(l->avl_left, base);
lr = rcu_deref_locked(l->avl_right, base);
lrh = node_height(lr);
if (lrh <= node_height(ll)) { /* ll: RH+1 */
RCU_INIT_POINTER(node->avl_left, lr); /* lr: RH or RH+1 */
RCU_INIT_POINTER(node->avl_right, r); /* r: RH */
node->avl_height = lrh + 1; /* RH+1 or RH+2 */
RCU_INIT_POINTER(l->avl_left, ll); /* ll: RH+1 */
RCU_INIT_POINTER(l->avl_right, node); /* node: RH+1 or RH+2 */
l->avl_height = node->avl_height + 1;
RCU_INIT_POINTER(*nodep, l);
} else { /* ll: RH, lr: RH+1 */
lrl = rcu_deref_locked(lr->avl_left, base);/* lrl: RH or RH-1 */
lrr = rcu_deref_locked(lr->avl_right, base);/* lrr: RH or RH-1 */
RCU_INIT_POINTER(node->avl_left, lrr); /* lrr: RH or RH-1 */
RCU_INIT_POINTER(node->avl_right, r); /* r: RH */
node->avl_height = rh + 1; /* node: RH+1 */
RCU_INIT_POINTER(l->avl_left, ll); /* ll: RH */
RCU_INIT_POINTER(l->avl_right, lrl); /* lrl: RH or RH-1 */
l->avl_height = rh + 1; /* l: RH+1 */
RCU_INIT_POINTER(lr->avl_left, l); /* l: RH+1 */
RCU_INIT_POINTER(lr->avl_right, node); /* node: RH+1 */
lr->avl_height = rh + 2;
RCU_INIT_POINTER(*nodep, lr);
}
} else if (rh > lh + 1) { /* r: LH+2 */
struct inet_peer *rr, *rl, *rlr, *rll;
int rlh;
rr = rcu_deref_locked(r->avl_right, base);
rl = rcu_deref_locked(r->avl_left, base);
rlh = node_height(rl);
if (rlh <= node_height(rr)) { /* rr: LH+1 */
RCU_INIT_POINTER(node->avl_right, rl); /* rl: LH or LH+1 */
RCU_INIT_POINTER(node->avl_left, l); /* l: LH */
node->avl_height = rlh + 1; /* LH+1 or LH+2 */
RCU_INIT_POINTER(r->avl_right, rr); /* rr: LH+1 */
RCU_INIT_POINTER(r->avl_left, node); /* node: LH+1 or LH+2 */
r->avl_height = node->avl_height + 1;
RCU_INIT_POINTER(*nodep, r);
} else { /* rr: RH, rl: RH+1 */
rlr = rcu_deref_locked(rl->avl_right, base);/* rlr: LH or LH-1 */
rll = rcu_deref_locked(rl->avl_left, base);/* rll: LH or LH-1 */
RCU_INIT_POINTER(node->avl_right, rll); /* rll: LH or LH-1 */
RCU_INIT_POINTER(node->avl_left, l); /* l: LH */
node->avl_height = lh + 1; /* node: LH+1 */
RCU_INIT_POINTER(r->avl_right, rr); /* rr: LH */
RCU_INIT_POINTER(r->avl_left, rlr); /* rlr: LH or LH-1 */
r->avl_height = lh + 1; /* r: LH+1 */
RCU_INIT_POINTER(rl->avl_right, r); /* r: LH+1 */
RCU_INIT_POINTER(rl->avl_left, node); /* node: LH+1 */
rl->avl_height = lh + 2;
RCU_INIT_POINTER(*nodep, rl);
}
} else {
node->avl_height = (lh > rh ? lh : rh) + 1;
}
}
}
/* Called with local BH disabled and the pool lock held. */
#define link_to_pool(n, base) \
do { \
n->avl_height = 1; \
n->avl_left = peer_avl_empty_rcu; \
n->avl_right = peer_avl_empty_rcu; \
/* lockless readers can catch us now */ \
rcu_assign_pointer(**--stackptr, n); \
peer_avl_rebalance(stack, stackptr, base); \
} while (0)
static void inetpeer_free_rcu(struct rcu_head *head)
{
kmem_cache_free(peer_cachep, container_of(head, struct inet_peer, rcu));
}
static void unlink_from_pool(struct inet_peer *p, struct inet_peer_base *base,
struct inet_peer __rcu **stack[PEER_MAXDEPTH])
{
struct inet_peer __rcu ***stackptr, ***delp;
if (lookup(&p->daddr, stack, base) != p)
BUG();
delp = stackptr - 1; /* *delp[0] == p */
if (p->avl_left == peer_avl_empty_rcu) {
*delp[0] = p->avl_right;
--stackptr;
} else {
/* look for a node to insert instead of p */
struct inet_peer *t;
t = lookup_rightempty(p, base);
BUG_ON(rcu_deref_locked(*stackptr[-1], base) != t);
**--stackptr = t->avl_left;
/* t is removed, t->daddr > x->daddr for any
* x in p->avl_left subtree.
* Put t in the old place of p. */
RCU_INIT_POINTER(*delp[0], t);
t->avl_left = p->avl_left;
t->avl_right = p->avl_right;
t->avl_height = p->avl_height;
BUG_ON(delp[1] != &p->avl_left);
delp[1] = &t->avl_left; /* was &p->avl_left */
}
peer_avl_rebalance(stack, stackptr, base);
base->total--;
call_rcu(&p->rcu, inetpeer_free_rcu);
}
/* perform garbage collect on all items stacked during a lookup */
static int inet_peer_gc(struct inet_peer_base *base,
struct inet_peer __rcu **stack[PEER_MAXDEPTH],
struct inet_peer __rcu ***stackptr)
{
struct inet_peer *p, *gchead = NULL;
__u32 delta, ttl;
int cnt = 0;
if (base->total >= inet_peer_threshold)
ttl = 0; /* be aggressive */
else
ttl = inet_peer_maxttl
- (inet_peer_maxttl - inet_peer_minttl) / HZ *
base->total / inet_peer_threshold * HZ;
stackptr--; /* last stack slot is peer_avl_empty */
while (stackptr > stack) {
stackptr--;
p = rcu_deref_locked(**stackptr, base);
if (atomic_read(&p->refcnt) == 0) {
smp_rmb();
delta = (__u32)jiffies - p->dtime;
if (delta >= ttl &&
atomic_cmpxchg(&p->refcnt, 0, -1) == 0) {
p->gc_next = gchead;
gchead = p;
}
}
}
while ((p = gchead) != NULL) {
gchead = p->gc_next;
cnt++;
unlink_from_pool(p, base, stack);
}
return cnt;
}
struct inet_peer *inet_getpeer(struct inet_peer_base *base,
const struct inetpeer_addr *daddr,
int create)
{
struct inet_peer __rcu **stack[PEER_MAXDEPTH], ***stackptr;
struct inet_peer *p;
unsigned int sequence;
int invalidated, gccnt = 0;
/* Attempt a lockless lookup first.
* Because of a concurrent writer, we might not find an existing entry.
*/
rcu_read_lock();
sequence = read_seqbegin(&base->lock);
p = lookup_rcu(daddr, base);
invalidated = read_seqretry(&base->lock, sequence);
rcu_read_unlock();
if (p)
return p;
/* If no writer did a change during our lookup, we can return early. */
if (!create && !invalidated)
return NULL;
/* retry an exact lookup, this time taking the lock first.
* At least, nodes should be hot in our cache.
*/
write_seqlock_bh(&base->lock);
relookup:
p = lookup(daddr, stack, base);
if (p != peer_avl_empty) {
atomic_inc(&p->refcnt);
write_sequnlock_bh(&base->lock);
return p;
}
if (!gccnt) {
gccnt = inet_peer_gc(base, stack, stackptr);
if (gccnt && create)
goto relookup;
}
p = create ? kmem_cache_alloc(peer_cachep, GFP_ATOMIC) : NULL;
if (p) {
p->daddr = *daddr;
atomic_set(&p->refcnt, 1);
atomic_set(&p->rid, 0);
p->metrics[RTAX_LOCK-1] = INETPEER_METRICS_NEW;
p->rate_tokens = 0;
/* 60*HZ is arbitrary, but chosen high enough so that the first
* calculation of tokens is at its maximum.
*/
p->rate_last = jiffies - 60*HZ;
INIT_LIST_HEAD(&p->gc_list);
/* Link the node. */
link_to_pool(p, base);
base->total++;
}
write_sequnlock_bh(&base->lock);
return p;
}
EXPORT_SYMBOL_GPL(inet_getpeer);
void inet_putpeer(struct inet_peer *p)
{
p->dtime = (__u32)jiffies;
smp_mb__before_atomic();
atomic_dec(&p->refcnt);
}
EXPORT_SYMBOL_GPL(inet_putpeer);
/*
* Check transmit rate limitation for given message.
* The rate information is held in the inet_peer entries now.
* This function is generic and could be used for other purposes
* too. It uses a Token bucket filter as suggested by Alexey Kuznetsov.
*
* Note that the same inet_peer fields are modified by functions in
* route.c too, but these work for packet destinations while xrlim_allow
* works for icmp destinations. This means the rate limiting information
* for one "ip object" is shared - and these ICMPs are twice limited:
* by source and by destination.
*
* RFC 1812: 4.3.2.8 SHOULD be able to limit error message rate
* SHOULD allow setting of rate limits
*
* Shared between ICMPv4 and ICMPv6.
*/
#define XRLIM_BURST_FACTOR 6
bool inet_peer_xrlim_allow(struct inet_peer *peer, int timeout)
{
unsigned long now, token;
bool rc = false;
if (!peer)
return true;
token = peer->rate_tokens;
now = jiffies;
token += now - peer->rate_last;
peer->rate_last = now;
if (token > XRLIM_BURST_FACTOR * timeout)
token = XRLIM_BURST_FACTOR * timeout;
if (token >= timeout) {
token -= timeout;
rc = true;
}
peer->rate_tokens = token;
return rc;
}
EXPORT_SYMBOL(inet_peer_xrlim_allow);
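/*
 * Hedged usage sketch (not part of the original file): how an ICMP-style
 * sender might consult the token bucket above, allowing a burst of up to
 * XRLIM_BURST_FACTOR messages and then roughly one message per second.
 * The function name is made up for illustration.
 */
static inline bool example_may_send_error(struct inet_peer *peer)
{
	/* a NULL peer is handled by inet_peer_xrlim_allow() (always allowed) */
	return inet_peer_xrlim_allow(peer, 1 * HZ);
}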
static void inetpeer_inval_rcu(struct rcu_head *head)
{
struct inet_peer *p = container_of(head, struct inet_peer, gc_rcu);
spin_lock_bh(&gc_lock);
list_add_tail(&p->gc_list, &gc_list);
spin_unlock_bh(&gc_lock);
schedule_delayed_work(&gc_work, gc_delay);
}
void inetpeer_invalidate_tree(struct inet_peer_base *base)
{
struct inet_peer *root;
write_seqlock_bh(&base->lock);
root = rcu_deref_locked(base->root, base);
if (root != peer_avl_empty) {
base->root = peer_avl_empty_rcu;
base->total = 0;
call_rcu(&root->gc_rcu, inetpeer_inval_rcu);
}
write_sequnlock_bh(&base->lock);
}
EXPORT_SYMBOL(inetpeer_invalidate_tree);
| gpl-2.0 |
wujiku/EVO-3D- | drivers/atm/lanai.c | 941 | 83839 | /* lanai.c -- Copyright 1999-2003 by Mitchell Blank Jr <mitch@sfgoth.com>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*
* This driver supports ATM cards based on the Efficient "Lanai"
* chipset such as the Speedstream 3010 and the ENI-25p. The
* Speedstream 3060 is currently not supported since we don't
* have the code to drive the on-board Alcatel DSL chipset (yet).
*
* Thanks to Efficient for supporting this project with hardware,
* documentation, and by answering my questions.
*
* Things not working yet:
*
* o We don't support the Speedstream 3060 yet - this card has
* an on-board DSL modem chip by Alcatel and the driver will
* need some extra code added to handle it
*
* o Note that due to limitations of the Lanai only one VCC can be
* in CBR at once
*
* o We don't currently parse the EEPROM at all. The code is all
* there as per the spec, but it doesn't actually work. I think
* there may be some issues with the docs. Anyway, do NOT
* enable it yet - bugs in that code may actually damage your
 * hardware! Because of this you should hardwire an ESI before
* trying to use this in a LANE or MPOA environment.
*
* o AAL0 is stubbed in but the actual rx/tx path isn't written yet:
* vcc_tx_aal0() needs to send or queue a SKB
* vcc_tx_unqueue_aal0() needs to attempt to send queued SKBs
* vcc_rx_aal0() needs to handle AAL0 interrupts
* This isn't too much work - I just wanted to get other things
* done first.
*
* o lanai_change_qos() isn't written yet
*
* o There aren't any ioctl's yet -- I'd like to eventually support
* setting loopback and LED modes that way.
*
* o If the segmentation engine or DMA gets shut down we should restart
* card as per section 17.0i. (see lanai_reset)
*
* o setsockopt(SO_CIRANGE) isn't done (although despite what the
* API says it isn't exactly commonly implemented)
*/
/* Version history:
* v.1.00 -- 26-JUL-2003 -- PCI/DMA updates
* v.0.02 -- 11-JAN-2000 -- Endian fixes
* v.0.01 -- 30-NOV-1999 -- Initial release
*/
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/atmdev.h>
#include <asm/io.h>
#include <asm/byteorder.h>
#include <linux/spinlock.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
/* -------------------- TUNABLE PARAMETERS: */
/*
* Maximum number of VCIs per card. Setting it lower could theoretically
* save some memory, but since we allocate our vcc list with get_free_pages,
* it's not really likely for most architectures
*/
#define NUM_VCI (1024)
/*
* Enable extra debugging
*/
#define DEBUG
/*
* Debug _all_ register operations with card, except the memory test.
* Also disables the timed poll to prevent extra chattiness. This
* isn't for normal use
*/
#undef DEBUG_RW
/*
* The programming guide specifies a full test of the on-board SRAM
* at initialization time. Undefine to remove this
*/
#define FULL_MEMORY_TEST
/*
* This is the number of (4 byte) service entries that we will
* try to allocate at startup. Note that we will end up with
* one PAGE_SIZE's worth regardless of what this is set to
*/
#define SERVICE_ENTRIES (1024)
/* TODO: make above a module load-time option */
/*
* We normally read the onboard EEPROM in order to discover our MAC
* address. Undefine to _not_ do this
*/
/* #define READ_EEPROM */ /* ***DONT ENABLE YET*** */
/* TODO: make above a module load-time option (also) */
/*
* Depth of TX fifo (in 128 byte units; range 2-31)
* Smaller numbers are better for network latency
* Larger numbers are better for PCI latency
 * I'm not really sure where the best tradeoff is, but the BSD driver uses
* 7 and it seems to work ok.
*/
#define TX_FIFO_DEPTH (7)
/* TODO: make above a module load-time option */
/*
* How often (in jiffies) we will try to unstick stuck connections -
* shouldn't need to happen much
*/
#define LANAI_POLL_PERIOD (10*HZ)
/* TODO: make above a module load-time option */
/*
* When allocating an AAL5 receiving buffer, try to make it at least
* large enough to hold this many max_sdu sized PDUs
*/
#define AAL5_RX_MULTIPLIER (3)
/* TODO: make above a module load-time option */
/*
* Same for transmitting buffer
*/
#define AAL5_TX_MULTIPLIER (3)
/* TODO: make above a module load-time option */
/*
 * When allocating an AAL0 transmitting buffer, how many cells should fit.
* Remember we'll end up with a PAGE_SIZE of them anyway, so this isn't
* really critical
*/
#define AAL0_TX_MULTIPLIER (40)
/* TODO: make above a module load-time option */
/*
 * How large should we make the AAL0 receiving buffer? Remember that this
* is shared between all AAL0 VC's
*/
#define AAL0_RX_BUFFER_SIZE (PAGE_SIZE)
/* TODO: make above a module load-time option */
/*
* Should we use Lanai's "powerdown" feature when no vcc's are bound?
*/
/* #define USE_POWERDOWN */
/* TODO: make above a module load-time option (also) */
/* -------------------- DEBUGGING AIDS: */
#define DEV_LABEL "lanai"
#ifdef DEBUG
#define DPRINTK(format, args...) \
printk(KERN_DEBUG DEV_LABEL ": " format, ##args)
#define APRINTK(truth, format, args...) \
do { \
if (unlikely(!(truth))) \
printk(KERN_ERR DEV_LABEL ": " format, ##args); \
} while (0)
#else /* !DEBUG */
#define DPRINTK(format, args...)
#define APRINTK(truth, format, args...)
#endif /* DEBUG */
#ifdef DEBUG_RW
#define RWDEBUG(format, args...) \
printk(KERN_DEBUG DEV_LABEL ": " format, ##args)
#else /* !DEBUG_RW */
#define RWDEBUG(format, args...)
#endif
/* -------------------- DATA DEFINITIONS: */
#define LANAI_MAPPING_SIZE (0x40000)
#define LANAI_EEPROM_SIZE (128)
typedef int vci_t;
typedef void __iomem *bus_addr_t;
/* DMA buffer in host memory for TX, RX, or service list. */
struct lanai_buffer {
u32 *start; /* From get_free_pages */
u32 *end; /* One past last byte */
u32 *ptr; /* Pointer to current host location */
dma_addr_t dmaaddr;
};
struct lanai_vcc_stats {
unsigned rx_nomem;
union {
struct {
unsigned rx_badlen;
unsigned service_trash;
unsigned service_stream;
unsigned service_rxcrc;
} aal5;
struct {
} aal0;
} x;
};
struct lanai_dev; /* Forward declaration */
/*
* This is the card-specific per-vcc data. Note that unlike some other
 * drivers there is NOT a 1-to-1 correspondence between these and
* atm_vcc's - each one of these represents an actual 2-way vcc, but
* an atm_vcc can be 1-way and share with a 1-way vcc in the other
* direction. To make it weirder, there can even be 0-way vccs
* bound to us, waiting to do a change_qos
*/
struct lanai_vcc {
bus_addr_t vbase; /* Base of VCC's registers */
struct lanai_vcc_stats stats;
int nref; /* # of atm_vcc's who reference us */
vci_t vci;
struct {
struct lanai_buffer buf;
struct atm_vcc *atmvcc; /* atm_vcc who is receiver */
} rx;
struct {
struct lanai_buffer buf;
struct atm_vcc *atmvcc; /* atm_vcc who is transmitter */
int endptr; /* last endptr from service entry */
struct sk_buff_head backlog;
void (*unqueue)(struct lanai_dev *, struct lanai_vcc *, int);
} tx;
};
enum lanai_type {
lanai2 = PCI_DEVICE_ID_EF_ATM_LANAI2,
lanaihb = PCI_DEVICE_ID_EF_ATM_LANAIHB
};
struct lanai_dev_stats {
unsigned ovfl_trash; /* # of cells dropped - buffer overflow */
unsigned vci_trash; /* # of cells dropped - closed vci */
unsigned hec_err; /* # of cells dropped - bad HEC */
unsigned atm_ovfl; /* # of cells dropped - rx fifo overflow */
unsigned pcierr_parity_detect;
unsigned pcierr_serr_set;
unsigned pcierr_master_abort;
unsigned pcierr_m_target_abort;
unsigned pcierr_s_target_abort;
unsigned pcierr_master_parity;
unsigned service_notx;
unsigned service_norx;
unsigned service_rxnotaal5;
unsigned dma_reenable;
unsigned card_reset;
};
struct lanai_dev {
bus_addr_t base;
struct lanai_dev_stats stats;
struct lanai_buffer service;
struct lanai_vcc **vccs;
#ifdef USE_POWERDOWN
int nbound; /* number of bound vccs */
#endif
enum lanai_type type;
vci_t num_vci; /* Currently just NUM_VCI */
u8 eeprom[LANAI_EEPROM_SIZE];
u32 serialno, magicno;
struct pci_dev *pci;
DECLARE_BITMAP(backlog_vccs, NUM_VCI); /* VCCs with tx backlog */
DECLARE_BITMAP(transmit_ready, NUM_VCI); /* VCCs with transmit space */
struct timer_list timer;
int naal0;
struct lanai_buffer aal0buf; /* AAL0 RX buffers */
u32 conf1, conf2; /* CONFIG[12] registers */
u32 status; /* STATUS register */
spinlock_t endtxlock;
spinlock_t servicelock;
struct atm_vcc *cbrvcc;
int number;
int board_rev;
/* TODO - look at race conditions with maintenance of conf1/conf2 */
/* TODO - transmit locking: should we use _irq not _irqsave? */
/* TODO - organize above in some rational fashion (see <asm/cache.h>) */
};
/*
 * Each device has two bitmaps with one bit per VCC (backlog_vccs and
 * transmit_ready). This function iterates over one of these, calling a
 * given function for each vci whose bit is set
*/
static void vci_bitfield_iterate(struct lanai_dev *lanai,
const unsigned long *lp,
void (*func)(struct lanai_dev *,vci_t vci))
{
vci_t vci;
for_each_set_bit(vci, lp, NUM_VCI)
func(lanai, vci);
}
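/*
 * Hedged usage sketch (not part of the original driver): walk every VCC that
 * currently has a transmit backlog and log it. The two function names below
 * are made up for illustration.
 */
static void example_log_backlogged_vci(struct lanai_dev *lanai, vci_t vci)
{
	DPRINTK("itf %d: vci %d still has a tx backlog\n",
	    lanai->number, (int) vci);
}

static void example_walk_backlog(struct lanai_dev *lanai)
{
	vci_bitfield_iterate(lanai, lanai->backlog_vccs,
	    example_log_backlogged_vci);
}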
/* -------------------- BUFFER UTILITIES: */
/*
* Lanai needs DMA buffers aligned to 256 bytes of at least 1024 bytes -
* usually any page allocation will do. Just to be safe in case
* PAGE_SIZE is insanely tiny, though...
*/
#define LANAI_PAGE_SIZE ((PAGE_SIZE >= 1024) ? PAGE_SIZE : 1024)
/*
* Allocate a buffer in host RAM for service list, RX, or TX
* Returns buf->start==NULL if no memory
 * Note that the size will be rounded up to 2^n bytes, and
* if we can't allocate that we'll settle for something smaller
* until minbytes
*/
static void lanai_buf_allocate(struct lanai_buffer *buf,
size_t bytes, size_t minbytes, struct pci_dev *pci)
{
int size;
if (bytes > (128 * 1024)) /* max lanai buffer size */
bytes = 128 * 1024;
for (size = LANAI_PAGE_SIZE; size < bytes; size *= 2)
;
if (minbytes < LANAI_PAGE_SIZE)
minbytes = LANAI_PAGE_SIZE;
do {
/*
* Technically we could use non-consistent mappings for
* everything, but the way the lanai uses DMA memory would
* make that a terrific pain. This is much simpler.
*/
buf->start = pci_alloc_consistent(pci, size, &buf->dmaaddr);
if (buf->start != NULL) { /* Success */
/* Lanai requires 256-byte alignment of DMA bufs */
APRINTK((buf->dmaaddr & ~0xFFFFFF00) == 0,
"bad dmaaddr: 0x%lx\n",
(unsigned long) buf->dmaaddr);
buf->ptr = buf->start;
buf->end = (u32 *)
(&((unsigned char *) buf->start)[size]);
memset(buf->start, 0, size);
break;
}
size /= 2;
} while (size >= minbytes);
}
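/*
 * Hedged usage sketch (not part of the original driver): ask for room for
 * SERVICE_ENTRIES four-byte service entries, accepting anything down to one
 * LANAI_PAGE_SIZE if memory is tight; buf->start is left NULL on failure.
 * The function name is made up for illustration.
 */
static int example_alloc_service(struct lanai_dev *lanai)
{
	lanai_buf_allocate(&lanai->service, SERVICE_ENTRIES * 4,
	    LANAI_PAGE_SIZE, lanai->pci);
	return (lanai->service.start == NULL) ? -ENOMEM : 0;
}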
/* size of buffer in bytes */
static inline size_t lanai_buf_size(const struct lanai_buffer *buf)
{
return ((unsigned long) buf->end) - ((unsigned long) buf->start);
}
static void lanai_buf_deallocate(struct lanai_buffer *buf,
struct pci_dev *pci)
{
if (buf->start != NULL) {
pci_free_consistent(pci, lanai_buf_size(buf),
buf->start, buf->dmaaddr);
buf->start = buf->end = buf->ptr = NULL;
}
}
/* size of buffer as "card order" (0=1k .. 7=128k) */
static int lanai_buf_size_cardorder(const struct lanai_buffer *buf)
{
int order = get_order(lanai_buf_size(buf)) + (PAGE_SHIFT - 10);
/* This can only happen if PAGE_SIZE is gigantic, but just in case */
if (order > 7)
order = 7;
return order;
}
/* -------------------- PORT I/O UTILITIES: */
/* Registers (and their bit-fields) */
enum lanai_register {
Reset_Reg = 0x00, /* Reset; read for chip type; bits: */
#define RESET_GET_BOARD_REV(x) (((x)>> 0)&0x03) /* Board revision */
#define RESET_GET_BOARD_ID(x) (((x)>> 2)&0x03) /* Board ID */
#define BOARD_ID_LANAI256 (0) /* 25.6M adapter card */
Endian_Reg = 0x04, /* Endian setting */
IntStatus_Reg = 0x08, /* Interrupt status */
IntStatusMasked_Reg = 0x0C, /* Interrupt status (masked) */
IntAck_Reg = 0x10, /* Interrupt acknowledge */
IntAckMasked_Reg = 0x14, /* Interrupt acknowledge (masked) */
IntStatusSet_Reg = 0x18, /* Get status + enable/disable */
IntStatusSetMasked_Reg = 0x1C, /* Get status + en/di (masked) */
IntControlEna_Reg = 0x20, /* Interrupt control enable */
IntControlDis_Reg = 0x24, /* Interrupt control disable */
Status_Reg = 0x28, /* Status */
#define STATUS_PROMDATA (0x00000001) /* PROM_DATA pin */
#define STATUS_WAITING (0x00000002) /* Interrupt being delayed */
#define STATUS_SOOL (0x00000004) /* SOOL alarm */
#define STATUS_LOCD (0x00000008) /* LOCD alarm */
#define STATUS_LED (0x00000010) /* LED (HAPPI) output */
#define STATUS_GPIN (0x00000020) /* GPIN pin */
#define STATUS_BUTTBUSY (0x00000040) /* Butt register is pending */
Config1_Reg = 0x2C, /* Config word 1; bits: */
#define CONFIG1_PROMDATA (0x00000001) /* PROM_DATA pin */
#define CONFIG1_PROMCLK (0x00000002) /* PROM_CLK pin */
#define CONFIG1_SET_READMODE(x) ((x)*0x004) /* PCI BM reads; values: */
#define READMODE_PLAIN (0) /* Plain memory read */
#define READMODE_LINE (2) /* Memory read line */
#define READMODE_MULTIPLE (3) /* Memory read multiple */
#define CONFIG1_DMA_ENABLE (0x00000010) /* Turn on DMA */
#define CONFIG1_POWERDOWN (0x00000020) /* Turn off clocks */
#define CONFIG1_SET_LOOPMODE(x) ((x)*0x080) /* Clock&loop mode; values: */
#define LOOPMODE_NORMAL (0) /* Normal - no loop */
#define LOOPMODE_TIME (1)
#define LOOPMODE_DIAG (2)
#define LOOPMODE_LINE (3)
#define CONFIG1_MASK_LOOPMODE (0x00000180)
#define CONFIG1_SET_LEDMODE(x) ((x)*0x0200) /* Mode of LED; values: */
#define LEDMODE_NOT_SOOL (0) /* !SOOL */
#define LEDMODE_OFF (1) /* 0 */
#define LEDMODE_ON (2) /* 1 */
#define LEDMODE_NOT_LOCD (3) /* !LOCD */
#define LEDMORE_GPIN (4) /* GPIN */
#define LEDMODE_NOT_GPIN (7) /* !GPIN */
#define CONFIG1_MASK_LEDMODE (0x00000E00)
#define CONFIG1_GPOUT1 (0x00001000) /* Toggle for reset */
#define CONFIG1_GPOUT2 (0x00002000) /* Loopback PHY */
#define CONFIG1_GPOUT3 (0x00004000) /* Loopback lanai */
Config2_Reg = 0x30, /* Config word 2; bits: */
#define CONFIG2_HOWMANY (0x00000001) /* >512 VCIs? */
#define CONFIG2_PTI7_MODE (0x00000002) /* Make PTI=7 RM, not OAM */
#define CONFIG2_VPI_CHK_DIS (0x00000004) /* Ignore RX VPI value */
#define CONFIG2_HEC_DROP (0x00000008) /* Drop cells w/ HEC errors */
#define CONFIG2_VCI0_NORMAL (0x00000010) /* Treat VCI=0 normally */
#define CONFIG2_CBR_ENABLE (0x00000020) /* Deal with CBR traffic */
#define CONFIG2_TRASH_ALL (0x00000040) /* Trashing incoming cells */
#define CONFIG2_TX_DISABLE (0x00000080) /* Trashing outgoing cells */
#define CONFIG2_SET_TRASH (0x00000100) /* Turn trashing on */
Statistics_Reg = 0x34, /* Statistics; bits: */
#define STATS_GET_FIFO_OVFL(x) (((x)>> 0)&0xFF) /* FIFO overflowed */
#define STATS_GET_HEC_ERR(x) (((x)>> 8)&0xFF) /* HEC was bad */
#define STATS_GET_BAD_VCI(x) (((x)>>16)&0xFF) /* VCI not open */
#define STATS_GET_BUF_OVFL(x) (((x)>>24)&0xFF) /* VCC buffer full */
ServiceStuff_Reg = 0x38, /* Service stuff; bits: */
#define SSTUFF_SET_SIZE(x) ((x)*0x20000000) /* size of service buffer */
#define SSTUFF_SET_ADDR(x) ((x)>>8) /* set address of buffer */
ServWrite_Reg = 0x3C, /* ServWrite Pointer */
ServRead_Reg = 0x40, /* ServRead Pointer */
TxDepth_Reg = 0x44, /* FIFO Transmit Depth */
Butt_Reg = 0x48, /* Butt register */
CBR_ICG_Reg = 0x50,
CBR_PTR_Reg = 0x54,
PingCount_Reg = 0x58, /* Ping count */
DMA_Addr_Reg = 0x5C /* DMA address */
};
static inline bus_addr_t reg_addr(const struct lanai_dev *lanai,
enum lanai_register reg)
{
return lanai->base + reg;
}
static inline u32 reg_read(const struct lanai_dev *lanai,
enum lanai_register reg)
{
u32 t;
t = readl(reg_addr(lanai, reg));
RWDEBUG("R [0x%08X] 0x%02X = 0x%08X\n", (unsigned int) lanai->base,
(int) reg, t);
return t;
}
static inline void reg_write(const struct lanai_dev *lanai, u32 val,
enum lanai_register reg)
{
RWDEBUG("W [0x%08X] 0x%02X < 0x%08X\n", (unsigned int) lanai->base,
(int) reg, val);
writel(val, reg_addr(lanai, reg));
}
static inline void conf1_write(const struct lanai_dev *lanai)
{
reg_write(lanai, lanai->conf1, Config1_Reg);
}
static inline void conf2_write(const struct lanai_dev *lanai)
{
reg_write(lanai, lanai->conf2, Config2_Reg);
}
/* Same as conf2_write(), but defers I/O if we're powered down */
static inline void conf2_write_if_powerup(const struct lanai_dev *lanai)
{
#ifdef USE_POWERDOWN
if (unlikely((lanai->conf1 & CONFIG1_POWERDOWN) != 0))
return;
#endif /* USE_POWERDOWN */
conf2_write(lanai);
}
static inline void reset_board(const struct lanai_dev *lanai)
{
DPRINTK("about to reset board\n");
reg_write(lanai, 0, Reset_Reg);
/*
* If we don't delay a little while here then we can end up
* leaving the card in a VERY weird state and lock up the
* PCI bus. This isn't documented anywhere but I've convinced
* myself after a lot of painful experimentation
*/
udelay(5);
}
/* -------------------- CARD SRAM UTILITIES: */
/* The SRAM is mapped into normal PCI memory space - the only catch is
 * that it is only 16 bits wide but must be accessed as 32-bit. The
* 16 high bits will be zero. We don't hide this, since they get
* programmed mostly like discrete registers anyway
*/
#define SRAM_START (0x20000)
#define SRAM_BYTES (0x20000) /* Again, half don't really exist */
static inline bus_addr_t sram_addr(const struct lanai_dev *lanai, int offset)
{
return lanai->base + SRAM_START + offset;
}
static inline u32 sram_read(const struct lanai_dev *lanai, int offset)
{
return readl(sram_addr(lanai, offset));
}
static inline void sram_write(const struct lanai_dev *lanai,
u32 val, int offset)
{
writel(val, sram_addr(lanai, offset));
}
static int __devinit sram_test_word(const struct lanai_dev *lanai,
int offset, u32 pattern)
{
u32 readback;
sram_write(lanai, pattern, offset);
readback = sram_read(lanai, offset);
if (likely(readback == pattern))
return 0;
printk(KERN_ERR DEV_LABEL
"(itf %d): SRAM word at %d bad: wrote 0x%X, read 0x%X\n",
lanai->number, offset,
(unsigned int) pattern, (unsigned int) readback);
return -EIO;
}
static int __devinit sram_test_pass(const struct lanai_dev *lanai, u32 pattern)
{
int offset, result = 0;
for (offset = 0; offset < SRAM_BYTES && result == 0; offset += 4)
result = sram_test_word(lanai, offset, pattern);
return result;
}
static int __devinit sram_test_and_clear(const struct lanai_dev *lanai)
{
#ifdef FULL_MEMORY_TEST
int result;
DPRINTK("testing SRAM\n");
if ((result = sram_test_pass(lanai, 0x5555)) != 0)
return result;
if ((result = sram_test_pass(lanai, 0xAAAA)) != 0)
return result;
#endif
DPRINTK("clearing SRAM\n");
return sram_test_pass(lanai, 0x0000);
}
/* -------------------- CARD-BASED VCC TABLE UTILITIES: */
/* vcc table */
enum lanai_vcc_offset {
vcc_rxaddr1 = 0x00, /* Location1, plus bits: */
#define RXADDR1_SET_SIZE(x) ((x)*0x0000100) /* size of RX buffer */
#define RXADDR1_SET_RMMODE(x) ((x)*0x00800) /* RM cell action; values: */
#define RMMODE_TRASH (0) /* discard */
#define RMMODE_PRESERVE (1) /* input as AAL0 */
#define RMMODE_PIPE (2) /* pipe to coscheduler */
#define RMMODE_PIPEALL (3) /* pipe non-RM too */
#define RXADDR1_OAM_PRESERVE (0x00002000) /* Input OAM cells as AAL0 */
#define RXADDR1_SET_MODE(x) ((x)*0x0004000) /* Reassembly mode */
#define RXMODE_TRASH (0) /* discard */
#define RXMODE_AAL0 (1) /* non-AAL5 mode */
#define RXMODE_AAL5 (2) /* AAL5, intr. each PDU */
#define RXMODE_AAL5_STREAM (3) /* AAL5 w/o per-PDU intr */
vcc_rxaddr2 = 0x04, /* Location2 */
	vcc_rxcrc1 = 0x08,		/* RX CRC calculation space */
vcc_rxcrc2 = 0x0C,
vcc_rxwriteptr = 0x10, /* RX writeptr, plus bits: */
#define RXWRITEPTR_LASTEFCI (0x00002000) /* Last PDU had EFCI bit */
#define RXWRITEPTR_DROPPING (0x00004000) /* Had error, dropping */
#define RXWRITEPTR_TRASHING (0x00008000) /* Trashing */
vcc_rxbufstart = 0x14, /* RX bufstart, plus bits: */
#define RXBUFSTART_CLP (0x00004000)
#define RXBUFSTART_CI (0x00008000)
vcc_rxreadptr = 0x18, /* RX readptr */
vcc_txicg = 0x1C, /* TX ICG */
vcc_txaddr1 = 0x20, /* Location1, plus bits: */
#define TXADDR1_SET_SIZE(x) ((x)*0x0000100) /* size of TX buffer */
#define TXADDR1_ABR (0x00008000) /* use ABR (doesn't work) */
vcc_txaddr2 = 0x24, /* Location2 */
	vcc_txcrc1 = 0x28,		/* TX CRC calculation space */
vcc_txcrc2 = 0x2C,
vcc_txreadptr = 0x30, /* TX Readptr, plus bits: */
#define TXREADPTR_GET_PTR(x) ((x)&0x01FFF)
#define TXREADPTR_MASK_DELTA (0x0000E000) /* ? */
vcc_txendptr = 0x34, /* TX Endptr, plus bits: */
#define TXENDPTR_CLP (0x00002000)
#define TXENDPTR_MASK_PDUMODE (0x0000C000) /* PDU mode; values: */
#define PDUMODE_AAL0 (0*0x04000)
#define PDUMODE_AAL5 (2*0x04000)
#define PDUMODE_AAL5STREAM (3*0x04000)
vcc_txwriteptr = 0x38, /* TX Writeptr */
#define TXWRITEPTR_GET_PTR(x) ((x)&0x1FFF)
vcc_txcbr_next = 0x3C /* # of next CBR VCI in ring */
#define TXCBR_NEXT_BOZO (0x00008000) /* "bozo bit" */
};
#define CARDVCC_SIZE (0x40)
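/*
 * Orientation note (inferred from cardvcc_addr() below, not from any
 * documentation): each open VCI gets one 0x40-byte table entry in card
 * SRAM, so VCI n's entry starts at SRAM offset n * 0x40.  With SRAM_BYTES
 * of 0x20000 that is address space for 0x20000 / 0x40 = 2048 entries.
 */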
static inline bus_addr_t cardvcc_addr(const struct lanai_dev *lanai,
vci_t vci)
{
return sram_addr(lanai, vci * CARDVCC_SIZE);
}
static inline u32 cardvcc_read(const struct lanai_vcc *lvcc,
enum lanai_vcc_offset offset)
{
u32 val;
APRINTK(lvcc->vbase != NULL, "cardvcc_read: unbound vcc!\n");
	val = readl(lvcc->vbase + offset);
RWDEBUG("VR vci=%04d 0x%02X = 0x%08X\n",
lvcc->vci, (int) offset, val);
return val;
}
static inline void cardvcc_write(const struct lanai_vcc *lvcc,
u32 val, enum lanai_vcc_offset offset)
{
APRINTK(lvcc->vbase != NULL, "cardvcc_write: unbound vcc!\n");
APRINTK((val & ~0xFFFF) == 0,
"cardvcc_write: bad val 0x%X (vci=%d, addr=0x%02X)\n",
(unsigned int) val, lvcc->vci, (unsigned int) offset);
RWDEBUG("VW vci=%04d 0x%02X > 0x%08X\n",
lvcc->vci, (unsigned int) offset, (unsigned int) val);
writel(val, lvcc->vbase + offset);
}
/* -------------------- COMPUTE SIZE OF AN AAL5 PDU: */
/* How many bytes will an AAL5 PDU take to transmit - remember that:
* o we need to add 8 bytes for length, CPI, UU, and CRC
* o we need to round up to 48 bytes for cells
*/
static inline int aal5_size(int size)
{
int cells = (size + 8 + 47) / 48;
return cells * 48;
}
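/*
 * Worked example of the rounding above (numbers only, for illustration):
 * a 40-byte SDU needs 40 + 8 = 48 bytes once the trailer is added, so
 * aal5_size(40) == 48 (one cell); a 41-byte SDU needs 49 bytes, which no
 * longer fits in one cell, so aal5_size(41) == 96 (two cells).
 */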
/* How many bytes can we send if we have "space" bytes available, assuming
 * we have to send full cells
 */
static inline int aal5_spacefor(int space)
{
int cells = space / 48;
return cells * 48;
}
/* -------------------- FREE AN ATM SKB: */
static inline void lanai_free_skb(struct atm_vcc *atmvcc, struct sk_buff *skb)
{
if (atmvcc->pop != NULL)
atmvcc->pop(atmvcc, skb);
else
dev_kfree_skb_any(skb);
}
/* -------------------- TURN VCCS ON AND OFF: */
static void host_vcc_start_rx(const struct lanai_vcc *lvcc)
{
u32 addr1;
if (lvcc->rx.atmvcc->qos.aal == ATM_AAL5) {
dma_addr_t dmaaddr = lvcc->rx.buf.dmaaddr;
cardvcc_write(lvcc, 0xFFFF, vcc_rxcrc1);
cardvcc_write(lvcc, 0xFFFF, vcc_rxcrc2);
cardvcc_write(lvcc, 0, vcc_rxwriteptr);
cardvcc_write(lvcc, 0, vcc_rxbufstart);
cardvcc_write(lvcc, 0, vcc_rxreadptr);
cardvcc_write(lvcc, (dmaaddr >> 16) & 0xFFFF, vcc_rxaddr2);
addr1 = ((dmaaddr >> 8) & 0xFF) |
RXADDR1_SET_SIZE(lanai_buf_size_cardorder(&lvcc->rx.buf))|
RXADDR1_SET_RMMODE(RMMODE_TRASH) | /* ??? */
/* RXADDR1_OAM_PRESERVE | --- no OAM support yet */
RXADDR1_SET_MODE(RXMODE_AAL5);
} else
addr1 = RXADDR1_SET_RMMODE(RMMODE_PRESERVE) | /* ??? */
RXADDR1_OAM_PRESERVE | /* ??? */
RXADDR1_SET_MODE(RXMODE_AAL0);
/* This one must be last! */
cardvcc_write(lvcc, addr1, vcc_rxaddr1);
}
static void host_vcc_start_tx(const struct lanai_vcc *lvcc)
{
dma_addr_t dmaaddr = lvcc->tx.buf.dmaaddr;
cardvcc_write(lvcc, 0, vcc_txicg);
cardvcc_write(lvcc, 0xFFFF, vcc_txcrc1);
cardvcc_write(lvcc, 0xFFFF, vcc_txcrc2);
cardvcc_write(lvcc, 0, vcc_txreadptr);
cardvcc_write(lvcc, 0, vcc_txendptr);
cardvcc_write(lvcc, 0, vcc_txwriteptr);
cardvcc_write(lvcc,
(lvcc->tx.atmvcc->qos.txtp.traffic_class == ATM_CBR) ?
TXCBR_NEXT_BOZO | lvcc->vci : 0, vcc_txcbr_next);
cardvcc_write(lvcc, (dmaaddr >> 16) & 0xFFFF, vcc_txaddr2);
cardvcc_write(lvcc,
((dmaaddr >> 8) & 0xFF) |
TXADDR1_SET_SIZE(lanai_buf_size_cardorder(&lvcc->tx.buf)),
vcc_txaddr1);
}
/* Shutdown receiving on card */
static void lanai_shutdown_rx_vci(const struct lanai_vcc *lvcc)
{
if (lvcc->vbase == NULL) /* We were never bound to a VCI */
return;
/* 15.1.1 - set to trashing, wait one cell time (15us) */
cardvcc_write(lvcc,
RXADDR1_SET_RMMODE(RMMODE_TRASH) |
RXADDR1_SET_MODE(RXMODE_TRASH), vcc_rxaddr1);
udelay(15);
/* 15.1.2 - clear rest of entries */
cardvcc_write(lvcc, 0, vcc_rxaddr2);
cardvcc_write(lvcc, 0, vcc_rxcrc1);
cardvcc_write(lvcc, 0, vcc_rxcrc2);
cardvcc_write(lvcc, 0, vcc_rxwriteptr);
cardvcc_write(lvcc, 0, vcc_rxbufstart);
cardvcc_write(lvcc, 0, vcc_rxreadptr);
}
/* Shutdown transmitting on card.
* Unfortunately the lanai needs us to wait until all the data
* drains out of the buffer before we can dealloc it, so this
 * can take a while -- up to 370ms for a full 128KB buffer
 * assuming everyone else is quiet. In theory the time is
* boundless if there's a CBR VCC holding things up.
*/
static void lanai_shutdown_tx_vci(struct lanai_dev *lanai,
struct lanai_vcc *lvcc)
{
struct sk_buff *skb;
unsigned long flags, timeout;
int read, write, lastread = -1;
APRINTK(!in_interrupt(),
"lanai_shutdown_tx_vci called w/o process context!\n");
if (lvcc->vbase == NULL) /* We were never bound to a VCI */
return;
/* 15.2.1 - wait for queue to drain */
while ((skb = skb_dequeue(&lvcc->tx.backlog)) != NULL)
lanai_free_skb(lvcc->tx.atmvcc, skb);
read_lock_irqsave(&vcc_sklist_lock, flags);
__clear_bit(lvcc->vci, lanai->backlog_vccs);
read_unlock_irqrestore(&vcc_sklist_lock, flags);
/*
* We need to wait for the VCC to drain but don't wait forever. We
* give each 1K of buffer size 1/128th of a second to clear out.
* TODO: maybe disable CBR if we're about to timeout?
*/
timeout = jiffies +
(((lanai_buf_size(&lvcc->tx.buf) / 1024) * HZ) >> 7);
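	/*
	 * For example: a full 128KB TX buffer gets (128 * HZ) >> 7 == HZ,
	 * i.e. a one-second initial timeout, extended by HZ/10 every time
	 * the card's read pointer makes progress.
	 */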
write = TXWRITEPTR_GET_PTR(cardvcc_read(lvcc, vcc_txwriteptr));
for (;;) {
read = TXREADPTR_GET_PTR(cardvcc_read(lvcc, vcc_txreadptr));
if (read == write && /* Is TX buffer empty? */
(lvcc->tx.atmvcc->qos.txtp.traffic_class != ATM_CBR ||
(cardvcc_read(lvcc, vcc_txcbr_next) &
TXCBR_NEXT_BOZO) == 0))
break;
if (read != lastread) { /* Has there been any progress? */
lastread = read;
timeout += HZ / 10;
}
if (unlikely(time_after(jiffies, timeout))) {
printk(KERN_ERR DEV_LABEL "(itf %d): Timed out on "
"backlog closing vci %d\n",
lvcc->tx.atmvcc->dev->number, lvcc->vci);
DPRINTK("read, write = %d, %d\n", read, write);
break;
}
msleep(40);
}
/* 15.2.2 - clear out all tx registers */
cardvcc_write(lvcc, 0, vcc_txreadptr);
cardvcc_write(lvcc, 0, vcc_txwriteptr);
cardvcc_write(lvcc, 0, vcc_txendptr);
cardvcc_write(lvcc, 0, vcc_txcrc1);
cardvcc_write(lvcc, 0, vcc_txcrc2);
cardvcc_write(lvcc, 0, vcc_txaddr2);
cardvcc_write(lvcc, 0, vcc_txaddr1);
}
/* -------------------- MANAGING AAL0 RX BUFFER: */
static inline int aal0_buffer_allocate(struct lanai_dev *lanai)
{
DPRINTK("aal0_buffer_allocate: allocating AAL0 RX buffer\n");
lanai_buf_allocate(&lanai->aal0buf, AAL0_RX_BUFFER_SIZE, 80,
lanai->pci);
return (lanai->aal0buf.start == NULL) ? -ENOMEM : 0;
}
static inline void aal0_buffer_free(struct lanai_dev *lanai)
{
	DPRINTK("aal0_buffer_free: freeing AAL0 RX buffer\n");
lanai_buf_deallocate(&lanai->aal0buf, lanai->pci);
}
/* -------------------- EEPROM UTILITIES: */
/* Offsets of data in the EEPROM */
#define EEPROM_COPYRIGHT (0)
#define EEPROM_COPYRIGHT_LEN (44)
#define EEPROM_CHECKSUM (62)
#define EEPROM_CHECKSUM_REV (63)
#define EEPROM_MAC (64)
#define EEPROM_MAC_REV (70)
#define EEPROM_SERIAL (112)
#define EEPROM_SERIAL_REV (116)
#define EEPROM_MAGIC (120)
#define EEPROM_MAGIC_REV (124)
#define EEPROM_MAGIC_VALUE (0x5AB478D2)
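/*
 * Layout note (inferred from eeprom_validate() below): each *_REV field
 * holds the bitwise inverse of its partner - byte 62 is the low 8 bits of
 * the sum of bytes 0-61 and byte 63 is its complement, and the MAC
 * address, serial number and magic number are each stored alongside their
 * complemented copies for the same sanity check.
 */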
#ifndef READ_EEPROM
/* Stub functions to use if EEPROM reading is disabled */
static int __devinit eeprom_read(struct lanai_dev *lanai)
{
printk(KERN_INFO DEV_LABEL "(itf %d): *NOT* reading EEPROM\n",
lanai->number);
memset(&lanai->eeprom[EEPROM_MAC], 0, 6);
return 0;
}
static int __devinit eeprom_validate(struct lanai_dev *lanai)
{
lanai->serialno = 0;
lanai->magicno = EEPROM_MAGIC_VALUE;
return 0;
}
#else /* READ_EEPROM */
static int __devinit eeprom_read(struct lanai_dev *lanai)
{
int i, address;
u8 data;
u32 tmp;
#define set_config1(x) do { lanai->conf1 = x; conf1_write(lanai); \
} while (0)
#define clock_h() set_config1(lanai->conf1 | CONFIG1_PROMCLK)
#define clock_l() set_config1(lanai->conf1 &~ CONFIG1_PROMCLK)
#define data_h() set_config1(lanai->conf1 | CONFIG1_PROMDATA)
#define data_l() set_config1(lanai->conf1 &~ CONFIG1_PROMDATA)
#define pre_read() do { data_h(); clock_h(); udelay(5); } while (0)
#define read_pin() (reg_read(lanai, Status_Reg) & STATUS_PROMDATA)
#define send_stop() do { data_l(); udelay(5); clock_h(); udelay(5); \
data_h(); udelay(5); } while (0)
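	/*
	 * The loop below bit-bangs what looks like an I2C-style read for
	 * each EEPROM byte (this description is inferred from the code, not
	 * from a datasheet): a start condition (data falls while clock is
	 * high), eight command bits sent MSB-first ((address << 1) | 1),
	 * a check that the device acks by pulling the data line low, eight
	 * data bits clocked in MSB-first, a check that the device does NOT
	 * ack after the data byte, and finally a stop condition.
	 */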
/* start with both clock and data high */
data_h(); clock_h(); udelay(5);
for (address = 0; address < LANAI_EEPROM_SIZE; address++) {
data = (address << 1) | 1; /* Command=read + address */
/* send start bit */
data_l(); udelay(5);
clock_l(); udelay(5);
for (i = 128; i != 0; i >>= 1) { /* write command out */
tmp = (lanai->conf1 & ~CONFIG1_PROMDATA) |
((data & i) ? CONFIG1_PROMDATA : 0);
if (lanai->conf1 != tmp) {
set_config1(tmp);
udelay(5); /* Let new data settle */
}
clock_h(); udelay(5); clock_l(); udelay(5);
}
/* look for ack */
data_h(); clock_h(); udelay(5);
if (read_pin() != 0)
goto error; /* No ack seen */
clock_l(); udelay(5);
/* read back result */
for (data = 0, i = 7; i >= 0; i--) {
data_h(); clock_h(); udelay(5);
data = (data << 1) | !!read_pin();
clock_l(); udelay(5);
}
/* look again for ack */
data_h(); clock_h(); udelay(5);
if (read_pin() == 0)
goto error; /* Spurious ack */
clock_l(); udelay(5);
send_stop();
lanai->eeprom[address] = data;
DPRINTK("EEPROM 0x%04X %02X\n",
(unsigned int) address, (unsigned int) data);
}
return 0;
error:
clock_l(); udelay(5); /* finish read */
send_stop();
printk(KERN_ERR DEV_LABEL "(itf %d): error reading EEPROM byte %d\n",
lanai->number, address);
return -EIO;
#undef set_config1
#undef clock_h
#undef clock_l
#undef data_h
#undef data_l
#undef pre_read
#undef read_pin
#undef send_stop
}
/* read a big-endian 4-byte value out of eeprom */
static inline u32 eeprom_be4(const struct lanai_dev *lanai, int address)
{
return be32_to_cpup((const u32 *) &lanai->eeprom[address]);
}
/* Checksum/validate EEPROM contents */
static int __devinit eeprom_validate(struct lanai_dev *lanai)
{
int i, s;
u32 v;
const u8 *e = lanai->eeprom;
#ifdef DEBUG
/* First, see if we can get an ASCIIZ string out of the copyright */
for (i = EEPROM_COPYRIGHT;
i < (EEPROM_COPYRIGHT + EEPROM_COPYRIGHT_LEN); i++)
if (e[i] < 0x20 || e[i] > 0x7E)
break;
	if (i != EEPROM_COPYRIGHT &&
i != EEPROM_COPYRIGHT + EEPROM_COPYRIGHT_LEN && e[i] == '\0')
DPRINTK("eeprom: copyright = \"%s\"\n",
(char *) &e[EEPROM_COPYRIGHT]);
else
DPRINTK("eeprom: copyright not found\n");
#endif
/* Validate checksum */
for (i = s = 0; i < EEPROM_CHECKSUM; i++)
s += e[i];
s &= 0xFF;
if (s != e[EEPROM_CHECKSUM]) {
printk(KERN_ERR DEV_LABEL "(itf %d): EEPROM checksum bad "
"(wanted 0x%02X, got 0x%02X)\n", lanai->number,
(unsigned int) s, (unsigned int) e[EEPROM_CHECKSUM]);
return -EIO;
}
s ^= 0xFF;
if (s != e[EEPROM_CHECKSUM_REV]) {
printk(KERN_ERR DEV_LABEL "(itf %d): EEPROM inverse checksum "
"bad (wanted 0x%02X, got 0x%02X)\n", lanai->number,
(unsigned int) s, (unsigned int) e[EEPROM_CHECKSUM_REV]);
return -EIO;
}
/* Verify MAC address */
for (i = 0; i < 6; i++)
if ((e[EEPROM_MAC + i] ^ e[EEPROM_MAC_REV + i]) != 0xFF) {
printk(KERN_ERR DEV_LABEL
"(itf %d) : EEPROM MAC addresses don't match "
"(0x%02X, inverse 0x%02X)\n", lanai->number,
(unsigned int) e[EEPROM_MAC + i],
(unsigned int) e[EEPROM_MAC_REV + i]);
return -EIO;
}
DPRINTK("eeprom: MAC address = %pM\n", &e[EEPROM_MAC]);
/* Verify serial number */
lanai->serialno = eeprom_be4(lanai, EEPROM_SERIAL);
v = eeprom_be4(lanai, EEPROM_SERIAL_REV);
if ((lanai->serialno ^ v) != 0xFFFFFFFF) {
printk(KERN_ERR DEV_LABEL "(itf %d): EEPROM serial numbers "
"don't match (0x%08X, inverse 0x%08X)\n", lanai->number,
(unsigned int) lanai->serialno, (unsigned int) v);
return -EIO;
}
DPRINTK("eeprom: Serial number = %d\n", (unsigned int) lanai->serialno);
/* Verify magic number */
lanai->magicno = eeprom_be4(lanai, EEPROM_MAGIC);
v = eeprom_be4(lanai, EEPROM_MAGIC_REV);
if ((lanai->magicno ^ v) != 0xFFFFFFFF) {
printk(KERN_ERR DEV_LABEL "(itf %d): EEPROM magic numbers "
"don't match (0x%08X, inverse 0x%08X)\n", lanai->number,
lanai->magicno, v);
return -EIO;
}
DPRINTK("eeprom: Magic number = 0x%08X\n", lanai->magicno);
if (lanai->magicno != EEPROM_MAGIC_VALUE)
printk(KERN_WARNING DEV_LABEL "(itf %d): warning - EEPROM "
"magic not what expected (got 0x%08X, not 0x%08X)\n",
lanai->number, (unsigned int) lanai->magicno,
(unsigned int) EEPROM_MAGIC_VALUE);
return 0;
}
#endif /* READ_EEPROM */
static inline const u8 *eeprom_mac(const struct lanai_dev *lanai)
{
return &lanai->eeprom[EEPROM_MAC];
}
/* -------------------- INTERRUPT HANDLING UTILITIES: */
/* Interrupt types */
#define INT_STATS (0x00000002) /* Statistics counter overflow */
#define INT_SOOL (0x00000004) /* SOOL changed state */
#define INT_LOCD (0x00000008) /* LOCD changed state */
#define INT_LED (0x00000010) /* LED (HAPPI) changed state */
#define INT_GPIN (0x00000020) /* GPIN changed state */
#define INT_PING (0x00000040) /* PING_COUNT fulfilled */
#define INT_WAKE (0x00000080) /* Lanai wants bus */
#define INT_CBR0 (0x00000100) /* CBR sched hit VCI 0 */
#define INT_LOCK (0x00000200) /* Service list overflow */
#define INT_MISMATCH (0x00000400) /* TX magic list mismatch */
#define INT_AAL0_STR (0x00000800) /* Non-AAL5 buffer half filled */
#define INT_AAL0 (0x00001000) /* Non-AAL5 data available */
#define INT_SERVICE (0x00002000) /* Service list entries available */
#define INT_TABORTSENT (0x00004000) /* Target abort sent by lanai */
#define INT_TABORTBM (0x00008000) /* Abort rcv'd as bus master */
#define INT_TIMEOUTBM (0x00010000) /* No response to bus master */
#define INT_PCIPARITY (0x00020000) /* Parity error on PCI */
/* Sets of the above */
#define INT_ALL (0x0003FFFE) /* All interrupts */
#define INT_STATUS (0x0000003C) /* Some status pin changed */
#define INT_DMASHUT (0x00038000) /* DMA engine got shut down */
#define INT_SEGSHUT (0x00000700) /* Segmentation got shut down */
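/*
 * For reference, the composite masks above just OR together bits already
 * defined: INT_STATUS is SOOL|LOCD|LED|GPIN, INT_DMASHUT is
 * TABORTBM|TIMEOUTBM|PCIPARITY, INT_SEGSHUT is CBR0|LOCK|MISMATCH, and
 * INT_ALL covers every interrupt bit from INT_STATS through INT_PCIPARITY.
 */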
static inline u32 intr_pending(const struct lanai_dev *lanai)
{
return reg_read(lanai, IntStatusMasked_Reg);
}
static inline void intr_enable(const struct lanai_dev *lanai, u32 i)
{
reg_write(lanai, i, IntControlEna_Reg);
}
static inline void intr_disable(const struct lanai_dev *lanai, u32 i)
{
reg_write(lanai, i, IntControlDis_Reg);
}
/* -------------------- CARD/PCI STATUS: */
static void status_message(int itf, const char *name, int status)
{
static const char *onoff[2] = { "off to on", "on to off" };
printk(KERN_INFO DEV_LABEL "(itf %d): %s changed from %s\n",
itf, name, onoff[!status]);
}
static void lanai_check_status(struct lanai_dev *lanai)
{
u32 new = reg_read(lanai, Status_Reg);
u32 changes = new ^ lanai->status;
lanai->status = new;
#define e(flag, name) \
if (changes & flag) \
status_message(lanai->number, name, new & flag)
e(STATUS_SOOL, "SOOL");
e(STATUS_LOCD, "LOCD");
e(STATUS_LED, "LED");
e(STATUS_GPIN, "GPIN");
#undef e
}
static void pcistatus_got(int itf, const char *name)
{
printk(KERN_INFO DEV_LABEL "(itf %d): PCI got %s error\n", itf, name);
}
static void pcistatus_check(struct lanai_dev *lanai, int clearonly)
{
u16 s;
int result;
result = pci_read_config_word(lanai->pci, PCI_STATUS, &s);
if (result != PCIBIOS_SUCCESSFUL) {
printk(KERN_ERR DEV_LABEL "(itf %d): can't read PCI_STATUS: "
"%d\n", lanai->number, result);
return;
}
s &= PCI_STATUS_DETECTED_PARITY | PCI_STATUS_SIG_SYSTEM_ERROR |
PCI_STATUS_REC_MASTER_ABORT | PCI_STATUS_REC_TARGET_ABORT |
PCI_STATUS_SIG_TARGET_ABORT | PCI_STATUS_PARITY;
if (s == 0)
return;
result = pci_write_config_word(lanai->pci, PCI_STATUS, s);
if (result != PCIBIOS_SUCCESSFUL)
printk(KERN_ERR DEV_LABEL "(itf %d): can't write PCI_STATUS: "
"%d\n", lanai->number, result);
if (clearonly)
return;
#define e(flag, name, stat) \
if (s & flag) { \
pcistatus_got(lanai->number, name); \
++lanai->stats.pcierr_##stat; \
}
e(PCI_STATUS_DETECTED_PARITY, "parity", parity_detect);
e(PCI_STATUS_SIG_SYSTEM_ERROR, "signalled system", serr_set);
e(PCI_STATUS_REC_MASTER_ABORT, "master", master_abort);
e(PCI_STATUS_REC_TARGET_ABORT, "master target", m_target_abort);
e(PCI_STATUS_SIG_TARGET_ABORT, "slave", s_target_abort);
e(PCI_STATUS_PARITY, "master parity", master_parity);
#undef e
}
/* -------------------- VCC TX BUFFER UTILITIES: */
/* space left in tx buffer in bytes */
static inline int vcc_tx_space(const struct lanai_vcc *lvcc, int endptr)
{
int r;
r = endptr * 16;
r -= ((unsigned long) lvcc->tx.buf.ptr) -
((unsigned long) lvcc->tx.buf.start);
r -= 16; /* Leave "bubble" - if start==end it looks empty */
if (r < 0)
r += lanai_buf_size(&lvcc->tx.buf);
return r;
}
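/*
 * Reading note for the function above (inferred from the code): "endptr"
 * is the card's TX read pointer in 16-byte units, so endptr * 16 minus the
 * bytes we have already written past the start of the buffer gives the
 * free space; the extra 16-byte "bubble" keeps a completely full buffer
 * from looking identical to an empty one, and a negative result just means
 * the free region wraps around the end of the circular buffer.
 */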
/* test if VCC is currently backlogged */
static inline int vcc_is_backlogged(const struct lanai_vcc *lvcc)
{
return !skb_queue_empty(&lvcc->tx.backlog);
}
/* Bit fields in the segmentation buffer descriptor */
#define DESCRIPTOR_MAGIC (0xD0000000)
#define DESCRIPTOR_AAL5 (0x00008000)
#define DESCRIPTOR_AAL5_STREAM (0x00004000)
#define DESCRIPTOR_CLP (0x00002000)
/* Add 32-bit descriptor with its padding */
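/*
 * As far as can be inferred from the code below: the descriptor is a single
 * 32-bit word at the start of a 16-byte slot (the pointer advances by four
 * words), carrying DESCRIPTOR_MAGIC, the AAL5/stream/CLP flag bits, and,
 * in its low 13 bits, the position where this PDU will end, expressed in
 * 16-byte units (pos >> 4).
 */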
static inline void vcc_tx_add_aal5_descriptor(struct lanai_vcc *lvcc,
u32 flags, int len)
{
int pos;
APRINTK((((unsigned long) lvcc->tx.buf.ptr) & 15) == 0,
"vcc_tx_add_aal5_descriptor: bad ptr=%p\n", lvcc->tx.buf.ptr);
lvcc->tx.buf.ptr += 4; /* Hope the values REALLY don't matter */
pos = ((unsigned char *) lvcc->tx.buf.ptr) -
(unsigned char *) lvcc->tx.buf.start;
APRINTK((pos & ~0x0001FFF0) == 0,
"vcc_tx_add_aal5_descriptor: bad pos (%d) before, vci=%d, "
"start,ptr,end=%p,%p,%p\n", pos, lvcc->vci,
lvcc->tx.buf.start, lvcc->tx.buf.ptr, lvcc->tx.buf.end);
pos = (pos + len) & (lanai_buf_size(&lvcc->tx.buf) - 1);
APRINTK((pos & ~0x0001FFF0) == 0,
"vcc_tx_add_aal5_descriptor: bad pos (%d) after, vci=%d, "
"start,ptr,end=%p,%p,%p\n", pos, lvcc->vci,
lvcc->tx.buf.start, lvcc->tx.buf.ptr, lvcc->tx.buf.end);
lvcc->tx.buf.ptr[-1] =
cpu_to_le32(DESCRIPTOR_MAGIC | DESCRIPTOR_AAL5 |
((lvcc->tx.atmvcc->atm_options & ATM_ATMOPT_CLP) ?
DESCRIPTOR_CLP : 0) | flags | pos >> 4);
if (lvcc->tx.buf.ptr >= lvcc->tx.buf.end)
lvcc->tx.buf.ptr = lvcc->tx.buf.start;
}
/* Add 32-bit AAL5 trailer and leave room for its CRC */
static inline void vcc_tx_add_aal5_trailer(struct lanai_vcc *lvcc,
int len, int cpi, int uu)
{
APRINTK((((unsigned long) lvcc->tx.buf.ptr) & 15) == 8,
"vcc_tx_add_aal5_trailer: bad ptr=%p\n", lvcc->tx.buf.ptr);
lvcc->tx.buf.ptr += 2;
lvcc->tx.buf.ptr[-2] = cpu_to_be32((uu << 24) | (cpi << 16) | len);
if (lvcc->tx.buf.ptr >= lvcc->tx.buf.end)
lvcc->tx.buf.ptr = lvcc->tx.buf.start;
}
static inline void vcc_tx_memcpy(struct lanai_vcc *lvcc,
const unsigned char *src, int n)
{
unsigned char *e;
int m;
e = ((unsigned char *) lvcc->tx.buf.ptr) + n;
m = e - (unsigned char *) lvcc->tx.buf.end;
if (m < 0)
m = 0;
memcpy(lvcc->tx.buf.ptr, src, n - m);
if (m != 0) {
memcpy(lvcc->tx.buf.start, src + n - m, m);
e = ((unsigned char *) lvcc->tx.buf.start) + m;
}
lvcc->tx.buf.ptr = (u32 *) e;
}
static inline void vcc_tx_memzero(struct lanai_vcc *lvcc, int n)
{
unsigned char *e;
int m;
if (n == 0)
return;
e = ((unsigned char *) lvcc->tx.buf.ptr) + n;
m = e - (unsigned char *) lvcc->tx.buf.end;
if (m < 0)
m = 0;
memset(lvcc->tx.buf.ptr, 0, n - m);
if (m != 0) {
memset(lvcc->tx.buf.start, 0, m);
e = ((unsigned char *) lvcc->tx.buf.start) + m;
}
lvcc->tx.buf.ptr = (u32 *) e;
}
/* Update "butt" register to specify new WritePtr */
static inline void lanai_endtx(struct lanai_dev *lanai,
const struct lanai_vcc *lvcc)
{
int i, ptr = ((unsigned char *) lvcc->tx.buf.ptr) -
(unsigned char *) lvcc->tx.buf.start;
APRINTK((ptr & ~0x0001FFF0) == 0,
"lanai_endtx: bad ptr (%d), vci=%d, start,ptr,end=%p,%p,%p\n",
ptr, lvcc->vci, lvcc->tx.buf.start, lvcc->tx.buf.ptr,
lvcc->tx.buf.end);
/*
	 * Since the "butt register" is a shared resource on the card we
	 * serialize all accesses to it through this spinlock. This is
	 * mostly just paranoia since the register is rarely "busy" anyway
* but is needed for correctness.
*/
spin_lock(&lanai->endtxlock);
/*
* We need to check if the "butt busy" bit is set before
* updating the butt register. In theory this should
* never happen because the ATM card is plenty fast at
* updating the register. Still, we should make sure
*/
for (i = 0; reg_read(lanai, Status_Reg) & STATUS_BUTTBUSY; i++) {
if (unlikely(i > 50)) {
printk(KERN_ERR DEV_LABEL "(itf %d): butt register "
"always busy!\n", lanai->number);
break;
}
udelay(5);
}
/*
	 * Before we tell the card to start work we need to be sure 100% of
	 * the data we just placed in the transmit buffer has actually been
	 * written out before the card sees the new write pointer
*/
wmb();
reg_write(lanai, (ptr << 12) | lvcc->vci, Butt_Reg);
spin_unlock(&lanai->endtxlock);
}
/*
 * Add one AAL5 PDU to lvcc's transmit buffer. Caller guarantees there's
* space available. "pdusize" is the number of bytes the PDU will take
*/
static void lanai_send_one_aal5(struct lanai_dev *lanai,
struct lanai_vcc *lvcc, struct sk_buff *skb, int pdusize)
{
int pad;
APRINTK(pdusize == aal5_size(skb->len),
"lanai_send_one_aal5: wrong size packet (%d != %d)\n",
pdusize, aal5_size(skb->len));
vcc_tx_add_aal5_descriptor(lvcc, 0, pdusize);
pad = pdusize - skb->len - 8;
APRINTK(pad >= 0, "pad is negative (%d)\n", pad);
APRINTK(pad < 48, "pad is too big (%d)\n", pad);
vcc_tx_memcpy(lvcc, skb->data, skb->len);
vcc_tx_memzero(lvcc, pad);
vcc_tx_add_aal5_trailer(lvcc, skb->len, 0, 0);
lanai_endtx(lanai, lvcc);
lanai_free_skb(lvcc->tx.atmvcc, skb);
atomic_inc(&lvcc->tx.atmvcc->stats->tx);
}
/* Try to fill the buffer - don't call unless there is backlog */
static void vcc_tx_unqueue_aal5(struct lanai_dev *lanai,
struct lanai_vcc *lvcc, int endptr)
{
int n;
struct sk_buff *skb;
int space = vcc_tx_space(lvcc, endptr);
APRINTK(vcc_is_backlogged(lvcc),
"vcc_tx_unqueue() called with empty backlog (vci=%d)\n",
lvcc->vci);
while (space >= 64) {
skb = skb_dequeue(&lvcc->tx.backlog);
if (skb == NULL)
goto no_backlog;
n = aal5_size(skb->len);
if (n + 16 > space) {
/* No room for this packet - put it back on queue */
skb_queue_head(&lvcc->tx.backlog, skb);
return;
}
lanai_send_one_aal5(lanai, lvcc, skb, n);
space -= n + 16;
}
if (!vcc_is_backlogged(lvcc)) {
no_backlog:
__clear_bit(lvcc->vci, lanai->backlog_vccs);
}
}
/* Given an skb that we want to transmit either send it now or queue */
static void vcc_tx_aal5(struct lanai_dev *lanai, struct lanai_vcc *lvcc,
struct sk_buff *skb)
{
int space, n;
if (vcc_is_backlogged(lvcc)) /* Already backlogged */
goto queue_it;
space = vcc_tx_space(lvcc,
TXREADPTR_GET_PTR(cardvcc_read(lvcc, vcc_txreadptr)));
n = aal5_size(skb->len);
APRINTK(n + 16 >= 64, "vcc_tx_aal5: n too small (%d)\n", n);
if (space < n + 16) { /* No space for this PDU */
__set_bit(lvcc->vci, lanai->backlog_vccs);
queue_it:
skb_queue_tail(&lvcc->tx.backlog, skb);
return;
}
lanai_send_one_aal5(lanai, lvcc, skb, n);
}
static void vcc_tx_unqueue_aal0(struct lanai_dev *lanai,
struct lanai_vcc *lvcc, int endptr)
{
printk(KERN_INFO DEV_LABEL
": vcc_tx_unqueue_aal0: not implemented\n");
}
static void vcc_tx_aal0(struct lanai_dev *lanai, struct lanai_vcc *lvcc,
struct sk_buff *skb)
{
printk(KERN_INFO DEV_LABEL ": vcc_tx_aal0: not implemented\n");
/* Remember to increment lvcc->tx.atmvcc->stats->tx */
lanai_free_skb(lvcc->tx.atmvcc, skb);
}
/* -------------------- VCC RX BUFFER UTILITIES: */
/* unlike the _tx_ cousins, this doesn't update ptr */
static inline void vcc_rx_memcpy(unsigned char *dest,
const struct lanai_vcc *lvcc, int n)
{
int m = ((const unsigned char *) lvcc->rx.buf.ptr) + n -
((const unsigned char *) (lvcc->rx.buf.end));
if (m < 0)
m = 0;
memcpy(dest, lvcc->rx.buf.ptr, n - m);
memcpy(dest + n - m, lvcc->rx.buf.start, m);
/* Make sure that these copies don't get reordered */
barrier();
}
/* Receive AAL5 data on a VCC with a particular endptr */
static void vcc_rx_aal5(struct lanai_vcc *lvcc, int endptr)
{
int size;
struct sk_buff *skb;
const u32 *x;
u32 *end = &lvcc->rx.buf.start[endptr * 4];
int n = ((unsigned long) end) - ((unsigned long) lvcc->rx.buf.ptr);
if (n < 0)
n += lanai_buf_size(&lvcc->rx.buf);
APRINTK(n >= 0 && n < lanai_buf_size(&lvcc->rx.buf) && !(n & 15),
"vcc_rx_aal5: n out of range (%d/%Zu)\n",
n, lanai_buf_size(&lvcc->rx.buf));
/* Recover the second-to-last word to get true pdu length */
if ((x = &end[-2]) < lvcc->rx.buf.start)
x = &lvcc->rx.buf.end[-2];
/*
* Before we actually read from the buffer, make sure the memory
* changes have arrived
*/
rmb();
size = be32_to_cpup(x) & 0xffff;
if (unlikely(n != aal5_size(size))) {
/* Make sure size matches padding */
printk(KERN_INFO DEV_LABEL "(itf %d): Got bad AAL5 length "
"on vci=%d - size=%d n=%d\n",
lvcc->rx.atmvcc->dev->number, lvcc->vci, size, n);
lvcc->stats.x.aal5.rx_badlen++;
goto out;
}
skb = atm_alloc_charge(lvcc->rx.atmvcc, size, GFP_ATOMIC);
if (unlikely(skb == NULL)) {
lvcc->stats.rx_nomem++;
goto out;
}
skb_put(skb, size);
vcc_rx_memcpy(skb->data, lvcc, size);
ATM_SKB(skb)->vcc = lvcc->rx.atmvcc;
__net_timestamp(skb);
lvcc->rx.atmvcc->push(lvcc->rx.atmvcc, skb);
atomic_inc(&lvcc->rx.atmvcc->stats->rx);
out:
lvcc->rx.buf.ptr = end;
cardvcc_write(lvcc, endptr, vcc_rxreadptr);
}
static void vcc_rx_aal0(struct lanai_dev *lanai)
{
printk(KERN_INFO DEV_LABEL ": vcc_rx_aal0: not implemented\n");
/* Remember to get read_lock(&vcc_sklist_lock) while looking up VC */
/* Remember to increment lvcc->rx.atmvcc->stats->rx */
}
/* -------------------- MANAGING HOST-BASED VCC TABLE: */
/* Decide whether to use vmalloc or get_zeroed_page for VCC table */
#if (NUM_VCI * BITS_PER_LONG) <= PAGE_SIZE
#define VCCTABLE_GETFREEPAGE
#else
#include <linux/vmalloc.h>
#endif
static int __devinit vcc_table_allocate(struct lanai_dev *lanai)
{
#ifdef VCCTABLE_GETFREEPAGE
APRINTK((lanai->num_vci) * sizeof(struct lanai_vcc *) <= PAGE_SIZE,
"vcc table > PAGE_SIZE!");
lanai->vccs = (struct lanai_vcc **) get_zeroed_page(GFP_KERNEL);
return (lanai->vccs == NULL) ? -ENOMEM : 0;
#else
int bytes = (lanai->num_vci) * sizeof(struct lanai_vcc *);
lanai->vccs = (struct lanai_vcc **) vmalloc(bytes);
if (unlikely(lanai->vccs == NULL))
return -ENOMEM;
memset(lanai->vccs, 0, bytes);
return 0;
#endif
}
static inline void vcc_table_deallocate(const struct lanai_dev *lanai)
{
#ifdef VCCTABLE_GETFREEPAGE
free_page((unsigned long) lanai->vccs);
#else
vfree(lanai->vccs);
#endif
}
/* Allocate a fresh lanai_vcc, with the appropriate things cleared */
static inline struct lanai_vcc *new_lanai_vcc(void)
{
struct lanai_vcc *lvcc;
lvcc = kzalloc(sizeof(*lvcc), GFP_KERNEL);
if (likely(lvcc != NULL)) {
skb_queue_head_init(&lvcc->tx.backlog);
#ifdef DEBUG
lvcc->vci = -1;
#endif
}
return lvcc;
}
static int lanai_get_sized_buffer(struct lanai_dev *lanai,
struct lanai_buffer *buf, int max_sdu, int multiplier,
const char *name)
{
int size;
if (unlikely(max_sdu < 1))
max_sdu = 1;
max_sdu = aal5_size(max_sdu);
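	/*
	 * Sizing sketch (an inference, not from the docs): each queued PDU
	 * can use up to aal5_size(max_sdu) bytes plus a 16-byte descriptor
	 * slot, and the trailing +16 presumably accounts for the "bubble"
	 * that vcc_tx_space() always leaves free.
	 */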
size = (max_sdu + 16) * multiplier + 16;
lanai_buf_allocate(buf, size, max_sdu + 32, lanai->pci);
if (unlikely(buf->start == NULL))
return -ENOMEM;
if (unlikely(lanai_buf_size(buf) < size))
printk(KERN_WARNING DEV_LABEL "(itf %d): wanted %d bytes "
"for %s buffer, got only %Zu\n", lanai->number, size,
name, lanai_buf_size(buf));
DPRINTK("Allocated %Zu byte %s buffer\n", lanai_buf_size(buf), name);
return 0;
}
/* Setup a RX buffer for a currently unbound AAL5 vci */
static inline int lanai_setup_rx_vci_aal5(struct lanai_dev *lanai,
struct lanai_vcc *lvcc, const struct atm_qos *qos)
{
return lanai_get_sized_buffer(lanai, &lvcc->rx.buf,
qos->rxtp.max_sdu, AAL5_RX_MULTIPLIER, "RX");
}
/* Setup a TX buffer for a currently unbound AAL5 vci */
static int lanai_setup_tx_vci(struct lanai_dev *lanai, struct lanai_vcc *lvcc,
const struct atm_qos *qos)
{
int max_sdu, multiplier;
if (qos->aal == ATM_AAL0) {
lvcc->tx.unqueue = vcc_tx_unqueue_aal0;
max_sdu = ATM_CELL_SIZE - 1;
multiplier = AAL0_TX_MULTIPLIER;
} else {
lvcc->tx.unqueue = vcc_tx_unqueue_aal5;
max_sdu = qos->txtp.max_sdu;
multiplier = AAL5_TX_MULTIPLIER;
}
return lanai_get_sized_buffer(lanai, &lvcc->tx.buf, max_sdu,
multiplier, "TX");
}
static inline void host_vcc_bind(struct lanai_dev *lanai,
struct lanai_vcc *lvcc, vci_t vci)
{
if (lvcc->vbase != NULL)
return; /* We already were bound in the other direction */
DPRINTK("Binding vci %d\n", vci);
#ifdef USE_POWERDOWN
if (lanai->nbound++ == 0) {
DPRINTK("Coming out of powerdown\n");
lanai->conf1 &= ~CONFIG1_POWERDOWN;
conf1_write(lanai);
conf2_write(lanai);
}
#endif
lvcc->vbase = cardvcc_addr(lanai, vci);
lanai->vccs[lvcc->vci = vci] = lvcc;
}
static inline void host_vcc_unbind(struct lanai_dev *lanai,
struct lanai_vcc *lvcc)
{
if (lvcc->vbase == NULL)
return; /* This vcc was never bound */
DPRINTK("Unbinding vci %d\n", lvcc->vci);
lvcc->vbase = NULL;
lanai->vccs[lvcc->vci] = NULL;
#ifdef USE_POWERDOWN
if (--lanai->nbound == 0) {
DPRINTK("Going into powerdown\n");
lanai->conf1 |= CONFIG1_POWERDOWN;
conf1_write(lanai);
}
#endif
}
/* -------------------- RESET CARD: */
static void lanai_reset(struct lanai_dev *lanai)
{
	printk(KERN_CRIT DEV_LABEL "(itf %d): *NOT* resetting - not "
"implemented\n", lanai->number);
/* TODO */
/* The following is just a hack until we write the real
* resetter - at least ack whatever interrupt sent us
* here
*/
reg_write(lanai, INT_ALL, IntAck_Reg);
lanai->stats.card_reset++;
}
/* -------------------- SERVICE LIST UTILITIES: */
/*
* Allocate service buffer and tell card about it
*/
static int __devinit service_buffer_allocate(struct lanai_dev *lanai)
{
lanai_buf_allocate(&lanai->service, SERVICE_ENTRIES * 4, 8,
lanai->pci);
if (unlikely(lanai->service.start == NULL))
return -ENOMEM;
DPRINTK("allocated service buffer at 0x%08lX, size %Zu(%d)\n",
(unsigned long) lanai->service.start,
lanai_buf_size(&lanai->service),
lanai_buf_size_cardorder(&lanai->service));
/* Clear ServWrite register to be safe */
reg_write(lanai, 0, ServWrite_Reg);
/* ServiceStuff register contains size and address of buffer */
reg_write(lanai,
SSTUFF_SET_SIZE(lanai_buf_size_cardorder(&lanai->service)) |
SSTUFF_SET_ADDR(lanai->service.dmaaddr),
ServiceStuff_Reg);
return 0;
}
static inline void service_buffer_deallocate(struct lanai_dev *lanai)
{
lanai_buf_deallocate(&lanai->service, lanai->pci);
}
/* Bitfields in service list */
#define SERVICE_TX (0x80000000) /* Was from transmission */
#define SERVICE_TRASH (0x40000000) /* RXed PDU was trashed */
#define SERVICE_CRCERR (0x20000000) /* RXed PDU had CRC error */
#define SERVICE_CI (0x10000000) /* RXed PDU had CI set */
#define SERVICE_CLP (0x08000000) /* RXed PDU had CLP set */
#define SERVICE_STREAM (0x04000000) /* RX Stream mode */
#define SERVICE_GET_VCI(x) (((x)>>16)&0x3FF)
#define SERVICE_GET_END(x) ((x)&0x1FFF)
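/*
 * Decoding example (a made-up value, just to show the bitfields): a service
 * word of 0x801E0123 has SERVICE_TX set, SERVICE_GET_VCI() == 0x1E (VCI 30)
 * and SERVICE_GET_END() == 0x123, i.e. a transmit completion on VCI 30 with
 * end pointer 0x123.
 */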
/* Handle one thing from the service list - returns true if it marked a
* VCC ready for xmit
*/
static int handle_service(struct lanai_dev *lanai, u32 s)
{
vci_t vci = SERVICE_GET_VCI(s);
struct lanai_vcc *lvcc;
read_lock(&vcc_sklist_lock);
lvcc = lanai->vccs[vci];
if (unlikely(lvcc == NULL)) {
read_unlock(&vcc_sklist_lock);
DPRINTK("(itf %d) got service entry 0x%X for nonexistent "
"vcc %d\n", lanai->number, (unsigned int) s, vci);
if (s & SERVICE_TX)
lanai->stats.service_notx++;
else
lanai->stats.service_norx++;
return 0;
}
if (s & SERVICE_TX) { /* segmentation interrupt */
if (unlikely(lvcc->tx.atmvcc == NULL)) {
read_unlock(&vcc_sklist_lock);
DPRINTK("(itf %d) got service entry 0x%X for non-TX "
"vcc %d\n", lanai->number, (unsigned int) s, vci);
lanai->stats.service_notx++;
return 0;
}
__set_bit(vci, lanai->transmit_ready);
lvcc->tx.endptr = SERVICE_GET_END(s);
read_unlock(&vcc_sklist_lock);
return 1;
}
if (unlikely(lvcc->rx.atmvcc == NULL)) {
read_unlock(&vcc_sklist_lock);
DPRINTK("(itf %d) got service entry 0x%X for non-RX "
"vcc %d\n", lanai->number, (unsigned int) s, vci);
lanai->stats.service_norx++;
return 0;
}
if (unlikely(lvcc->rx.atmvcc->qos.aal != ATM_AAL5)) {
read_unlock(&vcc_sklist_lock);
DPRINTK("(itf %d) got RX service entry 0x%X for non-AAL5 "
"vcc %d\n", lanai->number, (unsigned int) s, vci);
lanai->stats.service_rxnotaal5++;
atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
return 0;
}
if (likely(!(s & (SERVICE_TRASH | SERVICE_STREAM | SERVICE_CRCERR)))) {
vcc_rx_aal5(lvcc, SERVICE_GET_END(s));
read_unlock(&vcc_sklist_lock);
return 0;
}
if (s & SERVICE_TRASH) {
int bytes;
read_unlock(&vcc_sklist_lock);
DPRINTK("got trashed rx pdu on vci %d\n", vci);
atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
lvcc->stats.x.aal5.service_trash++;
bytes = (SERVICE_GET_END(s) * 16) -
(((unsigned long) lvcc->rx.buf.ptr) -
((unsigned long) lvcc->rx.buf.start)) + 47;
if (bytes < 0)
bytes += lanai_buf_size(&lvcc->rx.buf);
lanai->stats.ovfl_trash += (bytes / 48);
return 0;
}
if (s & SERVICE_STREAM) {
read_unlock(&vcc_sklist_lock);
atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
lvcc->stats.x.aal5.service_stream++;
printk(KERN_ERR DEV_LABEL "(itf %d): Got AAL5 stream "
"PDU on VCI %d!\n", lanai->number, vci);
lanai_reset(lanai);
return 0;
}
DPRINTK("got rx crc error on vci %d\n", vci);
atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
lvcc->stats.x.aal5.service_rxcrc++;
lvcc->rx.buf.ptr = &lvcc->rx.buf.start[SERVICE_GET_END(s) * 4];
cardvcc_write(lvcc, SERVICE_GET_END(s), vcc_rxreadptr);
read_unlock(&vcc_sklist_lock);
return 0;
}
/* Try transmitting on all VCIs that we marked ready to serve */
static void iter_transmit(struct lanai_dev *lanai, vci_t vci)
{
struct lanai_vcc *lvcc = lanai->vccs[vci];
if (vcc_is_backlogged(lvcc))
lvcc->tx.unqueue(lanai, lvcc, lvcc->tx.endptr);
}
/* Run service queue -- called from interrupt context or with
* interrupts otherwise disabled and with the lanai->servicelock
* lock held
*/
static void run_service(struct lanai_dev *lanai)
{
int ntx = 0;
u32 wreg = reg_read(lanai, ServWrite_Reg);
const u32 *end = lanai->service.start + wreg;
while (lanai->service.ptr != end) {
ntx += handle_service(lanai,
le32_to_cpup(lanai->service.ptr++));
if (lanai->service.ptr >= lanai->service.end)
lanai->service.ptr = lanai->service.start;
}
reg_write(lanai, wreg, ServRead_Reg);
if (ntx != 0) {
read_lock(&vcc_sklist_lock);
vci_bitfield_iterate(lanai, lanai->transmit_ready,
iter_transmit);
bitmap_zero(lanai->transmit_ready, NUM_VCI);
read_unlock(&vcc_sklist_lock);
}
}
/* -------------------- GATHER STATISTICS: */
static void get_statistics(struct lanai_dev *lanai)
{
u32 statreg = reg_read(lanai, Statistics_Reg);
lanai->stats.atm_ovfl += STATS_GET_FIFO_OVFL(statreg);
lanai->stats.hec_err += STATS_GET_HEC_ERR(statreg);
lanai->stats.vci_trash += STATS_GET_BAD_VCI(statreg);
lanai->stats.ovfl_trash += STATS_GET_BUF_OVFL(statreg);
}
/* -------------------- POLLING TIMER: */
#ifndef DEBUG_RW
/* Try to unqueue one backlogged vcc */
static void iter_dequeue(struct lanai_dev *lanai, vci_t vci)
{
struct lanai_vcc *lvcc = lanai->vccs[vci];
int endptr;
if (lvcc == NULL || lvcc->tx.atmvcc == NULL ||
!vcc_is_backlogged(lvcc)) {
__clear_bit(vci, lanai->backlog_vccs);
return;
}
endptr = TXREADPTR_GET_PTR(cardvcc_read(lvcc, vcc_txreadptr));
lvcc->tx.unqueue(lanai, lvcc, endptr);
}
#endif /* !DEBUG_RW */
static void lanai_timed_poll(unsigned long arg)
{
struct lanai_dev *lanai = (struct lanai_dev *) arg;
#ifndef DEBUG_RW
unsigned long flags;
#ifdef USE_POWERDOWN
if (lanai->conf1 & CONFIG1_POWERDOWN)
return;
#endif /* USE_POWERDOWN */
local_irq_save(flags);
/* If we can grab the spinlock, check if any services need to be run */
if (spin_trylock(&lanai->servicelock)) {
run_service(lanai);
spin_unlock(&lanai->servicelock);
}
/* ...and see if any backlogged VCs can make progress */
/* unfortunately linux has no read_trylock() currently */
read_lock(&vcc_sklist_lock);
vci_bitfield_iterate(lanai, lanai->backlog_vccs, iter_dequeue);
read_unlock(&vcc_sklist_lock);
local_irq_restore(flags);
get_statistics(lanai);
#endif /* !DEBUG_RW */
mod_timer(&lanai->timer, jiffies + LANAI_POLL_PERIOD);
}
static inline void lanai_timed_poll_start(struct lanai_dev *lanai)
{
init_timer(&lanai->timer);
lanai->timer.expires = jiffies + LANAI_POLL_PERIOD;
lanai->timer.data = (unsigned long) lanai;
lanai->timer.function = lanai_timed_poll;
add_timer(&lanai->timer);
}
static inline void lanai_timed_poll_stop(struct lanai_dev *lanai)
{
del_timer_sync(&lanai->timer);
}
/* -------------------- INTERRUPT SERVICE: */
static inline void lanai_int_1(struct lanai_dev *lanai, u32 reason)
{
u32 ack = 0;
if (reason & INT_SERVICE) {
ack = INT_SERVICE;
spin_lock(&lanai->servicelock);
run_service(lanai);
spin_unlock(&lanai->servicelock);
}
if (reason & (INT_AAL0_STR | INT_AAL0)) {
ack |= reason & (INT_AAL0_STR | INT_AAL0);
vcc_rx_aal0(lanai);
}
/* The rest of the interrupts are pretty rare */
if (ack == reason)
goto done;
if (reason & INT_STATS) {
reason &= ~INT_STATS; /* No need to ack */
get_statistics(lanai);
}
if (reason & INT_STATUS) {
ack |= reason & INT_STATUS;
lanai_check_status(lanai);
}
if (unlikely(reason & INT_DMASHUT)) {
printk(KERN_ERR DEV_LABEL "(itf %d): driver error - DMA "
"shutdown, reason=0x%08X, address=0x%08X\n",
lanai->number, (unsigned int) (reason & INT_DMASHUT),
(unsigned int) reg_read(lanai, DMA_Addr_Reg));
if (reason & INT_TABORTBM) {
lanai_reset(lanai);
return;
}
ack |= (reason & INT_DMASHUT);
printk(KERN_ERR DEV_LABEL "(itf %d): re-enabling DMA\n",
lanai->number);
conf1_write(lanai);
lanai->stats.dma_reenable++;
pcistatus_check(lanai, 0);
}
if (unlikely(reason & INT_TABORTSENT)) {
ack |= (reason & INT_TABORTSENT);
printk(KERN_ERR DEV_LABEL "(itf %d): sent PCI target abort\n",
lanai->number);
pcistatus_check(lanai, 0);
}
if (unlikely(reason & INT_SEGSHUT)) {
printk(KERN_ERR DEV_LABEL "(itf %d): driver error - "
"segmentation shutdown, reason=0x%08X\n", lanai->number,
(unsigned int) (reason & INT_SEGSHUT));
lanai_reset(lanai);
return;
}
if (unlikely(reason & (INT_PING | INT_WAKE))) {
printk(KERN_ERR DEV_LABEL "(itf %d): driver error - "
"unexpected interrupt 0x%08X, resetting\n",
lanai->number,
(unsigned int) (reason & (INT_PING | INT_WAKE)));
lanai_reset(lanai);
return;
}
#ifdef DEBUG
if (unlikely(ack != reason)) {
DPRINTK("unacked ints: 0x%08X\n",
(unsigned int) (reason & ~ack));
ack = reason;
}
#endif
done:
if (ack != 0)
reg_write(lanai, ack, IntAck_Reg);
}
static irqreturn_t lanai_int(int irq, void *devid)
{
struct lanai_dev *lanai = devid;
u32 reason;
#ifdef USE_POWERDOWN
/*
* If we're powered down we shouldn't be generating any interrupts -
* so assume that this is a shared interrupt line and it's for someone
* else
*/
if (unlikely(lanai->conf1 & CONFIG1_POWERDOWN))
return IRQ_NONE;
#endif
reason = intr_pending(lanai);
if (reason == 0)
return IRQ_NONE; /* Must be for someone else */
do {
if (unlikely(reason == 0xFFFFFFFF))
break; /* Maybe we've been unplugged? */
lanai_int_1(lanai, reason);
reason = intr_pending(lanai);
} while (reason != 0);
return IRQ_HANDLED;
}
/* TODO - it would be nice if we could use the "delayed interrupt" system
* to some advantage
*/
/* -------------------- CHECK BOARD ID/REV: */
/*
* The board id and revision are stored both in the reset register and
* in the PCI configuration space - the documentation says to check
* each of them. If revp!=NULL we store the revision there
*/
static int check_board_id_and_rev(const char *name, u32 val, int *revp)
{
DPRINTK("%s says board_id=%d, board_rev=%d\n", name,
(int) RESET_GET_BOARD_ID(val),
(int) RESET_GET_BOARD_REV(val));
if (RESET_GET_BOARD_ID(val) != BOARD_ID_LANAI256) {
printk(KERN_ERR DEV_LABEL ": Found %s board-id %d -- not a "
"Lanai 25.6\n", name, (int) RESET_GET_BOARD_ID(val));
return -ENODEV;
}
if (revp != NULL)
*revp = RESET_GET_BOARD_REV(val);
return 0;
}
/* -------------------- PCI INITIALIZATION/SHUTDOWN: */
static int __devinit lanai_pci_start(struct lanai_dev *lanai)
{
struct pci_dev *pci = lanai->pci;
int result;
u16 w;
if (pci_enable_device(pci) != 0) {
printk(KERN_ERR DEV_LABEL "(itf %d): can't enable "
"PCI device", lanai->number);
return -ENXIO;
}
pci_set_master(pci);
if (pci_set_dma_mask(pci, DMA_BIT_MASK(32)) != 0) {
printk(KERN_WARNING DEV_LABEL
"(itf %d): No suitable DMA available.\n", lanai->number);
return -EBUSY;
}
if (pci_set_consistent_dma_mask(pci, DMA_BIT_MASK(32)) != 0) {
printk(KERN_WARNING DEV_LABEL
"(itf %d): No suitable DMA available.\n", lanai->number);
return -EBUSY;
}
result = pci_read_config_word(pci, PCI_SUBSYSTEM_ID, &w);
if (result != PCIBIOS_SUCCESSFUL) {
printk(KERN_ERR DEV_LABEL "(itf %d): can't read "
"PCI_SUBSYSTEM_ID: %d\n", lanai->number, result);
return -EINVAL;
}
result = check_board_id_and_rev("PCI", w, NULL);
if (result != 0)
return result;
/* Set latency timer to zero as per lanai docs */
result = pci_write_config_byte(pci, PCI_LATENCY_TIMER, 0);
if (result != PCIBIOS_SUCCESSFUL) {
printk(KERN_ERR DEV_LABEL "(itf %d): can't write "
"PCI_LATENCY_TIMER: %d\n", lanai->number, result);
return -EINVAL;
}
pcistatus_check(lanai, 1);
pcistatus_check(lanai, 0);
return 0;
}
/* -------------------- VPI/VCI ALLOCATION: */
/*
* We _can_ use VCI==0 for normal traffic, but only for UBR (or we'll
 * get a CBRZERO interrupt), and we can use it only if no one is receiving
* AAL0 traffic (since they will use the same queue) - according to the
* docs we shouldn't even use it for AAL0 traffic
*/
static inline int vci0_is_ok(struct lanai_dev *lanai,
const struct atm_qos *qos)
{
if (qos->txtp.traffic_class == ATM_CBR || qos->aal == ATM_AAL0)
return 0;
if (qos->rxtp.traffic_class != ATM_NONE) {
if (lanai->naal0 != 0)
return 0;
lanai->conf2 |= CONFIG2_VCI0_NORMAL;
conf2_write_if_powerup(lanai);
}
return 1;
}
/* return true if vci is currently unused, or if requested qos is
* compatible
*/
static int vci_is_ok(struct lanai_dev *lanai, vci_t vci,
const struct atm_vcc *atmvcc)
{
const struct atm_qos *qos = &atmvcc->qos;
const struct lanai_vcc *lvcc = lanai->vccs[vci];
if (vci == 0 && !vci0_is_ok(lanai, qos))
return 0;
if (unlikely(lvcc != NULL)) {
if (qos->rxtp.traffic_class != ATM_NONE &&
lvcc->rx.atmvcc != NULL && lvcc->rx.atmvcc != atmvcc)
return 0;
if (qos->txtp.traffic_class != ATM_NONE &&
lvcc->tx.atmvcc != NULL && lvcc->tx.atmvcc != atmvcc)
return 0;
if (qos->txtp.traffic_class == ATM_CBR &&
lanai->cbrvcc != NULL && lanai->cbrvcc != atmvcc)
return 0;
}
if (qos->aal == ATM_AAL0 && lanai->naal0 == 0 &&
qos->rxtp.traffic_class != ATM_NONE) {
const struct lanai_vcc *vci0 = lanai->vccs[0];
if (vci0 != NULL && vci0->rx.atmvcc != NULL)
return 0;
lanai->conf2 &= ~CONFIG2_VCI0_NORMAL;
conf2_write_if_powerup(lanai);
}
return 1;
}
static int lanai_normalize_ci(struct lanai_dev *lanai,
const struct atm_vcc *atmvcc, short *vpip, vci_t *vcip)
{
switch (*vpip) {
case ATM_VPI_ANY:
*vpip = 0;
/* FALLTHROUGH */
case 0:
break;
default:
return -EADDRINUSE;
}
switch (*vcip) {
case ATM_VCI_ANY:
for (*vcip = ATM_NOT_RSV_VCI; *vcip < lanai->num_vci;
(*vcip)++)
if (vci_is_ok(lanai, *vcip, atmvcc))
return 0;
return -EADDRINUSE;
default:
if (*vcip >= lanai->num_vci || *vcip < 0 ||
!vci_is_ok(lanai, *vcip, atmvcc))
return -EADDRINUSE;
}
return 0;
}
/* -------------------- MANAGE CBR: */
/*
* CBR ICG is stored as a fixed-point number with 4 fractional bits.
* Note that storing a number greater than 2046.0 will result in
* incorrect shaping
*/
#define CBRICG_FRAC_BITS (4)
#define CBRICG_MAX (2046 << CBRICG_FRAC_BITS)
/*
* ICG is related to PCR with the formula PCR = MAXPCR / (ICG + 1)
* where MAXPCR is (according to the docs) 25600000/(54*8),
* which is equal to (3125<<9)/27.
*
* Solving for ICG, we get:
* ICG = MAXPCR/PCR - 1
* ICG = (3125<<9)/(27*PCR) - 1
* ICG = ((3125<<9) - (27*PCR)) / (27*PCR)
*
* The end result is supposed to be a fixed-point number with FRAC_BITS
* bits of a fractional part, so we keep everything in the numerator
* shifted by that much as we compute
*
*/
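/*
 * A worked example of the above (numbers checked by hand, not taken from
 * the docs): MAXPCR = 25600000 / (54*8) is roughly 59259 cells/sec.  For a
 * PCR goal of 29630 cells/sec (about half the link), x = 27 * 29630 =
 * 800010 and the fixed-point result is (25600000 - 12800160) / 800010 = 15,
 * i.e. ICG of 15/16 (about 0.94); since ICG is truncated for a positive PCR
 * goal, the achieved rate ends up slightly above the goal.
 */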
static int pcr_to_cbricg(const struct atm_qos *qos)
{
int rounddown = 0; /* 1 = Round PCR down, i.e. round ICG _up_ */
int x, icg, pcr = atm_pcr_goal(&qos->txtp);
if (pcr == 0) /* Use maximum bandwidth */
return 0;
if (pcr < 0) {
rounddown = 1;
pcr = -pcr;
}
x = pcr * 27;
icg = (3125 << (9 + CBRICG_FRAC_BITS)) - (x << CBRICG_FRAC_BITS);
if (rounddown)
icg += x - 1;
icg /= x;
if (icg > CBRICG_MAX)
icg = CBRICG_MAX;
DPRINTK("pcr_to_cbricg: pcr=%d rounddown=%c icg=%d\n",
pcr, rounddown ? 'Y' : 'N', icg);
return icg;
}
static inline void lanai_cbr_setup(struct lanai_dev *lanai)
{
reg_write(lanai, pcr_to_cbricg(&lanai->cbrvcc->qos), CBR_ICG_Reg);
reg_write(lanai, lanai->cbrvcc->vci, CBR_PTR_Reg);
lanai->conf2 |= CONFIG2_CBR_ENABLE;
conf2_write(lanai);
}
static inline void lanai_cbr_shutdown(struct lanai_dev *lanai)
{
lanai->conf2 &= ~CONFIG2_CBR_ENABLE;
conf2_write(lanai);
}
/* -------------------- OPERATIONS: */
/* setup a newly detected device */
static int __devinit lanai_dev_open(struct atm_dev *atmdev)
{
struct lanai_dev *lanai = (struct lanai_dev *) atmdev->dev_data;
unsigned long raw_base;
int result;
DPRINTK("In lanai_dev_open()\n");
/* Basic device fields */
lanai->number = atmdev->number;
lanai->num_vci = NUM_VCI;
bitmap_zero(lanai->backlog_vccs, NUM_VCI);
bitmap_zero(lanai->transmit_ready, NUM_VCI);
lanai->naal0 = 0;
#ifdef USE_POWERDOWN
lanai->nbound = 0;
#endif
lanai->cbrvcc = NULL;
memset(&lanai->stats, 0, sizeof lanai->stats);
spin_lock_init(&lanai->endtxlock);
spin_lock_init(&lanai->servicelock);
atmdev->ci_range.vpi_bits = 0;
atmdev->ci_range.vci_bits = 0;
while (1 << atmdev->ci_range.vci_bits < lanai->num_vci)
atmdev->ci_range.vci_bits++;
atmdev->link_rate = ATM_25_PCR;
/* 3.2: PCI initialization */
if ((result = lanai_pci_start(lanai)) != 0)
goto error;
raw_base = lanai->pci->resource[0].start;
lanai->base = (bus_addr_t) ioremap(raw_base, LANAI_MAPPING_SIZE);
if (lanai->base == NULL) {
printk(KERN_ERR DEV_LABEL ": couldn't remap I/O space\n");
goto error_pci;
}
/* 3.3: Reset lanai and PHY */
reset_board(lanai);
lanai->conf1 = reg_read(lanai, Config1_Reg);
lanai->conf1 &= ~(CONFIG1_GPOUT1 | CONFIG1_POWERDOWN |
CONFIG1_MASK_LEDMODE);
lanai->conf1 |= CONFIG1_SET_LEDMODE(LEDMODE_NOT_SOOL);
reg_write(lanai, lanai->conf1 | CONFIG1_GPOUT1, Config1_Reg);
udelay(1000);
conf1_write(lanai);
/*
* 3.4: Turn on endian mode for big-endian hardware
* We don't actually want to do this - the actual bit fields
* in the endian register are not documented anywhere.
* Instead we do the bit-flipping ourselves on big-endian
* hardware.
*
* 3.5: get the board ID/rev by reading the reset register
*/
result = check_board_id_and_rev("register",
reg_read(lanai, Reset_Reg), &lanai->board_rev);
if (result != 0)
goto error_unmap;
/* 3.6: read EEPROM */
if ((result = eeprom_read(lanai)) != 0)
goto error_unmap;
if ((result = eeprom_validate(lanai)) != 0)
goto error_unmap;
/* 3.7: re-reset PHY, do loopback tests, setup PHY */
reg_write(lanai, lanai->conf1 | CONFIG1_GPOUT1, Config1_Reg);
udelay(1000);
conf1_write(lanai);
/* TODO - loopback tests */
lanai->conf1 |= (CONFIG1_GPOUT2 | CONFIG1_GPOUT3 | CONFIG1_DMA_ENABLE);
conf1_write(lanai);
/* 3.8/3.9: test and initialize card SRAM */
if ((result = sram_test_and_clear(lanai)) != 0)
goto error_unmap;
/* 3.10: initialize lanai registers */
lanai->conf1 |= CONFIG1_DMA_ENABLE;
conf1_write(lanai);
if ((result = service_buffer_allocate(lanai)) != 0)
goto error_unmap;
if ((result = vcc_table_allocate(lanai)) != 0)
goto error_service;
lanai->conf2 = (lanai->num_vci >= 512 ? CONFIG2_HOWMANY : 0) |
CONFIG2_HEC_DROP | /* ??? */ CONFIG2_PTI7_MODE;
conf2_write(lanai);
reg_write(lanai, TX_FIFO_DEPTH, TxDepth_Reg);
reg_write(lanai, 0, CBR_ICG_Reg); /* CBR defaults to no limit */
if ((result = request_irq(lanai->pci->irq, lanai_int, IRQF_SHARED,
DEV_LABEL, lanai)) != 0) {
printk(KERN_ERR DEV_LABEL ": can't allocate interrupt\n");
goto error_vcctable;
}
	mb(); /* Make sure all of the above writes made it out before enabling */
intr_enable(lanai, INT_ALL & ~(INT_PING | INT_WAKE));
/* 3.11: initialize loop mode (i.e. turn looping off) */
lanai->conf1 = (lanai->conf1 & ~CONFIG1_MASK_LOOPMODE) |
CONFIG1_SET_LOOPMODE(LOOPMODE_NORMAL) |
CONFIG1_GPOUT2 | CONFIG1_GPOUT3;
conf1_write(lanai);
lanai->status = reg_read(lanai, Status_Reg);
/* We're now done initializing this card */
#ifdef USE_POWERDOWN
lanai->conf1 |= CONFIG1_POWERDOWN;
conf1_write(lanai);
#endif
memcpy(atmdev->esi, eeprom_mac(lanai), ESI_LEN);
lanai_timed_poll_start(lanai);
printk(KERN_NOTICE DEV_LABEL "(itf %d): rev.%d, base=0x%lx, irq=%u "
"(%02X-%02X-%02X-%02X-%02X-%02X)\n", lanai->number,
(int) lanai->pci->revision, (unsigned long) lanai->base,
lanai->pci->irq,
atmdev->esi[0], atmdev->esi[1], atmdev->esi[2],
atmdev->esi[3], atmdev->esi[4], atmdev->esi[5]);
printk(KERN_NOTICE DEV_LABEL "(itf %d): LANAI%s, serialno=%u(0x%X), "
"board_rev=%d\n", lanai->number,
lanai->type==lanai2 ? "2" : "HB", (unsigned int) lanai->serialno,
(unsigned int) lanai->serialno, lanai->board_rev);
return 0;
error_vcctable:
vcc_table_deallocate(lanai);
error_service:
service_buffer_deallocate(lanai);
error_unmap:
reset_board(lanai);
#ifdef USE_POWERDOWN
lanai->conf1 = reg_read(lanai, Config1_Reg) | CONFIG1_POWERDOWN;
conf1_write(lanai);
#endif
iounmap(lanai->base);
error_pci:
pci_disable_device(lanai->pci);
error:
return result;
}
/* called when device is being shutdown, and all vcc's are gone - higher
* levels will deallocate the atm device for us
*/
static void lanai_dev_close(struct atm_dev *atmdev)
{
struct lanai_dev *lanai = (struct lanai_dev *) atmdev->dev_data;
printk(KERN_INFO DEV_LABEL "(itf %d): shutting down interface\n",
lanai->number);
lanai_timed_poll_stop(lanai);
#ifdef USE_POWERDOWN
lanai->conf1 = reg_read(lanai, Config1_Reg) & ~CONFIG1_POWERDOWN;
conf1_write(lanai);
#endif
intr_disable(lanai, INT_ALL);
free_irq(lanai->pci->irq, lanai);
reset_board(lanai);
#ifdef USE_POWERDOWN
lanai->conf1 |= CONFIG1_POWERDOWN;
conf1_write(lanai);
#endif
pci_disable_device(lanai->pci);
vcc_table_deallocate(lanai);
service_buffer_deallocate(lanai);
iounmap(lanai->base);
kfree(lanai);
}
/* close a vcc */
static void lanai_close(struct atm_vcc *atmvcc)
{
struct lanai_vcc *lvcc = (struct lanai_vcc *) atmvcc->dev_data;
struct lanai_dev *lanai = (struct lanai_dev *) atmvcc->dev->dev_data;
if (lvcc == NULL)
return;
clear_bit(ATM_VF_READY, &atmvcc->flags);
clear_bit(ATM_VF_PARTIAL, &atmvcc->flags);
if (lvcc->rx.atmvcc == atmvcc) {
lanai_shutdown_rx_vci(lvcc);
if (atmvcc->qos.aal == ATM_AAL0) {
if (--lanai->naal0 <= 0)
aal0_buffer_free(lanai);
} else
lanai_buf_deallocate(&lvcc->rx.buf, lanai->pci);
lvcc->rx.atmvcc = NULL;
}
if (lvcc->tx.atmvcc == atmvcc) {
if (atmvcc == lanai->cbrvcc) {
if (lvcc->vbase != NULL)
lanai_cbr_shutdown(lanai);
lanai->cbrvcc = NULL;
}
lanai_shutdown_tx_vci(lanai, lvcc);
lanai_buf_deallocate(&lvcc->tx.buf, lanai->pci);
lvcc->tx.atmvcc = NULL;
}
if (--lvcc->nref == 0) {
host_vcc_unbind(lanai, lvcc);
kfree(lvcc);
}
atmvcc->dev_data = NULL;
clear_bit(ATM_VF_ADDR, &atmvcc->flags);
}
/* open a vcc on the card to vpi/vci */
static int lanai_open(struct atm_vcc *atmvcc)
{
struct lanai_dev *lanai;
struct lanai_vcc *lvcc;
int result = 0;
int vci = atmvcc->vci;
short vpi = atmvcc->vpi;
/* we don't support partial open - it's not really useful anyway */
if ((test_bit(ATM_VF_PARTIAL, &atmvcc->flags)) ||
(vpi == ATM_VPI_UNSPEC) || (vci == ATM_VCI_UNSPEC))
return -EINVAL;
lanai = (struct lanai_dev *) atmvcc->dev->dev_data;
result = lanai_normalize_ci(lanai, atmvcc, &vpi, &vci);
if (unlikely(result != 0))
goto out;
set_bit(ATM_VF_ADDR, &atmvcc->flags);
if (atmvcc->qos.aal != ATM_AAL0 && atmvcc->qos.aal != ATM_AAL5)
return -EINVAL;
DPRINTK(DEV_LABEL "(itf %d): open %d.%d\n", lanai->number,
(int) vpi, vci);
lvcc = lanai->vccs[vci];
if (lvcc == NULL) {
lvcc = new_lanai_vcc();
if (unlikely(lvcc == NULL))
return -ENOMEM;
atmvcc->dev_data = lvcc;
}
lvcc->nref++;
if (atmvcc->qos.rxtp.traffic_class != ATM_NONE) {
APRINTK(lvcc->rx.atmvcc == NULL, "rx.atmvcc!=NULL, vci=%d\n",
vci);
if (atmvcc->qos.aal == ATM_AAL0) {
if (lanai->naal0 == 0)
result = aal0_buffer_allocate(lanai);
} else
result = lanai_setup_rx_vci_aal5(
lanai, lvcc, &atmvcc->qos);
if (unlikely(result != 0))
goto out_free;
lvcc->rx.atmvcc = atmvcc;
lvcc->stats.rx_nomem = 0;
lvcc->stats.x.aal5.rx_badlen = 0;
lvcc->stats.x.aal5.service_trash = 0;
lvcc->stats.x.aal5.service_stream = 0;
lvcc->stats.x.aal5.service_rxcrc = 0;
if (atmvcc->qos.aal == ATM_AAL0)
lanai->naal0++;
}
if (atmvcc->qos.txtp.traffic_class != ATM_NONE) {
APRINTK(lvcc->tx.atmvcc == NULL, "tx.atmvcc!=NULL, vci=%d\n",
vci);
result = lanai_setup_tx_vci(lanai, lvcc, &atmvcc->qos);
if (unlikely(result != 0))
goto out_free;
lvcc->tx.atmvcc = atmvcc;
if (atmvcc->qos.txtp.traffic_class == ATM_CBR) {
APRINTK(lanai->cbrvcc == NULL,
"cbrvcc!=NULL, vci=%d\n", vci);
lanai->cbrvcc = atmvcc;
}
}
host_vcc_bind(lanai, lvcc, vci);
/*
* Make sure everything made it to RAM before we tell the card about
* the VCC
*/
wmb();
if (atmvcc == lvcc->rx.atmvcc)
host_vcc_start_rx(lvcc);
if (atmvcc == lvcc->tx.atmvcc) {
host_vcc_start_tx(lvcc);
if (lanai->cbrvcc == atmvcc)
lanai_cbr_setup(lanai);
}
set_bit(ATM_VF_READY, &atmvcc->flags);
return 0;
out_free:
lanai_close(atmvcc);
out:
return result;
}
static int lanai_send(struct atm_vcc *atmvcc, struct sk_buff *skb)
{
struct lanai_vcc *lvcc = (struct lanai_vcc *) atmvcc->dev_data;
struct lanai_dev *lanai = (struct lanai_dev *) atmvcc->dev->dev_data;
unsigned long flags;
if (unlikely(lvcc == NULL || lvcc->vbase == NULL ||
lvcc->tx.atmvcc != atmvcc))
goto einval;
#ifdef DEBUG
if (unlikely(skb == NULL)) {
DPRINTK("lanai_send: skb==NULL for vci=%d\n", atmvcc->vci);
goto einval;
}
if (unlikely(lanai == NULL)) {
DPRINTK("lanai_send: lanai==NULL for vci=%d\n", atmvcc->vci);
goto einval;
}
#endif
ATM_SKB(skb)->vcc = atmvcc;
switch (atmvcc->qos.aal) {
case ATM_AAL5:
read_lock_irqsave(&vcc_sklist_lock, flags);
vcc_tx_aal5(lanai, lvcc, skb);
read_unlock_irqrestore(&vcc_sklist_lock, flags);
return 0;
case ATM_AAL0:
if (unlikely(skb->len != ATM_CELL_SIZE-1))
goto einval;
/* NOTE - this next line is technically invalid - we haven't unshared skb */
cpu_to_be32s((u32 *) skb->data);
read_lock_irqsave(&vcc_sklist_lock, flags);
vcc_tx_aal0(lanai, lvcc, skb);
read_unlock_irqrestore(&vcc_sklist_lock, flags);
return 0;
}
DPRINTK("lanai_send: bad aal=%d on vci=%d\n", (int) atmvcc->qos.aal,
atmvcc->vci);
einval:
lanai_free_skb(atmvcc, skb);
return -EINVAL;
}
static int lanai_change_qos(struct atm_vcc *atmvcc,
/*const*/ struct atm_qos *qos, int flags)
{
return -EBUSY; /* TODO: need to write this */
}
#ifndef CONFIG_PROC_FS
#define lanai_proc_read NULL
#else
static int lanai_proc_read(struct atm_dev *atmdev, loff_t *pos, char *page)
{
struct lanai_dev *lanai = (struct lanai_dev *) atmdev->dev_data;
loff_t left = *pos;
struct lanai_vcc *lvcc;
if (left-- == 0)
return sprintf(page, DEV_LABEL "(itf %d): chip=LANAI%s, "
"serial=%u, magic=0x%08X, num_vci=%d\n",
atmdev->number, lanai->type==lanai2 ? "2" : "HB",
(unsigned int) lanai->serialno,
(unsigned int) lanai->magicno, lanai->num_vci);
if (left-- == 0)
return sprintf(page, "revision: board=%d, pci_if=%d\n",
lanai->board_rev, (int) lanai->pci->revision);
if (left-- == 0)
return sprintf(page, "EEPROM ESI: %pM\n",
&lanai->eeprom[EEPROM_MAC]);
if (left-- == 0)
return sprintf(page, "status: SOOL=%d, LOCD=%d, LED=%d, "
"GPIN=%d\n", (lanai->status & STATUS_SOOL) ? 1 : 0,
(lanai->status & STATUS_LOCD) ? 1 : 0,
(lanai->status & STATUS_LED) ? 1 : 0,
(lanai->status & STATUS_GPIN) ? 1 : 0);
if (left-- == 0)
return sprintf(page, "global buffer sizes: service=%Zu, "
"aal0_rx=%Zu\n", lanai_buf_size(&lanai->service),
lanai->naal0 ? lanai_buf_size(&lanai->aal0buf) : 0);
if (left-- == 0) {
get_statistics(lanai);
return sprintf(page, "cells in error: overflow=%u, "
"closed_vci=%u, bad_HEC=%u, rx_fifo=%u\n",
lanai->stats.ovfl_trash, lanai->stats.vci_trash,
lanai->stats.hec_err, lanai->stats.atm_ovfl);
}
if (left-- == 0)
return sprintf(page, "PCI errors: parity_detect=%u, "
"master_abort=%u, master_target_abort=%u,\n",
lanai->stats.pcierr_parity_detect,
lanai->stats.pcierr_serr_set,
lanai->stats.pcierr_m_target_abort);
if (left-- == 0)
return sprintf(page, " slave_target_abort=%u, "
"master_parity=%u\n", lanai->stats.pcierr_s_target_abort,
lanai->stats.pcierr_master_parity);
if (left-- == 0)
return sprintf(page, " no_tx=%u, "
"no_rx=%u, bad_rx_aal=%u\n", lanai->stats.service_norx,
lanai->stats.service_notx,
lanai->stats.service_rxnotaal5);
if (left-- == 0)
return sprintf(page, "resets: dma=%u, card=%u\n",
lanai->stats.dma_reenable, lanai->stats.card_reset);
/* At this point, "left" should be the VCI we're looking for */
read_lock(&vcc_sklist_lock);
for (; ; left++) {
if (left >= NUM_VCI) {
left = 0;
goto out;
}
if ((lvcc = lanai->vccs[left]) != NULL)
break;
(*pos)++;
}
/* Note that we re-use "left" here since we're done with it */
left = sprintf(page, "VCI %4d: nref=%d, rx_nomem=%u", (vci_t) left,
lvcc->nref, lvcc->stats.rx_nomem);
if (lvcc->rx.atmvcc != NULL) {
left += sprintf(&page[left], ",\n rx_AAL=%d",
lvcc->rx.atmvcc->qos.aal == ATM_AAL5 ? 5 : 0);
if (lvcc->rx.atmvcc->qos.aal == ATM_AAL5)
left += sprintf(&page[left], ", rx_buf_size=%Zu, "
"rx_bad_len=%u,\n rx_service_trash=%u, "
"rx_service_stream=%u, rx_bad_crc=%u",
lanai_buf_size(&lvcc->rx.buf),
lvcc->stats.x.aal5.rx_badlen,
lvcc->stats.x.aal5.service_trash,
lvcc->stats.x.aal5.service_stream,
lvcc->stats.x.aal5.service_rxcrc);
}
if (lvcc->tx.atmvcc != NULL)
left += sprintf(&page[left], ",\n tx_AAL=%d, "
"tx_buf_size=%Zu, tx_qos=%cBR, tx_backlogged=%c",
lvcc->tx.atmvcc->qos.aal == ATM_AAL5 ? 5 : 0,
lanai_buf_size(&lvcc->tx.buf),
lvcc->tx.atmvcc == lanai->cbrvcc ? 'C' : 'U',
vcc_is_backlogged(lvcc) ? 'Y' : 'N');
page[left++] = '\n';
page[left] = '\0';
out:
read_unlock(&vcc_sklist_lock);
return left;
}
#endif /* CONFIG_PROC_FS */
/* -------------------- HOOKS: */
static const struct atmdev_ops ops = {
.dev_close = lanai_dev_close,
.open = lanai_open,
.close = lanai_close,
.getsockopt = NULL,
.setsockopt = NULL,
.send = lanai_send,
.phy_put = NULL,
.phy_get = NULL,
.change_qos = lanai_change_qos,
.proc_read = lanai_proc_read,
.owner = THIS_MODULE
};
/* initialize one probed card */
static int __devinit lanai_init_one(struct pci_dev *pci,
const struct pci_device_id *ident)
{
struct lanai_dev *lanai;
struct atm_dev *atmdev;
int result;
lanai = kmalloc(sizeof(*lanai), GFP_KERNEL);
if (lanai == NULL) {
printk(KERN_ERR DEV_LABEL
": couldn't allocate dev_data structure!\n");
return -ENOMEM;
}
atmdev = atm_dev_register(DEV_LABEL, &ops, -1, NULL);
if (atmdev == NULL) {
printk(KERN_ERR DEV_LABEL
": couldn't register atm device!\n");
kfree(lanai);
return -EBUSY;
}
atmdev->dev_data = lanai;
lanai->pci = pci;
lanai->type = (enum lanai_type) ident->device;
result = lanai_dev_open(atmdev);
if (result != 0) {
DPRINTK("lanai_start() failed, err=%d\n", -result);
atm_dev_deregister(atmdev);
kfree(lanai);
}
return result;
}
static struct pci_device_id lanai_pci_tbl[] = {
{ PCI_VDEVICE(EF, PCI_DEVICE_ID_EF_ATM_LANAI2) },
{ PCI_VDEVICE(EF, PCI_DEVICE_ID_EF_ATM_LANAIHB) },
{ 0, } /* terminal entry */
};
MODULE_DEVICE_TABLE(pci, lanai_pci_tbl);
static struct pci_driver lanai_driver = {
.name = DEV_LABEL,
.id_table = lanai_pci_tbl,
.probe = lanai_init_one,
};
static int __init lanai_module_init(void)
{
int x;
x = pci_register_driver(&lanai_driver);
if (x != 0)
printk(KERN_ERR DEV_LABEL ": no adapter found\n");
return x;
}
static void __exit lanai_module_exit(void)
{
/* We'll only get called when all the interfaces are already
* gone, so there isn't much to do
*/
DPRINTK("cleanup_module()\n");
pci_unregister_driver(&lanai_driver);
}
module_init(lanai_module_init);
module_exit(lanai_module_exit);
MODULE_AUTHOR("Mitchell Blank Jr <mitch@sfgoth.com>");
MODULE_DESCRIPTION("Efficient Networks Speedstream 3010 driver");
MODULE_LICENSE("GPL");
| gpl-2.0 |
chui101/kernel-lge-msm7x30 | drivers/media/dvb/frontends/dibx000_common.c | 941 | 4849 | #include <linux/i2c.h>
#include "dibx000_common.h"
static int debug;
module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, "turn on debugging (default: 0)");
#define dprintk(args...) do { if (debug) { printk(KERN_DEBUG "DiBX000: "); printk(args); printk("\n"); } } while (0)
static int dibx000_write_word(struct dibx000_i2c_master *mst, u16 reg, u16 val)
{
u8 b[4] = {
(reg >> 8) & 0xff, reg & 0xff,
(val >> 8) & 0xff, val & 0xff,
};
struct i2c_msg msg = {
.addr = mst->i2c_addr, .flags = 0, .buf = b, .len = 4
};
return i2c_transfer(mst->i2c_adap, &msg, 1) != 1 ? -EREMOTEIO : 0;
}
static int dibx000_i2c_select_interface(struct dibx000_i2c_master *mst,
enum dibx000_i2c_interface intf)
{
if (mst->device_rev > DIB3000MC && mst->selected_interface != intf) {
dprintk("selecting interface: %d", intf);
mst->selected_interface = intf;
return dibx000_write_word(mst, mst->base_reg + 4, intf);
}
return 0;
}
static int dibx000_i2c_gate_ctrl(struct dibx000_i2c_master *mst, u8 tx[4],
u8 addr, int onoff)
{
u16 val;
if (onoff)
val = addr << 8; // bit 7 = use master or not, if 0, the gate is open
else
val = 1 << 7;
if (mst->device_rev > DIB7000)
val <<= 1;
tx[0] = (((mst->base_reg + 1) >> 8) & 0xff);
tx[1] = ((mst->base_reg + 1) & 0xff);
tx[2] = val >> 8;
tx[3] = val & 0xff;
return 0;
}
static u32 dibx000_i2c_func(struct i2c_adapter *adapter)
{
return I2C_FUNC_I2C;
}
static int dibx000_i2c_gated_tuner_xfer(struct i2c_adapter *i2c_adap,
struct i2c_msg msg[], int num)
{
struct dibx000_i2c_master *mst = i2c_get_adapdata(i2c_adap);
struct i2c_msg m[2 + num];
u8 tx_open[4], tx_close[4];
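/* Forward the caller's messages to the tuner bus by bracketing them with a
 * gate-open write (m[0]) and a gate-close write (m[num + 1]) addressed to
 * the demodulator's I2C master.
 */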
memset(m, 0, sizeof(struct i2c_msg) * (2 + num));
dibx000_i2c_select_interface(mst, DIBX000_I2C_INTERFACE_TUNER);
dibx000_i2c_gate_ctrl(mst, tx_open, msg[0].addr, 1);
m[0].addr = mst->i2c_addr;
m[0].buf = tx_open;
m[0].len = 4;
memcpy(&m[1], msg, sizeof(struct i2c_msg) * num);
dibx000_i2c_gate_ctrl(mst, tx_close, 0, 0);
m[num + 1].addr = mst->i2c_addr;
m[num + 1].buf = tx_close;
m[num + 1].len = 4;
return i2c_transfer(mst->i2c_adap, m, 2 + num) == 2 + num ? num : -EIO;
}
static struct i2c_algorithm dibx000_i2c_gated_tuner_algo = {
.master_xfer = dibx000_i2c_gated_tuner_xfer,
.functionality = dibx000_i2c_func,
};
struct i2c_adapter *dibx000_get_i2c_adapter(struct dibx000_i2c_master *mst,
enum dibx000_i2c_interface intf,
int gating)
{
struct i2c_adapter *i2c = NULL;
switch (intf) {
case DIBX000_I2C_INTERFACE_TUNER:
if (gating)
i2c = &mst->gated_tuner_i2c_adap;
break;
default:
printk(KERN_ERR "DiBX000: incorrect I2C interface selected\n");
break;
}
return i2c;
}
EXPORT_SYMBOL(dibx000_get_i2c_adapter);
void dibx000_reset_i2c_master(struct dibx000_i2c_master *mst)
{
/* initialize the i2c-master by closing the gate */
u8 tx[4];
struct i2c_msg m = { .addr = mst->i2c_addr, .buf = tx, .len = 4 };
dibx000_i2c_gate_ctrl(mst, tx, 0, 0);
i2c_transfer(mst->i2c_adap, &m, 1);
mst->selected_interface = 0xff; /* force an interface select the first time through */
dibx000_i2c_select_interface(mst, DIBX000_I2C_INTERFACE_TUNER);
}
EXPORT_SYMBOL(dibx000_reset_i2c_master);
static int i2c_adapter_init(struct i2c_adapter *i2c_adap,
struct i2c_algorithm *algo, const char *name,
struct dibx000_i2c_master *mst)
{
strncpy(i2c_adap->name, name, sizeof(i2c_adap->name));
i2c_adap->class = I2C_CLASS_TV_DIGITAL;
i2c_adap->algo = algo;
i2c_adap->algo_data = NULL;
i2c_set_adapdata(i2c_adap, mst);
if (i2c_add_adapter(i2c_adap) < 0)
return -ENODEV;
return 0;
}
int dibx000_init_i2c_master(struct dibx000_i2c_master *mst, u16 device_rev,
struct i2c_adapter *i2c_adap, u8 i2c_addr)
{
u8 tx[4];
struct i2c_msg m = { .addr = i2c_addr >> 1, .buf = tx, .len = 4 };
mst->device_rev = device_rev;
mst->i2c_adap = i2c_adap;
mst->i2c_addr = i2c_addr >> 1;
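/* The register bank of the on-chip I2C master depends on the demodulator
 * generation: 1024 for DIB7000P/DIB8000, 768 otherwise.
 */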
if (device_rev == DIB7000P || device_rev == DIB8000)
mst->base_reg = 1024;
else
mst->base_reg = 768;
if (i2c_adapter_init
(&mst->gated_tuner_i2c_adap, &dibx000_i2c_gated_tuner_algo,
"DiBX000 tuner I2C bus", mst) != 0)
printk(KERN_ERR
"DiBX000: could not initialize the tuner i2c_adapter\n");
/* initialize the i2c-master by closing the gate */
dibx000_i2c_gate_ctrl(mst, tx, 0, 0);
return i2c_transfer(i2c_adap, &m, 1) == 1;
}
EXPORT_SYMBOL(dibx000_init_i2c_master);
void dibx000_exit_i2c_master(struct dibx000_i2c_master *mst)
{
i2c_del_adapter(&mst->gated_tuner_i2c_adap);
}
EXPORT_SYMBOL(dibx000_exit_i2c_master);
u32 systime(void)
{
struct timespec t;
t = current_kernel_time();
return (t.tv_sec * 10000) + (t.tv_nsec / 100000);
}
EXPORT_SYMBOL(systime);
MODULE_AUTHOR("Patrick Boettcher <pboettcher@dibcom.fr>");
MODULE_DESCRIPTION("Common function the DiBcom demodulator family");
MODULE_LICENSE("GPL");
| gpl-2.0 |
ljjehl-XHD/android_kernel_htc_msm8960 | drivers/gpu/drm/gma500/framebuffer.c | 1197 | 21318 | /**************************************************************************
* Copyright (c) 2007-2011, Intel Corporation.
* All Rights Reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
*
**************************************************************************/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/tty.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/fb.h>
#include <linux/init.h>
#include <linux/console.h>
#include <drm/drmP.h>
#include <drm/drm.h>
#include <drm/drm_crtc.h>
#include <drm/drm_fb_helper.h>
#include "psb_drv.h"
#include "psb_intel_reg.h"
#include "psb_intel_drv.h"
#include "framebuffer.h"
#include "gtt.h"
static void psb_user_framebuffer_destroy(struct drm_framebuffer *fb);
static int psb_user_framebuffer_create_handle(struct drm_framebuffer *fb,
struct drm_file *file_priv,
unsigned int *handle);
static const struct drm_framebuffer_funcs psb_fb_funcs = {
.destroy = psb_user_framebuffer_destroy,
.create_handle = psb_user_framebuffer_create_handle,
};
#define CMAP_TOHW(_val, _width) ((((_val) << (_width)) + 0x7FFF - (_val)) >> 16)
static int psbfb_setcolreg(unsigned regno, unsigned red, unsigned green,
unsigned blue, unsigned transp,
struct fb_info *info)
{
struct psb_fbdev *fbdev = info->par;
struct drm_framebuffer *fb = fbdev->psb_fb_helper.fb;
uint32_t v;
if (!fb)
return -ENOMEM;
if (regno > 255)
return 1;
red = CMAP_TOHW(red, info->var.red.length);
blue = CMAP_TOHW(blue, info->var.blue.length);
green = CMAP_TOHW(green, info->var.green.length);
transp = CMAP_TOHW(transp, info->var.transp.length);
v = (red << info->var.red.offset) |
(green << info->var.green.offset) |
(blue << info->var.blue.offset) |
(transp << info->var.transp.offset);
if (regno < 16) {
switch (fb->bits_per_pixel) {
case 16:
((uint32_t *) info->pseudo_palette)[regno] = v;
break;
case 24:
case 32:
((uint32_t *) info->pseudo_palette)[regno] = v;
break;
}
}
return 0;
}
static int psbfb_pan(struct fb_var_screeninfo *var, struct fb_info *info)
{
struct psb_fbdev *fbdev = info->par;
struct psb_framebuffer *psbfb = &fbdev->pfb;
struct drm_device *dev = psbfb->base.dev;
/*
* We have to poke our nose in here. The core fb code assumes
* panning is part of the hardware that can be invoked before
* the actual fb is mapped. In our case that isn't quite true.
*/
if (psbfb->gtt->npage) {
/* GTT roll shifts in 4K pages, we need to shift the right
number of pages */
int pages = info->fix.line_length >> 12;
psb_gtt_roll(dev, psbfb->gtt, var->yoffset * pages);
}
return 0;
}
static int psbfb_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
struct psb_framebuffer *psbfb = vma->vm_private_data;
struct drm_device *dev = psbfb->base.dev;
struct drm_psb_private *dev_priv = dev->dev_private;
int page_num;
int i;
unsigned long address;
int ret;
unsigned long pfn;
/* FIXME: assumes fb at stolen base which may not be true */
unsigned long phys_addr = (unsigned long)dev_priv->stolen_base;
page_num = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
address = (unsigned long)vmf->virtual_address - (vmf->pgoff << PAGE_SHIFT);
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
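/* Fill the whole VMA with a linear, uncached mapping of the stolen memory
 * area, one page at a time.
 */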
for (i = 0; i < page_num; i++) {
pfn = (phys_addr >> PAGE_SHIFT);
ret = vm_insert_mixed(vma, address, pfn);
if (unlikely((ret == -EBUSY) || (ret != 0 && i > 0)))
break;
else if (unlikely(ret != 0)) {
ret = (ret == -ENOMEM) ? VM_FAULT_OOM : VM_FAULT_SIGBUS;
return ret;
}
address += PAGE_SIZE;
phys_addr += PAGE_SIZE;
}
return VM_FAULT_NOPAGE;
}
static void psbfb_vm_open(struct vm_area_struct *vma)
{
}
static void psbfb_vm_close(struct vm_area_struct *vma)
{
}
static struct vm_operations_struct psbfb_vm_ops = {
.fault = psbfb_vm_fault,
.open = psbfb_vm_open,
.close = psbfb_vm_close
};
static int psbfb_mmap(struct fb_info *info, struct vm_area_struct *vma)
{
struct psb_fbdev *fbdev = info->par;
struct psb_framebuffer *psbfb = &fbdev->pfb;
if (vma->vm_pgoff != 0)
return -EINVAL;
if (vma->vm_pgoff > (~0UL >> PAGE_SHIFT))
return -EINVAL;
if (!psbfb->addr_space)
psbfb->addr_space = vma->vm_file->f_mapping;
/*
* If this is a GEM object then info->screen_base is the virtual
* kernel remapping of the object. FIXME: Review if this is
* suitable for our mmap work
*/
vma->vm_ops = &psbfb_vm_ops;
vma->vm_private_data = (void *)psbfb;
vma->vm_flags |= VM_RESERVED | VM_IO |
VM_MIXEDMAP | VM_DONTEXPAND;
return 0;
}
static int psbfb_ioctl(struct fb_info *info, unsigned int cmd,
unsigned long arg)
{
return -ENOTTY;
}
static struct fb_ops psbfb_ops = {
.owner = THIS_MODULE,
.fb_check_var = drm_fb_helper_check_var,
.fb_set_par = drm_fb_helper_set_par,
.fb_blank = drm_fb_helper_blank,
.fb_setcolreg = psbfb_setcolreg,
.fb_fillrect = cfb_fillrect,
.fb_copyarea = psbfb_copyarea,
.fb_imageblit = cfb_imageblit,
.fb_mmap = psbfb_mmap,
.fb_sync = psbfb_sync,
.fb_ioctl = psbfb_ioctl,
};
static struct fb_ops psbfb_roll_ops = {
.owner = THIS_MODULE,
.fb_check_var = drm_fb_helper_check_var,
.fb_set_par = drm_fb_helper_set_par,
.fb_blank = drm_fb_helper_blank,
.fb_setcolreg = psbfb_setcolreg,
.fb_fillrect = cfb_fillrect,
.fb_copyarea = cfb_copyarea,
.fb_imageblit = cfb_imageblit,
.fb_pan_display = psbfb_pan,
.fb_mmap = psbfb_mmap,
.fb_ioctl = psbfb_ioctl,
};
static struct fb_ops psbfb_unaccel_ops = {
.owner = THIS_MODULE,
.fb_check_var = drm_fb_helper_check_var,
.fb_set_par = drm_fb_helper_set_par,
.fb_blank = drm_fb_helper_blank,
.fb_setcolreg = psbfb_setcolreg,
.fb_fillrect = cfb_fillrect,
.fb_copyarea = cfb_copyarea,
.fb_imageblit = cfb_imageblit,
.fb_mmap = psbfb_mmap,
.fb_ioctl = psbfb_ioctl,
};
/**
* psb_framebuffer_init - initialize a framebuffer
* @dev: our DRM device
* @fb: framebuffer to set up
* @mode_cmd: mode description
* @gt: backing object
*
* Configure and fill in the boilerplate for our frame buffer. Return
* 0 on success or an error code if we fail.
*/
static int psb_framebuffer_init(struct drm_device *dev,
struct psb_framebuffer *fb,
struct drm_mode_fb_cmd2 *mode_cmd,
struct gtt_range *gt)
{
u32 bpp, depth;
int ret;
drm_fb_get_bpp_depth(mode_cmd->pixel_format, &depth, &bpp);
if (mode_cmd->pitches[0] & 63)
return -EINVAL;
switch (bpp) {
case 8:
case 16:
case 24:
case 32:
break;
default:
return -EINVAL;
}
ret = drm_framebuffer_init(dev, &fb->base, &psb_fb_funcs);
if (ret) {
dev_err(dev->dev, "framebuffer init failed: %d\n", ret);
return ret;
}
drm_helper_mode_fill_fb_struct(&fb->base, mode_cmd);
fb->gtt = gt;
return 0;
}
/**
* psb_framebuffer_create - create a framebuffer backed by gt
* @dev: our DRM device
* @mode_cmd: the description of the requested mode
* @gt: the backing object
*
* Create a framebuffer object backed by the gt, and fill in the
* boilerplate required
*
* TODO: review object references
*/
static struct drm_framebuffer *psb_framebuffer_create
(struct drm_device *dev,
struct drm_mode_fb_cmd2 *mode_cmd,
struct gtt_range *gt)
{
struct psb_framebuffer *fb;
int ret;
fb = kzalloc(sizeof(*fb), GFP_KERNEL);
if (!fb)
return ERR_PTR(-ENOMEM);
ret = psb_framebuffer_init(dev, fb, mode_cmd, gt);
if (ret) {
kfree(fb);
return ERR_PTR(ret);
}
return &fb->base;
}
/**
* psbfb_alloc - allocate frame buffer memory
* @dev: the DRM device
* @aligned_size: space needed
* @force: fall back to GEM buffers if need be
*
* Allocate the frame buffer. In the usual case we get a GTT range that
* is stolen memory backed and life is simple. If there isn't sufficient
* stolen memory we fail, as we don't have the virtual mapping space to
* really vmap it and the kernel console code can't handle non-linear
* framebuffers.
*
* Re-address this if and when the framebuffer layer grows this ability.
*/
static struct gtt_range *psbfb_alloc(struct drm_device *dev, int aligned_size)
{
struct gtt_range *backing;
/* Begin by trying to use stolen memory backing */
backing = psb_gtt_alloc_range(dev, aligned_size, "fb", 1);
if (backing) {
if (drm_gem_private_object_init(dev,
&backing->gem, aligned_size) == 0)
return backing;
psb_gtt_free_range(dev, backing);
}
return NULL;
}
/**
* psbfb_create - create a framebuffer
* @fbdev: the framebuffer device
* @sizes: specification of the layout
*
* Create a framebuffer to the specifications provided
*/
static int psbfb_create(struct psb_fbdev *fbdev,
struct drm_fb_helper_surface_size *sizes)
{
struct drm_device *dev = fbdev->psb_fb_helper.dev;
struct drm_psb_private *dev_priv = dev->dev_private;
struct fb_info *info;
struct drm_framebuffer *fb;
struct psb_framebuffer *psbfb = &fbdev->pfb;
struct drm_mode_fb_cmd2 mode_cmd;
struct device *device = &dev->pdev->dev;
int size;
int ret;
struct gtt_range *backing;
u32 bpp, depth;
int gtt_roll = 0;
int pitch_lines = 0;
mode_cmd.width = sizes->surface_width;
mode_cmd.height = sizes->surface_height;
bpp = sizes->surface_bpp;
depth = sizes->surface_depth;
/* No 24bit packed */
if (bpp == 24)
bpp = 32;
do {
/*
* Acceleration via the GTT requires pitch to be
* power of two aligned. Preferably page aligned, but
* less is OK with some fonts.
*/
mode_cmd.pitches[0] = ALIGN(mode_cmd.width * ((bpp + 7) / 8), 4096 >> pitch_lines);
size = mode_cmd.pitches[0] * mode_cmd.height;
size = ALIGN(size, PAGE_SIZE);
/* Allocate the fb in the GTT with stolen page backing */
backing = psbfb_alloc(dev, size);
if (pitch_lines)
pitch_lines *= 2;
else
pitch_lines = 1;
gtt_roll++;
} while (backing == NULL && pitch_lines <= 16);
/* The final pitch we accepted if we succeeded */
pitch_lines /= 2;
if (backing == NULL) {
/*
* We couldn't get the space we wanted, fall back to the
* display engine requirement instead. The HW requires
* the pitch to be 64 byte aligned
*/
gtt_roll = 0; /* Don't use GTT accelerated scrolling */
pitch_lines = 64;
mode_cmd.pitches[0] = ALIGN(mode_cmd.width * ((bpp + 7) / 8), 64);
size = mode_cmd.pitches[0] * mode_cmd.height;
size = ALIGN(size, PAGE_SIZE);
/* Allocate the framebuffer in the GTT with stolen page backing */
backing = psbfb_alloc(dev, size);
if (backing == NULL)
return -ENOMEM;
}
mutex_lock(&dev->struct_mutex);
info = framebuffer_alloc(0, device);
if (!info) {
ret = -ENOMEM;
goto out_err1;
}
info->par = fbdev;
mode_cmd.pixel_format = drm_mode_legacy_fb_format(bpp, depth);
ret = psb_framebuffer_init(dev, psbfb, &mode_cmd, backing);
if (ret)
goto out_unref;
fb = &psbfb->base;
psbfb->fbdev = info;
fbdev->psb_fb_helper.fb = fb;
fbdev->psb_fb_helper.fbdev = info;
drm_fb_helper_fill_fix(info, fb->pitches[0], fb->depth);
strcpy(info->fix.id, "psbfb");
info->flags = FBINFO_DEFAULT;
if (dev_priv->ops->accel_2d && pitch_lines > 8) /* 2D engine */
info->fbops = &psbfb_ops;
else if (gtt_roll) { /* GTT rolling seems best */
info->fbops = &psbfb_roll_ops;
info->flags |= FBINFO_HWACCEL_YPAN;
} else /* Software */
info->fbops = &psbfb_unaccel_ops;
ret = fb_alloc_cmap(&info->cmap, 256, 0);
if (ret) {
ret = -ENOMEM;
goto out_unref;
}
info->fix.smem_start = dev->mode_config.fb_base;
info->fix.smem_len = size;
info->fix.ywrapstep = gtt_roll;
info->fix.ypanstep = 0;
/* Accessed stolen memory directly */
info->screen_base = (char *)dev_priv->vram_addr +
backing->offset;
info->screen_size = size;
if (dev_priv->gtt.stolen_size) {
info->apertures = alloc_apertures(1);
if (!info->apertures) {
ret = -ENOMEM;
goto out_unref;
}
info->apertures->ranges[0].base = dev->mode_config.fb_base;
info->apertures->ranges[0].size = dev_priv->gtt.stolen_size;
}
drm_fb_helper_fill_var(info, &fbdev->psb_fb_helper,
sizes->fb_width, sizes->fb_height);
info->fix.mmio_start = pci_resource_start(dev->pdev, 0);
info->fix.mmio_len = pci_resource_len(dev->pdev, 0);
/* Use default scratch pixmap (info->pixmap.flags = FB_PIXMAP_SYSTEM) */
dev_info(dev->dev, "allocated %dx%d fb\n",
psbfb->base.width, psbfb->base.height);
mutex_unlock(&dev->struct_mutex);
return 0;
out_unref:
if (backing->stolen)
psb_gtt_free_range(dev, backing);
else
drm_gem_object_unreference(&backing->gem);
out_err1:
mutex_unlock(&dev->struct_mutex);
psb_gtt_free_range(dev, backing);
return ret;
}
/**
* psb_user_framebuffer_create - create framebuffer
* @dev: our DRM device
* @filp: client file
* @cmd: mode request
*
* Create a new framebuffer backed by a userspace GEM object
*/
static struct drm_framebuffer *psb_user_framebuffer_create
(struct drm_device *dev, struct drm_file *filp,
struct drm_mode_fb_cmd2 *cmd)
{
struct gtt_range *r;
struct drm_gem_object *obj;
/*
* Find the GEM object and thus the gtt range object that is
* to back this space
*/
obj = drm_gem_object_lookup(dev, filp, cmd->handles[0]);
if (obj == NULL)
return ERR_PTR(-ENOENT);
/* Let the core code do all the work */
r = container_of(obj, struct gtt_range, gem);
return psb_framebuffer_create(dev, cmd, r);
}
static void psbfb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
u16 blue, int regno)
{
struct psb_intel_crtc *intel_crtc = to_psb_intel_crtc(crtc);
intel_crtc->lut_r[regno] = red >> 8;
intel_crtc->lut_g[regno] = green >> 8;
intel_crtc->lut_b[regno] = blue >> 8;
}
static void psbfb_gamma_get(struct drm_crtc *crtc, u16 *red,
u16 *green, u16 *blue, int regno)
{
struct psb_intel_crtc *intel_crtc = to_psb_intel_crtc(crtc);
*red = intel_crtc->lut_r[regno] << 8;
*green = intel_crtc->lut_g[regno] << 8;
*blue = intel_crtc->lut_b[regno] << 8;
}
static int psbfb_probe(struct drm_fb_helper *helper,
struct drm_fb_helper_surface_size *sizes)
{
struct psb_fbdev *psb_fbdev = (struct psb_fbdev *)helper;
int new_fb = 0;
int ret;
if (!helper->fb) {
ret = psbfb_create(psb_fbdev, sizes);
if (ret)
return ret;
new_fb = 1;
}
return new_fb;
}
struct drm_fb_helper_funcs psb_fb_helper_funcs = {
.gamma_set = psbfb_gamma_set,
.gamma_get = psbfb_gamma_get,
.fb_probe = psbfb_probe,
};
static int psb_fbdev_destroy(struct drm_device *dev, struct psb_fbdev *fbdev)
{
struct fb_info *info;
struct psb_framebuffer *psbfb = &fbdev->pfb;
if (fbdev->psb_fb_helper.fbdev) {
info = fbdev->psb_fb_helper.fbdev;
unregister_framebuffer(info);
if (info->cmap.len)
fb_dealloc_cmap(&info->cmap);
framebuffer_release(info);
}
drm_fb_helper_fini(&fbdev->psb_fb_helper);
drm_framebuffer_cleanup(&psbfb->base);
if (psbfb->gtt)
drm_gem_object_unreference(&psbfb->gtt->gem);
return 0;
}
int psb_fbdev_init(struct drm_device *dev)
{
struct psb_fbdev *fbdev;
struct drm_psb_private *dev_priv = dev->dev_private;
fbdev = kzalloc(sizeof(struct psb_fbdev), GFP_KERNEL);
if (!fbdev) {
dev_err(dev->dev, "no memory\n");
return -ENOMEM;
}
dev_priv->fbdev = fbdev;
fbdev->psb_fb_helper.funcs = &psb_fb_helper_funcs;
drm_fb_helper_init(dev, &fbdev->psb_fb_helper, dev_priv->ops->crtcs,
INTELFB_CONN_LIMIT);
drm_fb_helper_single_add_all_connectors(&fbdev->psb_fb_helper);
drm_fb_helper_initial_config(&fbdev->psb_fb_helper, 32);
return 0;
}
static void psb_fbdev_fini(struct drm_device *dev)
{
struct drm_psb_private *dev_priv = dev->dev_private;
if (!dev_priv->fbdev)
return;
psb_fbdev_destroy(dev, dev_priv->fbdev);
kfree(dev_priv->fbdev);
dev_priv->fbdev = NULL;
}
static void psbfb_output_poll_changed(struct drm_device *dev)
{
struct drm_psb_private *dev_priv = dev->dev_private;
struct psb_fbdev *fbdev = (struct psb_fbdev *)dev_priv->fbdev;
drm_fb_helper_hotplug_event(&fbdev->psb_fb_helper);
}
/**
* psb_user_framebuffer_create_handle - add a handle to a framebuffer
* @fb: framebuffer
* @file_priv: our DRM file
* @handle: returned handle
*
* Our framebuffer object is a GTT range which also contains a GEM
* object. We need to turn it into a handle for userspace. GEM will do
* the work for us
*/
static int psb_user_framebuffer_create_handle(struct drm_framebuffer *fb,
struct drm_file *file_priv,
unsigned int *handle)
{
struct psb_framebuffer *psbfb = to_psb_fb(fb);
struct gtt_range *r = psbfb->gtt;
return drm_gem_handle_create(file_priv, &r->gem, handle);
}
/**
* psb_user_framebuffer_destroy - destruct user created fb
* @fb: framebuffer
*
* User framebuffers are backed by GEM objects so all we have to do is
* clean up a bit and drop the reference, GEM will handle the fallout
*/
static void psb_user_framebuffer_destroy(struct drm_framebuffer *fb)
{
struct psb_framebuffer *psbfb = to_psb_fb(fb);
struct gtt_range *r = psbfb->gtt;
struct drm_device *dev = fb->dev;
struct drm_psb_private *dev_priv = dev->dev_private;
struct psb_fbdev *fbdev = dev_priv->fbdev;
struct drm_crtc *crtc;
int reset = 0;
/* Should never get stolen memory for a user fb */
WARN_ON(r->stolen);
/* Check if we are erroneously live */
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
if (crtc->fb == fb)
reset = 1;
if (reset)
/*
* Now force a sane response before we permit the DRM CRTC
* layer to do stupid things like blank the display. Instead
* we reset this framebuffer as if the user had forced a reset.
* We must do this before the cleanup so that the DRM layer
* doesn't get a chance to stick its oar in where it isn't
* wanted.
*/
drm_fb_helper_restore_fbdev_mode(&fbdev->psb_fb_helper);
/* Let DRM do its clean up */
drm_framebuffer_cleanup(fb);
/* We are no longer using the resource in GEM */
drm_gem_object_unreference_unlocked(&r->gem);
kfree(fb);
}
static const struct drm_mode_config_funcs psb_mode_funcs = {
.fb_create = psb_user_framebuffer_create,
.output_poll_changed = psbfb_output_poll_changed,
};
static int psb_create_backlight_property(struct drm_device *dev)
{
struct drm_psb_private *dev_priv = dev->dev_private;
struct drm_property *backlight;
if (dev_priv->backlight_property)
return 0;
backlight = drm_property_create_range(dev, 0, "backlight", 0, 100);
dev_priv->backlight_property = backlight;
return 0;
}
static void psb_setup_outputs(struct drm_device *dev)
{
struct drm_psb_private *dev_priv = dev->dev_private;
struct drm_connector *connector;
drm_mode_create_scaling_mode_property(dev);
psb_create_backlight_property(dev);
dev_priv->ops->output_init(dev);
list_for_each_entry(connector, &dev->mode_config.connector_list,
head) {
struct psb_intel_encoder *psb_intel_encoder =
psb_intel_attached_encoder(connector);
struct drm_encoder *encoder = &psb_intel_encoder->base;
int crtc_mask = 0, clone_mask = 0;
/* valid crtcs */
switch (psb_intel_encoder->type) {
case INTEL_OUTPUT_ANALOG:
crtc_mask = (1 << 0);
clone_mask = (1 << INTEL_OUTPUT_ANALOG);
break;
case INTEL_OUTPUT_SDVO:
crtc_mask = ((1 << 0) | (1 << 1));
clone_mask = (1 << INTEL_OUTPUT_SDVO);
break;
case INTEL_OUTPUT_LVDS:
if (IS_MRST(dev))
crtc_mask = (1 << 0);
else
crtc_mask = (1 << 1);
clone_mask = (1 << INTEL_OUTPUT_LVDS);
break;
case INTEL_OUTPUT_MIPI:
crtc_mask = (1 << 0);
clone_mask = (1 << INTEL_OUTPUT_MIPI);
break;
case INTEL_OUTPUT_MIPI2:
crtc_mask = (1 << 2);
clone_mask = (1 << INTEL_OUTPUT_MIPI2);
break;
case INTEL_OUTPUT_HDMI:
if (IS_MFLD(dev))
crtc_mask = (1 << 1);
else
crtc_mask = (1 << 0);
clone_mask = (1 << INTEL_OUTPUT_HDMI);
break;
}
encoder->possible_crtcs = crtc_mask;
encoder->possible_clones =
psb_intel_connector_clones(dev, clone_mask);
}
}
void psb_modeset_init(struct drm_device *dev)
{
struct drm_psb_private *dev_priv = dev->dev_private;
struct psb_intel_mode_device *mode_dev = &dev_priv->mode_dev;
int i;
drm_mode_config_init(dev);
dev->mode_config.min_width = 0;
dev->mode_config.min_height = 0;
dev->mode_config.funcs = (void *) &psb_mode_funcs;
/* set memory base */
/* Oaktrail and Poulsbo should use BAR 2 */
pci_read_config_dword(dev->pdev, PSB_BSM, (u32 *)
&(dev->mode_config.fb_base));
/* num pipes is 2 for PSB but 1 for Mrst */
for (i = 0; i < dev_priv->num_pipe; i++)
psb_intel_crtc_init(dev, i, mode_dev);
dev->mode_config.max_width = 4096;
dev->mode_config.max_height = 4096;
psb_setup_outputs(dev);
}
void psb_modeset_cleanup(struct drm_device *dev)
{
mutex_lock(&dev->struct_mutex);
drm_kms_helper_poll_fini(dev);
psb_fbdev_fini(dev);
drm_mode_config_cleanup(dev);
mutex_unlock(&dev->struct_mutex);
}
| gpl-2.0 |
TeamBliss-Devices/android_kernel_samsung_v2wifixx | net/core/net_namespace.c | 1197 | 14861 | #include <linux/workqueue.h>
#include <linux/rtnetlink.h>
#include <linux/cache.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/idr.h>
#include <linux/rculist.h>
#include <linux/nsproxy.h>
#include <linux/proc_fs.h>
#include <linux/file.h>
#include <linux/export.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>
/*
* Our network namespace constructor/destructor lists
*/
static LIST_HEAD(pernet_list);
static struct list_head *first_device = &pernet_list;
static DEFINE_MUTEX(net_mutex);
LIST_HEAD(net_namespace_list);
EXPORT_SYMBOL_GPL(net_namespace_list);
struct net init_net = {
.dev_base_head = LIST_HEAD_INIT(init_net.dev_base_head),
};
EXPORT_SYMBOL(init_net);
#define INITIAL_NET_GEN_PTRS 13 /* +1 for len +2 for rcu_head */
static unsigned int max_gen_ptrs = INITIAL_NET_GEN_PTRS;
static struct net_generic *net_alloc_generic(void)
{
struct net_generic *ng;
size_t generic_size = offsetof(struct net_generic, ptr[max_gen_ptrs]);
ng = kzalloc(generic_size, GFP_KERNEL);
if (ng)
ng->len = max_gen_ptrs;
return ng;
}
static int net_assign_generic(struct net *net, int id, void *data)
{
struct net_generic *ng, *old_ng;
BUG_ON(!mutex_is_locked(&net_mutex));
BUG_ON(id == 0);
old_ng = rcu_dereference_protected(net->gen,
lockdep_is_held(&net_mutex));
ng = old_ng;
if (old_ng->len >= id)
goto assign;
ng = net_alloc_generic();
if (ng == NULL)
return -ENOMEM;
/*
* Some synchronisation notes:
*
* The net_generic explores the net->gen array inside rcu
* read section. Besides once set the net->gen->ptr[x]
* pointer never changes (see rules in netns/generic.h).
*
* That said, we simply duplicate this array and schedule
* the old copy for kfree after a grace period.
*/
memcpy(&ng->ptr, &old_ng->ptr, old_ng->len * sizeof(void*));
rcu_assign_pointer(net->gen, ng);
kfree_rcu(old_ng, rcu);
assign:
ng->ptr[id - 1] = data;
return 0;
}
static int ops_init(const struct pernet_operations *ops, struct net *net)
{
int err = -ENOMEM;
void *data = NULL;
if (ops->id && ops->size) {
data = kzalloc(ops->size, GFP_KERNEL);
if (!data)
goto out;
err = net_assign_generic(net, *ops->id, data);
if (err)
goto cleanup;
}
err = 0;
if (ops->init)
err = ops->init(net);
if (!err)
return 0;
cleanup:
kfree(data);
out:
return err;
}
static void ops_free(const struct pernet_operations *ops, struct net *net)
{
if (ops->id && ops->size) {
int id = *ops->id;
kfree(net_generic(net, id));
}
}
static void ops_exit_list(const struct pernet_operations *ops,
struct list_head *net_exit_list)
{
struct net *net;
if (ops->exit) {
list_for_each_entry(net, net_exit_list, exit_list)
ops->exit(net);
}
if (ops->exit_batch)
ops->exit_batch(net_exit_list);
}
static void ops_free_list(const struct pernet_operations *ops,
struct list_head *net_exit_list)
{
struct net *net;
if (ops->size && ops->id) {
list_for_each_entry(net, net_exit_list, exit_list)
ops_free(ops, net);
}
}
/*
* setup_net runs the initializers for the network namespace object.
*/
static __net_init int setup_net(struct net *net)
{
/* Must be called with net_mutex held */
const struct pernet_operations *ops, *saved_ops;
int error = 0;
LIST_HEAD(net_exit_list);
atomic_set(&net->count, 1);
atomic_set(&net->passive, 1);
net->dev_base_seq = 1;
#ifdef NETNS_REFCNT_DEBUG
atomic_set(&net->use_count, 0);
#endif
list_for_each_entry(ops, &pernet_list, list) {
error = ops_init(ops, net);
if (error < 0)
goto out_undo;
}
out:
return error;
out_undo:
/* Walk through the list backwards calling the exit functions
* for the pernet modules whose init functions did not fail.
*/
list_add(&net->exit_list, &net_exit_list);
saved_ops = ops;
list_for_each_entry_continue_reverse(ops, &pernet_list, list)
ops_exit_list(ops, &net_exit_list);
ops = saved_ops;
list_for_each_entry_continue_reverse(ops, &pernet_list, list)
ops_free_list(ops, &net_exit_list);
rcu_barrier();
goto out;
}
#ifdef CONFIG_NET_NS
static struct kmem_cache *net_cachep;
static struct workqueue_struct *netns_wq;
static struct net *net_alloc(void)
{
struct net *net = NULL;
struct net_generic *ng;
ng = net_alloc_generic();
if (!ng)
goto out;
net = kmem_cache_zalloc(net_cachep, GFP_KERNEL);
if (!net)
goto out_free;
rcu_assign_pointer(net->gen, ng);
out:
return net;
out_free:
kfree(ng);
goto out;
}
static void net_free(struct net *net)
{
#ifdef NETNS_REFCNT_DEBUG
if (unlikely(atomic_read(&net->use_count) != 0)) {
printk(KERN_EMERG "network namespace not free! Usage: %d\n",
atomic_read(&net->use_count));
return;
}
#endif
kfree(net->gen);
kmem_cache_free(net_cachep, net);
}
void net_drop_ns(void *p)
{
struct net *ns = p;
if (ns && atomic_dec_and_test(&ns->passive))
net_free(ns);
}
struct net *copy_net_ns(unsigned long flags, struct net *old_net)
{
struct net *net;
int rv;
if (!(flags & CLONE_NEWNET))
return get_net(old_net);
net = net_alloc();
if (!net)
return ERR_PTR(-ENOMEM);
mutex_lock(&net_mutex);
rv = setup_net(net);
if (rv == 0) {
rtnl_lock();
list_add_tail_rcu(&net->list, &net_namespace_list);
rtnl_unlock();
}
mutex_unlock(&net_mutex);
if (rv < 0) {
net_drop_ns(net);
return ERR_PTR(rv);
}
return net;
}
static DEFINE_SPINLOCK(cleanup_list_lock);
static LIST_HEAD(cleanup_list); /* Must hold cleanup_list_lock to touch */
static void cleanup_net(struct work_struct *work)
{
const struct pernet_operations *ops;
struct net *net, *tmp;
LIST_HEAD(net_kill_list);
LIST_HEAD(net_exit_list);
/* Atomically snapshot the list of namespaces to cleanup */
spin_lock_irq(&cleanup_list_lock);
list_replace_init(&cleanup_list, &net_kill_list);
spin_unlock_irq(&cleanup_list_lock);
mutex_lock(&net_mutex);
/* Don't let anyone else find us. */
rtnl_lock();
list_for_each_entry(net, &net_kill_list, cleanup_list) {
list_del_rcu(&net->list);
list_add_tail(&net->exit_list, &net_exit_list);
}
rtnl_unlock();
/*
* Another CPU might be rcu-iterating the list, wait for it.
* This needs to be before calling the exit() notifiers, so
* the rcu_barrier() below isn't sufficient alone.
*/
synchronize_rcu();
/* Run all of the network namespace exit methods */
list_for_each_entry_reverse(ops, &pernet_list, list)
ops_exit_list(ops, &net_exit_list);
/* Free the net generic variables */
list_for_each_entry_reverse(ops, &pernet_list, list)
ops_free_list(ops, &net_exit_list);
mutex_unlock(&net_mutex);
/* Ensure there are no outstanding rcu callbacks using this
* network namespace.
*/
rcu_barrier();
/* Finally it is safe to free my network namespace structure */
list_for_each_entry_safe(net, tmp, &net_exit_list, exit_list) {
list_del_init(&net->exit_list);
net_drop_ns(net);
}
}
static DECLARE_WORK(net_cleanup_work, cleanup_net);
void __put_net(struct net *net)
{
/* Cleanup the network namespace in process context */
unsigned long flags;
spin_lock_irqsave(&cleanup_list_lock, flags);
list_add(&net->cleanup_list, &cleanup_list);
spin_unlock_irqrestore(&cleanup_list_lock, flags);
queue_work(netns_wq, &net_cleanup_work);
}
EXPORT_SYMBOL_GPL(__put_net);
struct net *get_net_ns_by_fd(int fd)
{
struct proc_inode *ei;
struct file *file;
struct net *net;
file = proc_ns_fget(fd);
if (IS_ERR(file))
return ERR_CAST(file);
ei = PROC_I(file->f_dentry->d_inode);
if (ei->ns_ops == &netns_operations)
net = get_net(ei->ns);
else
net = ERR_PTR(-EINVAL);
fput(file);
return net;
}
#else
struct net *copy_net_ns(unsigned long flags, struct net *old_net)
{
if (flags & CLONE_NEWNET)
return ERR_PTR(-EINVAL);
return old_net;
}
struct net *get_net_ns_by_fd(int fd)
{
return ERR_PTR(-EINVAL);
}
#endif
struct net *get_net_ns_by_pid(pid_t pid)
{
struct task_struct *tsk;
struct net *net;
/* Lookup the network namespace */
net = ERR_PTR(-ESRCH);
rcu_read_lock();
tsk = find_task_by_vpid(pid);
if (tsk) {
struct nsproxy *nsproxy;
nsproxy = task_nsproxy(tsk);
if (nsproxy)
net = get_net(nsproxy->net_ns);
}
rcu_read_unlock();
return net;
}
EXPORT_SYMBOL_GPL(get_net_ns_by_pid);
static int __init net_ns_init(void)
{
struct net_generic *ng;
#ifdef CONFIG_NET_NS
net_cachep = kmem_cache_create("net_namespace", sizeof(struct net),
SMP_CACHE_BYTES,
SLAB_PANIC, NULL);
/* Create workqueue for cleanup */
netns_wq = create_singlethread_workqueue("netns");
if (!netns_wq)
panic("Could not create netns workq");
#endif
ng = net_alloc_generic();
if (!ng)
panic("Could not allocate generic netns");
rcu_assign_pointer(init_net.gen, ng);
mutex_lock(&net_mutex);
if (setup_net(&init_net))
panic("Could not setup the initial network namespace");
rtnl_lock();
list_add_tail_rcu(&init_net.list, &net_namespace_list);
rtnl_unlock();
mutex_unlock(&net_mutex);
return 0;
}
pure_initcall(net_ns_init);
#ifdef CONFIG_NET_NS
static int __register_pernet_operations(struct list_head *list,
struct pernet_operations *ops)
{
struct net *net;
int error;
LIST_HEAD(net_exit_list);
list_add_tail(&ops->list, list);
if (ops->init || (ops->id && ops->size)) {
for_each_net(net) {
error = ops_init(ops, net);
if (error)
goto out_undo;
list_add_tail(&net->exit_list, &net_exit_list);
}
}
return 0;
out_undo:
/* If I have an error cleanup all namespaces I initialized */
list_del(&ops->list);
ops_exit_list(ops, &net_exit_list);
ops_free_list(ops, &net_exit_list);
return error;
}
static void __unregister_pernet_operations(struct pernet_operations *ops)
{
struct net *net;
LIST_HEAD(net_exit_list);
list_del(&ops->list);
for_each_net(net)
list_add_tail(&net->exit_list, &net_exit_list);
ops_exit_list(ops, &net_exit_list);
ops_free_list(ops, &net_exit_list);
}
#else
static int __register_pernet_operations(struct list_head *list,
struct pernet_operations *ops)
{
return ops_init(ops, &init_net);
}
static void __unregister_pernet_operations(struct pernet_operations *ops)
{
LIST_HEAD(net_exit_list);
list_add(&init_net.exit_list, &net_exit_list);
ops_exit_list(ops, &net_exit_list);
ops_free_list(ops, &net_exit_list);
}
#endif /* CONFIG_NET_NS */
static DEFINE_IDA(net_generic_ids);
static int register_pernet_operations(struct list_head *list,
struct pernet_operations *ops)
{
int error;
if (ops->id) {
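/* Reserve a generic-pointer id (>= 1) for this subsystem; if the IDA needs
 * more memory, preload it and retry.
 */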
again:
error = ida_get_new_above(&net_generic_ids, 1, ops->id);
if (error < 0) {
if (error == -EAGAIN) {
ida_pre_get(&net_generic_ids, GFP_KERNEL);
goto again;
}
return error;
}
max_gen_ptrs = max_t(unsigned int, max_gen_ptrs, *ops->id);
}
error = __register_pernet_operations(list, ops);
if (error) {
rcu_barrier();
if (ops->id)
ida_remove(&net_generic_ids, *ops->id);
}
return error;
}
static void unregister_pernet_operations(struct pernet_operations *ops)
{
__unregister_pernet_operations(ops);
rcu_barrier();
if (ops->id)
ida_remove(&net_generic_ids, *ops->id);
}
/**
* register_pernet_subsys - register a network namespace subsystem
* @ops: pernet operations structure for the subsystem
*
* Register a subsystem which has init and exit functions
* that are called when network namespaces are created and
* destroyed respectively.
*
* When registered, all network namespace init functions are
* called for every existing network namespace, allowing kernel
* modules to have a race-free view of the set of network namespaces.
*
* When a new network namespace is created all of the init
* methods are called in the order in which they were registered.
*
* When a network namespace is destroyed all of the exit methods
* are called in the reverse of the order with which they were
* registered.
*/
int register_pernet_subsys(struct pernet_operations *ops)
{
int error;
mutex_lock(&net_mutex);
error = register_pernet_operations(first_device, ops);
mutex_unlock(&net_mutex);
return error;
}
EXPORT_SYMBOL_GPL(register_pernet_subsys);
/**
* unregister_pernet_subsys - unregister a network namespace subsystem
* @ops: pernet operations structure to manipulate
*
* Remove the pernet operations structure from the list to be
* used when network namespaces are created or destroyed. In
* addition run the exit method for all existing network
* namespaces.
*/
void unregister_pernet_subsys(struct pernet_operations *ops)
{
mutex_lock(&net_mutex);
unregister_pernet_operations(ops);
mutex_unlock(&net_mutex);
}
EXPORT_SYMBOL_GPL(unregister_pernet_subsys);
/**
* register_pernet_device - register a network namespace device
* @ops: pernet operations structure for the subsystem
*
* Register a device which has init and exit functions
* that are called when network namespaces are created and
* destroyed respectively.
*
* When registered, all network namespace init functions are
* called for every existing network namespace, allowing kernel
* modules to have a race-free view of the set of network namespaces.
*
* When a new network namespace is created all of the init
* methods are called in the order in which they were registered.
*
* When a network namespace is destroyed all of the exit methods
* are called in the reverse of the order with which they were
* registered.
*/
int register_pernet_device(struct pernet_operations *ops)
{
int error;
mutex_lock(&net_mutex);
error = register_pernet_operations(&pernet_list, ops);
if (!error && (first_device == &pernet_list))
first_device = &ops->list;
mutex_unlock(&net_mutex);
return error;
}
EXPORT_SYMBOL_GPL(register_pernet_device);
/**
* unregister_pernet_device - unregister a network namespace netdevice
* @ops: pernet operations structure to manipulate
*
* Remove the pernet operations structure from the list to be
* used when network namespaces are created or destroyed. In
* addition run the exit method for all existing network
* namespaces.
*/
void unregister_pernet_device(struct pernet_operations *ops)
{
mutex_lock(&net_mutex);
if (&ops->list == first_device)
first_device = first_device->next;
unregister_pernet_operations(ops);
mutex_unlock(&net_mutex);
}
EXPORT_SYMBOL_GPL(unregister_pernet_device);
#ifdef CONFIG_NET_NS
static void *netns_get(struct task_struct *task)
{
struct net *net = NULL;
struct nsproxy *nsproxy;
rcu_read_lock();
nsproxy = task_nsproxy(task);
if (nsproxy)
net = get_net(nsproxy->net_ns);
rcu_read_unlock();
return net;
}
static void netns_put(void *ns)
{
put_net(ns);
}
static int netns_install(struct nsproxy *nsproxy, void *ns)
{
put_net(nsproxy->net_ns);
nsproxy->net_ns = get_net(ns);
return 0;
}
const struct proc_ns_operations netns_operations = {
.name = "net",
.type = CLONE_NEWNET,
.get = netns_get,
.put = netns_put,
.install = netns_install,
};
#endif
| gpl-2.0 |
MaccKing/CBx | drivers/video/au1100fb.c | 1965 | 18433 | /*
* BRIEF MODULE DESCRIPTION
* Au1100 LCD Driver.
*
* Rewritten for 2.6 by Embedded Alley Solutions
* <source@embeddedalley.com>, based on submissions by
* Karl Lessard <klessard@sunrisetelecom.com>
* <c.pellegrin@exadron.com>
*
* PM support added by Rodolfo Giometti <giometti@linux.it>
* Cursor enable/disable by Rodolfo Giometti <giometti@linux.it>
*
* Copyright 2002 MontaVista Software
* Author: MontaVista Software, Inc.
* ppopov@mvista.com or source@mvista.com
*
* Copyright 2002 Alchemy Semiconductor
* Author: Alchemy Semiconductor
*
* Based on:
* linux/drivers/video/skeletonfb.c -- Skeleton for a frame buffer device
* Created 28 Dec 1997 by Geert Uytterhoeven
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
* THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
* NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
* USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
* ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/fb.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/ctype.h>
#include <linux/dma-mapping.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <asm/mach-au1x00/au1000.h>
#define DEBUG 0
#include "au1100fb.h"
#define DRIVER_NAME "au1100fb"
#define DRIVER_DESC "LCD controller driver for AU1100 processors"
#define to_au1100fb_device(_info) \
(_info ? container_of(_info, struct au1100fb_device, info) : NULL);
/* Bitfields format supported by the controller. Note that the order of formats
* SHOULD be the same as in the LCD_CONTROL_SBPPF field, so we can retrieve the
* right pixel format by doing rgb_bitfields[LCD_CONTROL_SBPPF_XXX >> LCD_CONTROL_SBPPF]
*/
struct fb_bitfield rgb_bitfields[][4] =
{
/* Red, Green, Blue, Transp */
{ { 10, 6, 0 }, { 5, 5, 0 }, { 0, 5, 0 }, { 0, 0, 0 } },
{ { 11, 5, 0 }, { 5, 6, 0 }, { 0, 5, 0 }, { 0, 0, 0 } },
{ { 11, 5, 0 }, { 6, 5, 0 }, { 0, 6, 0 }, { 0, 0, 0 } },
{ { 10, 5, 0 }, { 5, 5, 0 }, { 0, 5, 0 }, { 15, 1, 0 } },
{ { 11, 5, 0 }, { 6, 5, 0 }, { 1, 5, 0 }, { 0, 1, 0 } },
/* The last is used to describe 12bpp format */
{ { 8, 4, 0 }, { 4, 4, 0 }, { 0, 4, 0 }, { 0, 0, 0 } },
};
static struct fb_fix_screeninfo au1100fb_fix __devinitdata = {
.id = "AU1100 FB",
.xpanstep = 1,
.ypanstep = 1,
.type = FB_TYPE_PACKED_PIXELS,
.accel = FB_ACCEL_NONE,
};
static struct fb_var_screeninfo au1100fb_var __devinitdata = {
.activate = FB_ACTIVATE_NOW,
.height = -1,
.width = -1,
.vmode = FB_VMODE_NONINTERLACED,
};
/* fb_blank
* Blank the screen. Depending on the mode, the screen will be
* activated with the backlight color, or deactivated
*/
static int au1100fb_fb_blank(int blank_mode, struct fb_info *fbi)
{
struct au1100fb_device *fbdev = to_au1100fb_device(fbi);
print_dbg("fb_blank %d %p", blank_mode, fbi);
switch (blank_mode) {
case VESA_NO_BLANKING:
/* Turn on panel */
fbdev->regs->lcd_control |= LCD_CONTROL_GO;
#ifdef CONFIG_MIPS_PB1100
if (fbdev->panel_idx == 1) {
au_writew(au_readw(PB1100_G_CONTROL)
| (PB1100_G_CONTROL_BL | PB1100_G_CONTROL_VDD),
PB1100_G_CONTROL);
}
#endif
au_sync();
break;
case VESA_VSYNC_SUSPEND:
case VESA_HSYNC_SUSPEND:
case VESA_POWERDOWN:
/* Turn off panel */
fbdev->regs->lcd_control &= ~LCD_CONTROL_GO;
#ifdef CONFIG_MIPS_PB1100
if (fbdev->panel_idx == 1) {
au_writew(au_readw(PB1100_G_CONTROL)
& ~(PB1100_G_CONTROL_BL | PB1100_G_CONTROL_VDD),
PB1100_G_CONTROL);
}
#endif
au_sync();
break;
default:
break;
}
return 0;
}
/*
* Set hardware with var settings. This will enable the controller with a specific
* mode, normally validated with the fb_check_var method
*/
int au1100fb_setmode(struct au1100fb_device *fbdev)
{
struct fb_info *info = &fbdev->info;
u32 words;
int index;
if (!fbdev)
return -EINVAL;
/* Update var-dependent FB info */
if (panel_is_active(fbdev->panel) || panel_is_color(fbdev->panel)) {
if (info->var.bits_per_pixel <= 8) {
/* palettized */
info->var.red.offset = 0;
info->var.red.length = info->var.bits_per_pixel;
info->var.red.msb_right = 0;
info->var.green.offset = 0;
info->var.green.length = info->var.bits_per_pixel;
info->var.green.msb_right = 0;
info->var.blue.offset = 0;
info->var.blue.length = info->var.bits_per_pixel;
info->var.blue.msb_right = 0;
info->var.transp.offset = 0;
info->var.transp.length = 0;
info->var.transp.msb_right = 0;
info->fix.visual = FB_VISUAL_PSEUDOCOLOR;
info->fix.line_length = info->var.xres_virtual /
(8/info->var.bits_per_pixel);
} else {
/* non-palettized */
index = (fbdev->panel->control_base & LCD_CONTROL_SBPPF_MASK) >> LCD_CONTROL_SBPPF_BIT;
info->var.red = rgb_bitfields[index][0];
info->var.green = rgb_bitfields[index][1];
info->var.blue = rgb_bitfields[index][2];
info->var.transp = rgb_bitfields[index][3];
info->fix.visual = FB_VISUAL_TRUECOLOR;
info->fix.line_length = info->var.xres_virtual << 1; /* depth=16 */
}
} else {
/* mono */
info->fix.visual = FB_VISUAL_MONO10;
info->fix.line_length = info->var.xres_virtual / 8;
}
info->screen_size = info->fix.line_length * info->var.yres_virtual;
info->var.rotate = ((fbdev->panel->control_base&LCD_CONTROL_SM_MASK) \
>> LCD_CONTROL_SM_BIT) * 90;
/* Determine BPP mode and format */
fbdev->regs->lcd_control = fbdev->panel->control_base;
fbdev->regs->lcd_horztiming = fbdev->panel->horztiming;
fbdev->regs->lcd_verttiming = fbdev->panel->verttiming;
fbdev->regs->lcd_clkcontrol = fbdev->panel->clkcontrol_base;
fbdev->regs->lcd_intenable = 0;
fbdev->regs->lcd_intstatus = 0;
fbdev->regs->lcd_dmaaddr0 = LCD_DMA_SA_N(fbdev->fb_phys);
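/* Scan-out for DMA channel 0 starts at the base of the framebuffer; channel 1
 * is only programmed below for dual-panel configurations.
 */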
if (panel_is_dual(fbdev->panel)) {
/* The second panel displays the second half of the screen if possible,
* otherwise it displays the same as the first panel */
if (info->var.yres_virtual >= (info->var.yres << 1)) {
fbdev->regs->lcd_dmaaddr1 = LCD_DMA_SA_N(fbdev->fb_phys +
(info->fix.line_length *
(info->var.yres_virtual >> 1)));
} else {
fbdev->regs->lcd_dmaaddr1 = LCD_DMA_SA_N(fbdev->fb_phys);
}
}
words = info->fix.line_length / sizeof(u32);
if (!info->var.rotate || (info->var.rotate == 180)) {
words *= info->var.yres_virtual;
if (info->var.rotate /* 180 */) {
words -= (words % 8); /* should be divisible by 8 */
}
}
fbdev->regs->lcd_words = LCD_WRD_WRDS_N(words);
fbdev->regs->lcd_pwmdiv = 0;
fbdev->regs->lcd_pwmhi = 0;
/* Resume controller */
fbdev->regs->lcd_control |= LCD_CONTROL_GO;
mdelay(10);
au1100fb_fb_blank(VESA_NO_BLANKING, info);
return 0;
}
/* fb_setcolreg
* Set color in LCD palette.
*/
int au1100fb_fb_setcolreg(unsigned regno, unsigned red, unsigned green, unsigned blue, unsigned transp, struct fb_info *fbi)
{
struct au1100fb_device *fbdev;
u32 *palette;
u32 value;
fbdev = to_au1100fb_device(fbi);
palette = fbdev->regs->lcd_pallettebase;
if (regno > (AU1100_LCD_NBR_PALETTE_ENTRIES - 1))
return -EINVAL;
if (fbi->var.grayscale) {
/* Convert color to grayscale */
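/* ITU-R BT.601 luma weights scaled by 65536: Y = 0.299 R + 0.587 G + 0.114 B */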
red = green = blue =
(19595 * red + 38470 * green + 7471 * blue) >> 16;
}
if (fbi->fix.visual == FB_VISUAL_TRUECOLOR) {
/* Place color in the pseudopalette */
/* the pseudo_palette holds only 16 entries (0..15) */
if (regno > 15)
return -EINVAL;
palette = (u32*)fbi->pseudo_palette;
red >>= (16 - fbi->var.red.length);
green >>= (16 - fbi->var.green.length);
blue >>= (16 - fbi->var.blue.length);
value = (red << fbi->var.red.offset) |
(green << fbi->var.green.offset)|
(blue << fbi->var.blue.offset);
value &= 0xFFFF;
} else if (panel_is_active(fbdev->panel)) {
/* COLOR TFT PALLETTIZED (use RGB 565) */
value = (red & 0xF800)|((green >> 5) & 0x07E0)|((blue >> 11) & 0x001F);
value &= 0xFFFF;
} else if (panel_is_color(fbdev->panel)) {
/* COLOR STN MODE */
value = (((panel_swap_rgb(fbdev->panel) ? blue : red) >> 12) & 0x000F) |
((green >> 8) & 0x00F0) |
(((panel_swap_rgb(fbdev->panel) ? red : blue) >> 4) & 0x0F00);
value &= 0xFFF;
} else {
/* MONOCHROME MODE */
value = (green >> 12) & 0x000F;
value &= 0xF;
}
palette[regno] = value;
return 0;
}
/* fb_pan_display
* Pan display in x and/or y as specified
*/
int au1100fb_fb_pan_display(struct fb_var_screeninfo *var, struct fb_info *fbi)
{
struct au1100fb_device *fbdev;
int dy;
fbdev = to_au1100fb_device(fbi);
print_dbg("fb_pan_display %p %p", var, fbi);
if (!var || !fbdev) {
return -EINVAL;
}
if (var->xoffset - fbi->var.xoffset) {
/* No support for X panning for now! */
return -EINVAL;
}
print_dbg("fb_pan_display 2 %p %p", var, fbi);
dy = var->yoffset - fbi->var.yoffset;
if (dy) {
u32 dmaaddr;
print_dbg("Panning screen of %d lines", dy);
dmaaddr = fbdev->regs->lcd_dmaaddr0;
dmaaddr += (fbi->fix.line_length * dy);
/* TODO: Wait for the current frame to finish */
fbdev->regs->lcd_dmaaddr0 = LCD_DMA_SA_N(dmaaddr);
if (panel_is_dual(fbdev->panel)) {
dmaaddr = fbdev->regs->lcd_dmaaddr1;
dmaaddr += (fbi->fix.line_length * dy);
fbdev->regs->lcd_dmaaddr1 = LCD_DMA_SA_N(dmaaddr);
}
}
print_dbg("fb_pan_display 3 %p %p", var, fbi);
return 0;
}
/* fb_rotate
* Rotate the display by this angle. This doesn't seem to be used by the core,
* but our hardware supports it, so why not implement it...
*/
void au1100fb_fb_rotate(struct fb_info *fbi, int angle)
{
struct au1100fb_device *fbdev = to_au1100fb_device(fbi);
print_dbg("fb_rotate %p %d", fbi, angle);
if (fbdev && (angle > 0) && !(angle % 90)) {
fbdev->regs->lcd_control &= ~LCD_CONTROL_GO;
fbdev->regs->lcd_control &= ~(LCD_CONTROL_SM_MASK);
fbdev->regs->lcd_control |= ((angle/90) << LCD_CONTROL_SM_BIT);
fbdev->regs->lcd_control |= LCD_CONTROL_GO;
}
}
/* fb_mmap
* Map video memory in user space. We don't use the generic fb_mmap method mainly
* to allow the use of the TLB streaming flag (CCA=6)
*/
int au1100fb_fb_mmap(struct fb_info *fbi, struct vm_area_struct *vma)
{
struct au1100fb_device *fbdev;
fbdev = to_au1100fb_device(fbi);
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
pgprot_val(vma->vm_page_prot) |= (6 << 9); //CCA=6
return vm_iomap_memory(vma, fbdev->fb_phys, fbdev->fb_len);
}
static struct fb_ops au1100fb_ops =
{
.owner = THIS_MODULE,
.fb_setcolreg = au1100fb_fb_setcolreg,
.fb_blank = au1100fb_fb_blank,
.fb_pan_display = au1100fb_fb_pan_display,
.fb_fillrect = cfb_fillrect,
.fb_copyarea = cfb_copyarea,
.fb_imageblit = cfb_imageblit,
.fb_rotate = au1100fb_fb_rotate,
.fb_mmap = au1100fb_fb_mmap,
};
/*-------------------------------------------------------------------------*/
static int au1100fb_setup(struct au1100fb_device *fbdev)
{
char *this_opt, *options;
int num_panels = ARRAY_SIZE(known_lcd_panels);
if (num_panels <= 0) {
print_err("No LCD panels supported by driver!");
return -ENODEV;
}
if (fb_get_options(DRIVER_NAME, &options))
return -ENODEV;
if (!options)
return -ENODEV;
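/* Boot options are comma separated; the only one recognized is
 * "panel:<name>", which is matched against the known_lcd_panels table.
 */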
while ((this_opt = strsep(&options, ",")) != NULL) {
/* Panel option */
if (!strncmp(this_opt, "panel:", 6)) {
int i;
this_opt += 6;
for (i = 0; i < num_panels; i++) {
if (!strncmp(this_opt, known_lcd_panels[i].name,
strlen(this_opt))) {
fbdev->panel = &known_lcd_panels[i];
fbdev->panel_idx = i;
break;
}
}
if (i >= num_panels) {
print_warn("Panel '%s' not supported!", this_opt);
return -ENODEV;
}
}
/* Unsupported option */
else
print_warn("Unsupported option \"%s\"", this_opt);
}
print_info("Panel=%s", fbdev->panel->name);
return 0;
}
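/*
 * Usage sketch (not from the driver sources): the panel is normally chosen
 * through the fbdev option string that fb_get_options(DRIVER_NAME, ...) above
 * parses from the kernel command line, e.g. with a hypothetical panel name:
 *
 * video=<DRIVER_NAME>:panel:<name from known_lcd_panels[]>
 */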
static int __devinit au1100fb_drv_probe(struct platform_device *dev)
{
struct au1100fb_device *fbdev = NULL;
struct resource *regs_res;
unsigned long page;
u32 sys_clksrc;
/* Allocate new device private */
fbdev = devm_kzalloc(&dev->dev, sizeof(struct au1100fb_device),
GFP_KERNEL);
if (!fbdev) {
print_err("fail to allocate device private record");
return -ENOMEM;
}
if (au1100fb_setup(fbdev))
goto failed;
platform_set_drvdata(dev, (void *)fbdev);
/* Allocate region for our registers and map them */
regs_res = platform_get_resource(dev, IORESOURCE_MEM, 0);
if (!regs_res) {
print_err("fail to retrieve registers resource");
return -EFAULT;
}
au1100fb_fix.mmio_start = regs_res->start;
au1100fb_fix.mmio_len = resource_size(regs_res);
if (!devm_request_mem_region(&dev->dev,
au1100fb_fix.mmio_start,
au1100fb_fix.mmio_len,
DRIVER_NAME)) {
print_err("fail to lock memory region at 0x%08lx",
au1100fb_fix.mmio_start);
return -EBUSY;
}
fbdev->regs = (struct au1100fb_regs*)KSEG1ADDR(au1100fb_fix.mmio_start);
print_dbg("Register memory map at %p", fbdev->regs);
print_dbg("phys=0x%08x, size=%d", fbdev->regs_phys, fbdev->regs_len);
/* Allocate the framebuffer to the maximum screen size * nbr of video buffers */
fbdev->fb_len = fbdev->panel->xres * fbdev->panel->yres *
(fbdev->panel->bpp >> 3) * AU1100FB_NBR_VIDEO_BUFFERS;
fbdev->fb_mem = dmam_alloc_coherent(&dev->dev,
PAGE_ALIGN(fbdev->fb_len),
&fbdev->fb_phys, GFP_KERNEL);
if (!fbdev->fb_mem) {
print_err("fail to allocate frambuffer (size: %dK))",
fbdev->fb_len / 1024);
return -ENOMEM;
}
au1100fb_fix.smem_start = fbdev->fb_phys;
au1100fb_fix.smem_len = fbdev->fb_len;
/*
* Set page reserved so that mmap will work. This is necessary
* since we'll be remapping normal memory.
*/
for (page = (unsigned long)fbdev->fb_mem;
page < PAGE_ALIGN((unsigned long)fbdev->fb_mem + fbdev->fb_len);
page += PAGE_SIZE) {
#ifdef CONFIG_DMA_NONCOHERENT
SetPageReserved(virt_to_page(CAC_ADDR((void *)page)));
#else
SetPageReserved(virt_to_page(page));
#endif
}
print_dbg("Framebuffer memory map at %p", fbdev->fb_mem);
print_dbg("phys=0x%08x, size=%dK", fbdev->fb_phys, fbdev->fb_len / 1024);
/* Setup LCD clock to AUX (48 MHz) */
sys_clksrc = au_readl(SYS_CLKSRC) & ~(SYS_CS_ML_MASK | SYS_CS_DL | SYS_CS_CL);
au_writel((sys_clksrc | (1 << SYS_CS_ML_BIT)), SYS_CLKSRC);
/* load the panel info into the var struct */
au1100fb_var.bits_per_pixel = fbdev->panel->bpp;
au1100fb_var.xres = fbdev->panel->xres;
au1100fb_var.xres_virtual = au1100fb_var.xres;
au1100fb_var.yres = fbdev->panel->yres;
au1100fb_var.yres_virtual = au1100fb_var.yres;
fbdev->info.screen_base = fbdev->fb_mem;
fbdev->info.fbops = &au1100fb_ops;
fbdev->info.fix = au1100fb_fix;
fbdev->info.pseudo_palette =
devm_kzalloc(&dev->dev, sizeof(u32) * 16, GFP_KERNEL);
if (!fbdev->info.pseudo_palette)
return -ENOMEM;
if (fb_alloc_cmap(&fbdev->info.cmap, AU1100_LCD_NBR_PALETTE_ENTRIES, 0) < 0) {
print_err("Fail to allocate colormap (%d entries)",
AU1100_LCD_NBR_PALETTE_ENTRIES);
return -EFAULT;
}
fbdev->info.var = au1100fb_var;
/* Set h/w registers */
au1100fb_setmode(fbdev);
/* Register new framebuffer */
if (register_framebuffer(&fbdev->info) < 0) {
print_err("cannot register new framebuffer");
goto failed;
}
return 0;
failed:
/* fb_mem was allocated with dmam_alloc_coherent(), so the managed-device
 * framework releases it automatically; it must not be freed here. */
if (fbdev->info.cmap.len != 0) {
fb_dealloc_cmap(&fbdev->info.cmap);
}
platform_set_drvdata(dev, NULL);
return -ENODEV;
}
int au1100fb_drv_remove(struct platform_device *dev)
{
struct au1100fb_device *fbdev = NULL;
if (!dev)
return -ENODEV;
fbdev = (struct au1100fb_device *) platform_get_drvdata(dev);
#if !defined(CONFIG_FRAMEBUFFER_CONSOLE) && defined(CONFIG_LOGO)
au1100fb_fb_blank(VESA_POWERDOWN, &fbdev->info);
#endif
fbdev->regs->lcd_control &= ~LCD_CONTROL_GO;
/* Clean up all probe data */
unregister_framebuffer(&fbdev->info);
fb_dealloc_cmap(&fbdev->info.cmap);
return 0;
}
#ifdef CONFIG_PM
static u32 sys_clksrc;
static struct au1100fb_regs fbregs;
int au1100fb_drv_suspend(struct platform_device *dev, pm_message_t state)
{
struct au1100fb_device *fbdev = platform_get_drvdata(dev);
if (!fbdev)
return 0;
/* Save the clock source state */
sys_clksrc = au_readl(SYS_CLKSRC);
/* Blank the LCD */
au1100fb_fb_blank(VESA_POWERDOWN, &fbdev->info);
/* Stop LCD clocking */
au_writel(sys_clksrc & ~SYS_CS_ML_MASK, SYS_CLKSRC);
memcpy(&fbregs, fbdev->regs, sizeof(struct au1100fb_regs));
return 0;
}
int au1100fb_drv_resume(struct platform_device *dev)
{
struct au1100fb_device *fbdev = platform_get_drvdata(dev);
if (!fbdev)
return 0;
memcpy(fbdev->regs, &fbregs, sizeof(struct au1100fb_regs));
/* Restart LCD clocking */
au_writel(sys_clksrc, SYS_CLKSRC);
/* Unblank the LCD */
au1100fb_fb_blank(VESA_NO_BLANKING, &fbdev->info);
return 0;
}
#else
#define au1100fb_drv_suspend NULL
#define au1100fb_drv_resume NULL
#endif
static struct platform_driver au1100fb_driver = {
.driver = {
.name = "au1100-lcd",
.owner = THIS_MODULE,
},
.probe = au1100fb_drv_probe,
.remove = au1100fb_drv_remove,
.suspend = au1100fb_drv_suspend,
.resume = au1100fb_drv_resume,
};
static int __init au1100fb_load(void)
{
return platform_driver_register(&au1100fb_driver);
}
static void __exit au1100fb_unload(void)
{
platform_driver_unregister(&au1100fb_driver);
}
module_init(au1100fb_load);
module_exit(au1100fb_unload);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");
| gpl-2.0 |
W4TCH0UT/zz_sprout | drivers/net/ethernet/8390/zorro8390.c | 2477 | 12837 | /*
* Amiga Linux/m68k and Linux/PPC Zorro NS8390 Ethernet Driver
*
* (C) Copyright 1998-2000 by some Elitist 680x0 Users(TM)
*
* ---------------------------------------------------------------------------
*
* This program is based on all the other NE2000 drivers for Linux
*
* ---------------------------------------------------------------------------
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file COPYING in the main directory of the Linux
* distribution for more details.
*
* ---------------------------------------------------------------------------
*
* The Ariadne II and X-Surf are Zorro-II boards containing Realtek RTL8019AS
* Ethernet Controllers.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/zorro.h>
#include <linux/jiffies.h>
#include <asm/irq.h>
#include <asm/amigaints.h>
#include <asm/amigahw.h>
#define EI_SHIFT(x) (ei_local->reg_offset[x])
#define ei_inb(port) in_8(port)
#define ei_outb(val, port) out_8(port, val)
#define ei_inb_p(port) in_8(port)
#define ei_outb_p(val, port) out_8(port, val)
static const char version[] =
"8390.c:v1.10cvs 9/23/94 Donald Becker (becker@cesdis.gsfc.nasa.gov)\n";
#include "lib8390.c"
#define DRV_NAME "zorro8390"
#define NE_BASE (dev->base_addr)
#define NE_CMD (0x00 * 2)
#define NE_DATAPORT (0x10 * 2) /* NatSemi-defined port window offset */
#define NE_RESET (0x1f * 2) /* Issue a read to reset,
* a write to clear. */
#define NE_IO_EXTENT (0x20 * 2)
#define NE_EN0_ISR (0x07 * 2)
#define NE_EN0_DCFG (0x0e * 2)
#define NE_EN0_RSARLO (0x08 * 2)
#define NE_EN0_RSARHI (0x09 * 2)
#define NE_EN0_RCNTLO (0x0a * 2)
#define NE_EN0_RXCR (0x0c * 2)
#define NE_EN0_TXCR (0x0d * 2)
#define NE_EN0_RCNTHI (0x0b * 2)
#define NE_EN0_IMR (0x0f * 2)
#define NESM_START_PG 0x40 /* First page of TX buffer */
#define NESM_STOP_PG 0x80 /* Last page +1 of RX ring */
#define WORDSWAP(a) ((((a) >> 8) & 0xff) | ((a) << 8))
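/* Worked example: a count of 0x1234 read from the ring becomes 0x3412 once
 * WORDSWAP()'s result is stored back into the 16-bit count field (the
 * unmasked high bits of the left shift are truncated by that assignment).
 */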
static struct card_info {
zorro_id id;
const char *name;
unsigned int offset;
} cards[] = {
{ ZORRO_PROD_VILLAGE_TRONIC_ARIADNE2, "Ariadne II", 0x0600 },
{ ZORRO_PROD_INDIVIDUAL_COMPUTERS_X_SURF, "X-Surf", 0x8600 },
};
/* Hard reset the card. This used to pause for the same period that an
* 8390 reset command required, but that shouldn't be necessary.
*/
static void zorro8390_reset_8390(struct net_device *dev)
{
unsigned long reset_start_time = jiffies;
if (ei_debug > 1)
netdev_dbg(dev, "resetting - t=%ld...\n", jiffies);
z_writeb(z_readb(NE_BASE + NE_RESET), NE_BASE + NE_RESET);
ei_status.txing = 0;
ei_status.dmaing = 0;
/* This check _should_not_ be necessary, omit eventually. */
while ((z_readb(NE_BASE + NE_EN0_ISR) & ENISR_RESET) == 0)
if (time_after(jiffies, reset_start_time + 2 * HZ / 100)) {
netdev_warn(dev, "%s: did not complete\n", __func__);
break;
}
z_writeb(ENISR_RESET, NE_BASE + NE_EN0_ISR); /* Ack intr */
}
/* Grab the 8390 specific header. Similar to the block_input routine, but
* we don't need to be concerned with ring wrap as the header will be at
* the start of a page, so we optimize accordingly.
*/
static void zorro8390_get_8390_hdr(struct net_device *dev,
struct e8390_pkt_hdr *hdr, int ring_page)
{
int nic_base = dev->base_addr;
int cnt;
short *ptrs;
/* This *shouldn't* happen.
* If it does, it's the last thing you'll see
*/
if (ei_status.dmaing) {
netdev_err(dev, "%s: DMAing conflict [DMAstat:%d][irqlock:%d]\n",
__func__, ei_status.dmaing, ei_status.irqlock);
return;
}
ei_status.dmaing |= 0x01;
z_writeb(E8390_NODMA + E8390_PAGE0 + E8390_START, nic_base + NE_CMD);
z_writeb(ENISR_RDC, nic_base + NE_EN0_ISR);
z_writeb(sizeof(struct e8390_pkt_hdr), nic_base + NE_EN0_RCNTLO);
z_writeb(0, nic_base + NE_EN0_RCNTHI);
z_writeb(0, nic_base + NE_EN0_RSARLO); /* On page boundary */
z_writeb(ring_page, nic_base + NE_EN0_RSARHI);
z_writeb(E8390_RREAD+E8390_START, nic_base + NE_CMD);
ptrs = (short *)hdr;
for (cnt = 0; cnt < sizeof(struct e8390_pkt_hdr) >> 1; cnt++)
*ptrs++ = z_readw(NE_BASE + NE_DATAPORT);
z_writeb(ENISR_RDC, nic_base + NE_EN0_ISR); /* Ack intr */
hdr->count = WORDSWAP(hdr->count);
ei_status.dmaing &= ~0x01;
}
/* Block input and output, similar to the Crynwr packet driver.
* If you are porting to a new ethercard, look at the packet driver source
* for hints. The NEx000 doesn't share the on-board packet memory --
* you have to put the packet out through the "remote DMA" dataport
* using z_writeb.
*/
static void zorro8390_block_input(struct net_device *dev, int count,
struct sk_buff *skb, int ring_offset)
{
int nic_base = dev->base_addr;
char *buf = skb->data;
short *ptrs;
int cnt;
/* This *shouldn't* happen.
* If it does, it's the last thing you'll see
*/
if (ei_status.dmaing) {
netdev_err(dev, "%s: DMAing conflict [DMAstat:%d][irqlock:%d]\n",
__func__, ei_status.dmaing, ei_status.irqlock);
return;
}
ei_status.dmaing |= 0x01;
z_writeb(E8390_NODMA + E8390_PAGE0 + E8390_START, nic_base + NE_CMD);
z_writeb(ENISR_RDC, nic_base + NE_EN0_ISR);
z_writeb(count & 0xff, nic_base + NE_EN0_RCNTLO);
z_writeb(count >> 8, nic_base + NE_EN0_RCNTHI);
z_writeb(ring_offset & 0xff, nic_base + NE_EN0_RSARLO);
z_writeb(ring_offset >> 8, nic_base + NE_EN0_RSARHI);
z_writeb(E8390_RREAD+E8390_START, nic_base + NE_CMD);
ptrs = (short *)buf;
for (cnt = 0; cnt < count >> 1; cnt++)
*ptrs++ = z_readw(NE_BASE + NE_DATAPORT);
if (count & 0x01)
buf[count - 1] = z_readb(NE_BASE + NE_DATAPORT);
z_writeb(ENISR_RDC, nic_base + NE_EN0_ISR); /* Ack intr */
ei_status.dmaing &= ~0x01;
}
static void zorro8390_block_output(struct net_device *dev, int count,
const unsigned char *buf,
const int start_page)
{
int nic_base = NE_BASE;
unsigned long dma_start;
short *ptrs;
int cnt;
/* Round the count up for word writes. Do we need to do this?
* What effect will an odd byte count have on the 8390?
* I should check someday.
*/
if (count & 0x01)
count++;
/* This *shouldn't* happen.
* If it does, it's the last thing you'll see
*/
if (ei_status.dmaing) {
netdev_err(dev, "%s: DMAing conflict [DMAstat:%d][irqlock:%d]\n",
__func__, ei_status.dmaing, ei_status.irqlock);
return;
}
ei_status.dmaing |= 0x01;
/* We should already be in page 0, but to be safe... */
z_writeb(E8390_PAGE0+E8390_START+E8390_NODMA, nic_base + NE_CMD);
z_writeb(ENISR_RDC, nic_base + NE_EN0_ISR);
/* Now the normal output. */
z_writeb(count & 0xff, nic_base + NE_EN0_RCNTLO);
z_writeb(count >> 8, nic_base + NE_EN0_RCNTHI);
z_writeb(0x00, nic_base + NE_EN0_RSARLO);
z_writeb(start_page, nic_base + NE_EN0_RSARHI);
z_writeb(E8390_RWRITE + E8390_START, nic_base + NE_CMD);
ptrs = (short *)buf;
for (cnt = 0; cnt < count >> 1; cnt++)
z_writew(*ptrs++, NE_BASE + NE_DATAPORT);
dma_start = jiffies;
while ((z_readb(NE_BASE + NE_EN0_ISR) & ENISR_RDC) == 0)
if (time_after(jiffies, dma_start + 2 * HZ / 100)) {
/* 20ms */
netdev_err(dev, "timeout waiting for Tx RDC\n");
zorro8390_reset_8390(dev);
__NS8390_init(dev, 1);
break;
}
z_writeb(ENISR_RDC, nic_base + NE_EN0_ISR); /* Ack intr */
ei_status.dmaing &= ~0x01;
}
static int zorro8390_open(struct net_device *dev)
{
__ei_open(dev);
return 0;
}
static int zorro8390_close(struct net_device *dev)
{
if (ei_debug > 1)
netdev_dbg(dev, "Shutting down ethercard\n");
__ei_close(dev);
return 0;
}
static void zorro8390_remove_one(struct zorro_dev *z)
{
struct net_device *dev = zorro_get_drvdata(z);
unregister_netdev(dev);
free_irq(IRQ_AMIGA_PORTS, dev);
release_mem_region(ZTWO_PADDR(dev->base_addr), NE_IO_EXTENT * 2);
free_netdev(dev);
}
static struct zorro_device_id zorro8390_zorro_tbl[] = {
{ ZORRO_PROD_VILLAGE_TRONIC_ARIADNE2, },
{ ZORRO_PROD_INDIVIDUAL_COMPUTERS_X_SURF, },
{ 0 }
};
MODULE_DEVICE_TABLE(zorro, zorro8390_zorro_tbl);
static const struct net_device_ops zorro8390_netdev_ops = {
.ndo_open = zorro8390_open,
.ndo_stop = zorro8390_close,
.ndo_start_xmit = __ei_start_xmit,
.ndo_tx_timeout = __ei_tx_timeout,
.ndo_get_stats = __ei_get_stats,
.ndo_set_rx_mode = __ei_set_multicast_list,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = eth_mac_addr,
.ndo_change_mtu = eth_change_mtu,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = __ei_poll,
#endif
};
static int zorro8390_init(struct net_device *dev, unsigned long board,
const char *name, unsigned long ioaddr)
{
int i;
int err;
unsigned char SA_prom[32];
int start_page, stop_page;
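/* The RTL8019AS registers are word-spaced on the Zorro bus, so each of the
 * sixteen 8390 registers sits at twice its nominal offset (compare the
 * "* 2" in the NE_* defines above).
 */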
static u32 zorro8390_offsets[16] = {
0x00, 0x02, 0x04, 0x06, 0x08, 0x0a, 0x0c, 0x0e,
0x10, 0x12, 0x14, 0x16, 0x18, 0x1a, 0x1c, 0x1e,
};
/* Reset card. Who knows what dain-bramaged state it was left in. */
{
unsigned long reset_start_time = jiffies;
z_writeb(z_readb(ioaddr + NE_RESET), ioaddr + NE_RESET);
while ((z_readb(ioaddr + NE_EN0_ISR) & ENISR_RESET) == 0)
if (time_after(jiffies,
reset_start_time + 2 * HZ / 100)) {
netdev_warn(dev, "not found (no reset ack)\n");
return -ENODEV;
}
z_writeb(0xff, ioaddr + NE_EN0_ISR); /* Ack all intr. */
}
/* Read the 16 bytes of station address PROM.
* We must first initialize registers,
* similar to NS8390_init(eifdev, 0).
* We can't reliably read the SAPROM address without this.
* (I learned the hard way!).
*/
{
static const struct {
u32 value;
u32 offset;
} program_seq[] = {
{E8390_NODMA + E8390_PAGE0 + E8390_STOP, NE_CMD},
/* Select page 0 */
{0x48, NE_EN0_DCFG}, /* 0x48: Set byte-wide access */
{0x00, NE_EN0_RCNTLO}, /* Clear the count regs */
{0x00, NE_EN0_RCNTHI},
{0x00, NE_EN0_IMR}, /* Mask completion irq */
{0xFF, NE_EN0_ISR},
{E8390_RXOFF, NE_EN0_RXCR}, /* 0x20 Set to monitor */
{E8390_TXOFF, NE_EN0_TXCR}, /* 0x02 and loopback mode */
{32, NE_EN0_RCNTLO},
{0x00, NE_EN0_RCNTHI},
{0x00, NE_EN0_RSARLO}, /* DMA starting at 0x0000 */
{0x00, NE_EN0_RSARHI},
{E8390_RREAD + E8390_START, NE_CMD},
};
for (i = 0; i < ARRAY_SIZE(program_seq); i++)
z_writeb(program_seq[i].value,
ioaddr + program_seq[i].offset);
}
for (i = 0; i < 16; i++) {
SA_prom[i] = z_readb(ioaddr + NE_DATAPORT);
(void)z_readb(ioaddr + NE_DATAPORT);
}
/* We must set the 8390 for word mode. */
z_writeb(0x49, ioaddr + NE_EN0_DCFG);
start_page = NESM_START_PG;
stop_page = NESM_STOP_PG;
dev->base_addr = ioaddr;
dev->irq = IRQ_AMIGA_PORTS;
/* Install the Interrupt handler */
i = request_irq(IRQ_AMIGA_PORTS, __ei_interrupt,
IRQF_SHARED, DRV_NAME, dev);
if (i)
return i;
for (i = 0; i < ETH_ALEN; i++)
dev->dev_addr[i] = SA_prom[i];
pr_debug("Found ethernet address: %pM\n", dev->dev_addr);
ei_status.name = name;
ei_status.tx_start_page = start_page;
ei_status.stop_page = stop_page;
ei_status.word16 = 1;
ei_status.rx_start_page = start_page + TX_PAGES;
ei_status.reset_8390 = zorro8390_reset_8390;
ei_status.block_input = zorro8390_block_input;
ei_status.block_output = zorro8390_block_output;
ei_status.get_8390_hdr = zorro8390_get_8390_hdr;
ei_status.reg_offset = zorro8390_offsets;
dev->netdev_ops = &zorro8390_netdev_ops;
__NS8390_init(dev, 0);
err = register_netdev(dev);
if (err) {
free_irq(IRQ_AMIGA_PORTS, dev);
return err;
}
netdev_info(dev, "%s at 0x%08lx, Ethernet Address %pM\n",
name, board, dev->dev_addr);
return 0;
}
static int zorro8390_init_one(struct zorro_dev *z,
const struct zorro_device_id *ent)
{
struct net_device *dev;
unsigned long board, ioaddr;
int err, i;
for (i = ARRAY_SIZE(cards) - 1; i >= 0; i--)
if (z->id == cards[i].id)
break;
if (i < 0)
return -ENODEV;
board = z->resource.start;
ioaddr = board + cards[i].offset;
dev = ____alloc_ei_netdev(0);
if (!dev)
return -ENOMEM;
if (!request_mem_region(ioaddr, NE_IO_EXTENT * 2, DRV_NAME)) {
free_netdev(dev);
return -EBUSY;
}
err = zorro8390_init(dev, board, cards[i].name, ZTWO_VADDR(ioaddr));
if (err) {
release_mem_region(ioaddr, NE_IO_EXTENT * 2);
free_netdev(dev);
return err;
}
zorro_set_drvdata(z, dev);
return 0;
}
static struct zorro_driver zorro8390_driver = {
.name = "zorro8390",
.id_table = zorro8390_zorro_tbl,
.probe = zorro8390_init_one,
.remove = zorro8390_remove_one,
};
static int __init zorro8390_init_module(void)
{
return zorro_register_driver(&zorro8390_driver);
}
static void __exit zorro8390_cleanup_module(void)
{
zorro_unregister_driver(&zorro8390_driver);
}
module_init(zorro8390_init_module);
module_exit(zorro8390_cleanup_module);
MODULE_LICENSE("GPL");
| gpl-2.0 |
alwaysadeel/AJ-Kernel | net/compat-wireless/drivers/net/wireless/p54/p54pci.c | 2733 | 16734 |
/*
* Linux device driver for PCI based Prism54
*
* Copyright (c) 2006, Michael Wu <flamingice@sourmilk.net>
* Copyright (c) 2008, Christian Lamparter <chunkeey@web.de>
*
* Based on the islsm (softmac prism54) driver, which is:
* Copyright 2004-2006 Jean-Baptiste Note <jean-baptiste.note@m4x.org>, et al.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/firmware.h>
#include <linux/etherdevice.h>
#include <linux/delay.h>
#include <linux/completion.h>
#include <net/mac80211.h>
#include "p54.h"
#include "lmac.h"
#include "p54pci.h"
MODULE_AUTHOR("Michael Wu <flamingice@sourmilk.net>");
MODULE_DESCRIPTION("Prism54 PCI wireless driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("prism54pci");
MODULE_FIRMWARE("isl3886pci");
static DEFINE_PCI_DEVICE_TABLE(p54p_table) = {
/* Intersil PRISM Duette/Prism GT Wireless LAN adapter */
{ PCI_DEVICE(0x1260, 0x3890) },
/* 3COM 3CRWE154G72 Wireless LAN adapter */
{ PCI_DEVICE(0x10b7, 0x6001) },
/* Intersil PRISM Indigo Wireless LAN adapter */
{ PCI_DEVICE(0x1260, 0x3877) },
/* Intersil PRISM Javelin/Xbow Wireless LAN adapter */
{ PCI_DEVICE(0x1260, 0x3886) },
/* Intersil PRISM Xbow Wireless LAN adapter (Symbol AP-300) */
{ PCI_DEVICE(0x1260, 0xffff) },
{ },
};
MODULE_DEVICE_TABLE(pci, p54p_table);
static int p54p_upload_firmware(struct ieee80211_hw *dev)
{
struct p54p_priv *priv = dev->priv;
__le32 reg;
int err;
__le32 *data;
u32 remains, left, device_addr;
P54P_WRITE(int_enable, cpu_to_le32(0));
P54P_READ(int_enable);
udelay(10);
reg = P54P_READ(ctrl_stat);
reg &= cpu_to_le32(~ISL38XX_CTRL_STAT_RESET);
reg &= cpu_to_le32(~ISL38XX_CTRL_STAT_RAMBOOT);
P54P_WRITE(ctrl_stat, reg);
P54P_READ(ctrl_stat);
udelay(10);
reg |= cpu_to_le32(ISL38XX_CTRL_STAT_RESET);
P54P_WRITE(ctrl_stat, reg);
wmb();
udelay(10);
reg &= cpu_to_le32(~ISL38XX_CTRL_STAT_RESET);
P54P_WRITE(ctrl_stat, reg);
wmb();
/* wait for the firmware to reset properly */
mdelay(10);
err = p54_parse_firmware(dev, priv->firmware);
if (err)
return err;
if (priv->common.fw_interface != FW_LM86) {
dev_err(&priv->pdev->dev, "wrong firmware, "
"please get a LM86(PCI) firmware a try again.\n");
return -EINVAL;
}
data = (__le32 *) priv->firmware->data;
remains = priv->firmware->size;
device_addr = ISL38XX_DEV_FIRMWARE_ADDR;
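/* The device exposes a 4 KiB direct-memory window: re-point it via
 * direct_mem_base for each 0x1000-byte chunk of firmware, then copy the
 * chunk through direct_mem_win[] one 32-bit word at a time.
 */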
while (remains) {
u32 i = 0;
left = min((u32)0x1000, remains);
P54P_WRITE(direct_mem_base, cpu_to_le32(device_addr));
P54P_READ(int_enable);
device_addr += 0x1000;
while (i < left) {
P54P_WRITE(direct_mem_win[i], *data++);
i += sizeof(u32);
}
remains -= left;
P54P_READ(int_enable);
}
reg = P54P_READ(ctrl_stat);
reg &= cpu_to_le32(~ISL38XX_CTRL_STAT_CLKRUN);
reg &= cpu_to_le32(~ISL38XX_CTRL_STAT_RESET);
reg |= cpu_to_le32(ISL38XX_CTRL_STAT_RAMBOOT);
P54P_WRITE(ctrl_stat, reg);
P54P_READ(ctrl_stat);
udelay(10);
reg |= cpu_to_le32(ISL38XX_CTRL_STAT_RESET);
P54P_WRITE(ctrl_stat, reg);
wmb();
udelay(10);
reg &= cpu_to_le32(~ISL38XX_CTRL_STAT_RESET);
P54P_WRITE(ctrl_stat, reg);
wmb();
udelay(10);
/* wait for the firmware to boot properly */
mdelay(100);
return 0;
}
static void p54p_refill_rx_ring(struct ieee80211_hw *dev,
int ring_index, struct p54p_desc *ring, u32 ring_limit,
struct sk_buff **rx_buf, u32 index)
{
struct p54p_priv *priv = dev->priv;
struct p54p_ring_control *ring_control = priv->ring_control;
u32 limit, idx, i;
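/* host_idx is where the host will place its next buffer and "index" is how
 * far the device has consumed the ring, so ring_limit - (idx - index) slots
 * are free; the loop below refills all but one of them.
 */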
idx = le32_to_cpu(ring_control->host_idx[ring_index]);
limit = idx;
limit -= index;
limit = ring_limit - limit;
i = idx % ring_limit;
while (limit-- > 1) {
struct p54p_desc *desc = &ring[i];
if (!desc->host_addr) {
struct sk_buff *skb;
dma_addr_t mapping;
skb = dev_alloc_skb(priv->common.rx_mtu + 32);
if (!skb)
break;
mapping = pci_map_single(priv->pdev,
skb_tail_pointer(skb),
priv->common.rx_mtu + 32,
PCI_DMA_FROMDEVICE);
if (pci_dma_mapping_error(priv->pdev, mapping)) {
dev_kfree_skb_any(skb);
dev_err(&priv->pdev->dev,
"RX DMA Mapping error\n");
break;
}
desc->host_addr = cpu_to_le32(mapping);
desc->device_addr = 0; // FIXME: necessary?
desc->len = cpu_to_le16(priv->common.rx_mtu + 32);
desc->flags = 0;
rx_buf[i] = skb;
}
i++;
idx++;
i %= ring_limit;
}
wmb();
ring_control->host_idx[ring_index] = cpu_to_le32(idx);
}
static void p54p_check_rx_ring(struct ieee80211_hw *dev, u32 *index,
int ring_index, struct p54p_desc *ring, u32 ring_limit,
struct sk_buff **rx_buf)
{
struct p54p_priv *priv = dev->priv;
struct p54p_ring_control *ring_control = priv->ring_control;
struct p54p_desc *desc;
u32 idx, i;
i = (*index) % ring_limit;
(*index) = idx = le32_to_cpu(ring_control->device_idx[ring_index]);
idx %= ring_limit;
while (i != idx) {
u16 len;
struct sk_buff *skb;
dma_addr_t dma_addr;
desc = &ring[i];
len = le16_to_cpu(desc->len);
skb = rx_buf[i];
if (!skb) {
i++;
i %= ring_limit;
continue;
}
if (unlikely(len > priv->common.rx_mtu)) {
if (net_ratelimit())
dev_err(&priv->pdev->dev, "rx'd frame size "
"exceeds length threshold.\n");
len = priv->common.rx_mtu;
}
dma_addr = le32_to_cpu(desc->host_addr);
pci_dma_sync_single_for_cpu(priv->pdev, dma_addr,
priv->common.rx_mtu + 32, PCI_DMA_FROMDEVICE);
skb_put(skb, len);
if (p54_rx(dev, skb)) {
pci_unmap_single(priv->pdev, dma_addr,
priv->common.rx_mtu + 32, PCI_DMA_FROMDEVICE);
rx_buf[i] = NULL;
desc->host_addr = cpu_to_le32(0);
} else {
skb_trim(skb, 0);
pci_dma_sync_single_for_device(priv->pdev, dma_addr,
priv->common.rx_mtu + 32, PCI_DMA_FROMDEVICE);
desc->len = cpu_to_le16(priv->common.rx_mtu + 32);
}
i++;
i %= ring_limit;
}
p54p_refill_rx_ring(dev, ring_index, ring, ring_limit, rx_buf, *index);
}
static void p54p_check_tx_ring(struct ieee80211_hw *dev, u32 *index,
int ring_index, struct p54p_desc *ring, u32 ring_limit,
struct sk_buff **tx_buf)
{
struct p54p_priv *priv = dev->priv;
struct p54p_ring_control *ring_control = priv->ring_control;
struct p54p_desc *desc;
struct sk_buff *skb;
u32 idx, i;
i = (*index) % ring_limit;
(*index) = idx = le32_to_cpu(ring_control->device_idx[ring_index]);
idx %= ring_limit;
while (i != idx) {
desc = &ring[i];
skb = tx_buf[i];
tx_buf[i] = NULL;
pci_unmap_single(priv->pdev, le32_to_cpu(desc->host_addr),
le16_to_cpu(desc->len), PCI_DMA_TODEVICE);
desc->host_addr = 0;
desc->device_addr = 0;
desc->len = 0;
desc->flags = 0;
if (skb && FREE_AFTER_TX(skb))
p54_free_skb(dev, skb);
i++;
i %= ring_limit;
}
}
static void p54p_tasklet(unsigned long dev_id)
{
struct ieee80211_hw *dev = (struct ieee80211_hw *)dev_id;
struct p54p_priv *priv = dev->priv;
struct p54p_ring_control *ring_control = priv->ring_control;
p54p_check_tx_ring(dev, &priv->tx_idx_mgmt, 3, ring_control->tx_mgmt,
ARRAY_SIZE(ring_control->tx_mgmt),
priv->tx_buf_mgmt);
p54p_check_tx_ring(dev, &priv->tx_idx_data, 1, ring_control->tx_data,
ARRAY_SIZE(ring_control->tx_data),
priv->tx_buf_data);
p54p_check_rx_ring(dev, &priv->rx_idx_mgmt, 2, ring_control->rx_mgmt,
ARRAY_SIZE(ring_control->rx_mgmt), priv->rx_buf_mgmt);
p54p_check_rx_ring(dev, &priv->rx_idx_data, 0, ring_control->rx_data,
ARRAY_SIZE(ring_control->rx_data), priv->rx_buf_data);
wmb();
P54P_WRITE(dev_int, cpu_to_le32(ISL38XX_DEV_INT_UPDATE));
}
static irqreturn_t p54p_interrupt(int irq, void *dev_id)
{
struct ieee80211_hw *dev = dev_id;
struct p54p_priv *priv = dev->priv;
__le32 reg;
reg = P54P_READ(int_ident);
if (unlikely(reg == cpu_to_le32(0xFFFFFFFF))) {
goto out;
}
P54P_WRITE(int_ack, reg);
reg &= P54P_READ(int_enable);
if (reg & cpu_to_le32(ISL38XX_INT_IDENT_UPDATE))
tasklet_schedule(&priv->tasklet);
else if (reg & cpu_to_le32(ISL38XX_INT_IDENT_INIT))
complete(&priv->boot_comp);
out:
return reg ? IRQ_HANDLED : IRQ_NONE;
}
static void p54p_tx(struct ieee80211_hw *dev, struct sk_buff *skb)
{
unsigned long flags;
struct p54p_priv *priv = dev->priv;
struct p54p_ring_control *ring_control = priv->ring_control;
struct p54p_desc *desc;
dma_addr_t mapping;
u32 idx, i;
spin_lock_irqsave(&priv->lock, flags);
idx = le32_to_cpu(ring_control->host_idx[1]);
i = idx % ARRAY_SIZE(ring_control->tx_data);
mapping = pci_map_single(priv->pdev, skb->data, skb->len,
PCI_DMA_TODEVICE);
if (pci_dma_mapping_error(priv->pdev, mapping)) {
spin_unlock_irqrestore(&priv->lock, flags);
p54_free_skb(dev, skb);
dev_err(&priv->pdev->dev, "TX DMA mapping error\n");
return;
}
priv->tx_buf_data[i] = skb;
desc = &ring_control->tx_data[i];
desc->host_addr = cpu_to_le32(mapping);
desc->device_addr = ((struct p54_hdr *)skb->data)->req_id;
desc->len = cpu_to_le16(skb->len);
desc->flags = 0;
wmb();
ring_control->host_idx[1] = cpu_to_le32(idx + 1);
spin_unlock_irqrestore(&priv->lock, flags);
P54P_WRITE(dev_int, cpu_to_le32(ISL38XX_DEV_INT_UPDATE));
P54P_READ(dev_int);
}
static void p54p_stop(struct ieee80211_hw *dev)
{
struct p54p_priv *priv = dev->priv;
struct p54p_ring_control *ring_control = priv->ring_control;
unsigned int i;
struct p54p_desc *desc;
P54P_WRITE(int_enable, cpu_to_le32(0));
P54P_READ(int_enable);
udelay(10);
free_irq(priv->pdev->irq, dev);
tasklet_kill(&priv->tasklet);
P54P_WRITE(dev_int, cpu_to_le32(ISL38XX_DEV_INT_RESET));
for (i = 0; i < ARRAY_SIZE(priv->rx_buf_data); i++) {
desc = &ring_control->rx_data[i];
if (desc->host_addr)
pci_unmap_single(priv->pdev,
le32_to_cpu(desc->host_addr),
priv->common.rx_mtu + 32,
PCI_DMA_FROMDEVICE);
kfree_skb(priv->rx_buf_data[i]);
priv->rx_buf_data[i] = NULL;
}
for (i = 0; i < ARRAY_SIZE(priv->rx_buf_mgmt); i++) {
desc = &ring_control->rx_mgmt[i];
if (desc->host_addr)
pci_unmap_single(priv->pdev,
le32_to_cpu(desc->host_addr),
priv->common.rx_mtu + 32,
PCI_DMA_FROMDEVICE);
kfree_skb(priv->rx_buf_mgmt[i]);
priv->rx_buf_mgmt[i] = NULL;
}
for (i = 0; i < ARRAY_SIZE(priv->tx_buf_data); i++) {
desc = &ring_control->tx_data[i];
if (desc->host_addr)
pci_unmap_single(priv->pdev,
le32_to_cpu(desc->host_addr),
le16_to_cpu(desc->len),
PCI_DMA_TODEVICE);
p54_free_skb(dev, priv->tx_buf_data[i]);
priv->tx_buf_data[i] = NULL;
}
for (i = 0; i < ARRAY_SIZE(priv->tx_buf_mgmt); i++) {
desc = &ring_control->tx_mgmt[i];
if (desc->host_addr)
pci_unmap_single(priv->pdev,
le32_to_cpu(desc->host_addr),
le16_to_cpu(desc->len),
PCI_DMA_TODEVICE);
p54_free_skb(dev, priv->tx_buf_mgmt[i]);
priv->tx_buf_mgmt[i] = NULL;
}
memset(ring_control, 0, sizeof(*ring_control));
}
static int p54p_open(struct ieee80211_hw *dev)
{
struct p54p_priv *priv = dev->priv;
int err;
init_completion(&priv->boot_comp);
err = request_irq(priv->pdev->irq, p54p_interrupt,
IRQF_SHARED, "p54pci", dev);
if (err) {
dev_err(&priv->pdev->dev, "failed to register IRQ handler\n");
return err;
}
memset(priv->ring_control, 0, sizeof(*priv->ring_control));
err = p54p_upload_firmware(dev);
if (err) {
free_irq(priv->pdev->irq, dev);
return err;
}
priv->rx_idx_data = priv->tx_idx_data = 0;
priv->rx_idx_mgmt = priv->tx_idx_mgmt = 0;
p54p_refill_rx_ring(dev, 0, priv->ring_control->rx_data,
ARRAY_SIZE(priv->ring_control->rx_data), priv->rx_buf_data, 0);
p54p_refill_rx_ring(dev, 2, priv->ring_control->rx_mgmt,
ARRAY_SIZE(priv->ring_control->rx_mgmt), priv->rx_buf_mgmt, 0);
P54P_WRITE(ring_control_base, cpu_to_le32(priv->ring_control_dma));
P54P_READ(ring_control_base);
wmb();
udelay(10);
P54P_WRITE(int_enable, cpu_to_le32(ISL38XX_INT_IDENT_INIT));
P54P_READ(int_enable);
wmb();
udelay(10);
P54P_WRITE(dev_int, cpu_to_le32(ISL38XX_DEV_INT_RESET));
P54P_READ(dev_int);
if (!wait_for_completion_interruptible_timeout(&priv->boot_comp, HZ)) {
wiphy_err(dev->wiphy, "Cannot boot firmware!\n");
p54p_stop(dev);
return -ETIMEDOUT;
}
P54P_WRITE(int_enable, cpu_to_le32(ISL38XX_INT_IDENT_UPDATE));
P54P_READ(int_enable);
wmb();
udelay(10);
P54P_WRITE(dev_int, cpu_to_le32(ISL38XX_DEV_INT_UPDATE));
P54P_READ(dev_int);
wmb();
udelay(10);
return 0;
}
static int __devinit p54p_probe(struct pci_dev *pdev,
const struct pci_device_id *id)
{
struct p54p_priv *priv;
struct ieee80211_hw *dev;
unsigned long mem_addr, mem_len;
int err;
err = pci_enable_device(pdev);
if (err) {
dev_err(&pdev->dev, "Cannot enable new PCI device\n");
return err;
}
mem_addr = pci_resource_start(pdev, 0);
mem_len = pci_resource_len(pdev, 0);
if (mem_len < sizeof(struct p54p_csr)) {
dev_err(&pdev->dev, "Too short PCI resources\n");
goto err_disable_dev;
}
err = pci_request_regions(pdev, "p54pci");
if (err) {
dev_err(&pdev->dev, "Cannot obtain PCI resources\n");
goto err_disable_dev;
}
if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) ||
pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32))) {
dev_err(&pdev->dev, "No suitable DMA available\n");
goto err_free_reg;
}
pci_set_master(pdev);
pci_try_set_mwi(pdev);
pci_write_config_byte(pdev, 0x40, 0);
pci_write_config_byte(pdev, 0x41, 0);
dev = p54_init_common(sizeof(*priv));
if (!dev) {
dev_err(&pdev->dev, "ieee80211 alloc failed\n");
err = -ENOMEM;
goto err_free_reg;
}
priv = dev->priv;
priv->pdev = pdev;
SET_IEEE80211_DEV(dev, &pdev->dev);
pci_set_drvdata(pdev, dev);
priv->map = ioremap(mem_addr, mem_len);
if (!priv->map) {
dev_err(&pdev->dev, "Cannot map device memory\n");
err = -ENOMEM;
goto err_free_dev;
}
priv->ring_control = pci_alloc_consistent(pdev, sizeof(*priv->ring_control),
&priv->ring_control_dma);
if (!priv->ring_control) {
dev_err(&pdev->dev, "Cannot allocate rings\n");
err = -ENOMEM;
goto err_iounmap;
}
priv->common.open = p54p_open;
priv->common.stop = p54p_stop;
priv->common.tx = p54p_tx;
spin_lock_init(&priv->lock);
tasklet_init(&priv->tasklet, p54p_tasklet, (unsigned long)dev);
err = request_firmware(&priv->firmware, "isl3886pci",
&priv->pdev->dev);
if (err) {
dev_err(&pdev->dev, "Cannot find firmware (isl3886pci)\n");
err = request_firmware(&priv->firmware, "isl3886",
&priv->pdev->dev);
if (err)
goto err_free_common;
}
err = p54p_open(dev);
if (err)
goto err_free_common;
err = p54_read_eeprom(dev);
p54p_stop(dev);
if (err)
goto err_free_common;
err = p54_register_common(dev, &pdev->dev);
if (err)
goto err_free_common;
return 0;
err_free_common:
release_firmware(priv->firmware);
pci_free_consistent(pdev, sizeof(*priv->ring_control),
priv->ring_control, priv->ring_control_dma);
err_iounmap:
iounmap(priv->map);
err_free_dev:
pci_set_drvdata(pdev, NULL);
p54_free_common(dev);
err_free_reg:
pci_release_regions(pdev);
err_disable_dev:
pci_disable_device(pdev);
return err;
}
static void __devexit p54p_remove(struct pci_dev *pdev)
{
struct ieee80211_hw *dev = pci_get_drvdata(pdev);
struct p54p_priv *priv;
if (!dev)
return;
p54_unregister_common(dev);
priv = dev->priv;
release_firmware(priv->firmware);
pci_free_consistent(pdev, sizeof(*priv->ring_control),
priv->ring_control, priv->ring_control_dma);
iounmap(priv->map);
pci_release_regions(pdev);
pci_disable_device(pdev);
p54_free_common(dev);
}
#ifdef CONFIG_PM
static int p54p_suspend(struct pci_dev *pdev, pm_message_t state)
{
struct ieee80211_hw *dev = pci_get_drvdata(pdev);
struct p54p_priv *priv = dev->priv;
if (priv->common.mode != NL80211_IFTYPE_UNSPECIFIED) {
ieee80211_stop_queues(dev);
p54p_stop(dev);
}
pci_save_state(pdev);
pci_set_power_state(pdev, pci_choose_state(pdev, state));
return 0;
}
static int p54p_resume(struct pci_dev *pdev)
{
struct ieee80211_hw *dev = pci_get_drvdata(pdev);
struct p54p_priv *priv = dev->priv;
pci_set_power_state(pdev, PCI_D0);
pci_restore_state(pdev);
if (priv->common.mode != NL80211_IFTYPE_UNSPECIFIED) {
p54p_open(dev);
ieee80211_wake_queues(dev);
}
return 0;
}
#endif /* CONFIG_PM */
static struct pci_driver p54p_driver = {
.name = "p54pci",
.id_table = p54p_table,
.probe = p54p_probe,
.remove = __devexit_p(p54p_remove),
#ifdef CONFIG_PM
.suspend = p54p_suspend,
.resume = p54p_resume,
#endif /* CONFIG_PM */
};
static int __init p54p_init(void)
{
return pci_register_driver(&p54p_driver);
}
static void __exit p54p_exit(void)
{
pci_unregister_driver(&p54p_driver);
}
module_init(p54p_init);
module_exit(p54p_exit);
| gpl-2.0 |
5victor/linux | drivers/usb/storage/sddr09.c | 3245 | 45973 | /* Driver for SanDisk SDDR-09 SmartMedia reader
*
* (c) 2000, 2001 Robert Baruch (autophile@starband.net)
* (c) 2002 Andries Brouwer (aeb@cwi.nl)
* Developed with the assistance of:
* (c) 2002 Alan Stern <stern@rowland.org>
*
* The SanDisk SDDR-09 SmartMedia reader uses the Shuttle EUSB-01 chip.
* This chip is a programmable USB controller. In the SDDR-09, it has
* been programmed to obey a certain limited set of SCSI commands.
* This driver translates the "real" SCSI commands to the SDDR-09 SCSI
* commands.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2, or (at your option) any
* later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 675 Mass Ave, Cambridge, MA 02139, USA.
*/
/*
* Known vendor commands: 12 bytes, first byte is opcode
*
* E7: read scatter gather
* E8: read
* E9: write
* EA: erase
* EB: reset
* EC: read status
* ED: read ID
* EE: write CIS (?)
* EF: compute checksum (?)
*/
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include "usb.h"
#include "transport.h"
#include "protocol.h"
#include "debug.h"
MODULE_DESCRIPTION("Driver for SanDisk SDDR-09 SmartMedia reader");
MODULE_AUTHOR("Andries Brouwer <aeb@cwi.nl>, Robert Baruch <autophile@starband.net>");
MODULE_LICENSE("GPL");
static int usb_stor_sddr09_dpcm_init(struct us_data *us);
static int sddr09_transport(struct scsi_cmnd *srb, struct us_data *us);
static int usb_stor_sddr09_init(struct us_data *us);
/*
* The table of devices
*/
#define UNUSUAL_DEV(id_vendor, id_product, bcdDeviceMin, bcdDeviceMax, \
vendorName, productName, useProtocol, useTransport, \
initFunction, flags) \
{ USB_DEVICE_VER(id_vendor, id_product, bcdDeviceMin, bcdDeviceMax), \
.driver_info = (flags)|(USB_US_TYPE_STOR<<24) }
struct usb_device_id sddr09_usb_ids[] = {
# include "unusual_sddr09.h"
{ } /* Terminating entry */
};
MODULE_DEVICE_TABLE(usb, sddr09_usb_ids);
#undef UNUSUAL_DEV
/*
* The flags table
*/
#define UNUSUAL_DEV(idVendor, idProduct, bcdDeviceMin, bcdDeviceMax, \
vendor_name, product_name, use_protocol, use_transport, \
init_function, Flags) \
{ \
.vendorName = vendor_name, \
.productName = product_name, \
.useProtocol = use_protocol, \
.useTransport = use_transport, \
.initFunction = init_function, \
}
static struct us_unusual_dev sddr09_unusual_dev_list[] = {
# include "unusual_sddr09.h"
{ } /* Terminating entry */
};
#undef UNUSUAL_DEV
#define short_pack(lsb,msb) ( ((u16)(lsb)) | ( ((u16)(msb))<<8 ) )
#define LSB_of(s) ((s)&0xFF)
#define MSB_of(s) ((s)>>8)
/* #define US_DEBUGP printk */
/*
* First some stuff that does not belong here:
* data on SmartMedia and other cards, completely
* unrelated to this driver.
* Similar stuff occurs in <linux/mtd/nand_ids.h>.
*/
struct nand_flash_dev {
int model_id;
int chipshift; /* 1<<cs bytes total capacity */
char pageshift; /* 1<<ps bytes in a page */
char blockshift; /* 1<<bs pages in an erase block */
char zoneshift; /* 1<<zs blocks in a zone */
/* # of logical blocks is 125/128 of this */
char pageadrlen; /* length of an address in bytes - 1 */
};
/*
* NAND Flash Manufacturer ID Codes
*/
#define NAND_MFR_AMD 0x01
#define NAND_MFR_NATSEMI 0x8f
#define NAND_MFR_TOSHIBA 0x98
#define NAND_MFR_SAMSUNG 0xec
static inline char *nand_flash_manufacturer(int manuf_id) {
switch(manuf_id) {
case NAND_MFR_AMD:
return "AMD";
case NAND_MFR_NATSEMI:
return "NATSEMI";
case NAND_MFR_TOSHIBA:
return "Toshiba";
case NAND_MFR_SAMSUNG:
return "Samsung";
default:
return "unknown";
}
}
/*
* It looks like it is unnecessary to attach manufacturer to the
* remaining data: SSFDC prescribes manufacturer-independent id codes.
*
* 256 MB NAND flash has a 5-byte ID with 2nd byte 0xaa, 0xba, 0xca or 0xda.
*/
static struct nand_flash_dev nand_flash_ids[] = {
/* NAND flash */
{ 0x6e, 20, 8, 4, 8, 2}, /* 1 MB */
{ 0xe8, 20, 8, 4, 8, 2}, /* 1 MB */
{ 0xec, 20, 8, 4, 8, 2}, /* 1 MB */
{ 0x64, 21, 8, 4, 9, 2}, /* 2 MB */
{ 0xea, 21, 8, 4, 9, 2}, /* 2 MB */
{ 0x6b, 22, 9, 4, 9, 2}, /* 4 MB */
{ 0xe3, 22, 9, 4, 9, 2}, /* 4 MB */
{ 0xe5, 22, 9, 4, 9, 2}, /* 4 MB */
{ 0xe6, 23, 9, 4, 10, 2}, /* 8 MB */
{ 0x73, 24, 9, 5, 10, 2}, /* 16 MB */
{ 0x75, 25, 9, 5, 10, 2}, /* 32 MB */
{ 0x76, 26, 9, 5, 10, 3}, /* 64 MB */
{ 0x79, 27, 9, 5, 10, 3}, /* 128 MB */
/* MASK ROM */
{ 0x5d, 21, 9, 4, 8, 2}, /* 2 MB */
{ 0xd5, 22, 9, 4, 9, 2}, /* 4 MB */
{ 0xd6, 23, 9, 4, 10, 2}, /* 8 MB */
{ 0x57, 24, 9, 4, 11, 2}, /* 16 MB */
{ 0x58, 25, 9, 4, 12, 2}, /* 32 MB */
{ 0,}
};
static struct nand_flash_dev *
nand_find_id(unsigned char id) {
int i;
for (i = 0; i < ARRAY_SIZE(nand_flash_ids); i++)
if (nand_flash_ids[i].model_id == id)
return &(nand_flash_ids[i]);
return NULL;
}
/*
* ECC computation.
*/
static unsigned char parity[256];
static unsigned char ecc2[256];
static void nand_init_ecc(void) {
int i, j, a;
parity[0] = 0;
for (i = 1; i < 256; i++)
parity[i] = (parity[i&(i-1)] ^ 1);
for (i = 0; i < 256; i++) {
a = 0;
for (j = 0; j < 8; j++) {
if (i & (1<<j)) {
if ((j & 1) == 0)
a ^= 0x04;
if ((j & 2) == 0)
a ^= 0x10;
if ((j & 4) == 0)
a ^= 0x40;
}
}
ecc2[i] = ~(a ^ (a<<1) ^ (parity[i] ? 0xa8 : 0));
}
}
/* compute 3-byte ecc on 256 bytes */
static void nand_compute_ecc(unsigned char *data, unsigned char *ecc) {
int i, j, a;
unsigned char par, bit, bits[8];
par = 0;
for (j = 0; j < 8; j++)
bits[j] = 0;
/* collect 16 checksum bits */
for (i = 0; i < 256; i++) {
par ^= data[i];
bit = parity[data[i]];
for (j = 0; j < 8; j++)
if ((i & (1<<j)) == 0)
bits[j] ^= bit;
}
/* put 4+4+4 = 12 bits in the ecc */
a = (bits[3] << 6) + (bits[2] << 4) + (bits[1] << 2) + bits[0];
ecc[0] = ~(a ^ (a<<1) ^ (parity[par] ? 0xaa : 0));
a = (bits[7] << 6) + (bits[6] << 4) + (bits[5] << 2) + bits[4];
ecc[1] = ~(a ^ (a<<1) ^ (parity[par] ? 0xaa : 0));
ecc[2] = ecc2[par];
}
static int nand_compare_ecc(unsigned char *data, unsigned char *ecc) {
return (data[0] == ecc[0] && data[1] == ecc[1] && data[2] == ecc[2]);
}
static void nand_store_ecc(unsigned char *data, unsigned char *ecc) {
memcpy(data, ecc, 3);
}
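/*
 * Illustrative sketch (not part of this driver): how the ECC helpers above
 * fit together for one 256-byte half-page. nand_init_ecc() must have run
 * once before nand_compute_ecc() is used.
 */
#if 0
static int sddr09_ecc_selftest(void)
{
unsigned char data[256] = { 0x5a, 0xa5 }; /* rest zero-filled */
unsigned char stored[3], check[3];
nand_compute_ecc(data, stored); /* ecc as written to the spare area */
nand_compute_ecc(data, check); /* ecc recomputed after a read back */
return nand_compare_ecc(stored, check); /* 1 means the data checks out */
}
#endif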
/*
* The actual driver starts here.
*/
struct sddr09_card_info {
unsigned long capacity; /* Size of card in bytes */
int pagesize; /* Size of page in bytes */
int pageshift; /* log2 of pagesize */
int blocksize; /* Size of block in pages */
int blockshift; /* log2 of blocksize */
int blockmask; /* 2^blockshift - 1 */
int *lba_to_pba; /* logical to physical map */
int *pba_to_lba; /* physical to logical map */
int lbact; /* number of available pages */
int flags;
#define SDDR09_WP 1 /* write protected */
};
/*
* On my 16MB card, control blocks have size 64 (16 real control bytes,
* and 48 junk bytes). In reality of course the card uses 16 control bytes,
* so the reader makes up the remaining 48. Don't know whether these numbers
* depend on the card. For now a constant.
*/
#define CONTROL_SHIFT 6
/*
* On my Combo CF/SM reader, the SM reader has LUN 1.
* (and things fail with LUN 0).
* It seems LUN is irrelevant for others.
*/
#define LUN 1
#define LUNBITS (LUN << 5)
/*
* LBA and PBA are unsigned ints. Special values.
*/
#define UNDEF 0xffffffff
#define SPARE 0xfffffffe
#define UNUSABLE 0xfffffffd
static const int erase_bad_lba_entries = 0;
/* send vendor interface command (0x41) */
/* called for requests 0, 1, 8 */
static int
sddr09_send_command(struct us_data *us,
unsigned char request,
unsigned char direction,
unsigned char *xfer_data,
unsigned int xfer_len) {
unsigned int pipe;
unsigned char requesttype = (0x41 | direction);
int rc;
// Get the receive or send control pipe number
if (direction == USB_DIR_IN)
pipe = us->recv_ctrl_pipe;
else
pipe = us->send_ctrl_pipe;
rc = usb_stor_ctrl_transfer(us, pipe, request, requesttype,
0, 0, xfer_data, xfer_len);
switch (rc) {
case USB_STOR_XFER_GOOD: return 0;
case USB_STOR_XFER_STALLED: return -EPIPE;
default: return -EIO;
}
}
static int
sddr09_send_scsi_command(struct us_data *us,
unsigned char *command,
unsigned int command_len) {
return sddr09_send_command(us, 0, USB_DIR_OUT, command, command_len);
}
#if 0
/*
* Test Unit Ready Command: 12 bytes.
* byte 0: opcode: 00
*/
static int
sddr09_test_unit_ready(struct us_data *us) {
unsigned char *command = us->iobuf;
int result;
memset(command, 0, 6);
command[1] = LUNBITS;
result = sddr09_send_scsi_command(us, command, 6);
US_DEBUGP("sddr09_test_unit_ready returns %d\n", result);
return result;
}
#endif
/*
* Request Sense Command: 12 bytes.
* byte 0: opcode: 03
* byte 4: data length
*/
static int
sddr09_request_sense(struct us_data *us, unsigned char *sensebuf, int buflen) {
unsigned char *command = us->iobuf;
int result;
memset(command, 0, 12);
command[0] = 0x03;
command[1] = LUNBITS;
command[4] = buflen;
result = sddr09_send_scsi_command(us, command, 12);
if (result)
return result;
result = usb_stor_bulk_transfer_buf(us, us->recv_bulk_pipe,
sensebuf, buflen, NULL);
return (result == USB_STOR_XFER_GOOD ? 0 : -EIO);
}
/*
* Read Command: 12 bytes.
* byte 0: opcode: E8
* byte 1: last two bits: 00: read data, 01: read blockwise control,
* 10: read both, 11: read pagewise control.
* It turns out we need values 20, 21, 22, 23 here (LUN 1).
* bytes 2-5: address (interpretation depends on byte 1, see below)
* bytes 10-11: count (idem)
*
* A page has 512 data bytes and 64 control bytes (16 control and 48 junk).
* A read data command gets data in 512-byte pages.
* A read control command gets control in 64-byte chunks.
* A read both command gets data+control in 576-byte chunks.
*
* Blocks are groups of 32 pages, and read blockwise control jumps to the
* next block, while read pagewise control jumps to the next page after
* reading a group of 64 control bytes.
* [Here 512 = 1<<pageshift, 32 = 1<<blockshift, 64 is constant?]
*
* (1 MB and 2 MB cards are a bit different, but I have only a 16 MB card.)
*/
static int
sddr09_readX(struct us_data *us, int x, unsigned long fromaddress,
int nr_of_pages, int bulklen, unsigned char *buf,
int use_sg) {
unsigned char *command = us->iobuf;
int result;
command[0] = 0xE8;
command[1] = LUNBITS | x;
command[2] = MSB_of(fromaddress>>16);
command[3] = LSB_of(fromaddress>>16);
command[4] = MSB_of(fromaddress & 0xFFFF);
command[5] = LSB_of(fromaddress & 0xFFFF);
command[6] = 0;
command[7] = 0;
command[8] = 0;
command[9] = 0;
command[10] = MSB_of(nr_of_pages);
command[11] = LSB_of(nr_of_pages);
result = sddr09_send_scsi_command(us, command, 12);
if (result) {
US_DEBUGP("Result for send_control in sddr09_read2%d %d\n",
x, result);
return result;
}
result = usb_stor_bulk_transfer_sg(us, us->recv_bulk_pipe,
buf, bulklen, use_sg, NULL);
if (result != USB_STOR_XFER_GOOD) {
US_DEBUGP("Result for bulk_transfer in sddr09_read2%d %d\n",
x, result);
return -EIO;
}
return 0;
}
/*
* Read Data
*
* fromaddress counts data shorts:
* increasing it by 256 shifts the bytestream by 512 bytes;
* the last 8 bits are ignored.
*
* nr_of_pages counts pages of size (1 << pageshift).
*/
static int
sddr09_read20(struct us_data *us, unsigned long fromaddress,
int nr_of_pages, int pageshift, unsigned char *buf, int use_sg) {
int bulklen = nr_of_pages << pageshift;
/* The last 8 bits of fromaddress are ignored. */
return sddr09_readX(us, 0, fromaddress, nr_of_pages, bulklen,
buf, use_sg);
}
/*
* Read Blockwise Control
*
* fromaddress gives the starting position (as in read data;
* the last 8 bits are ignored); increasing it by 32*256 shifts
* the output stream by 64 bytes.
*
* count counts control groups of size (1 << controlshift).
* For me, controlshift = 6. Is this constant?
*
* After getting one control group, jump to the next block
* (fromaddress += 8192).
*/
static int
sddr09_read21(struct us_data *us, unsigned long fromaddress,
int count, int controlshift, unsigned char *buf, int use_sg) {
int bulklen = (count << controlshift);
return sddr09_readX(us, 1, fromaddress, count, bulklen,
buf, use_sg);
}
/*
* Read both Data and Control
*
* fromaddress counts data shorts, ignoring control:
* increasing it by 256 shifts the bytestream by 576 = 512+64 bytes;
* the last 8 bits are ignored.
*
* nr_of_pages counts pages of size (1 << pageshift) + (1 << controlshift).
*/
static int
sddr09_read22(struct us_data *us, unsigned long fromaddress,
int nr_of_pages, int pageshift, unsigned char *buf, int use_sg) {
int bulklen = (nr_of_pages << pageshift) + (nr_of_pages << CONTROL_SHIFT);
US_DEBUGP("sddr09_read22: reading %d pages, %d bytes\n",
nr_of_pages, bulklen);
return sddr09_readX(us, 2, fromaddress, nr_of_pages, bulklen,
buf, use_sg);
}
#if 0
/*
* Read Pagewise Control
*
* fromaddress gives the starting position (as in read data;
* the last 8 bits are ignored); increasing it by 256 shifts
* the output stream by 64 bytes.
*
* count counts control groups of size (1 << controlshift).
* For me, controlshift = 6. Is this constant?
*
* After getting one control group, jump to the next page
* (fromaddress += 256).
*/
static int
sddr09_read23(struct us_data *us, unsigned long fromaddress,
int count, int controlshift, unsigned char *buf, int use_sg) {
int bulklen = (count << controlshift);
return sddr09_readX(us, 3, fromaddress, count, bulklen,
buf, use_sg);
}
#endif
/*
* Erase Command: 12 bytes.
* byte 0: opcode: EA
* bytes 6-9: erase address (big-endian, counting shorts, sector aligned).
*
* Always precisely one block is erased; bytes 2-5 and 10-11 are ignored.
* The byte address being erased is 2*Eaddress.
* The CIS cannot be erased.
*/
static int
sddr09_erase(struct us_data *us, unsigned long Eaddress) {
unsigned char *command = us->iobuf;
int result;
US_DEBUGP("sddr09_erase: erase address %lu\n", Eaddress);
memset(command, 0, 12);
command[0] = 0xEA;
command[1] = LUNBITS;
command[6] = MSB_of(Eaddress>>16);
command[7] = LSB_of(Eaddress>>16);
command[8] = MSB_of(Eaddress & 0xFFFF);
command[9] = LSB_of(Eaddress & 0xFFFF);
result = sddr09_send_scsi_command(us, command, 12);
if (result)
US_DEBUGP("Result for send_control in sddr09_erase %d\n",
result);
return result;
}
/*
* Write CIS Command: 12 bytes.
* byte 0: opcode: EE
* bytes 2-5: write address in shorts
* bytes 10-11: sector count
*
* This writes at the indicated address. Don't know how it differs
* from E9. Maybe it does not erase? However, it will also write to
* the CIS.
*
* When two such commands on the same page follow each other directly,
* the second one is not done.
*/
/*
* Write Command: 12 bytes.
* byte 0: opcode: E9
* bytes 2-5: write address (big-endian, counting shorts, sector aligned).
* bytes 6-9: erase address (big-endian, counting shorts, sector aligned).
* bytes 10-11: sector count (big-endian, in 512-byte sectors).
*
* If write address equals erase address, the erase is done first,
* otherwise the write is done first. When erase address equals zero
* no erase is done?
*/
static int
sddr09_writeX(struct us_data *us,
unsigned long Waddress, unsigned long Eaddress,
int nr_of_pages, int bulklen, unsigned char *buf, int use_sg) {
unsigned char *command = us->iobuf;
int result;
command[0] = 0xE9;
command[1] = LUNBITS;
command[2] = MSB_of(Waddress>>16);
command[3] = LSB_of(Waddress>>16);
command[4] = MSB_of(Waddress & 0xFFFF);
command[5] = LSB_of(Waddress & 0xFFFF);
command[6] = MSB_of(Eaddress>>16);
command[7] = LSB_of(Eaddress>>16);
command[8] = MSB_of(Eaddress & 0xFFFF);
command[9] = LSB_of(Eaddress & 0xFFFF);
command[10] = MSB_of(nr_of_pages);
command[11] = LSB_of(nr_of_pages);
result = sddr09_send_scsi_command(us, command, 12);
if (result) {
US_DEBUGP("Result for send_control in sddr09_writeX %d\n",
result);
return result;
}
result = usb_stor_bulk_transfer_sg(us, us->send_bulk_pipe,
buf, bulklen, use_sg, NULL);
if (result != USB_STOR_XFER_GOOD) {
US_DEBUGP("Result for bulk_transfer in sddr09_writeX %d\n",
result);
return -EIO;
}
return 0;
}
/* erase address, write same address */
static int
sddr09_write_inplace(struct us_data *us, unsigned long address,
int nr_of_pages, int pageshift, unsigned char *buf,
int use_sg) {
int bulklen = (nr_of_pages << pageshift) + (nr_of_pages << CONTROL_SHIFT);
return sddr09_writeX(us, address, address, nr_of_pages, bulklen,
buf, use_sg);
}
#if 0
/*
* Read Scatter Gather Command: 3+4n bytes.
* byte 0: opcode E7
* byte 2: n
* bytes 4i-1,4i,4i+1: page address
* byte 4i+2: page count
* (i=1..n)
*
* This reads several pages from the card to a single memory buffer.
* The last two bits of byte 1 have the same meaning as for E8.
*/
static int
sddr09_read_sg_test_only(struct us_data *us) {
unsigned char *command = us->iobuf;
int result, bulklen, nsg, ct;
unsigned char *buf;
unsigned long address;
nsg = bulklen = 0;
command[0] = 0xE7;
command[1] = LUNBITS;
command[2] = 0;
address = 040000; ct = 1;
nsg++;
bulklen += (ct << 9);
command[4*nsg+2] = ct;
command[4*nsg+1] = ((address >> 9) & 0xFF);
command[4*nsg+0] = ((address >> 17) & 0xFF);
command[4*nsg-1] = ((address >> 25) & 0xFF);
address = 0340000; ct = 1;
nsg++;
bulklen += (ct << 9);
command[4*nsg+2] = ct;
command[4*nsg+1] = ((address >> 9) & 0xFF);
command[4*nsg+0] = ((address >> 17) & 0xFF);
command[4*nsg-1] = ((address >> 25) & 0xFF);
address = 01000000; ct = 2;
nsg++;
bulklen += (ct << 9);
command[4*nsg+2] = ct;
command[4*nsg+1] = ((address >> 9) & 0xFF);
command[4*nsg+0] = ((address >> 17) & 0xFF);
command[4*nsg-1] = ((address >> 25) & 0xFF);
command[2] = nsg;
result = sddr09_send_scsi_command(us, command, 4*nsg+3);
if (result) {
US_DEBUGP("Result for send_control in sddr09_read_sg %d\n",
result);
return result;
}
buf = kmalloc(bulklen, GFP_NOIO);
if (!buf)
return -ENOMEM;
result = usb_stor_bulk_transfer_buf(us, us->recv_bulk_pipe,
buf, bulklen, NULL);
kfree(buf);
if (result != USB_STOR_XFER_GOOD) {
US_DEBUGP("Result for bulk_transfer in sddr09_read_sg %d\n",
result);
return -EIO;
}
return 0;
}
#endif
/*
* Read Status Command: 12 bytes.
* byte 0: opcode: EC
*
* Returns 64 bytes, all zero except for the first.
* bit 0: 1: Error
* bit 5: 1: Suspended
* bit 6: 1: Ready
* bit 7: 1: Not write-protected
*/
static int
sddr09_read_status(struct us_data *us, unsigned char *status) {
unsigned char *command = us->iobuf;
unsigned char *data = us->iobuf;
int result;
US_DEBUGP("Reading status...\n");
memset(command, 0, 12);
command[0] = 0xEC;
command[1] = LUNBITS;
result = sddr09_send_scsi_command(us, command, 12);
if (result)
return result;
result = usb_stor_bulk_transfer_buf(us, us->recv_bulk_pipe,
data, 64, NULL);
*status = data[0];
return (result == USB_STOR_XFER_GOOD ? 0 : -EIO);
}
static int
sddr09_read_data(struct us_data *us,
unsigned long address,
unsigned int sectors) {
struct sddr09_card_info *info = (struct sddr09_card_info *) us->extra;
unsigned char *buffer;
unsigned int lba, maxlba, pba;
unsigned int page, pages;
unsigned int len, offset;
struct scatterlist *sg;
int result;
// Figure out the initial LBA and page
lba = address >> info->blockshift;
page = (address & info->blockmask);
maxlba = info->capacity >> (info->pageshift + info->blockshift);
if (lba >= maxlba)
return -EIO;
// Since we only read in one block at a time, we have to create
// a bounce buffer and move the data a piece at a time between the
// bounce buffer and the actual transfer buffer.
len = min(sectors, (unsigned int) info->blocksize) * info->pagesize;
buffer = kmalloc(len, GFP_NOIO);
if (buffer == NULL) {
printk(KERN_WARNING "sddr09_read_data: Out of memory\n");
return -ENOMEM;
}
// This could be made much more efficient by checking for
// contiguous LBA's. Another exercise left to the student.
result = 0;
offset = 0;
sg = NULL;
while (sectors > 0) {
/* Find number of pages we can read in this block */
pages = min(sectors, info->blocksize - page);
len = pages << info->pageshift;
/* Not overflowing capacity? */
if (lba >= maxlba) {
US_DEBUGP("Error: Requested lba %u exceeds "
"maximum %u\n", lba, maxlba);
result = -EIO;
break;
}
/* Find where this lba lives on disk */
pba = info->lba_to_pba[lba];
if (pba == UNDEF) { /* this lba was never written */
US_DEBUGP("Read %d zero pages (LBA %d) page %d\n",
pages, lba, page);
/* This is not really an error. It just means
that the block has never been written.
Instead of returning an error
it is better to return all zero data. */
memset(buffer, 0, len);
} else {
US_DEBUGP("Read %d pages, from PBA %d"
" (LBA %d) page %d\n",
pages, pba, lba, page);
address = ((pba << info->blockshift) + page) <<
info->pageshift;
result = sddr09_read20(us, address>>1,
pages, info->pageshift, buffer, 0);
if (result)
break;
}
// Store the data in the transfer buffer
usb_stor_access_xfer_buf(buffer, len, us->srb,
&sg, &offset, TO_XFER_BUF);
page = 0;
lba++;
sectors -= pages;
}
kfree(buffer);
return result;
}
static unsigned int
sddr09_find_unused_pba(struct sddr09_card_info *info, unsigned int lba) {
static unsigned int lastpba = 1;
int zonestart, end, i;
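/* SmartMedia is divided into zones of 1024 physical blocks that hold 1000
 * logical blocks, so a spare block for this lba must come from the same
 * zone; scan it round-robin starting after the block handed out last time.
 */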
zonestart = (lba/1000) << 10;
end = info->capacity >> (info->blockshift + info->pageshift);
end -= zonestart;
if (end > 1024)
end = 1024;
for (i = lastpba+1; i < end; i++) {
if (info->pba_to_lba[zonestart+i] == UNDEF) {
lastpba = i;
return zonestart+i;
}
}
for (i = 0; i <= lastpba; i++) {
if (info->pba_to_lba[zonestart+i] == UNDEF) {
lastpba = i;
return zonestart+i;
}
}
return 0;
}
static int
sddr09_write_lba(struct us_data *us, unsigned int lba,
unsigned int page, unsigned int pages,
unsigned char *ptr, unsigned char *blockbuffer) {
struct sddr09_card_info *info = (struct sddr09_card_info *) us->extra;
unsigned long address;
unsigned int pba, lbap;
unsigned int pagelen;
unsigned char *bptr, *cptr, *xptr;
unsigned char ecc[3];
int i, result, isnew;
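/* Build the logical block address field kept in the spare area:
 * 0x1000 | (block number << 1), with the low bit set when needed so the
 * 16-bit value has an even number of one bits. Worked example: lba % 1000
 * == 5 gives 0x100a (three bits set), so the low bit is set and 0x100b,
 * with even parity, is written.
 */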
lbap = ((lba % 1000) << 1) | 0x1000;
if (parity[MSB_of(lbap) ^ LSB_of(lbap)])
lbap ^= 1;
pba = info->lba_to_pba[lba];
isnew = 0;
if (pba == UNDEF) {
pba = sddr09_find_unused_pba(info, lba);
if (!pba) {
printk(KERN_WARNING
"sddr09_write_lba: Out of unused blocks\n");
return -ENOSPC;
}
info->pba_to_lba[pba] = lba;
info->lba_to_pba[lba] = pba;
isnew = 1;
}
if (pba == 1) {
/* Maybe it is impossible to write to PBA 1.
Fake success, but don't do anything. */
printk(KERN_WARNING "sddr09: avoid writing to pba 1\n");
return 0;
}
pagelen = (1 << info->pageshift) + (1 << CONTROL_SHIFT);
/* read old contents */
address = (pba << (info->pageshift + info->blockshift));
result = sddr09_read22(us, address>>1, info->blocksize,
info->pageshift, blockbuffer, 0);
if (result)
return result;
/* check old contents and fill lba */
for (i = 0; i < info->blocksize; i++) {
bptr = blockbuffer + i*pagelen;
cptr = bptr + info->pagesize;
nand_compute_ecc(bptr, ecc);
if (!nand_compare_ecc(cptr+13, ecc)) {
US_DEBUGP("Warning: bad ecc in page %d- of pba %d\n",
i, pba);
nand_store_ecc(cptr+13, ecc);
}
nand_compute_ecc(bptr+(info->pagesize / 2), ecc);
if (!nand_compare_ecc(cptr+8, ecc)) {
US_DEBUGP("Warning: bad ecc in page %d+ of pba %d\n",
i, pba);
nand_store_ecc(cptr+8, ecc);
}
cptr[6] = cptr[11] = MSB_of(lbap);
cptr[7] = cptr[12] = LSB_of(lbap);
}
/* copy in new stuff and compute ECC */
xptr = ptr;
for (i = page; i < page+pages; i++) {
bptr = blockbuffer + i*pagelen;
cptr = bptr + info->pagesize;
memcpy(bptr, xptr, info->pagesize);
xptr += info->pagesize;
nand_compute_ecc(bptr, ecc);
nand_store_ecc(cptr+13, ecc);
nand_compute_ecc(bptr+(info->pagesize / 2), ecc);
nand_store_ecc(cptr+8, ecc);
}
US_DEBUGP("Rewrite PBA %d (LBA %d)\n", pba, lba);
result = sddr09_write_inplace(us, address>>1, info->blocksize,
info->pageshift, blockbuffer, 0);
US_DEBUGP("sddr09_write_inplace returns %d\n", result);
#if 0
{
unsigned char status = 0;
int result2 = sddr09_read_status(us, &status);
if (result2)
US_DEBUGP("sddr09_write_inplace: cannot read status\n");
else if (status != 0xc0)
US_DEBUGP("sddr09_write_inplace: status after write: 0x%x\n",
status);
}
#endif
#if 0
{
int result2 = sddr09_test_unit_ready(us);
}
#endif
return result;
}
static int
sddr09_write_data(struct us_data *us,
unsigned long address,
unsigned int sectors) {
struct sddr09_card_info *info = (struct sddr09_card_info *) us->extra;
unsigned int lba, maxlba, page, pages;
unsigned int pagelen, blocklen;
unsigned char *blockbuffer;
unsigned char *buffer;
unsigned int len, offset;
struct scatterlist *sg;
int result;
// Figure out the initial LBA and page
lba = address >> info->blockshift;
page = (address & info->blockmask);
maxlba = info->capacity >> (info->pageshift + info->blockshift);
if (lba >= maxlba)
return -EIO;
// blockbuffer is used for reading in the old data, overwriting
// with the new data, and performing ECC calculations
/* TODO: instead of doing kmalloc/kfree for each write,
add a bufferpointer to the info structure */
pagelen = (1 << info->pageshift) + (1 << CONTROL_SHIFT);
blocklen = (pagelen << info->blockshift);
blockbuffer = kmalloc(blocklen, GFP_NOIO);
if (!blockbuffer) {
printk(KERN_WARNING "sddr09_write_data: Out of memory\n");
return -ENOMEM;
}
// Since we don't write the user data directly to the device,
// we have to create a bounce buffer and move the data a piece
// at a time between the bounce buffer and the actual transfer buffer.
len = min(sectors, (unsigned int) info->blocksize) * info->pagesize;
buffer = kmalloc(len, GFP_NOIO);
if (buffer == NULL) {
printk(KERN_WARNING "sddr09_write_data: Out of memory\n");
kfree(blockbuffer);
return -ENOMEM;
}
result = 0;
offset = 0;
sg = NULL;
while (sectors > 0) {
// Write as many sectors as possible in this block
pages = min(sectors, info->blocksize - page);
len = (pages << info->pageshift);
/* Not overflowing capacity? */
if (lba >= maxlba) {
US_DEBUGP("Error: Requested lba %u exceeds "
"maximum %u\n", lba, maxlba);
result = -EIO;
break;
}
// Get the data from the transfer buffer
usb_stor_access_xfer_buf(buffer, len, us->srb,
&sg, &offset, FROM_XFER_BUF);
result = sddr09_write_lba(us, lba, page, pages,
buffer, blockbuffer);
if (result)
break;
page = 0;
lba++;
sectors -= pages;
}
kfree(buffer);
kfree(blockbuffer);
return result;
}
static int
sddr09_read_control(struct us_data *us,
unsigned long address,
unsigned int blocks,
unsigned char *content,
int use_sg) {
US_DEBUGP("Read control address %lu, blocks %d\n",
address, blocks);
return sddr09_read21(us, address, blocks,
CONTROL_SHIFT, content, use_sg);
}
/*
* Read Device ID Command: 12 bytes.
* byte 0: opcode: ED
*
* Returns 2 bytes: Manufacturer ID and Device ID.
* On more recent cards 3 bytes: the third byte is an option code A5
* signifying that the secret command to read a 128-bit ID is available.
* On still more recent cards 4 bytes: the fourth byte C0 means that
* a second read ID cmd is available.
*/
static int
sddr09_read_deviceID(struct us_data *us, unsigned char *deviceID) {
unsigned char *command = us->iobuf;
unsigned char *content = us->iobuf;
int result, i;
memset(command, 0, 12);
command[0] = 0xED;
command[1] = LUNBITS;
result = sddr09_send_scsi_command(us, command, 12);
if (result)
return result;
result = usb_stor_bulk_transfer_buf(us, us->recv_bulk_pipe,
content, 64, NULL);
for (i = 0; i < 4; i++)
deviceID[i] = content[i];
return (result == USB_STOR_XFER_GOOD ? 0 : -EIO);
}
static int
sddr09_get_wp(struct us_data *us, struct sddr09_card_info *info) {
int result;
unsigned char status;
result = sddr09_read_status(us, &status);
if (result) {
US_DEBUGP("sddr09_get_wp: read_status fails\n");
return result;
}
US_DEBUGP("sddr09_get_wp: status 0x%02X", status);
if ((status & 0x80) == 0) {
info->flags |= SDDR09_WP; /* write protected */
US_DEBUGP(" WP");
}
if (status & 0x40)
US_DEBUGP(" Ready");
if (status & LUNBITS)
US_DEBUGP(" Suspended");
if (status & 0x1)
US_DEBUGP(" Error");
US_DEBUGP("\n");
return 0;
}
#if 0
/*
* Reset Command: 12 bytes.
* byte 0: opcode: EB
*/
static int
sddr09_reset(struct us_data *us) {
unsigned char *command = us->iobuf;
memset(command, 0, 12);
command[0] = 0xEB;
command[1] = LUNBITS;
return sddr09_send_scsi_command(us, command, 12);
}
#endif
static struct nand_flash_dev *
sddr09_get_cardinfo(struct us_data *us, unsigned char flags) {
struct nand_flash_dev *cardinfo;
unsigned char deviceID[4];
char blurbtxt[256];
int result;
US_DEBUGP("Reading capacity...\n");
result = sddr09_read_deviceID(us, deviceID);
if (result) {
US_DEBUGP("Result of read_deviceID is %d\n", result);
printk(KERN_WARNING "sddr09: could not read card info\n");
return NULL;
}
sprintf(blurbtxt, "sddr09: Found Flash card, ID = %02X %02X %02X %02X",
deviceID[0], deviceID[1], deviceID[2], deviceID[3]);
/* Byte 0 is the manufacturer */
sprintf(blurbtxt + strlen(blurbtxt),
": Manuf. %s",
nand_flash_manufacturer(deviceID[0]));
/* Byte 1 is the device type */
cardinfo = nand_find_id(deviceID[1]);
if (cardinfo) {
/* MB or MiB? It is neither. A 16 MB card has
17301504 raw bytes, of which 16384000 are
usable for user data. */
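/* Illustrative arithmetic: 2^24 = 16777216 bytes; with 16 spare bytes
per 512-byte page that is 16777216 * 528/512 = 17301504 raw bytes,
and with only 1000 of every 1024 blocks mapped to LBAs about
16777216 * 1000/1024 = 16384000 bytes of user data. */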
sprintf(blurbtxt + strlen(blurbtxt),
", %d MB", 1<<(cardinfo->chipshift - 20));
} else {
sprintf(blurbtxt + strlen(blurbtxt),
", type unrecognized");
}
/* Byte 2 is code to signal availability of 128-bit ID */
if (deviceID[2] == 0xa5) {
sprintf(blurbtxt + strlen(blurbtxt),
", 128-bit ID");
}
/* Byte 3 announces the availability of another read ID command */
if (deviceID[3] == 0xc0) {
sprintf(blurbtxt + strlen(blurbtxt),
", extra cmd");
}
if (flags & SDDR09_WP)
sprintf(blurbtxt + strlen(blurbtxt),
", WP");
printk(KERN_WARNING "%s\n", blurbtxt);
return cardinfo;
}
static int
sddr09_read_map(struct us_data *us) {
struct sddr09_card_info *info = (struct sddr09_card_info *) us->extra;
int numblocks, alloc_len, alloc_blocks;
int i, j, result;
unsigned char *buffer, *buffer_end, *ptr;
unsigned int lba, lbact;
if (!info->capacity)
return -1;
// size of a block is 1 << (blockshift + pageshift) bytes
// divide into the total capacity to get the number of blocks
numblocks = info->capacity >> (info->blockshift + info->pageshift);
// read 64 bytes for every block (actually 1 << CONTROL_SHIFT)
// but only use a 64 KB buffer
// buffer size used must be a multiple of (1 << CONTROL_SHIFT)
#define SDDR09_READ_MAP_BUFSZ 65536
alloc_blocks = min(numblocks, SDDR09_READ_MAP_BUFSZ >> CONTROL_SHIFT);
alloc_len = (alloc_blocks << CONTROL_SHIFT);
buffer = kmalloc(alloc_len, GFP_NOIO);
if (buffer == NULL) {
printk(KERN_WARNING "sddr09_read_map: out of memory\n");
result = -1;
goto done;
}
buffer_end = buffer + alloc_len;
#undef SDDR09_READ_MAP_BUFSZ
kfree(info->lba_to_pba);
kfree(info->pba_to_lba);
info->lba_to_pba = kmalloc(numblocks*sizeof(int), GFP_NOIO);
info->pba_to_lba = kmalloc(numblocks*sizeof(int), GFP_NOIO);
if (info->lba_to_pba == NULL || info->pba_to_lba == NULL) {
printk(KERN_WARNING "sddr09_read_map: out of memory\n");
result = -1;
goto done;
}
for (i = 0; i < numblocks; i++)
info->lba_to_pba[i] = info->pba_to_lba[i] = UNDEF;
/*
* Define lba-pba translation table
*/
ptr = buffer_end;
for (i = 0; i < numblocks; i++) {
ptr += (1 << CONTROL_SHIFT);
if (ptr >= buffer_end) {
unsigned long address;
address = i << (info->pageshift + info->blockshift);
result = sddr09_read_control(
us, address>>1,
min(alloc_blocks, numblocks - i),
buffer, 0);
if (result) {
result = -1;
goto done;
}
ptr = buffer;
}
if (i == 0 || i == 1) {
info->pba_to_lba[i] = UNUSABLE;
continue;
}
/* special PBAs have control field 0^16 */
for (j = 0; j < 16; j++)
if (ptr[j] != 0)
goto nonz;
info->pba_to_lba[i] = UNUSABLE;
printk(KERN_WARNING "sddr09: PBA %d has no logical mapping\n",
i);
continue;
nonz:
/* unwritten PBAs have control field FF^16 */
for (j = 0; j < 16; j++)
if (ptr[j] != 0xff)
goto nonff;
continue;
nonff:
/* normal PBAs start with six FFs */
if (j < 6) {
printk(KERN_WARNING
"sddr09: PBA %d has no logical mapping: "
"reserved area = %02X%02X%02X%02X "
"data status %02X block status %02X\n",
i, ptr[0], ptr[1], ptr[2], ptr[3],
ptr[4], ptr[5]);
info->pba_to_lba[i] = UNUSABLE;
continue;
}
if ((ptr[6] >> 4) != 0x01) {
printk(KERN_WARNING
"sddr09: PBA %d has invalid address field "
"%02X%02X/%02X%02X\n",
i, ptr[6], ptr[7], ptr[11], ptr[12]);
info->pba_to_lba[i] = UNUSABLE;
continue;
}
/* check even parity */
if (parity[ptr[6] ^ ptr[7]]) {
printk(KERN_WARNING
"sddr09: Bad parity in LBA for block %d"
" (%02X %02X)\n", i, ptr[6], ptr[7]);
info->pba_to_lba[i] = UNUSABLE;
continue;
}
lba = short_pack(ptr[7], ptr[6]);
lba = (lba & 0x07FF) >> 1;
/*
* Every 1024 physical blocks ("zone"), the LBA numbers
* go back to zero, but are within a higher block of LBA's.
* Also, there is a maximum of 1000 LBA's per zone.
* In other words, in PBA 1024-2047 you will find LBA 0-999
* which are really LBA 1000-1999. This allows for 24 bad
* or special physical blocks per zone.
*/
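/* For example: if PBA 1500 carries an on-media LBA field of 300, it
lies in zone 1 (1500/1024 == 1), so its logical block number is
1000*1 + 300 = 1300. */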
if (lba >= 1000) {
printk(KERN_WARNING
"sddr09: Bad low LBA %d for block %d\n",
lba, i);
goto possibly_erase;
}
lba += 1000*(i/0x400);
if (info->lba_to_pba[lba] != UNDEF) {
printk(KERN_WARNING
"sddr09: LBA %d seen for PBA %d and %d\n",
lba, info->lba_to_pba[lba], i);
goto possibly_erase;
}
info->pba_to_lba[i] = lba;
info->lba_to_pba[lba] = i;
continue;
possibly_erase:
if (erase_bad_lba_entries) {
unsigned long address;
address = (i << (info->pageshift + info->blockshift));
sddr09_erase(us, address>>1);
info->pba_to_lba[i] = UNDEF;
} else
info->pba_to_lba[i] = UNUSABLE;
}
/*
* Approximate capacity. This is not entirely correct yet,
* since a zone with fewer than 1000 usable blocks leads to
* missing LBAs. Especially if it is the last zone, some
* LBAs can be past capacity.
*/
lbact = 0;
for (i = 0; i < numblocks; i += 1024) {
int ct = 0;
for (j = 0; j < 1024 && i+j < numblocks; j++) {
if (info->pba_to_lba[i+j] != UNUSABLE) {
if (ct >= 1000)
info->pba_to_lba[i+j] = SPARE;
else
ct++;
}
}
lbact += ct;
}
info->lbact = lbact;
US_DEBUGP("Found %d LBA's\n", lbact);
result = 0;
done:
if (result != 0) {
kfree(info->lba_to_pba);
kfree(info->pba_to_lba);
info->lba_to_pba = NULL;
info->pba_to_lba = NULL;
}
kfree(buffer);
return result;
}
static void
sddr09_card_info_destructor(void *extra) {
struct sddr09_card_info *info = (struct sddr09_card_info *)extra;
if (!info)
return;
kfree(info->lba_to_pba);
kfree(info->pba_to_lba);
}
static int
sddr09_common_init(struct us_data *us) {
int result;
/* set the configuration -- STALL is an acceptable response here */
if (us->pusb_dev->actconfig->desc.bConfigurationValue != 1) {
US_DEBUGP("active config #%d != 1 ??\n", us->pusb_dev
->actconfig->desc.bConfigurationValue);
return -EINVAL;
}
result = usb_reset_configuration(us->pusb_dev);
US_DEBUGP("Result of usb_reset_configuration is %d\n", result);
if (result == -EPIPE) {
US_DEBUGP("-- stall on control interface\n");
} else if (result != 0) {
/* it's not a stall, but another error -- time to bail */
US_DEBUGP("-- Unknown error. Rejecting device\n");
return -EINVAL;
}
us->extra = kzalloc(sizeof(struct sddr09_card_info), GFP_NOIO);
if (!us->extra)
return -ENOMEM;
us->extra_destructor = sddr09_card_info_destructor;
nand_init_ecc();
return 0;
}
/*
* This is needed at a very early stage. If this is not listed in the
* unusual devices list but called from here, then LUN 0 of the combo reader
* is not recognized. But I do not know precisely what these calls do.
*/
static int
usb_stor_sddr09_dpcm_init(struct us_data *us) {
int result;
unsigned char *data = us->iobuf;
result = sddr09_common_init(us);
if (result)
return result;
result = sddr09_send_command(us, 0x01, USB_DIR_IN, data, 2);
if (result) {
US_DEBUGP("sddr09_init: send_command fails\n");
return result;
}
US_DEBUGP("SDDR09init: %02X %02X\n", data[0], data[1]);
// get 07 02
result = sddr09_send_command(us, 0x08, USB_DIR_IN, data, 2);
if (result) {
US_DEBUGP("sddr09_init: 2nd send_command fails\n");
return result;
}
US_DEBUGP("SDDR09init: %02X %02X\n", data[0], data[1]);
// get 07 00
result = sddr09_request_sense(us, data, 18);
if (result == 0 && data[2] != 0) {
int j;
for (j=0; j<18; j++)
printk(" %02X", data[j]);
printk("\n");
// get 70 00 00 00 00 00 00 * 00 00 00 00 00 00
// 70: current command
// sense key 0, sense code 0, extd sense code 0
// additional transfer length * = sizeof(data) - 7
// Or: 70 00 06 00 00 00 00 0b 00 00 00 00 28 00 00 00 00 00
// sense key 06, sense code 28: unit attention,
// not ready to ready transition
}
// test unit ready
return 0; /* not result */
}
/*
* Transport for the Microtech DPCM-USB
*/
static int dpcm_transport(struct scsi_cmnd *srb, struct us_data *us)
{
int ret;
US_DEBUGP("dpcm_transport: LUN=%d\n", srb->device->lun);
switch (srb->device->lun) {
case 0:
/*
* LUN 0 corresponds to the CompactFlash card reader.
*/
ret = usb_stor_CB_transport(srb, us);
break;
case 1:
/*
* LUN 1 corresponds to the SmartMedia card reader.
*/
/*
* Set the LUN to 0 (just in case).
*/
srb->device->lun = 0;
ret = sddr09_transport(srb, us);
srb->device->lun = 1;
break;
default:
US_DEBUGP("dpcm_transport: Invalid LUN %d\n",
srb->device->lun);
ret = USB_STOR_TRANSPORT_ERROR;
break;
}
return ret;
}
/*
* Transport for the Sandisk SDDR-09
*/
static int sddr09_transport(struct scsi_cmnd *srb, struct us_data *us)
{
static unsigned char sensekey = 0, sensecode = 0;
static unsigned char havefakesense = 0;
int result, i;
unsigned char *ptr = us->iobuf;
unsigned long capacity;
unsigned int page, pages;
struct sddr09_card_info *info;
static unsigned char inquiry_response[8] = {
0x00, 0x80, 0x00, 0x02, 0x1F, 0x00, 0x00, 0x00
};
/* note: no block descriptor support */
static unsigned char mode_page_01[19] = {
0x00, 0x0F, 0x00, 0x0, 0x0, 0x0, 0x00,
0x01, 0x0A,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
};
info = (struct sddr09_card_info *)us->extra;
if (srb->cmnd[0] == REQUEST_SENSE && havefakesense) {
/* for a faked command, we have to follow with a faked sense */
memset(ptr, 0, 18);
ptr[0] = 0x70;
ptr[2] = sensekey;
ptr[7] = 11;
ptr[12] = sensecode;
usb_stor_set_xfer_buf(ptr, 18, srb);
sensekey = sensecode = havefakesense = 0;
return USB_STOR_TRANSPORT_GOOD;
}
havefakesense = 1;
/* Dummy up a response for INQUIRY since SDDR09 doesn't
respond to INQUIRY commands */
if (srb->cmnd[0] == INQUIRY) {
memcpy(ptr, inquiry_response, 8);
fill_inquiry_response(us, ptr, 36);
return USB_STOR_TRANSPORT_GOOD;
}
if (srb->cmnd[0] == READ_CAPACITY) {
struct nand_flash_dev *cardinfo;
sddr09_get_wp(us, info); /* read WP bit */
cardinfo = sddr09_get_cardinfo(us, info->flags);
if (!cardinfo) {
/* probably no media */
init_error:
sensekey = 0x02; /* not ready */
sensecode = 0x3a; /* medium not present */
return USB_STOR_TRANSPORT_FAILED;
}
info->capacity = (1 << cardinfo->chipshift);
info->pageshift = cardinfo->pageshift;
info->pagesize = (1 << info->pageshift);
info->blockshift = cardinfo->blockshift;
info->blocksize = (1 << info->blockshift);
info->blockmask = info->blocksize - 1;
// map initialization, must follow get_cardinfo()
if (sddr09_read_map(us)) {
/* probably out of memory */
goto init_error;
}
// Report capacity
capacity = (info->lbact << info->blockshift) - 1;
((__be32 *) ptr)[0] = cpu_to_be32(capacity);
// Report page size
((__be32 *) ptr)[1] = cpu_to_be32(info->pagesize);
usb_stor_set_xfer_buf(ptr, 8, srb);
return USB_STOR_TRANSPORT_GOOD;
}
if (srb->cmnd[0] == MODE_SENSE_10) {
int modepage = (srb->cmnd[2] & 0x3F);
/* They ask for the Read/Write error recovery page,
or for all pages. */
/* %% We should check DBD %% */
if (modepage == 0x01 || modepage == 0x3F) {
US_DEBUGP("SDDR09: Dummy up request for "
"mode page 0x%x\n", modepage);
memcpy(ptr, mode_page_01, sizeof(mode_page_01));
((__be16*)ptr)[0] = cpu_to_be16(sizeof(mode_page_01) - 2);
ptr[3] = (info->flags & SDDR09_WP) ? 0x80 : 0;
usb_stor_set_xfer_buf(ptr, sizeof(mode_page_01), srb);
return USB_STOR_TRANSPORT_GOOD;
}
sensekey = 0x05; /* illegal request */
sensecode = 0x24; /* invalid field in CDB */
return USB_STOR_TRANSPORT_FAILED;
}
if (srb->cmnd[0] == ALLOW_MEDIUM_REMOVAL)
return USB_STOR_TRANSPORT_GOOD;
havefakesense = 0;
if (srb->cmnd[0] == READ_10) {
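/* READ(10) carries a 32-bit big-endian LBA in CDB bytes 2-5 and a
16-bit transfer length in bytes 7-8; both are in device pages here,
since READ CAPACITY reported the page size as the block length. */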
page = short_pack(srb->cmnd[3], srb->cmnd[2]);
page <<= 16;
page |= short_pack(srb->cmnd[5], srb->cmnd[4]);
pages = short_pack(srb->cmnd[8], srb->cmnd[7]);
US_DEBUGP("READ_10: read page %d pagect %d\n",
page, pages);
result = sddr09_read_data(us, page, pages);
return (result == 0 ? USB_STOR_TRANSPORT_GOOD :
USB_STOR_TRANSPORT_ERROR);
}
if (srb->cmnd[0] == WRITE_10) {
page = short_pack(srb->cmnd[3], srb->cmnd[2]);
page <<= 16;
page |= short_pack(srb->cmnd[5], srb->cmnd[4]);
pages = short_pack(srb->cmnd[8], srb->cmnd[7]);
US_DEBUGP("WRITE_10: write page %d pagect %d\n",
page, pages);
result = sddr09_write_data(us, page, pages);
return (result == 0 ? USB_STOR_TRANSPORT_GOOD :
USB_STOR_TRANSPORT_ERROR);
}
/* catch-all for all other commands; only TEST_UNIT_READY
* and REQUEST_SENSE are passed through to the device
*/
if (srb->cmnd[0] != TEST_UNIT_READY &&
srb->cmnd[0] != REQUEST_SENSE) {
sensekey = 0x05; /* illegal request */
sensecode = 0x20; /* invalid command */
havefakesense = 1;
return USB_STOR_TRANSPORT_FAILED;
}
for (; srb->cmd_len<12; srb->cmd_len++)
srb->cmnd[srb->cmd_len] = 0;
srb->cmnd[1] = LUNBITS;
ptr[0] = 0;
for (i=0; i<12; i++)
sprintf(ptr+strlen(ptr), "%02X ", srb->cmnd[i]);
US_DEBUGP("SDDR09: Send control for command %s\n", ptr);
result = sddr09_send_scsi_command(us, srb->cmnd, 12);
if (result) {
US_DEBUGP("sddr09_transport: sddr09_send_scsi_command "
"returns %d\n", result);
return USB_STOR_TRANSPORT_ERROR;
}
if (scsi_bufflen(srb) == 0)
return USB_STOR_TRANSPORT_GOOD;
if (srb->sc_data_direction == DMA_TO_DEVICE ||
srb->sc_data_direction == DMA_FROM_DEVICE) {
unsigned int pipe = (srb->sc_data_direction == DMA_TO_DEVICE)
? us->send_bulk_pipe : us->recv_bulk_pipe;
US_DEBUGP("SDDR09: %s %d bytes\n",
(srb->sc_data_direction == DMA_TO_DEVICE) ?
"sending" : "receiving",
scsi_bufflen(srb));
result = usb_stor_bulk_srb(us, pipe, srb);
return (result == USB_STOR_XFER_GOOD ?
USB_STOR_TRANSPORT_GOOD : USB_STOR_TRANSPORT_ERROR);
}
return USB_STOR_TRANSPORT_GOOD;
}
/*
* Initialization routine for the sddr09 subdriver
*/
static int
usb_stor_sddr09_init(struct us_data *us) {
return sddr09_common_init(us);
}
static int sddr09_probe(struct usb_interface *intf,
const struct usb_device_id *id)
{
struct us_data *us;
int result;
result = usb_stor_probe1(&us, intf, id,
(id - sddr09_usb_ids) + sddr09_unusual_dev_list);
if (result)
return result;
if (us->protocol == USB_PR_DPCM_USB) {
us->transport_name = "Control/Bulk-EUSB/SDDR09";
us->transport = dpcm_transport;
us->transport_reset = usb_stor_CB_reset;
us->max_lun = 1;
} else {
us->transport_name = "EUSB/SDDR09";
us->transport = sddr09_transport;
us->transport_reset = usb_stor_CB_reset;
us->max_lun = 0;
}
result = usb_stor_probe2(us);
return result;
}
static struct usb_driver sddr09_driver = {
.name = "ums-sddr09",
.probe = sddr09_probe,
.disconnect = usb_stor_disconnect,
.suspend = usb_stor_suspend,
.resume = usb_stor_resume,
.reset_resume = usb_stor_reset_resume,
.pre_reset = usb_stor_pre_reset,
.post_reset = usb_stor_post_reset,
.id_table = sddr09_usb_ids,
.soft_unbind = 1,
};
static int __init sddr09_init(void)
{
return usb_register(&sddr09_driver);
}
static void __exit sddr09_exit(void)
{
usb_deregister(&sddr09_driver);
}
module_init(sddr09_init);
module_exit(sddr09_exit);
| gpl-2.0 |
TeamJB/kernel_samsung_smdk4210 | drivers/input/misc/powermate.c | 3245 | 14995 | /*
* A driver for the Griffin Technology, Inc. "PowerMate" USB controller dial.
*
* v1.1, (c)2002 William R Sowerbutts <will@sowerbutts.com>
*
* This device is an anodised aluminium knob which connects over USB. It can measure
* clockwise and anticlockwise rotation. The dial also acts as a pushbutton with
* a spring for automatic release. The base contains a pair of LEDs which illuminate
* the translucent base. It rotates without limit and reports its relative rotation
* back to the host when polled by the USB controller.
*
* Testing with the knob I have has shown that it measures approximately 94 "clicks"
* for one full rotation. Testing with my High Speed Rotation Actuator (ok, it was
* a variable speed cordless electric drill) has shown that the device can measure
* speeds of up to 7 clicks either clockwise or anticlockwise between pollings from
* the host. If it counts more than 7 clicks before it is polled, it will wrap back
* to zero and start counting again. This was at quite high speed, however, almost
* certainly faster than the human hand could turn it. Griffin say that it loses a
* pulse or two on a direction change; the granularity is so fine that I never
* noticed this in practice.
*
* The device's microcontroller can be programmed to set the LED to either a constant
* intensity, or to a rhythmic pulsing. Several patterns and speeds are available.
*
* Griffin were very happy to provide documentation and free hardware for development.
*
* Some userspace tools are available on the web: http://sowerbutts.com/powermate/
*
*/
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/usb/input.h>
#define POWERMATE_VENDOR 0x077d /* Griffin Technology, Inc. */
#define POWERMATE_PRODUCT_NEW 0x0410 /* Griffin PowerMate */
#define POWERMATE_PRODUCT_OLD 0x04AA /* Griffin soundKnob */
#define CONTOUR_VENDOR 0x05f3 /* Contour Design, Inc. */
#define CONTOUR_JOG 0x0240 /* Jog and Shuttle */
/* these are the command codes we send to the device */
#define SET_STATIC_BRIGHTNESS 0x01
#define SET_PULSE_ASLEEP 0x02
#define SET_PULSE_AWAKE 0x03
#define SET_PULSE_MODE 0x04
/* these refer to bits in the powermate_device's requires_update field. */
#define UPDATE_STATIC_BRIGHTNESS (1<<0)
#define UPDATE_PULSE_ASLEEP (1<<1)
#define UPDATE_PULSE_AWAKE (1<<2)
#define UPDATE_PULSE_MODE (1<<3)
/* at least two versions of the hardware exist, with differing payload
sizes. The first three bytes always contain the "interesting" data in
the relevant format. */
#define POWERMATE_PAYLOAD_SIZE_MAX 6
#define POWERMATE_PAYLOAD_SIZE_MIN 3
struct powermate_device {
signed char *data;
dma_addr_t data_dma;
struct urb *irq, *config;
struct usb_ctrlrequest *configcr;
struct usb_device *udev;
struct input_dev *input;
spinlock_t lock;
int static_brightness;
int pulse_speed;
int pulse_table;
int pulse_asleep;
int pulse_awake;
int requires_update; // physical settings which are out of sync
char phys[64];
};
static char pm_name_powermate[] = "Griffin PowerMate";
static char pm_name_soundknob[] = "Griffin SoundKnob";
static void powermate_config_complete(struct urb *urb);
/* Callback for data arriving from the PowerMate over the USB interrupt pipe */
static void powermate_irq(struct urb *urb)
{
struct powermate_device *pm = urb->context;
int retval;
switch (urb->status) {
case 0:
/* success */
break;
case -ECONNRESET:
case -ENOENT:
case -ESHUTDOWN:
/* this urb is terminated, clean up */
dbg("%s - urb shutting down with status: %d", __func__, urb->status);
return;
default:
dbg("%s - nonzero urb status received: %d", __func__, urb->status);
goto exit;
}
/* handle updates to device state */
input_report_key(pm->input, BTN_0, pm->data[0] & 0x01);
input_report_rel(pm->input, REL_DIAL, pm->data[1]);
input_sync(pm->input);
exit:
retval = usb_submit_urb (urb, GFP_ATOMIC);
if (retval)
err ("%s - usb_submit_urb failed with result %d",
__func__, retval);
}
/* Decide if we need to issue a control message and do so. Must be called with pm->lock taken */
static void powermate_sync_state(struct powermate_device *pm)
{
if (pm->requires_update == 0)
return; /* no updates are required */
if (pm->config->status == -EINPROGRESS)
return; /* an update is already in progress; it'll issue this update when it completes */
if (pm->requires_update & UPDATE_PULSE_ASLEEP){
pm->configcr->wValue = cpu_to_le16( SET_PULSE_ASLEEP );
pm->configcr->wIndex = cpu_to_le16( pm->pulse_asleep ? 1 : 0 );
pm->requires_update &= ~UPDATE_PULSE_ASLEEP;
}else if (pm->requires_update & UPDATE_PULSE_AWAKE){
pm->configcr->wValue = cpu_to_le16( SET_PULSE_AWAKE );
pm->configcr->wIndex = cpu_to_le16( pm->pulse_awake ? 1 : 0 );
pm->requires_update &= ~UPDATE_PULSE_AWAKE;
}else if (pm->requires_update & UPDATE_PULSE_MODE){
int op, arg;
/* the powermate takes an operation and an argument for its pulse algorithm.
the operation can be:
0: divide the speed
1: pulse at normal speed
2: multiply the speed
the argument only has an effect for operations 0 and 2, and ranges between
1 (least effect) to 255 (maximum effect).
thus, several states are equivalent and are coalesced into one state.
we map this onto a range from 0 to 510, with:
0 -- 254 -- use divide (0 = slowest)
255 -- use normal speed
256 -- 510 -- use multiple (510 = fastest).
Only values of 'arg' quite close to 255 are particularly useful/spectacular.
*/
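/* For example: a pulse_speed of 300 maps to op 2 (multiply) with arg 45,
while a pulse_speed of 100 maps to op 0 (divide) with arg 155. */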
if (pm->pulse_speed < 255) {
op = 0; // divide
arg = 255 - pm->pulse_speed;
} else if (pm->pulse_speed > 255) {
op = 2; // multiply
arg = pm->pulse_speed - 255;
} else {
op = 1; // normal speed
arg = 0; // can be any value
}
pm->configcr->wValue = cpu_to_le16( (pm->pulse_table << 8) | SET_PULSE_MODE );
pm->configcr->wIndex = cpu_to_le16( (arg << 8) | op );
pm->requires_update &= ~UPDATE_PULSE_MODE;
} else if (pm->requires_update & UPDATE_STATIC_BRIGHTNESS) {
pm->configcr->wValue = cpu_to_le16( SET_STATIC_BRIGHTNESS );
pm->configcr->wIndex = cpu_to_le16( pm->static_brightness );
pm->requires_update &= ~UPDATE_STATIC_BRIGHTNESS;
} else {
printk(KERN_ERR "powermate: unknown update required");
pm->requires_update = 0; /* fudge the bug */
return;
}
/* printk("powermate: %04x %04x\n", pm->configcr->wValue, pm->configcr->wIndex); */
pm->configcr->bRequestType = 0x41; /* vendor request */
pm->configcr->bRequest = 0x01;
pm->configcr->wLength = 0;
usb_fill_control_urb(pm->config, pm->udev, usb_sndctrlpipe(pm->udev, 0),
(void *) pm->configcr, NULL, 0,
powermate_config_complete, pm);
if (usb_submit_urb(pm->config, GFP_ATOMIC))
printk(KERN_ERR "powermate: usb_submit_urb(config) failed");
}
/* Called when our asynchronous control message completes. We may need to issue another immediately */
static void powermate_config_complete(struct urb *urb)
{
struct powermate_device *pm = urb->context;
unsigned long flags;
if (urb->status)
printk(KERN_ERR "powermate: config urb returned %d\n", urb->status);
spin_lock_irqsave(&pm->lock, flags);
powermate_sync_state(pm);
spin_unlock_irqrestore(&pm->lock, flags);
}
/* Set the LED up as described and begin the sync with the hardware if required */
static void powermate_pulse_led(struct powermate_device *pm, int static_brightness, int pulse_speed,
int pulse_table, int pulse_asleep, int pulse_awake)
{
unsigned long flags;
if (pulse_speed < 0)
pulse_speed = 0;
if (pulse_table < 0)
pulse_table = 0;
if (pulse_speed > 510)
pulse_speed = 510;
if (pulse_table > 2)
pulse_table = 2;
pulse_asleep = !!pulse_asleep;
pulse_awake = !!pulse_awake;
spin_lock_irqsave(&pm->lock, flags);
/* mark state updates which are required */
if (static_brightness != pm->static_brightness) {
pm->static_brightness = static_brightness;
pm->requires_update |= UPDATE_STATIC_BRIGHTNESS;
}
if (pulse_asleep != pm->pulse_asleep) {
pm->pulse_asleep = pulse_asleep;
pm->requires_update |= (UPDATE_PULSE_ASLEEP | UPDATE_STATIC_BRIGHTNESS);
}
if (pulse_awake != pm->pulse_awake) {
pm->pulse_awake = pulse_awake;
pm->requires_update |= (UPDATE_PULSE_AWAKE | UPDATE_STATIC_BRIGHTNESS);
}
if (pulse_speed != pm->pulse_speed || pulse_table != pm->pulse_table) {
pm->pulse_speed = pulse_speed;
pm->pulse_table = pulse_table;
pm->requires_update |= UPDATE_PULSE_MODE;
}
powermate_sync_state(pm);
spin_unlock_irqrestore(&pm->lock, flags);
}
/* Callback from the Input layer when an event arrives from userspace to configure the LED */
static int powermate_input_event(struct input_dev *dev, unsigned int type, unsigned int code, int _value)
{
unsigned int command = (unsigned int)_value;
struct powermate_device *pm = input_get_drvdata(dev);
if (type == EV_MSC && code == MSC_PULSELED){
/*
bits 0- 7: 8 bits: LED brightness
bits 8-16: 9 bits: pulsing speed modifier (0 ... 510); 0-254 = slower, 255 = standard, 256-510 = faster.
bits 17-18: 2 bits: pulse table (0, 1, 2 valid)
bit 19: 1 bit : pulse whilst asleep?
bit 20: 1 bit : pulse constantly?
*/
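/* For example (illustrative): constant half brightness with the standard
pulse speed and no pulsing would be command 0x80 | (255 << 8) = 0xff80. */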
int static_brightness = command & 0xFF; // bits 0-7
int pulse_speed = (command >> 8) & 0x1FF; // bits 8-16
int pulse_table = (command >> 17) & 0x3; // bits 17-18
int pulse_asleep = (command >> 19) & 0x1; // bit 19
int pulse_awake = (command >> 20) & 0x1; // bit 20
powermate_pulse_led(pm, static_brightness, pulse_speed, pulse_table, pulse_asleep, pulse_awake);
}
return 0;
}
static int powermate_alloc_buffers(struct usb_device *udev, struct powermate_device *pm)
{
pm->data = usb_alloc_coherent(udev, POWERMATE_PAYLOAD_SIZE_MAX,
GFP_ATOMIC, &pm->data_dma);
if (!pm->data)
return -1;
pm->configcr = kmalloc(sizeof(*(pm->configcr)), GFP_KERNEL);
if (!pm->configcr)
return -ENOMEM;
return 0;
}
static void powermate_free_buffers(struct usb_device *udev, struct powermate_device *pm)
{
usb_free_coherent(udev, POWERMATE_PAYLOAD_SIZE_MAX,
pm->data, pm->data_dma);
kfree(pm->configcr);
}
/* Called whenever a USB device matching one in our supported devices table is connected */
static int powermate_probe(struct usb_interface *intf, const struct usb_device_id *id)
{
struct usb_device *udev = interface_to_usbdev (intf);
struct usb_host_interface *interface;
struct usb_endpoint_descriptor *endpoint;
struct powermate_device *pm;
struct input_dev *input_dev;
int pipe, maxp;
int error = -ENOMEM;
interface = intf->cur_altsetting;
endpoint = &interface->endpoint[0].desc;
if (!usb_endpoint_is_int_in(endpoint))
return -EIO;
usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
0x0a, USB_TYPE_CLASS | USB_RECIP_INTERFACE,
0, interface->desc.bInterfaceNumber, NULL, 0,
USB_CTRL_SET_TIMEOUT);
pm = kzalloc(sizeof(struct powermate_device), GFP_KERNEL);
input_dev = input_allocate_device();
if (!pm || !input_dev)
goto fail1;
if (powermate_alloc_buffers(udev, pm))
goto fail2;
pm->irq = usb_alloc_urb(0, GFP_KERNEL);
if (!pm->irq)
goto fail2;
pm->config = usb_alloc_urb(0, GFP_KERNEL);
if (!pm->config)
goto fail3;
pm->udev = udev;
pm->input = input_dev;
usb_make_path(udev, pm->phys, sizeof(pm->phys));
strlcat(pm->phys, "/input0", sizeof(pm->phys));
spin_lock_init(&pm->lock);
switch (le16_to_cpu(udev->descriptor.idProduct)) {
case POWERMATE_PRODUCT_NEW:
input_dev->name = pm_name_powermate;
break;
case POWERMATE_PRODUCT_OLD:
input_dev->name = pm_name_soundknob;
break;
default:
input_dev->name = pm_name_soundknob;
printk(KERN_WARNING "powermate: unknown product id %04x\n",
le16_to_cpu(udev->descriptor.idProduct));
}
input_dev->phys = pm->phys;
usb_to_input_id(udev, &input_dev->id);
input_dev->dev.parent = &intf->dev;
input_set_drvdata(input_dev, pm);
input_dev->event = powermate_input_event;
input_dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_REL) |
BIT_MASK(EV_MSC);
input_dev->keybit[BIT_WORD(BTN_0)] = BIT_MASK(BTN_0);
input_dev->relbit[BIT_WORD(REL_DIAL)] = BIT_MASK(REL_DIAL);
input_dev->mscbit[BIT_WORD(MSC_PULSELED)] = BIT_MASK(MSC_PULSELED);
/* get a handle to the interrupt data pipe */
pipe = usb_rcvintpipe(udev, endpoint->bEndpointAddress);
maxp = usb_maxpacket(udev, pipe, usb_pipeout(pipe));
if (maxp < POWERMATE_PAYLOAD_SIZE_MIN || maxp > POWERMATE_PAYLOAD_SIZE_MAX) {
printk(KERN_WARNING "powermate: Expected payload of %d--%d bytes, found %d bytes!\n",
POWERMATE_PAYLOAD_SIZE_MIN, POWERMATE_PAYLOAD_SIZE_MAX, maxp);
maxp = POWERMATE_PAYLOAD_SIZE_MAX;
}
usb_fill_int_urb(pm->irq, udev, pipe, pm->data,
maxp, powermate_irq,
pm, endpoint->bInterval);
pm->irq->transfer_dma = pm->data_dma;
pm->irq->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
/* register our interrupt URB with the USB system */
if (usb_submit_urb(pm->irq, GFP_KERNEL)) {
error = -EIO;
goto fail4;
}
error = input_register_device(pm->input);
if (error)
goto fail5;
/* force an update of everything */
pm->requires_update = UPDATE_PULSE_ASLEEP | UPDATE_PULSE_AWAKE | UPDATE_PULSE_MODE | UPDATE_STATIC_BRIGHTNESS;
powermate_pulse_led(pm, 0x80, 255, 0, 1, 0); // set default pulse parameters
usb_set_intfdata(intf, pm);
return 0;
fail5: usb_kill_urb(pm->irq);
fail4: usb_free_urb(pm->config);
fail3: usb_free_urb(pm->irq);
fail2: powermate_free_buffers(udev, pm);
fail1: input_free_device(input_dev);
kfree(pm);
return error;
}
/* Called when a USB device we've accepted ownership of is removed */
static void powermate_disconnect(struct usb_interface *intf)
{
struct powermate_device *pm = usb_get_intfdata (intf);
usb_set_intfdata(intf, NULL);
if (pm) {
pm->requires_update = 0;
usb_kill_urb(pm->irq);
input_unregister_device(pm->input);
usb_free_urb(pm->irq);
usb_free_urb(pm->config);
powermate_free_buffers(interface_to_usbdev(intf), pm);
kfree(pm);
}
}
static struct usb_device_id powermate_devices [] = {
{ USB_DEVICE(POWERMATE_VENDOR, POWERMATE_PRODUCT_NEW) },
{ USB_DEVICE(POWERMATE_VENDOR, POWERMATE_PRODUCT_OLD) },
{ USB_DEVICE(CONTOUR_VENDOR, CONTOUR_JOG) },
{ } /* Terminating entry */
};
MODULE_DEVICE_TABLE (usb, powermate_devices);
static struct usb_driver powermate_driver = {
.name = "powermate",
.probe = powermate_probe,
.disconnect = powermate_disconnect,
.id_table = powermate_devices,
};
static int __init powermate_init(void)
{
return usb_register(&powermate_driver);
}
static void __exit powermate_cleanup(void)
{
usb_deregister(&powermate_driver);
}
module_init(powermate_init);
module_exit(powermate_cleanup);
MODULE_AUTHOR( "William R Sowerbutts" );
MODULE_DESCRIPTION( "Griffin Technology, Inc PowerMate driver" );
MODULE_LICENSE("GPL");
| gpl-2.0 |
blade-af3/android_kernel_zte_P731A20 | net/tipc/addr.c | 3501 | 3315 | /*
* net/tipc/addr.c: TIPC address utility routines
*
* Copyright (c) 2000-2006, Ericsson AB
* Copyright (c) 2004-2005, 2010-2011, Wind River Systems
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. Neither the names of the copyright holders nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* Alternatively, this software may be distributed under the terms of the
* GNU General Public License ("GPL") version 2 as published by the Free
* Software Foundation.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include "core.h"
#include "addr.h"
/**
* tipc_addr_domain_valid - validates a network domain address
*
* Accepts <Z.C.N>, <Z.C.0>, <Z.0.0>, and <0.0.0>,
* where Z, C, and N are non-zero.
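* For example, <1.1.0> (a cluster) is accepted, while <0.1.5> is not:
* a node may only be specified together with its zone and cluster.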
*
* Returns 1 if domain address is valid, otherwise 0
*/
int tipc_addr_domain_valid(u32 addr)
{
u32 n = tipc_node(addr);
u32 c = tipc_cluster(addr);
u32 z = tipc_zone(addr);
if (n && (!z || !c))
return 0;
if (c && !z)
return 0;
return 1;
}
/**
* tipc_addr_node_valid - validates a proposed network address for this node
*
* Accepts <Z.C.N>, where Z, C, and N are non-zero.
*
* Returns 1 if address can be used, otherwise 0
*/
int tipc_addr_node_valid(u32 addr)
{
return tipc_addr_domain_valid(addr) && tipc_node(addr);
}
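/*
* tipc_in_scope - checks whether <addr> falls within lookup domain <domain>:
* a domain of <0.0.0> matches any address, <Z.C.0> matches the whole cluster
* and <Z.0.0> the whole zone.
*/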
int tipc_in_scope(u32 domain, u32 addr)
{
if (!domain || (domain == addr))
return 1;
if (domain == tipc_cluster_mask(addr)) /* domain <Z.C.0> */
return 1;
if (domain == tipc_zone_mask(addr)) /* domain <Z.0.0> */
return 1;
return 0;
}
/**
* tipc_addr_scope - convert message lookup domain to a 2-bit scope value
*/
int tipc_addr_scope(u32 domain)
{
if (likely(!domain))
return TIPC_ZONE_SCOPE;
if (tipc_node(domain))
return TIPC_NODE_SCOPE;
if (tipc_cluster(domain))
return TIPC_CLUSTER_SCOPE;
return TIPC_ZONE_SCOPE;
}
char *tipc_addr_string_fill(char *string, u32 addr)
{
snprintf(string, 16, "<%u.%u.%u>",
tipc_zone(addr), tipc_cluster(addr), tipc_node(addr));
return string;
}
| gpl-2.0 |
StephenRJ/cm12_kernel_moto_shamu | fs/squashfs/inode.c | 4269 | 12721 | /*
* Squashfs - a compressed read only filesystem for Linux
*
* Copyright (c) 2002, 2003, 2004, 2005, 2006, 2007, 2008
* Phillip Lougher <phillip@squashfs.org.uk>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2,
* or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*
* inode.c
*/
/*
* This file implements code to create and read inodes from disk.
*
* Inodes in Squashfs are identified by a 48-bit inode number which encodes the
* location of the compressed metadata block containing the inode, and the byte
* offset into that block where the inode is placed (<block, offset>).
*
* To maximise compression there are different inodes for each file type
* (regular file, directory, device, etc.), the inode contents and length
* varying with the type.
*
* To further maximise compression, two types of regular file inode and
* directory inode are defined: inodes optimised for frequently occurring
* regular files and directories, and extended types where extra
* information has to be stored.
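*
* For example (assuming the usual split of 16 bits of byte offset in the
* low bits of the inode number), inode 0x12340056 would refer to the inode
* at byte offset 0x56 within the metadata block starting at offset 0x1234.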
*/
#include <linux/fs.h>
#include <linux/vfs.h>
#include <linux/xattr.h>
#include "squashfs_fs.h"
#include "squashfs_fs_sb.h"
#include "squashfs_fs_i.h"
#include "squashfs.h"
#include "xattr.h"
/*
* Initialise VFS inode with the base inode information common to all
* Squashfs inode types. Sqsh_ino contains the unswapped base inode
* off disk.
*/
static int squashfs_new_inode(struct super_block *sb, struct inode *inode,
struct squashfs_base_inode *sqsh_ino)
{
uid_t i_uid;
gid_t i_gid;
int err;
err = squashfs_get_id(sb, le16_to_cpu(sqsh_ino->uid), &i_uid);
if (err)
return err;
err = squashfs_get_id(sb, le16_to_cpu(sqsh_ino->guid), &i_gid);
if (err)
return err;
i_uid_write(inode, i_uid);
i_gid_write(inode, i_gid);
inode->i_ino = le32_to_cpu(sqsh_ino->inode_number);
inode->i_mtime.tv_sec = le32_to_cpu(sqsh_ino->mtime);
inode->i_atime.tv_sec = inode->i_mtime.tv_sec;
inode->i_ctime.tv_sec = inode->i_mtime.tv_sec;
inode->i_mode = le16_to_cpu(sqsh_ino->mode);
inode->i_size = 0;
return err;
}
struct inode *squashfs_iget(struct super_block *sb, long long ino,
unsigned int ino_number)
{
struct inode *inode = iget_locked(sb, ino_number);
int err;
TRACE("Entered squashfs_iget\n");
if (!inode)
return ERR_PTR(-ENOMEM);
if (!(inode->i_state & I_NEW))
return inode;
err = squashfs_read_inode(inode, ino);
if (err) {
iget_failed(inode);
return ERR_PTR(err);
}
unlock_new_inode(inode);
return inode;
}
/*
* Initialise VFS inode by reading inode from inode table (compressed
* metadata). The format and amount of data read depends on type.
*/
int squashfs_read_inode(struct inode *inode, long long ino)
{
struct super_block *sb = inode->i_sb;
struct squashfs_sb_info *msblk = sb->s_fs_info;
u64 block = SQUASHFS_INODE_BLK(ino) + msblk->inode_table;
int err, type, offset = SQUASHFS_INODE_OFFSET(ino);
union squashfs_inode squashfs_ino;
struct squashfs_base_inode *sqshb_ino = &squashfs_ino.base;
int xattr_id = SQUASHFS_INVALID_XATTR;
TRACE("Entered squashfs_read_inode\n");
/*
* Read inode base common to all inode types.
*/
err = squashfs_read_metadata(sb, sqshb_ino, &block,
&offset, sizeof(*sqshb_ino));
if (err < 0)
goto failed_read;
err = squashfs_new_inode(sb, inode, sqshb_ino);
if (err)
goto failed_read;
block = SQUASHFS_INODE_BLK(ino) + msblk->inode_table;
offset = SQUASHFS_INODE_OFFSET(ino);
type = le16_to_cpu(sqshb_ino->inode_type);
switch (type) {
case SQUASHFS_REG_TYPE: {
unsigned int frag_offset, frag;
int frag_size;
u64 frag_blk;
struct squashfs_reg_inode *sqsh_ino = &squashfs_ino.reg;
err = squashfs_read_metadata(sb, sqsh_ino, &block, &offset,
sizeof(*sqsh_ino));
if (err < 0)
goto failed_read;
frag = le32_to_cpu(sqsh_ino->fragment);
if (frag != SQUASHFS_INVALID_FRAG) {
frag_offset = le32_to_cpu(sqsh_ino->offset);
frag_size = squashfs_frag_lookup(sb, frag, &frag_blk);
if (frag_size < 0) {
err = frag_size;
goto failed_read;
}
} else {
frag_blk = SQUASHFS_INVALID_BLK;
frag_size = 0;
frag_offset = 0;
}
set_nlink(inode, 1);
inode->i_size = le32_to_cpu(sqsh_ino->file_size);
inode->i_fop = &generic_ro_fops;
inode->i_mode |= S_IFREG;
inode->i_blocks = ((inode->i_size - 1) >> 9) + 1;
squashfs_i(inode)->fragment_block = frag_blk;
squashfs_i(inode)->fragment_size = frag_size;
squashfs_i(inode)->fragment_offset = frag_offset;
squashfs_i(inode)->start = le32_to_cpu(sqsh_ino->start_block);
squashfs_i(inode)->block_list_start = block;
squashfs_i(inode)->offset = offset;
inode->i_data.a_ops = &squashfs_aops;
TRACE("File inode %x:%x, start_block %llx, block_list_start "
"%llx, offset %x\n", SQUASHFS_INODE_BLK(ino),
offset, squashfs_i(inode)->start, block, offset);
break;
}
case SQUASHFS_LREG_TYPE: {
unsigned int frag_offset, frag;
int frag_size;
u64 frag_blk;
struct squashfs_lreg_inode *sqsh_ino = &squashfs_ino.lreg;
err = squashfs_read_metadata(sb, sqsh_ino, &block, &offset,
sizeof(*sqsh_ino));
if (err < 0)
goto failed_read;
frag = le32_to_cpu(sqsh_ino->fragment);
if (frag != SQUASHFS_INVALID_FRAG) {
frag_offset = le32_to_cpu(sqsh_ino->offset);
frag_size = squashfs_frag_lookup(sb, frag, &frag_blk);
if (frag_size < 0) {
err = frag_size;
goto failed_read;
}
} else {
frag_blk = SQUASHFS_INVALID_BLK;
frag_size = 0;
frag_offset = 0;
}
xattr_id = le32_to_cpu(sqsh_ino->xattr);
set_nlink(inode, le32_to_cpu(sqsh_ino->nlink));
inode->i_size = le64_to_cpu(sqsh_ino->file_size);
inode->i_op = &squashfs_inode_ops;
inode->i_fop = &generic_ro_fops;
inode->i_mode |= S_IFREG;
inode->i_blocks = (inode->i_size -
le64_to_cpu(sqsh_ino->sparse) + 511) >> 9;
squashfs_i(inode)->fragment_block = frag_blk;
squashfs_i(inode)->fragment_size = frag_size;
squashfs_i(inode)->fragment_offset = frag_offset;
squashfs_i(inode)->start = le64_to_cpu(sqsh_ino->start_block);
squashfs_i(inode)->block_list_start = block;
squashfs_i(inode)->offset = offset;
inode->i_data.a_ops = &squashfs_aops;
TRACE("File inode %x:%x, start_block %llx, block_list_start "
"%llx, offset %x\n", SQUASHFS_INODE_BLK(ino),
offset, squashfs_i(inode)->start, block, offset);
break;
}
case SQUASHFS_DIR_TYPE: {
struct squashfs_dir_inode *sqsh_ino = &squashfs_ino.dir;
err = squashfs_read_metadata(sb, sqsh_ino, &block, &offset,
sizeof(*sqsh_ino));
if (err < 0)
goto failed_read;
set_nlink(inode, le32_to_cpu(sqsh_ino->nlink));
inode->i_size = le16_to_cpu(sqsh_ino->file_size);
inode->i_op = &squashfs_dir_inode_ops;
inode->i_fop = &squashfs_dir_ops;
inode->i_mode |= S_IFDIR;
squashfs_i(inode)->start = le32_to_cpu(sqsh_ino->start_block);
squashfs_i(inode)->offset = le16_to_cpu(sqsh_ino->offset);
squashfs_i(inode)->dir_idx_cnt = 0;
squashfs_i(inode)->parent = le32_to_cpu(sqsh_ino->parent_inode);
TRACE("Directory inode %x:%x, start_block %llx, offset %x\n",
SQUASHFS_INODE_BLK(ino), offset,
squashfs_i(inode)->start,
le16_to_cpu(sqsh_ino->offset));
break;
}
case SQUASHFS_LDIR_TYPE: {
struct squashfs_ldir_inode *sqsh_ino = &squashfs_ino.ldir;
err = squashfs_read_metadata(sb, sqsh_ino, &block, &offset,
sizeof(*sqsh_ino));
if (err < 0)
goto failed_read;
xattr_id = le32_to_cpu(sqsh_ino->xattr);
set_nlink(inode, le32_to_cpu(sqsh_ino->nlink));
inode->i_size = le32_to_cpu(sqsh_ino->file_size);
inode->i_op = &squashfs_dir_inode_ops;
inode->i_fop = &squashfs_dir_ops;
inode->i_mode |= S_IFDIR;
squashfs_i(inode)->start = le32_to_cpu(sqsh_ino->start_block);
squashfs_i(inode)->offset = le16_to_cpu(sqsh_ino->offset);
squashfs_i(inode)->dir_idx_start = block;
squashfs_i(inode)->dir_idx_offset = offset;
squashfs_i(inode)->dir_idx_cnt = le16_to_cpu(sqsh_ino->i_count);
squashfs_i(inode)->parent = le32_to_cpu(sqsh_ino->parent_inode);
TRACE("Long directory inode %x:%x, start_block %llx, offset "
"%x\n", SQUASHFS_INODE_BLK(ino), offset,
squashfs_i(inode)->start,
le16_to_cpu(sqsh_ino->offset));
break;
}
case SQUASHFS_SYMLINK_TYPE:
case SQUASHFS_LSYMLINK_TYPE: {
struct squashfs_symlink_inode *sqsh_ino = &squashfs_ino.symlink;
err = squashfs_read_metadata(sb, sqsh_ino, &block, &offset,
sizeof(*sqsh_ino));
if (err < 0)
goto failed_read;
set_nlink(inode, le32_to_cpu(sqsh_ino->nlink));
inode->i_size = le32_to_cpu(sqsh_ino->symlink_size);
inode->i_op = &squashfs_symlink_inode_ops;
inode->i_data.a_ops = &squashfs_symlink_aops;
inode->i_mode |= S_IFLNK;
squashfs_i(inode)->start = block;
squashfs_i(inode)->offset = offset;
if (type == SQUASHFS_LSYMLINK_TYPE) {
__le32 xattr;
err = squashfs_read_metadata(sb, NULL, &block,
&offset, inode->i_size);
if (err < 0)
goto failed_read;
err = squashfs_read_metadata(sb, &xattr, &block,
&offset, sizeof(xattr));
if (err < 0)
goto failed_read;
xattr_id = le32_to_cpu(xattr);
}
TRACE("Symbolic link inode %x:%x, start_block %llx, offset "
"%x\n", SQUASHFS_INODE_BLK(ino), offset,
block, offset);
break;
}
case SQUASHFS_BLKDEV_TYPE:
case SQUASHFS_CHRDEV_TYPE: {
struct squashfs_dev_inode *sqsh_ino = &squashfs_ino.dev;
unsigned int rdev;
err = squashfs_read_metadata(sb, sqsh_ino, &block, &offset,
sizeof(*sqsh_ino));
if (err < 0)
goto failed_read;
if (type == SQUASHFS_CHRDEV_TYPE)
inode->i_mode |= S_IFCHR;
else
inode->i_mode |= S_IFBLK;
set_nlink(inode, le32_to_cpu(sqsh_ino->nlink));
rdev = le32_to_cpu(sqsh_ino->rdev);
init_special_inode(inode, inode->i_mode, new_decode_dev(rdev));
TRACE("Device inode %x:%x, rdev %x\n",
SQUASHFS_INODE_BLK(ino), offset, rdev);
break;
}
case SQUASHFS_LBLKDEV_TYPE:
case SQUASHFS_LCHRDEV_TYPE: {
struct squashfs_ldev_inode *sqsh_ino = &squashfs_ino.ldev;
unsigned int rdev;
err = squashfs_read_metadata(sb, sqsh_ino, &block, &offset,
sizeof(*sqsh_ino));
if (err < 0)
goto failed_read;
if (type == SQUASHFS_LCHRDEV_TYPE)
inode->i_mode |= S_IFCHR;
else
inode->i_mode |= S_IFBLK;
xattr_id = le32_to_cpu(sqsh_ino->xattr);
inode->i_op = &squashfs_inode_ops;
set_nlink(inode, le32_to_cpu(sqsh_ino->nlink));
rdev = le32_to_cpu(sqsh_ino->rdev);
init_special_inode(inode, inode->i_mode, new_decode_dev(rdev));
TRACE("Device inode %x:%x, rdev %x\n",
SQUASHFS_INODE_BLK(ino), offset, rdev);
break;
}
case SQUASHFS_FIFO_TYPE:
case SQUASHFS_SOCKET_TYPE: {
struct squashfs_ipc_inode *sqsh_ino = &squashfs_ino.ipc;
err = squashfs_read_metadata(sb, sqsh_ino, &block, &offset,
sizeof(*sqsh_ino));
if (err < 0)
goto failed_read;
if (type == SQUASHFS_FIFO_TYPE)
inode->i_mode |= S_IFIFO;
else
inode->i_mode |= S_IFSOCK;
set_nlink(inode, le32_to_cpu(sqsh_ino->nlink));
init_special_inode(inode, inode->i_mode, 0);
break;
}
case SQUASHFS_LFIFO_TYPE:
case SQUASHFS_LSOCKET_TYPE: {
struct squashfs_lipc_inode *sqsh_ino = &squashfs_ino.lipc;
err = squashfs_read_metadata(sb, sqsh_ino, &block, &offset,
sizeof(*sqsh_ino));
if (err < 0)
goto failed_read;
if (type == SQUASHFS_LFIFO_TYPE)
inode->i_mode |= S_IFIFO;
else
inode->i_mode |= S_IFSOCK;
xattr_id = le32_to_cpu(sqsh_ino->xattr);
inode->i_op = &squashfs_inode_ops;
set_nlink(inode, le32_to_cpu(sqsh_ino->nlink));
init_special_inode(inode, inode->i_mode, 0);
break;
}
default:
ERROR("Unknown inode type %d in squashfs_iget!\n", type);
return -EINVAL;
}
if (xattr_id != SQUASHFS_INVALID_XATTR && msblk->xattr_id_table) {
err = squashfs_xattr_lookup(sb, xattr_id,
&squashfs_i(inode)->xattr_count,
&squashfs_i(inode)->xattr_size,
&squashfs_i(inode)->xattr);
if (err < 0)
goto failed_read;
inode->i_blocks += ((squashfs_i(inode)->xattr_size - 1) >> 9)
+ 1;
} else
squashfs_i(inode)->xattr_count = 0;
return 0;
failed_read:
ERROR("Unable to read inode 0x%llx\n", ino);
return err;
}
const struct inode_operations squashfs_inode_ops = {
.getxattr = generic_getxattr,
.listxattr = squashfs_listxattr
};
| gpl-2.0 |
akhilnarang/ThugLife_bullhead | drivers/sbus/char/openprom.c | 4269 | 16222 | /*
* Linux/SPARC PROM Configuration Driver
* Copyright (C) 1996 Thomas K. Dyas (tdyas@noc.rutgers.edu)
* Copyright (C) 1996 Eddie C. Dost (ecd@skynet.be)
*
* This character device driver allows user programs to access the
* PROM device tree. It is compatible with the SunOS /dev/openprom
* driver and the NetBSD /dev/openprom driver. The SunOS eeprom
* utility works without any modifications.
*
* The driver uses a minor number under the misc device major. The
* file read/write mode determines the type of access to the PROM.
* Interrupts are disabled whenever the driver calls into the PROM for
* sanity's sake.
*/
/* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation; either version 2 of the
* License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/mutex.h>
#include <linux/string.h>
#include <linux/miscdevice.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <asm/oplib.h>
#include <asm/prom.h>
#include <asm/uaccess.h>
#include <asm/openpromio.h>
#ifdef CONFIG_PCI
#include <linux/pci.h>
#endif
MODULE_AUTHOR("Thomas K. Dyas (tdyas@noc.rutgers.edu) and Eddie C. Dost (ecd@skynet.be)");
MODULE_DESCRIPTION("OPENPROM Configuration Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION("1.0");
MODULE_ALIAS_MISCDEV(SUN_OPENPROM_MINOR);
/* Private data kept by the driver for each descriptor. */
typedef struct openprom_private_data
{
struct device_node *current_node; /* Current node for SunOS ioctls. */
struct device_node *lastnode; /* Last valid node used by BSD ioctls. */
} DATA;
/* ID of the PROM node containing all of the EEPROM options. */
static DEFINE_MUTEX(openprom_mutex);
static struct device_node *options_node;
/*
* Copy an openpromio structure into kernel space from user space.
* This routine does error checking to make sure that all memory
* accesses are within bounds. A pointer to the allocated openpromio
* structure will be placed in "*opp_p". Return value is the length
* of the user supplied buffer.
*/
static int copyin(struct openpromio __user *info, struct openpromio **opp_p)
{
unsigned int bufsize;
if (!info || !opp_p)
return -EFAULT;
if (get_user(bufsize, &info->oprom_size))
return -EFAULT;
if (bufsize == 0)
return -EINVAL;
/* If the bufsize is too large, just limit it.
* Fix from Jason Rappleye.
*/
if (bufsize > OPROMMAXPARAM)
bufsize = OPROMMAXPARAM;
if (!(*opp_p = kzalloc(sizeof(int) + bufsize + 1, GFP_KERNEL)))
return -ENOMEM;
if (copy_from_user(&(*opp_p)->oprom_array,
&info->oprom_array, bufsize)) {
kfree(*opp_p);
return -EFAULT;
}
return bufsize;
}
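/* Like copyin(), but used for OPROMSETOPT: gathers two NUL-terminated
strings (a property name followed by its value) from the user buffer. */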
static int getstrings(struct openpromio __user *info, struct openpromio **opp_p)
{
int n, bufsize;
char c;
if (!info || !opp_p)
return -EFAULT;
if (!(*opp_p = kzalloc(sizeof(int) + OPROMMAXPARAM + 1, GFP_KERNEL)))
return -ENOMEM;
(*opp_p)->oprom_size = 0;
n = bufsize = 0;
while ((n < 2) && (bufsize < OPROMMAXPARAM)) {
if (get_user(c, &info->oprom_array[bufsize])) {
kfree(*opp_p);
return -EFAULT;
}
if (c == '\0')
n++;
(*opp_p)->oprom_array[bufsize++] = c;
}
if (!n) {
kfree(*opp_p);
return -EINVAL;
}
return bufsize;
}
/*
* Copy an openpromio structure in kernel space back to user space.
*/
static int copyout(void __user *info, struct openpromio *opp, int len)
{
if (copy_to_user(info, opp, len))
return -EFAULT;
return 0;
}
static int opromgetprop(void __user *argp, struct device_node *dp, struct openpromio *op, int bufsize)
{
const void *pval;
int len;
if (!dp ||
!(pval = of_get_property(dp, op->oprom_array, &len)) ||
len <= 0 || len > bufsize)
return copyout(argp, op, sizeof(int));
memcpy(op->oprom_array, pval, len);
op->oprom_array[len] = '\0';
op->oprom_size = len;
return copyout(argp, op, sizeof(int) + bufsize);
}
static int opromnxtprop(void __user *argp, struct device_node *dp, struct openpromio *op, int bufsize)
{
struct property *prop;
int len;
if (!dp)
return copyout(argp, op, sizeof(int));
if (op->oprom_array[0] == '\0') {
prop = dp->properties;
if (!prop)
return copyout(argp, op, sizeof(int));
len = strlen(prop->name);
} else {
prop = of_find_property(dp, op->oprom_array, NULL);
if (!prop ||
!prop->next ||
(len = strlen(prop->next->name)) + 1 > bufsize)
return copyout(argp, op, sizeof(int));
prop = prop->next;
}
memcpy(op->oprom_array, prop->name, len);
op->oprom_array[len] = '\0';
op->oprom_size = ++len;
return copyout(argp, op, sizeof(int) + bufsize);
}
static int opromsetopt(struct device_node *dp, struct openpromio *op, int bufsize)
{
char *buf = op->oprom_array + strlen(op->oprom_array) + 1;
int len = op->oprom_array + bufsize - buf;
return of_set_property(options_node, op->oprom_array, buf, len);
}
static int opromnext(void __user *argp, unsigned int cmd, struct device_node *dp, struct openpromio *op, int bufsize, DATA *data)
{
phandle ph;
BUILD_BUG_ON(sizeof(phandle) != sizeof(int));
if (bufsize < sizeof(phandle))
return -EINVAL;
ph = *((int *) op->oprom_array);
if (ph) {
dp = of_find_node_by_phandle(ph);
if (!dp)
return -EINVAL;
switch (cmd) {
case OPROMNEXT:
dp = dp->sibling;
break;
case OPROMCHILD:
dp = dp->child;
break;
case OPROMSETCUR:
default:
break;
}
} else {
/* Sibling of node zero is the root node. */
if (cmd != OPROMNEXT)
return -EINVAL;
dp = of_find_node_by_path("/");
}
ph = 0;
if (dp)
ph = dp->phandle;
data->current_node = dp;
*((int *) op->oprom_array) = ph;
op->oprom_size = sizeof(phandle);
return copyout(argp, op, bufsize + sizeof(int));
}
static int oprompci2node(void __user *argp, struct device_node *dp, struct openpromio *op, int bufsize, DATA *data)
{
int err = -EINVAL;
if (bufsize >= 2*sizeof(int)) {
#ifdef CONFIG_PCI
struct pci_dev *pdev;
struct device_node *dp;
pdev = pci_get_bus_and_slot (((int *) op->oprom_array)[0],
((int *) op->oprom_array)[1]);
dp = pci_device_to_OF_node(pdev);
data->current_node = dp;
*((int *)op->oprom_array) = dp->phandle;
op->oprom_size = sizeof(int);
err = copyout(argp, op, bufsize + sizeof(int));
pci_dev_put(pdev);
#endif
}
return err;
}
static int oprompath2node(void __user *argp, struct device_node *dp, struct openpromio *op, int bufsize, DATA *data)
{
phandle ph = 0;
dp = of_find_node_by_path(op->oprom_array);
if (dp)
ph = dp->phandle;
data->current_node = dp;
*((int *)op->oprom_array) = ph;
op->oprom_size = sizeof(int);
return copyout(argp, op, bufsize + sizeof(int));
}
static int opromgetbootargs(void __user *argp, struct openpromio *op, int bufsize)
{
char *buf = saved_command_line;
int len = strlen(buf);
if (len > bufsize)
return -EINVAL;
strcpy(op->oprom_array, buf);
op->oprom_size = len;
return copyout(argp, op, bufsize + sizeof(int));
}
/*
* SunOS and Solaris /dev/openprom ioctl calls.
*/
static long openprom_sunos_ioctl(struct file * file,
unsigned int cmd, unsigned long arg,
struct device_node *dp)
{
DATA *data = file->private_data;
struct openpromio *opp = NULL;
int bufsize, error = 0;
static int cnt;
void __user *argp = (void __user *)arg;
if (cmd == OPROMSETOPT)
bufsize = getstrings(argp, &opp);
else
bufsize = copyin(argp, &opp);
if (bufsize < 0)
return bufsize;
mutex_lock(&openprom_mutex);
switch (cmd) {
case OPROMGETOPT:
case OPROMGETPROP:
error = opromgetprop(argp, dp, opp, bufsize);
break;
case OPROMNXTOPT:
case OPROMNXTPROP:
error = opromnxtprop(argp, dp, opp, bufsize);
break;
case OPROMSETOPT:
case OPROMSETOPT2:
error = opromsetopt(dp, opp, bufsize);
break;
case OPROMNEXT:
case OPROMCHILD:
case OPROMSETCUR:
error = opromnext(argp, cmd, dp, opp, bufsize, data);
break;
case OPROMPCI2NODE:
error = oprompci2node(argp, dp, opp, bufsize, data);
break;
case OPROMPATH2NODE:
error = oprompath2node(argp, dp, opp, bufsize, data);
break;
case OPROMGETBOOTARGS:
error = opromgetbootargs(argp, opp, bufsize);
break;
case OPROMU2P:
case OPROMGETCONS:
case OPROMGETFBNAME:
if (cnt++ < 10)
printk(KERN_INFO "openprom_sunos_ioctl: unimplemented ioctl\n");
error = -EINVAL;
break;
default:
if (cnt++ < 10)
printk(KERN_INFO "openprom_sunos_ioctl: cmd 0x%X, arg 0x%lX\n", cmd, arg);
error = -EINVAL;
break;
}
kfree(opp);
mutex_unlock(&openprom_mutex);
return error;
}
static struct device_node *get_node(phandle n, DATA *data)
{
struct device_node *dp = of_find_node_by_phandle(n);
if (dp)
data->lastnode = dp;
return dp;
}
/* Copy in a whole string from userspace into kernelspace. */
static int copyin_string(char __user *user, size_t len, char **ptr)
{
char *tmp;
if ((ssize_t)len < 0 || (ssize_t)(len + 1) < 0)
return -EINVAL;
tmp = kmalloc(len + 1, GFP_KERNEL);
if (!tmp)
return -ENOMEM;
if (copy_from_user(tmp, user, len)) {
kfree(tmp);
return -EFAULT;
}
tmp[len] = '\0';
*ptr = tmp;
return 0;
}
/*
* NetBSD /dev/openprom ioctl calls.
*/
static int opiocget(void __user *argp, DATA *data)
{
struct opiocdesc op;
struct device_node *dp;
char *str;
const void *pval;
int err, len;
if (copy_from_user(&op, argp, sizeof(op)))
return -EFAULT;
dp = get_node(op.op_nodeid, data);
err = copyin_string(op.op_name, op.op_namelen, &str);
if (err)
return err;
pval = of_get_property(dp, str, &len);
err = 0;
if (!pval || len > op.op_buflen) {
err = -EINVAL;
} else {
op.op_buflen = len;
if (copy_to_user(argp, &op, sizeof(op)) ||
copy_to_user(op.op_buf, pval, len))
err = -EFAULT;
}
kfree(str);
return err;
}
static int opiocnextprop(void __user *argp, DATA *data)
{
struct opiocdesc op;
struct device_node *dp;
struct property *prop;
char *str;
int err, len;
if (copy_from_user(&op, argp, sizeof(op)))
return -EFAULT;
dp = get_node(op.op_nodeid, data);
if (!dp)
return -EINVAL;
err = copyin_string(op.op_name, op.op_namelen, &str);
if (err)
return err;
if (str[0] == '\0') {
prop = dp->properties;
} else {
prop = of_find_property(dp, str, NULL);
if (prop)
prop = prop->next;
}
kfree(str);
if (!prop)
len = 0;
else
len = prop->length;
if (len > op.op_buflen)
len = op.op_buflen;
if (copy_to_user(argp, &op, sizeof(op)))
return -EFAULT;
if (len &&
copy_to_user(op.op_buf, prop->value, len))
return -EFAULT;
return 0;
}
static int opiocset(void __user *argp, DATA *data)
{
struct opiocdesc op;
struct device_node *dp;
char *str, *tmp;
int err;
if (copy_from_user(&op, argp, sizeof(op)))
return -EFAULT;
dp = get_node(op.op_nodeid, data);
if (!dp)
return -EINVAL;
err = copyin_string(op.op_name, op.op_namelen, &str);
if (err)
return err;
err = copyin_string(op.op_buf, op.op_buflen, &tmp);
if (err) {
kfree(str);
return err;
}
err = of_set_property(dp, str, tmp, op.op_buflen);
kfree(str);
kfree(tmp);
return err;
}
static int opiocgetnext(unsigned int cmd, void __user *argp)
{
struct device_node *dp;
phandle nd;
BUILD_BUG_ON(sizeof(phandle) != sizeof(int));
if (copy_from_user(&nd, argp, sizeof(phandle)))
return -EFAULT;
if (nd == 0) {
if (cmd != OPIOCGETNEXT)
return -EINVAL;
dp = of_find_node_by_path("/");
} else {
dp = of_find_node_by_phandle(nd);
nd = 0;
if (dp) {
if (cmd == OPIOCGETNEXT)
dp = dp->sibling;
else
dp = dp->child;
}
}
if (dp)
nd = dp->phandle;
if (copy_to_user(argp, &nd, sizeof(phandle)))
return -EFAULT;
return 0;
}
static int openprom_bsd_ioctl(struct file * file,
unsigned int cmd, unsigned long arg)
{
DATA *data = file->private_data;
void __user *argp = (void __user *)arg;
int err;
mutex_lock(&openprom_mutex);
switch (cmd) {
case OPIOCGET:
err = opiocget(argp, data);
break;
case OPIOCNEXTPROP:
err = opiocnextprop(argp, data);
break;
case OPIOCSET:
err = opiocset(argp, data);
break;
case OPIOCGETOPTNODE:
BUILD_BUG_ON(sizeof(phandle) != sizeof(int));
err = 0;
if (copy_to_user(argp, &options_node->phandle, sizeof(phandle)))
err = -EFAULT;
break;
case OPIOCGETNEXT:
case OPIOCGETCHILD:
err = opiocgetnext(cmd, argp);
break;
default:
err = -EINVAL;
break;
}
mutex_unlock(&openprom_mutex);
return err;
}
/*
* Handoff control to the correct ioctl handler.
*/
static long openprom_ioctl(struct file * file,
unsigned int cmd, unsigned long arg)
{
DATA *data = file->private_data;
switch (cmd) {
case OPROMGETOPT:
case OPROMNXTOPT:
if ((file->f_mode & FMODE_READ) == 0)
return -EPERM;
return openprom_sunos_ioctl(file, cmd, arg,
options_node);
case OPROMSETOPT:
case OPROMSETOPT2:
if ((file->f_mode & FMODE_WRITE) == 0)
return -EPERM;
return openprom_sunos_ioctl(file, cmd, arg,
options_node);
case OPROMNEXT:
case OPROMCHILD:
case OPROMGETPROP:
case OPROMNXTPROP:
if ((file->f_mode & FMODE_READ) == 0)
return -EPERM;
return openprom_sunos_ioctl(file, cmd, arg,
data->current_node);
case OPROMU2P:
case OPROMGETCONS:
case OPROMGETFBNAME:
case OPROMGETBOOTARGS:
case OPROMSETCUR:
case OPROMPCI2NODE:
case OPROMPATH2NODE:
if ((file->f_mode & FMODE_READ) == 0)
return -EPERM;
return openprom_sunos_ioctl(file, cmd, arg, NULL);
case OPIOCGET:
case OPIOCNEXTPROP:
case OPIOCGETOPTNODE:
case OPIOCGETNEXT:
case OPIOCGETCHILD:
if ((file->f_mode & FMODE_READ) == 0)
return -EBADF;
return openprom_bsd_ioctl(file, cmd, arg);
case OPIOCSET:
if ((file->f_mode & FMODE_WRITE) == 0)
return -EBADF;
return openprom_bsd_ioctl(file, cmd, arg);
default:
return -EINVAL;
}
}
static long openprom_compat_ioctl(struct file *file, unsigned int cmd,
unsigned long arg)
{
long rval = -ENOTTY;
/*
* SunOS/Solaris only; the NetBSD ones have embedded pointers in
* the arg which we'd need to clean up...
*/
switch (cmd) {
case OPROMGETOPT:
case OPROMSETOPT:
case OPROMNXTOPT:
case OPROMSETOPT2:
case OPROMNEXT:
case OPROMCHILD:
case OPROMGETPROP:
case OPROMNXTPROP:
case OPROMU2P:
case OPROMGETCONS:
case OPROMGETFBNAME:
case OPROMGETBOOTARGS:
case OPROMSETCUR:
case OPROMPCI2NODE:
case OPROMPATH2NODE:
rval = openprom_ioctl(file, cmd, arg);
break;
}
return rval;
}
static int openprom_open(struct inode * inode, struct file * file)
{
DATA *data;
data = kmalloc(sizeof(DATA), GFP_KERNEL);
if (!data)
return -ENOMEM;
mutex_lock(&openprom_mutex);
data->current_node = of_find_node_by_path("/");
data->lastnode = data->current_node;
file->private_data = (void *) data;
mutex_unlock(&openprom_mutex);
return 0;
}
static int openprom_release(struct inode * inode, struct file * file)
{
kfree(file->private_data);
return 0;
}
static const struct file_operations openprom_fops = {
.owner = THIS_MODULE,
.llseek = no_llseek,
.unlocked_ioctl = openprom_ioctl,
.compat_ioctl = openprom_compat_ioctl,
.open = openprom_open,
.release = openprom_release,
};
static struct miscdevice openprom_dev = {
.minor = SUN_OPENPROM_MINOR,
.name = "openprom",
.fops = &openprom_fops,
};
static int __init openprom_init(void)
{
struct device_node *dp;
int err;
err = misc_register(&openprom_dev);
if (err)
return err;
dp = of_find_node_by_path("/");
dp = dp->child;
while (dp) {
if (!strcmp(dp->name, "options"))
break;
dp = dp->sibling;
}
options_node = dp;
if (!options_node) {
misc_deregister(&openprom_dev);
return -EIO;
}
return 0;
}
static void __exit openprom_cleanup(void)
{
misc_deregister(&openprom_dev);
}
module_init(openprom_init);
module_exit(openprom_cleanup);
| gpl-2.0 |
eskyuu/linux | arch/m68k/coldfire/intc-simr.c | 4525 | 4866 | /*
* intc-simr.c
*
* Interrupt controller code for the ColdFire 5208, 5207 & 532x parts.
*
* (C) Copyright 2009-2011, Greg Ungerer <gerg@snapgear.com>
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file COPYING in the main directory of this archive
* for more details.
*/
#include <linux/types.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/io.h>
#include <asm/coldfire.h>
#include <asm/mcfsim.h>
#include <asm/traps.h>
/*
* The EDGE Port interrupts are the fixed 7 external interrupts.
* They need some special treatment, for example they need to be acked.
*/
#ifdef CONFIG_M520x
/*
* The 520x parts only support a limited range of these external
* interrupts, only 1, 4 and 7 (as interrupts 65, 66 and 67).
*/
#define EINT0 64 /* Not actually used, but a spot is reserved for it */
#define EINT1 65 /* EDGE Port interrupt 1 */
#define EINT4 66 /* EDGE Port interrupt 4 */
#define EINT7 67 /* EDGE Port interrupt 7 */
static unsigned int irqebitmap[] = { 0, 1, 4, 7 };
static unsigned int inline irq2ebit(unsigned int irq)
{
return irqebitmap[irq - EINT0];
}
#else
/*
* Most of the ColdFire parts with the EDGE Port module just have
* a straight direct mapping of the 7 external interrupts. Although
* there is a bit reserved for 0, it is not used.
*/
#define EINT0 64 /* Not actually used, but a spot is reserved for it */
#define EINT1 65 /* EDGE Port interrupt 1 */
#define EINT7 71 /* EDGE Port interrupt 7 */
static unsigned int inline irq2ebit(unsigned int irq)
{
return irq - EINT0;
}
#endif
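/*
 * For example (derived from the tables above): on 520x parts EINT1,
 * EINT4 and EINT7 (irqs 65, 66, 67) map to EPORT bits 1, 4 and 7,
 * while on the other parts irq (64 + n) maps directly to EPORT bit n.
 */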
/*
* There may be one, two or three interrupt control units, each with 64
* interrupts. If there is no second or third unit then MCFINTC1_* or
* MCFINTC2_* defines will be 0 (and code for them optimized away).
*/
static void intc_irq_mask(struct irq_data *d)
{
unsigned int irq = d->irq - MCFINT_VECBASE;
if (MCFINTC2_SIMR && (irq > 128))
__raw_writeb(irq - 128, MCFINTC2_SIMR);
else if (MCFINTC1_SIMR && (irq > 64))
__raw_writeb(irq - 64, MCFINTC1_SIMR);
else
__raw_writeb(irq, MCFINTC0_SIMR);
}
static void intc_irq_unmask(struct irq_data *d)
{
unsigned int irq = d->irq - MCFINT_VECBASE;
if (MCFINTC2_CIMR && (irq > 128))
__raw_writeb(irq - 128, MCFINTC2_CIMR);
else if (MCFINTC1_CIMR && (irq > 64))
__raw_writeb(irq - 64, MCFINTC1_CIMR);
else
__raw_writeb(irq, MCFINTC0_CIMR);
}
static void intc_irq_ack(struct irq_data *d)
{
unsigned int ebit = irq2ebit(d->irq);
__raw_writeb(0x1 << ebit, MCFEPORT_EPFR);
}
static unsigned int intc_irq_startup(struct irq_data *d)
{
unsigned int irq = d->irq;
if ((irq >= EINT1) && (irq <= EINT7)) {
unsigned int ebit = irq2ebit(irq);
u8 v;
#if defined(MCFEPORT_EPDDR)
/* Set EPORT line as input */
v = __raw_readb(MCFEPORT_EPDDR);
__raw_writeb(v & ~(0x1 << ebit), MCFEPORT_EPDDR);
#endif
/* Set EPORT line as interrupt source */
v = __raw_readb(MCFEPORT_EPIER);
__raw_writeb(v | (0x1 << ebit), MCFEPORT_EPIER);
}
irq -= MCFINT_VECBASE;
if (MCFINTC2_ICR0 && (irq > 128))
__raw_writeb(5, MCFINTC2_ICR0 + irq - 128);
else if (MCFINTC1_ICR0 && (irq > 64))
__raw_writeb(5, MCFINTC1_ICR0 + irq - 64);
else
__raw_writeb(5, MCFINTC0_ICR0 + irq);
intc_irq_unmask(d);
return 0;
}
static int intc_irq_set_type(struct irq_data *d, unsigned int type)
{
unsigned int ebit, irq = d->irq;
u16 pa, tb;
switch (type) {
case IRQ_TYPE_EDGE_RISING:
tb = 0x1;
break;
case IRQ_TYPE_EDGE_FALLING:
tb = 0x2;
break;
case IRQ_TYPE_EDGE_BOTH:
tb = 0x3;
break;
default:
/* Level triggered */
tb = 0;
break;
}
if (tb)
irq_set_handler(irq, handle_edge_irq);
ebit = irq2ebit(irq) * 2;
pa = __raw_readw(MCFEPORT_EPPAR);
pa = (pa & ~(0x3 << ebit)) | (tb << ebit);
__raw_writew(pa, MCFEPORT_EPPAR);
return 0;
}
static struct irq_chip intc_irq_chip = {
.name = "CF-INTC",
.irq_startup = intc_irq_startup,
.irq_mask = intc_irq_mask,
.irq_unmask = intc_irq_unmask,
};
static struct irq_chip intc_irq_chip_edge_port = {
.name = "CF-INTC-EP",
.irq_startup = intc_irq_startup,
.irq_mask = intc_irq_mask,
.irq_unmask = intc_irq_unmask,
.irq_ack = intc_irq_ack,
.irq_set_type = intc_irq_set_type,
};
void __init init_IRQ(void)
{
int irq, eirq;
/* Mask all interrupt sources */
__raw_writeb(0xff, MCFINTC0_SIMR);
if (MCFINTC1_SIMR)
__raw_writeb(0xff, MCFINTC1_SIMR);
if (MCFINTC2_SIMR)
__raw_writeb(0xff, MCFINTC2_SIMR);
eirq = MCFINT_VECBASE + 64 + (MCFINTC1_ICR0 ? 64 : 0) +
(MCFINTC2_ICR0 ? 64 : 0);
for (irq = MCFINT_VECBASE; (irq < eirq); irq++) {
if ((irq >= EINT1) && (irq <= EINT7))
irq_set_chip(irq, &intc_irq_chip_edge_port);
else
irq_set_chip(irq, &intc_irq_chip);
irq_set_irq_type(irq, IRQ_TYPE_LEVEL_HIGH);
irq_set_handler(irq, handle_level_irq);
}
}
| gpl-2.0 |
tbalden/android_kernel_htc_m9pw | arch/m68k/platform/coldfire/intc-simr.c | 4525 | 4866 | /*
* intc-simr.c
*
* Interrupt controller code for the ColdFire 5208, 5207 & 532x parts.
*
* (C) Copyright 2009-2011, Greg Ungerer <gerg@snapgear.com>
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file COPYING in the main directory of this archive
* for more details.
*/
#include <linux/types.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/io.h>
#include <asm/coldfire.h>
#include <asm/mcfsim.h>
#include <asm/traps.h>
/*
* The EDGE Port interrupts are the fixed 7 external interrupts.
* They need some special treatment, for example they need to be acked.
*/
#ifdef CONFIG_M520x
/*
* The 520x parts only support a limited range of these external
* interrupts, only 1, 4 and 7 (as interrupts 65, 66 and 67).
*/
#define EINT0 64 /* Not actually used, but a spot is reserved for it */
#define EINT1 65 /* EDGE Port interrupt 1 */
#define EINT4 66 /* EDGE Port interrupt 4 */
#define EINT7 67 /* EDGE Port interrupt 7 */
static unsigned int irqebitmap[] = { 0, 1, 4, 7 };
static unsigned int inline irq2ebit(unsigned int irq)
{
return irqebitmap[irq - EINT0];
}
#else
/*
* Most of the ColdFire parts with the EDGE Port module just have
* a straight direct mapping of the 7 external interrupts. Although
* there is a bit reserved for 0, it is not used.
*/
#define EINT0 64 /* Not actually used, but a spot is reserved for it */
#define EINT1 65 /* EDGE Port interrupt 1 */
#define EINT7 71 /* EDGE Port interrupt 7 */
static unsigned int inline irq2ebit(unsigned int irq)
{
return irq - EINT0;
}
#endif
/*
* There may be one, two or three interrupt control units, each with 64
* interrupts. If there is no second or third unit then MCFINTC1_* or
* MCFINTC2_* defines will be 0 (and code for them optimized away).
*/
static void intc_irq_mask(struct irq_data *d)
{
unsigned int irq = d->irq - MCFINT_VECBASE;
if (MCFINTC2_SIMR && (irq > 128))
__raw_writeb(irq - 128, MCFINTC2_SIMR);
else if (MCFINTC1_SIMR && (irq > 64))
__raw_writeb(irq - 64, MCFINTC1_SIMR);
else
__raw_writeb(irq, MCFINTC0_SIMR);
}
static void intc_irq_unmask(struct irq_data *d)
{
unsigned int irq = d->irq - MCFINT_VECBASE;
if (MCFINTC2_CIMR && (irq > 128))
__raw_writeb(irq - 128, MCFINTC2_CIMR);
else if (MCFINTC1_CIMR && (irq > 64))
__raw_writeb(irq - 64, MCFINTC1_CIMR);
else
__raw_writeb(irq, MCFINTC0_CIMR);
}
static void intc_irq_ack(struct irq_data *d)
{
unsigned int ebit = irq2ebit(d->irq);
__raw_writeb(0x1 << ebit, MCFEPORT_EPFR);
}
static unsigned int intc_irq_startup(struct irq_data *d)
{
unsigned int irq = d->irq;
if ((irq >= EINT1) && (irq <= EINT7)) {
unsigned int ebit = irq2ebit(irq);
u8 v;
#if defined(MCFEPORT_EPDDR)
/* Set EPORT line as input */
v = __raw_readb(MCFEPORT_EPDDR);
__raw_writeb(v & ~(0x1 << ebit), MCFEPORT_EPDDR);
#endif
/* Set EPORT line as interrupt source */
v = __raw_readb(MCFEPORT_EPIER);
__raw_writeb(v | (0x1 << ebit), MCFEPORT_EPIER);
}
irq -= MCFINT_VECBASE;
if (MCFINTC2_ICR0 && (irq > 128))
__raw_writeb(5, MCFINTC2_ICR0 + irq - 128);
else if (MCFINTC1_ICR0 && (irq > 64))
__raw_writeb(5, MCFINTC1_ICR0 + irq - 64);
else
__raw_writeb(5, MCFINTC0_ICR0 + irq);
intc_irq_unmask(d);
return 0;
}
static int intc_irq_set_type(struct irq_data *d, unsigned int type)
{
unsigned int ebit, irq = d->irq;
u16 pa, tb;
switch (type) {
case IRQ_TYPE_EDGE_RISING:
tb = 0x1;
break;
case IRQ_TYPE_EDGE_FALLING:
tb = 0x2;
break;
case IRQ_TYPE_EDGE_BOTH:
tb = 0x3;
break;
default:
/* Level triggered */
tb = 0;
break;
}
if (tb)
irq_set_handler(irq, handle_edge_irq);
ebit = irq2ebit(irq) * 2;
pa = __raw_readw(MCFEPORT_EPPAR);
pa = (pa & ~(0x3 << ebit)) | (tb << ebit);
__raw_writew(pa, MCFEPORT_EPPAR);
return 0;
}
static struct irq_chip intc_irq_chip = {
.name = "CF-INTC",
.irq_startup = intc_irq_startup,
.irq_mask = intc_irq_mask,
.irq_unmask = intc_irq_unmask,
};
static struct irq_chip intc_irq_chip_edge_port = {
.name = "CF-INTC-EP",
.irq_startup = intc_irq_startup,
.irq_mask = intc_irq_mask,
.irq_unmask = intc_irq_unmask,
.irq_ack = intc_irq_ack,
.irq_set_type = intc_irq_set_type,
};
void __init init_IRQ(void)
{
int irq, eirq;
/* Mask all interrupt sources */
__raw_writeb(0xff, MCFINTC0_SIMR);
if (MCFINTC1_SIMR)
__raw_writeb(0xff, MCFINTC1_SIMR);
if (MCFINTC2_SIMR)
__raw_writeb(0xff, MCFINTC2_SIMR);
eirq = MCFINT_VECBASE + 64 + (MCFINTC1_ICR0 ? 64 : 0) +
(MCFINTC2_ICR0 ? 64 : 0);
for (irq = MCFINT_VECBASE; (irq < eirq); irq++) {
if ((irq >= EINT1) && (irq <= EINT7))
irq_set_chip(irq, &intc_irq_chip_edge_port);
else
irq_set_chip(irq, &intc_irq_chip);
irq_set_irq_type(irq, IRQ_TYPE_LEVEL_HIGH);
irq_set_handler(irq, handle_level_irq);
}
}
| gpl-2.0 |
columbia/linux-2.6-mutable | arch/powerpc/platforms/cell/spufs/switch.c | 4525 | 64573 | /*
* spu_switch.c
*
* (C) Copyright IBM Corp. 2005
*
* Author: Mark Nutter <mnutter@us.ibm.com>
*
* Host-side part of SPU context switch sequence outlined in
* Synergistic Processor Element, Book IV.
*
* A fully preemptive switch of an SPE is very expensive in terms
* of time and system resources. SPE Book IV indicates that SPE
* allocation should follow a "serially reusable device" model,
* in which the SPE is assigned a task until it completes. When
* this is not possible, this sequence may be used to preemptively
* save, and then later (optionally) restore the context of a
* program executing on an SPE.
*
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2, or (at your option)
* any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/hardirq.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/smp.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <asm/io.h>
#include <asm/spu.h>
#include <asm/spu_priv1.h>
#include <asm/spu_csa.h>
#include <asm/mmu_context.h>
#include "spufs.h"
#include "spu_save_dump.h"
#include "spu_restore_dump.h"
#if 0
#define POLL_WHILE_TRUE(_c) { \
do { \
} while (_c); \
}
#else
#define RELAX_SPIN_COUNT 1000
#define POLL_WHILE_TRUE(_c) { \
do { \
int _i; \
for (_i=0; _i<RELAX_SPIN_COUNT && (_c); _i++) { \
cpu_relax(); \
} \
if (unlikely(_c)) yield(); \
else break; \
} while (_c); \
}
#endif /* debug */
#define POLL_WHILE_FALSE(_c) POLL_WHILE_TRUE(!(_c))
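/*
 * Both forms spin with cpu_relax() for up to RELAX_SPIN_COUNT
 * iterations and then yield() while the condition still holds.  For
 * example, POLL_WHILE_TRUE(in_be32(&prob->spu_status_R) &
 * SPU_STATUS_RUNNING) is used below to wait for the SPU to stop.
 */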
static inline void acquire_spu_lock(struct spu *spu)
{
/* Save, Step 1:
* Restore, Step 1:
* Acquire SPU-specific mutual exclusion lock.
* TBD.
*/
}
static inline void release_spu_lock(struct spu *spu)
{
/* Restore, Step 76:
* Release SPU-specific mutual exclusion lock.
* TBD.
*/
}
static inline int check_spu_isolate(struct spu_state *csa, struct spu *spu)
{
struct spu_problem __iomem *prob = spu->problem;
u32 isolate_state;
/* Save, Step 2:
* Save, Step 6:
* If SPU_Status[E,L,IS] any field is '1', this
* SPU is in isolate state and cannot be context
* saved at this time.
*/
isolate_state = SPU_STATUS_ISOLATED_STATE |
SPU_STATUS_ISOLATED_LOAD_STATUS | SPU_STATUS_ISOLATED_EXIT_STATUS;
return (in_be32(&prob->spu_status_R) & isolate_state) ? 1 : 0;
}
static inline void disable_interrupts(struct spu_state *csa, struct spu *spu)
{
/* Save, Step 3:
* Restore, Step 2:
* Save INT_Mask_class0 in CSA.
* Write INT_MASK_class0 with value of 0.
* Save INT_Mask_class1 in CSA.
* Write INT_MASK_class1 with value of 0.
* Save INT_Mask_class2 in CSA.
* Write INT_MASK_class2 with value of 0.
* Synchronize all three interrupts to be sure
* we no longer execute a handler on another CPU.
*/
spin_lock_irq(&spu->register_lock);
if (csa) {
csa->priv1.int_mask_class0_RW = spu_int_mask_get(spu, 0);
csa->priv1.int_mask_class1_RW = spu_int_mask_get(spu, 1);
csa->priv1.int_mask_class2_RW = spu_int_mask_get(spu, 2);
}
spu_int_mask_set(spu, 0, 0ul);
spu_int_mask_set(spu, 1, 0ul);
spu_int_mask_set(spu, 2, 0ul);
eieio();
spin_unlock_irq(&spu->register_lock);
/*
* This flag needs to be set before calling synchronize_irq so
* that the update will be visible to the relevant handlers
* via a simple load.
*/
set_bit(SPU_CONTEXT_SWITCH_PENDING, &spu->flags);
clear_bit(SPU_CONTEXT_FAULT_PENDING, &spu->flags);
synchronize_irq(spu->irqs[0]);
synchronize_irq(spu->irqs[1]);
synchronize_irq(spu->irqs[2]);
}
static inline void set_watchdog_timer(struct spu_state *csa, struct spu *spu)
{
/* Save, Step 4:
* Restore, Step 25.
* Set a software watchdog timer, which specifies the
* maximum allowable time for a context save sequence.
*
* For the present, this implementation will not set a global
* watchdog timer, as virtualization & variable system load
* may cause unpredictable execution times.
*/
}
static inline void inhibit_user_access(struct spu_state *csa, struct spu *spu)
{
/* Save, Step 5:
* Restore, Step 3:
* Inhibit user-space access (if provided) to this
* SPU by unmapping the virtual pages assigned to
* the SPU memory-mapped I/O (MMIO) for problem
* state. TBD.
*/
}
static inline void set_switch_pending(struct spu_state *csa, struct spu *spu)
{
/* Save, Step 7:
* Restore, Step 5:
* Set a software context switch pending flag.
* Done above in Step 3 - disable_interrupts().
*/
}
static inline void save_mfc_cntl(struct spu_state *csa, struct spu *spu)
{
struct spu_priv2 __iomem *priv2 = spu->priv2;
/* Save, Step 8:
* Suspend DMA and save MFC_CNTL.
*/
switch (in_be64(&priv2->mfc_control_RW) &
MFC_CNTL_SUSPEND_DMA_STATUS_MASK) {
case MFC_CNTL_SUSPEND_IN_PROGRESS:
POLL_WHILE_FALSE((in_be64(&priv2->mfc_control_RW) &
MFC_CNTL_SUSPEND_DMA_STATUS_MASK) ==
MFC_CNTL_SUSPEND_COMPLETE);
/* fall through */
case MFC_CNTL_SUSPEND_COMPLETE:
if (csa)
csa->priv2.mfc_control_RW =
in_be64(&priv2->mfc_control_RW) |
MFC_CNTL_SUSPEND_DMA_QUEUE;
break;
case MFC_CNTL_NORMAL_DMA_QUEUE_OPERATION:
out_be64(&priv2->mfc_control_RW, MFC_CNTL_SUSPEND_DMA_QUEUE);
POLL_WHILE_FALSE((in_be64(&priv2->mfc_control_RW) &
MFC_CNTL_SUSPEND_DMA_STATUS_MASK) ==
MFC_CNTL_SUSPEND_COMPLETE);
if (csa)
csa->priv2.mfc_control_RW =
in_be64(&priv2->mfc_control_RW) &
~MFC_CNTL_SUSPEND_DMA_QUEUE &
~MFC_CNTL_SUSPEND_MASK;
break;
}
}
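/*
 * Note on the switch above: a suspend already in progress is polled
 * until complete and then treated like the completed case; a completed
 * suspend is simply recorded in the CSA; otherwise the DMA queue is
 * suspended here first and the pre-suspend state is saved with the
 * suspend bits masked out.
 */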
static inline void save_spu_runcntl(struct spu_state *csa, struct spu *spu)
{
struct spu_problem __iomem *prob = spu->problem;
/* Save, Step 9:
* Save SPU_Runcntl in the CSA. This value contains
* the "Application Desired State".
*/
csa->prob.spu_runcntl_RW = in_be32(&prob->spu_runcntl_RW);
}
static inline void save_mfc_sr1(struct spu_state *csa, struct spu *spu)
{
/* Save, Step 10:
* Save MFC_SR1 in the CSA.
*/
csa->priv1.mfc_sr1_RW = spu_mfc_sr1_get(spu);
}
static inline void save_spu_status(struct spu_state *csa, struct spu *spu)
{
struct spu_problem __iomem *prob = spu->problem;
/* Save, Step 11:
* Read SPU_Status[R], and save to CSA.
*/
if ((in_be32(&prob->spu_status_R) & SPU_STATUS_RUNNING) == 0) {
csa->prob.spu_status_R = in_be32(&prob->spu_status_R);
} else {
u32 stopped;
out_be32(&prob->spu_runcntl_RW, SPU_RUNCNTL_STOP);
eieio();
POLL_WHILE_TRUE(in_be32(&prob->spu_status_R) &
SPU_STATUS_RUNNING);
stopped =
SPU_STATUS_INVALID_INSTR | SPU_STATUS_SINGLE_STEP |
SPU_STATUS_STOPPED_BY_HALT | SPU_STATUS_STOPPED_BY_STOP;
if ((in_be32(&prob->spu_status_R) & stopped) == 0)
csa->prob.spu_status_R = SPU_STATUS_RUNNING;
else
csa->prob.spu_status_R = in_be32(&prob->spu_status_R);
}
}
static inline void save_mfc_stopped_status(struct spu_state *csa,
struct spu *spu)
{
struct spu_priv2 __iomem *priv2 = spu->priv2;
const u64 mask = MFC_CNTL_DECREMENTER_RUNNING |
MFC_CNTL_DMA_QUEUES_EMPTY;
/* Save, Step 12:
* Read MFC_CNTL[Ds]. Update saved copy of
* CSA.MFC_CNTL[Ds].
*
* update: do the same with MFC_CNTL[Q].
*/
csa->priv2.mfc_control_RW &= ~mask;
csa->priv2.mfc_control_RW |= in_be64(&priv2->mfc_control_RW) & mask;
}
static inline void halt_mfc_decr(struct spu_state *csa, struct spu *spu)
{
struct spu_priv2 __iomem *priv2 = spu->priv2;
/* Save, Step 13:
* Write MFC_CNTL[Dh] set to a '1' to halt
* the decrementer.
*/
out_be64(&priv2->mfc_control_RW,
MFC_CNTL_DECREMENTER_HALTED | MFC_CNTL_SUSPEND_MASK);
eieio();
}
static inline void save_timebase(struct spu_state *csa, struct spu *spu)
{
/* Save, Step 14:
* Read PPE Timebase High and Timebase low registers
* and save in CSA. TBD.
*/
csa->suspend_time = get_cycles();
}
static inline void remove_other_spu_access(struct spu_state *csa,
struct spu *spu)
{
/* Save, Step 15:
* Remove other SPU access to this SPU by unmapping
* this SPU's pages from their address space. TBD.
*/
}
static inline void do_mfc_mssync(struct spu_state *csa, struct spu *spu)
{
struct spu_problem __iomem *prob = spu->problem;
/* Save, Step 16:
* Restore, Step 11.
* Write SPU_MSSync register. Poll SPU_MSSync[P]
* for a value of 0.
*/
out_be64(&prob->spc_mssync_RW, 1UL);
POLL_WHILE_TRUE(in_be64(&prob->spc_mssync_RW) & MS_SYNC_PENDING);
}
static inline void issue_mfc_tlbie(struct spu_state *csa, struct spu *spu)
{
/* Save, Step 17:
* Restore, Step 12.
* Restore, Step 48.
* Write TLB_Invalidate_Entry[IS,VPN,L,Lp]=0 register.
* Then issue a PPE sync instruction.
*/
spu_tlb_invalidate(spu);
mb();
}
static inline void handle_pending_interrupts(struct spu_state *csa,
struct spu *spu)
{
/* Save, Step 18:
* Handle any pending interrupts from this SPU
* here. This is OS or hypervisor specific. One
* option is to re-enable interrupts to handle any
* pending interrupts, with the interrupt handlers
* recognizing the software Context Switch Pending
* flag, to ensure the SPU execution or MFC command
* queue is not restarted. TBD.
*/
}
static inline void save_mfc_queues(struct spu_state *csa, struct spu *spu)
{
struct spu_priv2 __iomem *priv2 = spu->priv2;
int i;
/* Save, Step 19:
* If MFC_Cntl[Se]=0 then save
* MFC command queues.
*/
if ((in_be64(&priv2->mfc_control_RW) & MFC_CNTL_DMA_QUEUES_EMPTY) == 0) {
for (i = 0; i < 8; i++) {
csa->priv2.puq[i].mfc_cq_data0_RW =
in_be64(&priv2->puq[i].mfc_cq_data0_RW);
csa->priv2.puq[i].mfc_cq_data1_RW =
in_be64(&priv2->puq[i].mfc_cq_data1_RW);
csa->priv2.puq[i].mfc_cq_data2_RW =
in_be64(&priv2->puq[i].mfc_cq_data2_RW);
csa->priv2.puq[i].mfc_cq_data3_RW =
in_be64(&priv2->puq[i].mfc_cq_data3_RW);
}
for (i = 0; i < 16; i++) {
csa->priv2.spuq[i].mfc_cq_data0_RW =
in_be64(&priv2->spuq[i].mfc_cq_data0_RW);
csa->priv2.spuq[i].mfc_cq_data1_RW =
in_be64(&priv2->spuq[i].mfc_cq_data1_RW);
csa->priv2.spuq[i].mfc_cq_data2_RW =
in_be64(&priv2->spuq[i].mfc_cq_data2_RW);
csa->priv2.spuq[i].mfc_cq_data3_RW =
in_be64(&priv2->spuq[i].mfc_cq_data3_RW);
}
}
}
static inline void save_ppu_querymask(struct spu_state *csa, struct spu *spu)
{
struct spu_problem __iomem *prob = spu->problem;
/* Save, Step 20:
* Save the PPU_QueryMask register
* in the CSA.
*/
csa->prob.dma_querymask_RW = in_be32(&prob->dma_querymask_RW);
}
static inline void save_ppu_querytype(struct spu_state *csa, struct spu *spu)
{
struct spu_problem __iomem *prob = spu->problem;
/* Save, Step 21:
* Save the PPU_QueryType register
* in the CSA.
*/
csa->prob.dma_querytype_RW = in_be32(&prob->dma_querytype_RW);
}
static inline void save_ppu_tagstatus(struct spu_state *csa, struct spu *spu)
{
struct spu_problem __iomem *prob = spu->problem;
/* Save the Prxy_TagStatus register in the CSA.
*
* It is unnecessary to restore dma_tagstatus_R, however,
* dma_tagstatus_R in the CSA is accessed via backing_ops, so
* we must save it.
*/
csa->prob.dma_tagstatus_R = in_be32(&prob->dma_tagstatus_R);
}
static inline void save_mfc_csr_tsq(struct spu_state *csa, struct spu *spu)
{
struct spu_priv2 __iomem *priv2 = spu->priv2;
/* Save, Step 22:
* Save the MFC_CSR_TSQ register
* in the LSCSA.
*/
csa->priv2.spu_tag_status_query_RW =
in_be64(&priv2->spu_tag_status_query_RW);
}
static inline void save_mfc_csr_cmd(struct spu_state *csa, struct spu *spu)
{
struct spu_priv2 __iomem *priv2 = spu->priv2;
/* Save, Step 23:
* Save the MFC_CSR_CMD1 and MFC_CSR_CMD2
* registers in the CSA.
*/
csa->priv2.spu_cmd_buf1_RW = in_be64(&priv2->spu_cmd_buf1_RW);
csa->priv2.spu_cmd_buf2_RW = in_be64(&priv2->spu_cmd_buf2_RW);
}
static inline void save_mfc_csr_ato(struct spu_state *csa, struct spu *spu)
{
struct spu_priv2 __iomem *priv2 = spu->priv2;
/* Save, Step 24:
* Save the MFC_CSR_ATO register in
* the CSA.
*/
csa->priv2.spu_atomic_status_RW = in_be64(&priv2->spu_atomic_status_RW);
}
static inline void save_mfc_tclass_id(struct spu_state *csa, struct spu *spu)
{
/* Save, Step 25:
* Save the MFC_TCLASS_ID register in
* the CSA.
*/
csa->priv1.mfc_tclass_id_RW = spu_mfc_tclass_id_get(spu);
}
static inline void set_mfc_tclass_id(struct spu_state *csa, struct spu *spu)
{
/* Save, Step 26:
* Restore, Step 23.
* Write the MFC_TCLASS_ID register with
* the value 0x10000000.
*/
spu_mfc_tclass_id_set(spu, 0x10000000);
eieio();
}
static inline void purge_mfc_queue(struct spu_state *csa, struct spu *spu)
{
struct spu_priv2 __iomem *priv2 = spu->priv2;
/* Save, Step 27:
* Restore, Step 14.
* Write MFC_CNTL[Pc]=1 (purge queue).
*/
out_be64(&priv2->mfc_control_RW,
MFC_CNTL_PURGE_DMA_REQUEST |
MFC_CNTL_SUSPEND_MASK);
eieio();
}
static inline void wait_purge_complete(struct spu_state *csa, struct spu *spu)
{
struct spu_priv2 __iomem *priv2 = spu->priv2;
/* Save, Step 28:
* Poll MFC_CNTL[Ps] until value '11' is read
* (purge complete).
*/
POLL_WHILE_FALSE((in_be64(&priv2->mfc_control_RW) &
MFC_CNTL_PURGE_DMA_STATUS_MASK) ==
MFC_CNTL_PURGE_DMA_COMPLETE);
}
static inline void setup_mfc_sr1(struct spu_state *csa, struct spu *spu)
{
/* Save, Step 30:
* Restore, Step 18:
* Write MFC_SR1 with MFC_SR1[D=0,S=1] and
* MFC_SR1[TL,R,Pr,T] set correctly for the
* OS specific environment.
*
* Implementation note: The SPU-side code
* for save/restore is privileged, so the
* MFC_SR1[Pr] bit is not set.
*
*/
spu_mfc_sr1_set(spu, (MFC_STATE1_MASTER_RUN_CONTROL_MASK |
MFC_STATE1_RELOCATE_MASK |
MFC_STATE1_BUS_TLBIE_MASK));
}
static inline void save_spu_npc(struct spu_state *csa, struct spu *spu)
{
struct spu_problem __iomem *prob = spu->problem;
/* Save, Step 31:
* Save SPU_NPC in the CSA.
*/
csa->prob.spu_npc_RW = in_be32(&prob->spu_npc_RW);
}
static inline void save_spu_privcntl(struct spu_state *csa, struct spu *spu)
{
struct spu_priv2 __iomem *priv2 = spu->priv2;
/* Save, Step 32:
* Save SPU_PrivCntl in the CSA.
*/
csa->priv2.spu_privcntl_RW = in_be64(&priv2->spu_privcntl_RW);
}
static inline void reset_spu_privcntl(struct spu_state *csa, struct spu *spu)
{
struct spu_priv2 __iomem *priv2 = spu->priv2;
/* Save, Step 33:
* Restore, Step 16:
* Write SPU_PrivCntl[S,Le,A] fields reset to 0.
*/
out_be64(&priv2->spu_privcntl_RW, 0UL);
eieio();
}
static inline void save_spu_lslr(struct spu_state *csa, struct spu *spu)
{
struct spu_priv2 __iomem *priv2 = spu->priv2;
/* Save, Step 34:
* Save SPU_LSLR in the CSA.
*/
csa->priv2.spu_lslr_RW = in_be64(&priv2->spu_lslr_RW);
}
static inline void reset_spu_lslr(struct spu_state *csa, struct spu *spu)
{
struct spu_priv2 __iomem *priv2 = spu->priv2;
/* Save, Step 35:
* Restore, Step 17.
* Reset SPU_LSLR.
*/
out_be64(&priv2->spu_lslr_RW, LS_ADDR_MASK);
eieio();
}
static inline void save_spu_cfg(struct spu_state *csa, struct spu *spu)
{
struct spu_priv2 __iomem *priv2 = spu->priv2;
/* Save, Step 36:
* Save SPU_Cfg in the CSA.
*/
csa->priv2.spu_cfg_RW = in_be64(&priv2->spu_cfg_RW);
}
static inline void save_pm_trace(struct spu_state *csa, struct spu *spu)
{
/* Save, Step 37:
* Save PM_Trace_Tag_Wait_Mask in the CSA.
* Not performed by this implementation.
*/
}
static inline void save_mfc_rag(struct spu_state *csa, struct spu *spu)
{
/* Save, Step 38:
* Save RA_GROUP_ID register and the
* RA_ENABLE register in the CSA.
*/
csa->priv1.resource_allocation_groupID_RW =
spu_resource_allocation_groupID_get(spu);
csa->priv1.resource_allocation_enable_RW =
spu_resource_allocation_enable_get(spu);
}
static inline void save_ppu_mb_stat(struct spu_state *csa, struct spu *spu)
{
struct spu_problem __iomem *prob = spu->problem;
/* Save, Step 39:
* Save MB_Stat register in the CSA.
*/
csa->prob.mb_stat_R = in_be32(&prob->mb_stat_R);
}
static inline void save_ppu_mb(struct spu_state *csa, struct spu *spu)
{
struct spu_problem __iomem *prob = spu->problem;
/* Save, Step 40:
* Save the PPU_MB register in the CSA.
*/
csa->prob.pu_mb_R = in_be32(&prob->pu_mb_R);
}
static inline void save_ppuint_mb(struct spu_state *csa, struct spu *spu)
{
struct spu_priv2 __iomem *priv2 = spu->priv2;
/* Save, Step 41:
* Save the PPUINT_MB register in the CSA.
*/
csa->priv2.puint_mb_R = in_be64(&priv2->puint_mb_R);
}
static inline void save_ch_part1(struct spu_state *csa, struct spu *spu)
{
struct spu_priv2 __iomem *priv2 = spu->priv2;
u64 idx, ch_indices[] = { 0UL, 3UL, 4UL, 24UL, 25UL, 27UL };
int i;
/* Save, Step 42:
*/
/* Save CH 1, without channel count */
out_be64(&priv2->spu_chnlcntptr_RW, 1);
csa->spu_chnldata_RW[1] = in_be64(&priv2->spu_chnldata_RW);
/* Save the following CH: [0,3,4,24,25,27] */
for (i = 0; i < ARRAY_SIZE(ch_indices); i++) {
idx = ch_indices[i];
out_be64(&priv2->spu_chnlcntptr_RW, idx);
eieio();
csa->spu_chnldata_RW[idx] = in_be64(&priv2->spu_chnldata_RW);
csa->spu_chnlcnt_RW[idx] = in_be64(&priv2->spu_chnlcnt_RW);
out_be64(&priv2->spu_chnldata_RW, 0UL);
out_be64(&priv2->spu_chnlcnt_RW, 0UL);
eieio();
}
}
static inline void save_spu_mb(struct spu_state *csa, struct spu *spu)
{
struct spu_priv2 __iomem *priv2 = spu->priv2;
int i;
/* Save, Step 43:
* Save SPU Read Mailbox Channel.
*/
out_be64(&priv2->spu_chnlcntptr_RW, 29UL);
eieio();
csa->spu_chnlcnt_RW[29] = in_be64(&priv2->spu_chnlcnt_RW);
for (i = 0; i < 4; i++) {
csa->spu_mailbox_data[i] = in_be64(&priv2->spu_chnldata_RW);
}
out_be64(&priv2->spu_chnlcnt_RW, 0UL);
eieio();
}
static inline void save_mfc_cmd(struct spu_state *csa, struct spu *spu)
{
struct spu_priv2 __iomem *priv2 = spu->priv2;
/* Save, Step 44:
* Save MFC_CMD Channel.
*/
out_be64(&priv2->spu_chnlcntptr_RW, 21UL);
eieio();
csa->spu_chnlcnt_RW[21] = in_be64(&priv2->spu_chnlcnt_RW);
eieio();
}
static inline void reset_ch(struct spu_state *csa, struct spu *spu)
{
struct spu_priv2 __iomem *priv2 = spu->priv2;
u64 ch_indices[4] = { 21UL, 23UL, 28UL, 30UL };
u64 ch_counts[4] = { 16UL, 1UL, 1UL, 1UL };
u64 idx;
int i;
/* Save, Step 45:
* Reset the following CH: [21, 23, 28, 30]
*/
for (i = 0; i < 4; i++) {
idx = ch_indices[i];
out_be64(&priv2->spu_chnlcntptr_RW, idx);
eieio();
out_be64(&priv2->spu_chnlcnt_RW, ch_counts[i]);
eieio();
}
}
static inline void resume_mfc_queue(struct spu_state *csa, struct spu *spu)
{
struct spu_priv2 __iomem *priv2 = spu->priv2;
/* Save, Step 46:
* Restore, Step 25.
* Write MFC_CNTL[Sc]=0 (resume queue processing).
*/
out_be64(&priv2->mfc_control_RW, MFC_CNTL_RESUME_DMA_QUEUE);
}
static inline void setup_mfc_slbs(struct spu_state *csa, struct spu *spu,
unsigned int *code, int code_size)
{
/* Save, Step 47:
* Restore, Step 30.
* If MFC_SR1[R]=1, write 0 to SLB_Invalidate_All
* register, then initialize SLB_VSID and SLB_ESID
* to provide access to SPU context save code and
* LSCSA.
*
* This implementation places both the context
* switch code and LSCSA in kernel address space.
*
* Further this implementation assumes that the
* MFC_SR1[R]=1 (in other words, assume that
* translation is desired by OS environment).
*/
spu_invalidate_slbs(spu);
spu_setup_kernel_slbs(spu, csa->lscsa, code, code_size);
}
static inline void set_switch_active(struct spu_state *csa, struct spu *spu)
{
/* Save, Step 48:
* Restore, Step 23.
* Change the software context switch pending flag
* to context switch active. This implementation does
* not use a switch active flag.
*
* Now that we have saved the mfc in the csa, we can add in the
* restart command if an exception occurred.
*/
if (test_bit(SPU_CONTEXT_FAULT_PENDING, &spu->flags))
csa->priv2.mfc_control_RW |= MFC_CNTL_RESTART_DMA_COMMAND;
clear_bit(SPU_CONTEXT_SWITCH_PENDING, &spu->flags);
mb();
}
static inline void enable_interrupts(struct spu_state *csa, struct spu *spu)
{
unsigned long class1_mask = CLASS1_ENABLE_SEGMENT_FAULT_INTR |
CLASS1_ENABLE_STORAGE_FAULT_INTR;
/* Save, Step 49:
* Restore, Step 22:
* Reset and then enable interrupts, as
* needed by OS.
*
* This implementation enables only class1
* (translation) interrupts.
*/
spin_lock_irq(&spu->register_lock);
spu_int_stat_clear(spu, 0, CLASS0_INTR_MASK);
spu_int_stat_clear(spu, 1, CLASS1_INTR_MASK);
spu_int_stat_clear(spu, 2, CLASS2_INTR_MASK);
spu_int_mask_set(spu, 0, 0ul);
spu_int_mask_set(spu, 1, class1_mask);
spu_int_mask_set(spu, 2, 0ul);
spin_unlock_irq(&spu->register_lock);
}
static inline int send_mfc_dma(struct spu *spu, unsigned long ea,
unsigned int ls_offset, unsigned int size,
unsigned int tag, unsigned int rclass,
unsigned int cmd)
{
struct spu_problem __iomem *prob = spu->problem;
union mfc_tag_size_class_cmd command;
unsigned int transfer_size;
volatile unsigned int status = 0x0;
while (size > 0) {
transfer_size =
(size > MFC_MAX_DMA_SIZE) ? MFC_MAX_DMA_SIZE : size;
command.u.mfc_size = transfer_size;
command.u.mfc_tag = tag;
command.u.mfc_rclassid = rclass;
command.u.mfc_cmd = cmd;
do {
out_be32(&prob->mfc_lsa_W, ls_offset);
out_be64(&prob->mfc_ea_W, ea);
out_be64(&prob->mfc_union_W.all64, command.all64);
status =
in_be32(&prob->mfc_union_W.by32.mfc_class_cmd32);
if (unlikely(status & 0x2)) {
cpu_relax();
}
} while (status & 0x3);
size -= transfer_size;
ea += transfer_size;
ls_offset += transfer_size;
}
return 0;
}
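/*
 * The helper above splits the transfer into chunks of at most
 * MFC_MAX_DMA_SIZE bytes and retries each enqueue until the returned
 * command status bits (0x3) indicate the command was accepted.
 */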
static inline void save_ls_16kb(struct spu_state *csa, struct spu *spu)
{
unsigned long addr = (unsigned long)&csa->lscsa->ls[0];
unsigned int ls_offset = 0x0;
unsigned int size = 16384;
unsigned int tag = 0;
unsigned int rclass = 0;
unsigned int cmd = MFC_PUT_CMD;
/* Save, Step 50:
* Issue a DMA command to copy the first 16K bytes
* of local storage to the CSA.
*/
send_mfc_dma(spu, addr, ls_offset, size, tag, rclass, cmd);
}
static inline void set_spu_npc(struct spu_state *csa, struct spu *spu)
{
struct spu_problem __iomem *prob = spu->problem;
/* Save, Step 51:
* Restore, Step 31.
* Write SPU_NPC[IE]=0 and SPU_NPC[LSA] to entry
* point address of context save code in local
* storage.
*
* This implementation uses SPU-side save/restore
* programs with entry points at LSA of 0.
*/
out_be32(&prob->spu_npc_RW, 0);
eieio();
}
static inline void set_signot1(struct spu_state *csa, struct spu *spu)
{
struct spu_problem __iomem *prob = spu->problem;
union {
u64 ull;
u32 ui[2];
} addr64;
/* Save, Step 52:
* Restore, Step 32:
* Write SPU_Sig_Notify_1 register with upper 32-bits
* of the CSA.LSCSA effective address.
*/
addr64.ull = (u64) csa->lscsa;
out_be32(&prob->signal_notify1, addr64.ui[0]);
eieio();
}
static inline void set_signot2(struct spu_state *csa, struct spu *spu)
{
struct spu_problem __iomem *prob = spu->problem;
union {
u64 ull;
u32 ui[2];
} addr64;
/* Save, Step 53:
* Restore, Step 33:
* Write SPU_Sig_Notify_2 register with lower 32-bits
* of the CSA.LSCSA effective address.
*/
addr64.ull = (u64) csa->lscsa;
out_be32(&prob->signal_notify2, addr64.ui[1]);
eieio();
}
static inline void send_save_code(struct spu_state *csa, struct spu *spu)
{
unsigned long addr = (unsigned long)&spu_save_code[0];
unsigned int ls_offset = 0x0;
unsigned int size = sizeof(spu_save_code);
unsigned int tag = 0;
unsigned int rclass = 0;
unsigned int cmd = MFC_GETFS_CMD;
/* Save, Step 54:
* Issue a DMA command to copy context save code
* to local storage and start SPU.
*/
send_mfc_dma(spu, addr, ls_offset, size, tag, rclass, cmd);
}
static inline void set_ppu_querymask(struct spu_state *csa, struct spu *spu)
{
struct spu_problem __iomem *prob = spu->problem;
/* Save, Step 55:
* Restore, Step 38.
* Write PPU_QueryMask=1 (enable Tag Group 0)
* and issue eieio instruction.
*/
out_be32(&prob->dma_querymask_RW, MFC_TAGID_TO_TAGMASK(0));
eieio();
}
static inline void wait_tag_complete(struct spu_state *csa, struct spu *spu)
{
struct spu_problem __iomem *prob = spu->problem;
u32 mask = MFC_TAGID_TO_TAGMASK(0);
unsigned long flags;
/* Save, Step 56:
* Restore, Step 39.
* Restore, Step 46.
* Poll PPU_TagStatus[gn] until 01 (Tag group 0 complete)
* or write PPU_QueryType[TS]=01 and wait for Tag Group
* Complete Interrupt. Write INT_Stat_Class0 or
* INT_Stat_Class2 with value of 'handled'.
*/
POLL_WHILE_FALSE(in_be32(&prob->dma_tagstatus_R) & mask);
local_irq_save(flags);
spu_int_stat_clear(spu, 0, CLASS0_INTR_MASK);
spu_int_stat_clear(spu, 2, CLASS2_INTR_MASK);
local_irq_restore(flags);
}
static inline void wait_spu_stopped(struct spu_state *csa, struct spu *spu)
{
struct spu_problem __iomem *prob = spu->problem;
unsigned long flags;
/* Save, Step 57:
* Restore, Step 40.
* Poll until SPU_Status[R]=0 or wait for SPU Class 0
* or SPU Class 2 interrupt. Write INT_Stat_class0
* or INT_Stat_class2 with value of handled.
*/
POLL_WHILE_TRUE(in_be32(&prob->spu_status_R) & SPU_STATUS_RUNNING);
local_irq_save(flags);
spu_int_stat_clear(spu, 0, CLASS0_INTR_MASK);
spu_int_stat_clear(spu, 2, CLASS2_INTR_MASK);
local_irq_restore(flags);
}
static inline int check_save_status(struct spu_state *csa, struct spu *spu)
{
struct spu_problem __iomem *prob = spu->problem;
u32 complete;
/* Save, Step 54:
* If SPU_Status[P]=1 and SPU_Status[SC] = "success",
* context save succeeded, otherwise context save
* failed.
*/
complete = ((SPU_SAVE_COMPLETE << SPU_STOP_STATUS_SHIFT) |
SPU_STATUS_STOPPED_BY_STOP);
return (in_be32(&prob->spu_status_R) != complete) ? 1 : 0;
}
static inline void terminate_spu_app(struct spu_state *csa, struct spu *spu)
{
/* Restore, Step 4:
* If required, notify the "using application" that
* the SPU task has been terminated. TBD.
*/
}
static inline void suspend_mfc_and_halt_decr(struct spu_state *csa,
struct spu *spu)
{
struct spu_priv2 __iomem *priv2 = spu->priv2;
/* Restore, Step 7:
* Write MFC_Cntl[Dh,Sc,Sm]='1','1','0' to suspend
* the queue and halt the decrementer.
*/
out_be64(&priv2->mfc_control_RW, MFC_CNTL_SUSPEND_DMA_QUEUE |
MFC_CNTL_DECREMENTER_HALTED);
eieio();
}
static inline void wait_suspend_mfc_complete(struct spu_state *csa,
struct spu *spu)
{
struct spu_priv2 __iomem *priv2 = spu->priv2;
/* Restore, Step 8:
* Restore, Step 47.
* Poll MFC_CNTL[Ss] until 11 is returned.
*/
POLL_WHILE_FALSE((in_be64(&priv2->mfc_control_RW) &
MFC_CNTL_SUSPEND_DMA_STATUS_MASK) ==
MFC_CNTL_SUSPEND_COMPLETE);
}
static inline int suspend_spe(struct spu_state *csa, struct spu *spu)
{
struct spu_problem __iomem *prob = spu->problem;
/* Restore, Step 9:
* If SPU_Status[R]=1, stop SPU execution
* and wait for stop to complete.
*
* Returns 1 if SPU_Status[R]=1 on entry.
* 0 otherwise
*/
if (in_be32(&prob->spu_status_R) & SPU_STATUS_RUNNING) {
if (in_be32(&prob->spu_status_R) &
SPU_STATUS_ISOLATED_EXIT_STATUS) {
POLL_WHILE_TRUE(in_be32(&prob->spu_status_R) &
SPU_STATUS_RUNNING);
}
if ((in_be32(&prob->spu_status_R) &
SPU_STATUS_ISOLATED_LOAD_STATUS)
|| (in_be32(&prob->spu_status_R) &
SPU_STATUS_ISOLATED_STATE)) {
out_be32(&prob->spu_runcntl_RW, SPU_RUNCNTL_STOP);
eieio();
POLL_WHILE_TRUE(in_be32(&prob->spu_status_R) &
SPU_STATUS_RUNNING);
out_be32(&prob->spu_runcntl_RW, 0x2);
eieio();
POLL_WHILE_TRUE(in_be32(&prob->spu_status_R) &
SPU_STATUS_RUNNING);
}
if (in_be32(&prob->spu_status_R) &
SPU_STATUS_WAITING_FOR_CHANNEL) {
out_be32(&prob->spu_runcntl_RW, SPU_RUNCNTL_STOP);
eieio();
POLL_WHILE_TRUE(in_be32(&prob->spu_status_R) &
SPU_STATUS_RUNNING);
}
return 1;
}
return 0;
}
static inline void clear_spu_status(struct spu_state *csa, struct spu *spu)
{
struct spu_problem __iomem *prob = spu->problem;
/* Restore, Step 10:
* If SPU_Status[R]=0 and SPU_Status[E,L,IS]=1,
* release SPU from isolate state.
*/
if (!(in_be32(&prob->spu_status_R) & SPU_STATUS_RUNNING)) {
if (in_be32(&prob->spu_status_R) &
SPU_STATUS_ISOLATED_EXIT_STATUS) {
spu_mfc_sr1_set(spu,
MFC_STATE1_MASTER_RUN_CONTROL_MASK);
eieio();
out_be32(&prob->spu_runcntl_RW, SPU_RUNCNTL_RUNNABLE);
eieio();
POLL_WHILE_TRUE(in_be32(&prob->spu_status_R) &
SPU_STATUS_RUNNING);
}
if ((in_be32(&prob->spu_status_R) &
SPU_STATUS_ISOLATED_LOAD_STATUS)
|| (in_be32(&prob->spu_status_R) &
SPU_STATUS_ISOLATED_STATE)) {
spu_mfc_sr1_set(spu,
MFC_STATE1_MASTER_RUN_CONTROL_MASK);
eieio();
out_be32(&prob->spu_runcntl_RW, 0x2);
eieio();
POLL_WHILE_TRUE(in_be32(&prob->spu_status_R) &
SPU_STATUS_RUNNING);
}
}
}
static inline void reset_ch_part1(struct spu_state *csa, struct spu *spu)
{
struct spu_priv2 __iomem *priv2 = spu->priv2;
u64 ch_indices[] = { 0UL, 3UL, 4UL, 24UL, 25UL, 27UL };
u64 idx;
int i;
/* Restore, Step 20:
*/
/* Reset CH 1 */
out_be64(&priv2->spu_chnlcntptr_RW, 1);
out_be64(&priv2->spu_chnldata_RW, 0UL);
/* Reset the following CH: [0,3,4,24,25,27] */
for (i = 0; i < ARRAY_SIZE(ch_indices); i++) {
idx = ch_indices[i];
out_be64(&priv2->spu_chnlcntptr_RW, idx);
eieio();
out_be64(&priv2->spu_chnldata_RW, 0UL);
out_be64(&priv2->spu_chnlcnt_RW, 0UL);
eieio();
}
}
static inline void reset_ch_part2(struct spu_state *csa, struct spu *spu)
{
struct spu_priv2 __iomem *priv2 = spu->priv2;
u64 ch_indices[5] = { 21UL, 23UL, 28UL, 29UL, 30UL };
u64 ch_counts[5] = { 16UL, 1UL, 1UL, 0UL, 1UL };
u64 idx;
int i;
/* Restore, Step 21:
* Reset the following CH: [21, 23, 28, 29, 30]
*/
for (i = 0; i < 5; i++) {
idx = ch_indices[i];
out_be64(&priv2->spu_chnlcntptr_RW, idx);
eieio();
out_be64(&priv2->spu_chnlcnt_RW, ch_counts[i]);
eieio();
}
}
static inline void setup_spu_status_part1(struct spu_state *csa,
struct spu *spu)
{
u32 status_P = SPU_STATUS_STOPPED_BY_STOP;
u32 status_I = SPU_STATUS_INVALID_INSTR;
u32 status_H = SPU_STATUS_STOPPED_BY_HALT;
u32 status_S = SPU_STATUS_SINGLE_STEP;
u32 status_S_I = SPU_STATUS_SINGLE_STEP | SPU_STATUS_INVALID_INSTR;
u32 status_S_P = SPU_STATUS_SINGLE_STEP | SPU_STATUS_STOPPED_BY_STOP;
u32 status_P_H = SPU_STATUS_STOPPED_BY_HALT | SPU_STATUS_STOPPED_BY_STOP;
u32 status_P_I = SPU_STATUS_STOPPED_BY_STOP | SPU_STATUS_INVALID_INSTR;
u32 status_code;
/* Restore, Step 27:
* If the CSA.SPU_Status[I,S,H,P]=1 then add the correct
* instruction sequence to the end of the SPU based restore
* code (after the "context restored" stop and signal) to
* restore the correct SPU status.
*
* NOTE: Rather than modifying the SPU executable, we
* instead add a new 'stopped_status' field to the
* LSCSA. The SPU-side restore reads this field and
* takes the appropriate action when exiting.
*/
status_code =
(csa->prob.spu_status_R >> SPU_STOP_STATUS_SHIFT) & 0xFFFF;
if ((csa->prob.spu_status_R & status_P_I) == status_P_I) {
/* SPU_Status[P,I]=1 - Illegal Instruction followed
* by Stop and Signal instruction, followed by 'br -4'.
*
*/
csa->lscsa->stopped_status.slot[0] = SPU_STOPPED_STATUS_P_I;
csa->lscsa->stopped_status.slot[1] = status_code;
} else if ((csa->prob.spu_status_R & status_P_H) == status_P_H) {
/* SPU_Status[P,H]=1 - Halt Conditional, followed
* by Stop and Signal instruction, followed by
* 'br -4'.
*/
csa->lscsa->stopped_status.slot[0] = SPU_STOPPED_STATUS_P_H;
csa->lscsa->stopped_status.slot[1] = status_code;
} else if ((csa->prob.spu_status_R & status_S_P) == status_S_P) {
/* SPU_Status[S,P]=1 - Stop and Signal instruction
* followed by 'br -4'.
*/
csa->lscsa->stopped_status.slot[0] = SPU_STOPPED_STATUS_S_P;
csa->lscsa->stopped_status.slot[1] = status_code;
} else if ((csa->prob.spu_status_R & status_S_I) == status_S_I) {
/* SPU_Status[S,I]=1 - Illegal instruction followed
* by 'br -4'.
*/
csa->lscsa->stopped_status.slot[0] = SPU_STOPPED_STATUS_S_I;
csa->lscsa->stopped_status.slot[1] = status_code;
} else if ((csa->prob.spu_status_R & status_P) == status_P) {
/* SPU_Status[P]=1 - Stop and Signal instruction
* followed by 'br -4'.
*/
csa->lscsa->stopped_status.slot[0] = SPU_STOPPED_STATUS_P;
csa->lscsa->stopped_status.slot[1] = status_code;
} else if ((csa->prob.spu_status_R & status_H) == status_H) {
/* SPU_Status[H]=1 - Halt Conditional, followed
* by 'br -4'.
*/
csa->lscsa->stopped_status.slot[0] = SPU_STOPPED_STATUS_H;
} else if ((csa->prob.spu_status_R & status_S) == status_S) {
/* SPU_Status[S]=1 - Two nop instructions.
*/
csa->lscsa->stopped_status.slot[0] = SPU_STOPPED_STATUS_S;
} else if ((csa->prob.spu_status_R & status_I) == status_I) {
/* SPU_Status[I]=1 - Illegal instruction followed
* by 'br -4'.
*/
csa->lscsa->stopped_status.slot[0] = SPU_STOPPED_STATUS_I;
}
}
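/*
 * The tests above run from the combined two-bit states (P&I, P&H, S&P,
 * S&I) down to the single status bits, so stopped_status records the
 * most specific condition that matches CSA.SPU_Status.
 */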
static inline void setup_spu_status_part2(struct spu_state *csa,
struct spu *spu)
{
u32 mask;
/* Restore, Step 28:
* If the CSA.SPU_Status[I,S,H,P,R]=0 then
* add a 'br *' instruction to the end of
* the SPU based restore code.
*
* NOTE: Rather than modifying the SPU executable, we
* instead add a new 'stopped_status' field to the
* LSCSA. The SPU-side restore reads this field and
* takes the appropriate action when exiting.
*/
mask = SPU_STATUS_INVALID_INSTR |
SPU_STATUS_SINGLE_STEP |
SPU_STATUS_STOPPED_BY_HALT |
SPU_STATUS_STOPPED_BY_STOP | SPU_STATUS_RUNNING;
if (!(csa->prob.spu_status_R & mask)) {
csa->lscsa->stopped_status.slot[0] = SPU_STOPPED_STATUS_R;
}
}
static inline void restore_mfc_rag(struct spu_state *csa, struct spu *spu)
{
/* Restore, Step 29:
* Restore RA_GROUP_ID register and the
* RA_ENABLE register from the CSA.
*/
spu_resource_allocation_groupID_set(spu,
csa->priv1.resource_allocation_groupID_RW);
spu_resource_allocation_enable_set(spu,
csa->priv1.resource_allocation_enable_RW);
}
static inline void send_restore_code(struct spu_state *csa, struct spu *spu)
{
unsigned long addr = (unsigned long)&spu_restore_code[0];
unsigned int ls_offset = 0x0;
unsigned int size = sizeof(spu_restore_code);
unsigned int tag = 0;
unsigned int rclass = 0;
unsigned int cmd = MFC_GETFS_CMD;
/* Restore, Step 37:
* Issue MFC DMA command to copy context
* restore code to local storage.
*/
send_mfc_dma(spu, addr, ls_offset, size, tag, rclass, cmd);
}
static inline void setup_decr(struct spu_state *csa, struct spu *spu)
{
/* Restore, Step 34:
* If CSA.MFC_CNTL[Ds]=1 (decrementer was
* running) then adjust decrementer, set
* decrementer running status in LSCSA,
* and set decrementer "wrapped" status
* in LSCSA.
*/
if (csa->priv2.mfc_control_RW & MFC_CNTL_DECREMENTER_RUNNING) {
cycles_t resume_time = get_cycles();
cycles_t delta_time = resume_time - csa->suspend_time;
csa->lscsa->decr_status.slot[0] = SPU_DECR_STATUS_RUNNING;
if (csa->lscsa->decr.slot[0] < delta_time) {
csa->lscsa->decr_status.slot[0] |=
SPU_DECR_STATUS_WRAPPED;
}
csa->lscsa->decr.slot[0] -= delta_time;
} else {
csa->lscsa->decr_status.slot[0] = 0;
}
}
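/*
 * Note: the wrap test above compares the saved decrementer value with
 * the cycles (as counted by get_cycles()) spent suspended; if more
 * cycles elapsed than remained on the decrementer, it must have
 * wrapped while the context was switched out.
 */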
static inline void setup_ppu_mb(struct spu_state *csa, struct spu *spu)
{
/* Restore, Step 35:
* Copy the CSA.PU_MB data into the LSCSA.
*/
csa->lscsa->ppu_mb.slot[0] = csa->prob.pu_mb_R;
}
static inline void setup_ppuint_mb(struct spu_state *csa, struct spu *spu)
{
/* Restore, Step 36:
* Copy the CSA.PUINT_MB data into the LSCSA.
*/
csa->lscsa->ppuint_mb.slot[0] = csa->priv2.puint_mb_R;
}
static inline int check_restore_status(struct spu_state *csa, struct spu *spu)
{
struct spu_problem __iomem *prob = spu->problem;
u32 complete;
/* Restore, Step 40:
* If SPU_Status[P]=1 and SPU_Status[SC] = "success",
* context restore succeeded, otherwise context restore
* failed.
*/
complete = ((SPU_RESTORE_COMPLETE << SPU_STOP_STATUS_SHIFT) |
SPU_STATUS_STOPPED_BY_STOP);
return (in_be32(&prob->spu_status_R) != complete) ? 1 : 0;
}
static inline void restore_spu_privcntl(struct spu_state *csa, struct spu *spu)
{
struct spu_priv2 __iomem *priv2 = spu->priv2;
/* Restore, Step 41:
* Restore SPU_PrivCntl from the CSA.
*/
out_be64(&priv2->spu_privcntl_RW, csa->priv2.spu_privcntl_RW);
eieio();
}
static inline void restore_status_part1(struct spu_state *csa, struct spu *spu)
{
struct spu_problem __iomem *prob = spu->problem;
u32 mask;
/* Restore, Step 42:
* If any CSA.SPU_Status[I,S,H,P]=1, then
* restore the error or single step state.
*/
mask = SPU_STATUS_INVALID_INSTR |
SPU_STATUS_SINGLE_STEP |
SPU_STATUS_STOPPED_BY_HALT | SPU_STATUS_STOPPED_BY_STOP;
if (csa->prob.spu_status_R & mask) {
out_be32(&prob->spu_runcntl_RW, SPU_RUNCNTL_RUNNABLE);
eieio();
POLL_WHILE_TRUE(in_be32(&prob->spu_status_R) &
SPU_STATUS_RUNNING);
}
}
static inline void restore_status_part2(struct spu_state *csa, struct spu *spu)
{
struct spu_problem __iomem *prob = spu->problem;
u32 mask;
/* Restore, Step 43:
* If all CSA.SPU_Status[I,S,H,P,R]=0 then write
* SPU_RunCntl[R0R1]='01', wait for SPU_Status[R]=1,
* then write '00' to SPU_RunCntl[R0R1] and wait
* for SPU_Status[R]=0.
*/
mask = SPU_STATUS_INVALID_INSTR |
SPU_STATUS_SINGLE_STEP |
SPU_STATUS_STOPPED_BY_HALT |
SPU_STATUS_STOPPED_BY_STOP | SPU_STATUS_RUNNING;
if (!(csa->prob.spu_status_R & mask)) {
out_be32(&prob->spu_runcntl_RW, SPU_RUNCNTL_RUNNABLE);
eieio();
POLL_WHILE_FALSE(in_be32(&prob->spu_status_R) &
SPU_STATUS_RUNNING);
out_be32(&prob->spu_runcntl_RW, SPU_RUNCNTL_STOP);
eieio();
POLL_WHILE_TRUE(in_be32(&prob->spu_status_R) &
SPU_STATUS_RUNNING);
}
}
static inline void restore_ls_16kb(struct spu_state *csa, struct spu *spu)
{
unsigned long addr = (unsigned long)&csa->lscsa->ls[0];
unsigned int ls_offset = 0x0;
unsigned int size = 16384;
unsigned int tag = 0;
unsigned int rclass = 0;
unsigned int cmd = MFC_GET_CMD;
/* Restore, Step 44:
* Issue a DMA command to restore the first
* 16kb of local storage from CSA.
*/
send_mfc_dma(spu, addr, ls_offset, size, tag, rclass, cmd);
}
static inline void suspend_mfc(struct spu_state *csa, struct spu *spu)
{
struct spu_priv2 __iomem *priv2 = spu->priv2;
/* Restore, Step 47.
* Write MFC_Cntl[Sc,Sm]='1','0' to suspend
* the queue.
*/
out_be64(&priv2->mfc_control_RW, MFC_CNTL_SUSPEND_DMA_QUEUE);
eieio();
}
static inline void clear_interrupts(struct spu_state *csa, struct spu *spu)
{
/* Restore, Step 49:
* Write INT_MASK_class0 with value of 0.
* Write INT_MASK_class1 with value of 0.
* Write INT_MASK_class2 with value of 0.
* Write INT_STAT_class0 with value of -1.
* Write INT_STAT_class1 with value of -1.
* Write INT_STAT_class2 with value of -1.
*/
spin_lock_irq(&spu->register_lock);
spu_int_mask_set(spu, 0, 0ul);
spu_int_mask_set(spu, 1, 0ul);
spu_int_mask_set(spu, 2, 0ul);
spu_int_stat_clear(spu, 0, CLASS0_INTR_MASK);
spu_int_stat_clear(spu, 1, CLASS1_INTR_MASK);
spu_int_stat_clear(spu, 2, CLASS2_INTR_MASK);
spin_unlock_irq(&spu->register_lock);
}
static inline void restore_mfc_queues(struct spu_state *csa, struct spu *spu)
{
struct spu_priv2 __iomem *priv2 = spu->priv2;
int i;
/* Restore, Step 50:
* If MFC_Cntl[Se]!=0 then restore
* MFC command queues.
*/
if ((csa->priv2.mfc_control_RW & MFC_CNTL_DMA_QUEUES_EMPTY_MASK) == 0) {
for (i = 0; i < 8; i++) {
out_be64(&priv2->puq[i].mfc_cq_data0_RW,
csa->priv2.puq[i].mfc_cq_data0_RW);
out_be64(&priv2->puq[i].mfc_cq_data1_RW,
csa->priv2.puq[i].mfc_cq_data1_RW);
out_be64(&priv2->puq[i].mfc_cq_data2_RW,
csa->priv2.puq[i].mfc_cq_data2_RW);
out_be64(&priv2->puq[i].mfc_cq_data3_RW,
csa->priv2.puq[i].mfc_cq_data3_RW);
}
for (i = 0; i < 16; i++) {
out_be64(&priv2->spuq[i].mfc_cq_data0_RW,
csa->priv2.spuq[i].mfc_cq_data0_RW);
out_be64(&priv2->spuq[i].mfc_cq_data1_RW,
csa->priv2.spuq[i].mfc_cq_data1_RW);
out_be64(&priv2->spuq[i].mfc_cq_data2_RW,
csa->priv2.spuq[i].mfc_cq_data2_RW);
out_be64(&priv2->spuq[i].mfc_cq_data3_RW,
csa->priv2.spuq[i].mfc_cq_data3_RW);
}
}
eieio();
}
static inline void restore_ppu_querymask(struct spu_state *csa, struct spu *spu)
{
struct spu_problem __iomem *prob = spu->problem;
/* Restore, Step 51:
* Restore the PPU_QueryMask register from CSA.
*/
out_be32(&prob->dma_querymask_RW, csa->prob.dma_querymask_RW);
eieio();
}
static inline void restore_ppu_querytype(struct spu_state *csa, struct spu *spu)
{
struct spu_problem __iomem *prob = spu->problem;
/* Restore, Step 52:
* Restore the PPU_QueryType register from CSA.
*/
out_be32(&prob->dma_querytype_RW, csa->prob.dma_querytype_RW);
eieio();
}
static inline void restore_mfc_csr_tsq(struct spu_state *csa, struct spu *spu)
{
struct spu_priv2 __iomem *priv2 = spu->priv2;
/* Restore, Step 53:
* Restore the MFC_CSR_TSQ register from CSA.
*/
out_be64(&priv2->spu_tag_status_query_RW,
csa->priv2.spu_tag_status_query_RW);
eieio();
}
static inline void restore_mfc_csr_cmd(struct spu_state *csa, struct spu *spu)
{
struct spu_priv2 __iomem *priv2 = spu->priv2;
/* Restore, Step 54:
* Restore the MFC_CSR_CMD1 and MFC_CSR_CMD2
* registers from CSA.
*/
out_be64(&priv2->spu_cmd_buf1_RW, csa->priv2.spu_cmd_buf1_RW);
out_be64(&priv2->spu_cmd_buf2_RW, csa->priv2.spu_cmd_buf2_RW);
eieio();
}
static inline void restore_mfc_csr_ato(struct spu_state *csa, struct spu *spu)
{
struct spu_priv2 __iomem *priv2 = spu->priv2;
/* Restore, Step 55:
* Restore the MFC_CSR_ATO register from CSA.
*/
out_be64(&priv2->spu_atomic_status_RW, csa->priv2.spu_atomic_status_RW);
}
static inline void restore_mfc_tclass_id(struct spu_state *csa, struct spu *spu)
{
/* Restore, Step 56:
* Restore the MFC_TCLASS_ID register from CSA.
*/
spu_mfc_tclass_id_set(spu, csa->priv1.mfc_tclass_id_RW);
eieio();
}
static inline void set_llr_event(struct spu_state *csa, struct spu *spu)
{
u64 ch0_cnt, ch0_data;
u64 ch1_data;
/* Restore, Step 57:
* Set the Lock Line Reservation Lost Event by:
* 1. OR CSA.SPU_Event_Status with bit 21 (Lr) set to 1.
* 2. If CSA.SPU_Channel_0_Count=0 and
* CSA.SPU_Wr_Event_Mask[Lr]=1 and
* CSA.SPU_Event_Status[Lr]=0 then set
* CSA.SPU_Event_Status_Count=1.
*/
ch0_cnt = csa->spu_chnlcnt_RW[0];
ch0_data = csa->spu_chnldata_RW[0];
ch1_data = csa->spu_chnldata_RW[1];
csa->spu_chnldata_RW[0] |= MFC_LLR_LOST_EVENT;
if ((ch0_cnt == 0) && !(ch0_data & MFC_LLR_LOST_EVENT) &&
(ch1_data & MFC_LLR_LOST_EVENT)) {
csa->spu_chnlcnt_RW[0] = 1;
}
}
static inline void restore_decr_wrapped(struct spu_state *csa, struct spu *spu)
{
/* Restore, Step 58:
* If the status of the CSA software decrementer
* "wrapped" flag is set, OR in a '1' to
* CSA.SPU_Event_Status[Tm].
*/
if (!(csa->lscsa->decr_status.slot[0] & SPU_DECR_STATUS_WRAPPED))
return;
if ((csa->spu_chnlcnt_RW[0] == 0) &&
(csa->spu_chnldata_RW[1] & 0x20) &&
!(csa->spu_chnldata_RW[0] & 0x20))
csa->spu_chnlcnt_RW[0] = 1;
csa->spu_chnldata_RW[0] |= 0x20;
}
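/*
* Note on the channel indices used by the two helpers above (assumed
* from the CBE channel map, not stated in this file): spu_chnldata_RW[0]
* and spu_chnlcnt_RW[0] shadow SPU_RdEventStat, while spu_chnldata_RW[1]
* shadows SPU_WrEventMask; MFC_LLR_LOST_EVENT is the Lr bit and the
* literal 0x20 corresponds to the decrementer (Tm) event bit. Both
* helpers also set the saved channel 0 count to 1 when the injected
* event is unmasked and not already pending, so the restored SPU sees
* it as a newly arrived event.
*/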
static inline void restore_ch_part1(struct spu_state *csa, struct spu *spu)
{
struct spu_priv2 __iomem *priv2 = spu->priv2;
u64 idx, ch_indices[] = { 0UL, 3UL, 4UL, 24UL, 25UL, 27UL };
int i;
/* Restore, Step 59:
* Restore the following CH: [0,3,4,24,25,27]
*/
for (i = 0; i < ARRAY_SIZE(ch_indices); i++) {
idx = ch_indices[i];
out_be64(&priv2->spu_chnlcntptr_RW, idx);
eieio();
out_be64(&priv2->spu_chnldata_RW, csa->spu_chnldata_RW[idx]);
out_be64(&priv2->spu_chnlcnt_RW, csa->spu_chnlcnt_RW[idx]);
eieio();
}
}
static inline void restore_ch_part2(struct spu_state *csa, struct spu *spu)
{
struct spu_priv2 __iomem *priv2 = spu->priv2;
u64 ch_indices[3] = { 9UL, 21UL, 23UL };
u64 ch_counts[3] = { 1UL, 16UL, 1UL };
u64 idx;
int i;
/* Restore, Step 60:
* Restore the following CH: [9,21,23].
*/
ch_counts[0] = 1UL;
ch_counts[1] = csa->spu_chnlcnt_RW[21];
ch_counts[2] = 1UL;
for (i = 0; i < 3; i++) {
idx = ch_indices[i];
out_be64(&priv2->spu_chnlcntptr_RW, idx);
eieio();
out_be64(&priv2->spu_chnlcnt_RW, ch_counts[i]);
eieio();
}
}
static inline void restore_spu_lslr(struct spu_state *csa, struct spu *spu)
{
struct spu_priv2 __iomem *priv2 = spu->priv2;
/* Restore, Step 61:
* Restore the SPU_LSLR register from CSA.
*/
out_be64(&priv2->spu_lslr_RW, csa->priv2.spu_lslr_RW);
eieio();
}
static inline void restore_spu_cfg(struct spu_state *csa, struct spu *spu)
{
struct spu_priv2 __iomem *priv2 = spu->priv2;
/* Restore, Step 62:
* Restore the SPU_Cfg register from CSA.
*/
out_be64(&priv2->spu_cfg_RW, csa->priv2.spu_cfg_RW);
eieio();
}
static inline void restore_pm_trace(struct spu_state *csa, struct spu *spu)
{
/* Restore, Step 63:
* Restore PM_Trace_Tag_Wait_Mask from CSA.
* Not performed by this implementation.
*/
}
static inline void restore_spu_npc(struct spu_state *csa, struct spu *spu)
{
struct spu_problem __iomem *prob = spu->problem;
/* Restore, Step 64:
* Restore SPU_NPC from CSA.
*/
out_be32(&prob->spu_npc_RW, csa->prob.spu_npc_RW);
eieio();
}
static inline void restore_spu_mb(struct spu_state *csa, struct spu *spu)
{
struct spu_priv2 __iomem *priv2 = spu->priv2;
int i;
/* Restore, Step 65:
* Restore MFC_RdSPU_MB from CSA.
*/
out_be64(&priv2->spu_chnlcntptr_RW, 29UL);
eieio();
out_be64(&priv2->spu_chnlcnt_RW, csa->spu_chnlcnt_RW[29]);
for (i = 0; i < 4; i++) {
out_be64(&priv2->spu_chnldata_RW, csa->spu_mailbox_data[i]);
}
eieio();
}
static inline void check_ppu_mb_stat(struct spu_state *csa, struct spu *spu)
{
struct spu_problem __iomem *prob = spu->problem;
u32 dummy = 0;
/* Restore, Step 66:
* If CSA.MB_Stat[P]=0 (mailbox empty) then
* read from the PPU_MB register.
*/
if ((csa->prob.mb_stat_R & 0xFF) == 0) {
dummy = in_be32(&prob->pu_mb_R);
eieio();
}
}
static inline void check_ppuint_mb_stat(struct spu_state *csa, struct spu *spu)
{
struct spu_priv2 __iomem *priv2 = spu->priv2;
u64 dummy = 0UL;
/* Restore, Step 66:
* If CSA.MB_Stat[I]=0 (mailbox empty) then
* read from the PPUINT_MB register.
*/
if ((csa->prob.mb_stat_R & 0xFF0000) == 0) {
dummy = in_be64(&priv2->puint_mb_R);
eieio();
spu_int_stat_clear(spu, 2, CLASS2_ENABLE_MAILBOX_INTR);
eieio();
}
}
static inline void restore_mfc_sr1(struct spu_state *csa, struct spu *spu)
{
/* Restore, Step 69:
* Restore the MFC_SR1 register from CSA.
*/
spu_mfc_sr1_set(spu, csa->priv1.mfc_sr1_RW);
eieio();
}
static inline void set_int_route(struct spu_state *csa, struct spu *spu)
{
struct spu_context *ctx = spu->ctx;
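/* NEW (not part of the numbered Book IV sequence):
* route this SPU's interrupts to the CPU on which the
* owning context last ran.
*/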
spu_cpu_affinity_set(spu, ctx->last_ran);
}
static inline void restore_other_spu_access(struct spu_state *csa,
struct spu *spu)
{
/* Restore, Step 70:
* Restore other SPU mappings to this SPU. TBD.
*/
}
static inline void restore_spu_runcntl(struct spu_state *csa, struct spu *spu)
{
struct spu_problem __iomem *prob = spu->problem;
/* Restore, Step 71:
* If CSA.SPU_Status[R]=1 then write
* SPU_RunCntl[R0R1]='01'.
*/
if (csa->prob.spu_status_R & SPU_STATUS_RUNNING) {
out_be32(&prob->spu_runcntl_RW, SPU_RUNCNTL_RUNNABLE);
eieio();
}
}
static inline void restore_mfc_cntl(struct spu_state *csa, struct spu *spu)
{
struct spu_priv2 __iomem *priv2 = spu->priv2;
/* Restore, Step 72:
* Restore the MFC_CNTL register for the CSA.
*/
out_be64(&priv2->mfc_control_RW, csa->priv2.mfc_control_RW);
eieio();
/*
* The queue is put back into the same state that was evident prior to
* the context switch. The suspend flag is added to the saved state in
* the csa, if the operational state was suspending or suspended. In
* this case, the code that suspended the mfc is responsible for
* continuing it. Note that SPE faults do not change the operational
* state of the spu.
*/
}
static inline void enable_user_access(struct spu_state *csa, struct spu *spu)
{
/* Restore, Step 73:
* Enable user-space access (if provided) to this
* SPU by mapping the virtual pages assigned to
* the SPU memory-mapped I/O (MMIO) for problem
* state. TBD.
*/
}
static inline void reset_switch_active(struct spu_state *csa, struct spu *spu)
{
/* Restore, Step 74:
* Reset the "context switch active" flag.
* Not performed by this implementation.
*/
}
static inline void reenable_interrupts(struct spu_state *csa, struct spu *spu)
{
/* Restore, Step 75:
* Re-enable SPU interrupts.
*/
spin_lock_irq(&spu->register_lock);
spu_int_mask_set(spu, 0, csa->priv1.int_mask_class0_RW);
spu_int_mask_set(spu, 1, csa->priv1.int_mask_class1_RW);
spu_int_mask_set(spu, 2, csa->priv1.int_mask_class2_RW);
spin_unlock_irq(&spu->register_lock);
}
static int quiece_spu(struct spu_state *prev, struct spu *spu)
{
/*
* Combined steps 2-18 of SPU context save sequence, which
* quiesce the SPU state (disable SPU execution, MFC command
* queues, decrementer, SPU interrupts, etc.).
*
* Returns 0 on success.
* 2 if failed step 2.
* 6 if failed step 6.
*/
if (check_spu_isolate(prev, spu)) { /* Step 2. */
return 2;
}
disable_interrupts(prev, spu); /* Step 3. */
set_watchdog_timer(prev, spu); /* Step 4. */
inhibit_user_access(prev, spu); /* Step 5. */
if (check_spu_isolate(prev, spu)) { /* Step 6. */
return 6;
}
set_switch_pending(prev, spu); /* Step 7. */
save_mfc_cntl(prev, spu); /* Step 8. */
save_spu_runcntl(prev, spu); /* Step 9. */
save_mfc_sr1(prev, spu); /* Step 10. */
save_spu_status(prev, spu); /* Step 11. */
save_mfc_stopped_status(prev, spu); /* Step 12. */
halt_mfc_decr(prev, spu); /* Step 13. */
save_timebase(prev, spu); /* Step 14. */
remove_other_spu_access(prev, spu); /* Step 15. */
do_mfc_mssync(prev, spu); /* Step 16. */
issue_mfc_tlbie(prev, spu); /* Step 17. */
handle_pending_interrupts(prev, spu); /* Step 18. */
return 0;
}
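/*
* Note: the non-zero returns above (2 and 6) mean the SPU was found in
* isolate state and could not be quiesced; __do_spu_save() below reacts
* to them by harvesting the SPU instead of completing the save, and
* spu_save() treats them as non-fatal.
*/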
static void save_csa(struct spu_state *prev, struct spu *spu)
{
/*
* Combine steps 19-44 of SPU context save sequence, which
* save regions of the privileged & problem state areas.
*/
save_mfc_queues(prev, spu); /* Step 19. */
save_ppu_querymask(prev, spu); /* Step 20. */
save_ppu_querytype(prev, spu); /* Step 21. */
save_ppu_tagstatus(prev, spu); /* NEW. */
save_mfc_csr_tsq(prev, spu); /* Step 22. */
save_mfc_csr_cmd(prev, spu); /* Step 23. */
save_mfc_csr_ato(prev, spu); /* Step 24. */
save_mfc_tclass_id(prev, spu); /* Step 25. */
set_mfc_tclass_id(prev, spu); /* Step 26. */
save_mfc_cmd(prev, spu); /* Step 26a - moved from 44. */
purge_mfc_queue(prev, spu); /* Step 27. */
wait_purge_complete(prev, spu); /* Step 28. */
setup_mfc_sr1(prev, spu); /* Step 30. */
save_spu_npc(prev, spu); /* Step 31. */
save_spu_privcntl(prev, spu); /* Step 32. */
reset_spu_privcntl(prev, spu); /* Step 33. */
save_spu_lslr(prev, spu); /* Step 34. */
reset_spu_lslr(prev, spu); /* Step 35. */
save_spu_cfg(prev, spu); /* Step 36. */
save_pm_trace(prev, spu); /* Step 37. */
save_mfc_rag(prev, spu); /* Step 38. */
save_ppu_mb_stat(prev, spu); /* Step 39. */
save_ppu_mb(prev, spu); /* Step 40. */
save_ppuint_mb(prev, spu); /* Step 41. */
save_ch_part1(prev, spu); /* Step 42. */
save_spu_mb(prev, spu); /* Step 43. */
reset_ch(prev, spu); /* Step 45. */
}
static void save_lscsa(struct spu_state *prev, struct spu *spu)
{
/*
* Perform steps 46-57 of SPU context save sequence,
* which save regions of the local store and register
* file.
*/
resume_mfc_queue(prev, spu); /* Step 46. */
/* Step 47. */
setup_mfc_slbs(prev, spu, spu_save_code, sizeof(spu_save_code));
set_switch_active(prev, spu); /* Step 48. */
enable_interrupts(prev, spu); /* Step 49. */
save_ls_16kb(prev, spu); /* Step 50. */
set_spu_npc(prev, spu); /* Step 51. */
set_signot1(prev, spu); /* Step 52. */
set_signot2(prev, spu); /* Step 53. */
send_save_code(prev, spu); /* Step 54. */
set_ppu_querymask(prev, spu); /* Step 55. */
wait_tag_complete(prev, spu); /* Step 56. */
wait_spu_stopped(prev, spu); /* Step 57. */
}
static void force_spu_isolate_exit(struct spu *spu)
{
struct spu_problem __iomem *prob = spu->problem;
struct spu_priv2 __iomem *priv2 = spu->priv2;
/* Stop SPE execution and wait for completion. */
out_be32(&prob->spu_runcntl_RW, SPU_RUNCNTL_STOP);
iobarrier_rw();
POLL_WHILE_TRUE(in_be32(&prob->spu_status_R) & SPU_STATUS_RUNNING);
/* Restart SPE master runcntl. */
spu_mfc_sr1_set(spu, MFC_STATE1_MASTER_RUN_CONTROL_MASK);
iobarrier_w();
/* Initiate isolate exit request and wait for completion. */
out_be64(&priv2->spu_privcntl_RW, 4LL);
iobarrier_w();
out_be32(&prob->spu_runcntl_RW, 2);
iobarrier_rw();
POLL_WHILE_FALSE((in_be32(&prob->spu_status_R)
& SPU_STATUS_STOPPED_BY_STOP));
/* Reset load request to normal. */
out_be64(&priv2->spu_privcntl_RW, SPU_PRIVCNT_LOAD_REQUEST_NORMAL);
iobarrier_w();
}
/**
* stop_spu_isolate
* Check SPU run-control state and force isolated
* exit function as necessary.
*/
static void stop_spu_isolate(struct spu *spu)
{
struct spu_problem __iomem *prob = spu->problem;
if (in_be32(&prob->spu_status_R) & SPU_STATUS_ISOLATED_STATE) {
/* The SPU is in isolated state; the only way
* to get it out is to perform an isolated
* exit (clean) operation.
*/
force_spu_isolate_exit(spu);
}
}
static void harvest(struct spu_state *prev, struct spu *spu)
{
/*
* Perform steps 2-25 of SPU context restore sequence,
* which resets an SPU either after a failed save, or
* when using SPU for first time.
*/
disable_interrupts(prev, spu); /* Step 2. */
inhibit_user_access(prev, spu); /* Step 3. */
terminate_spu_app(prev, spu); /* Step 4. */
set_switch_pending(prev, spu); /* Step 5. */
stop_spu_isolate(spu); /* NEW. */
remove_other_spu_access(prev, spu); /* Step 6. */
suspend_mfc_and_halt_decr(prev, spu); /* Step 7. */
wait_suspend_mfc_complete(prev, spu); /* Step 8. */
if (!suspend_spe(prev, spu)) /* Step 9. */
clear_spu_status(prev, spu); /* Step 10. */
do_mfc_mssync(prev, spu); /* Step 11. */
issue_mfc_tlbie(prev, spu); /* Step 12. */
handle_pending_interrupts(prev, spu); /* Step 13. */
purge_mfc_queue(prev, spu); /* Step 14. */
wait_purge_complete(prev, spu); /* Step 15. */
reset_spu_privcntl(prev, spu); /* Step 16. */
reset_spu_lslr(prev, spu); /* Step 17. */
setup_mfc_sr1(prev, spu); /* Step 18. */
spu_invalidate_slbs(spu); /* Step 19. */
reset_ch_part1(prev, spu); /* Step 20. */
reset_ch_part2(prev, spu); /* Step 21. */
enable_interrupts(prev, spu); /* Step 22. */
set_switch_active(prev, spu); /* Step 23. */
set_mfc_tclass_id(prev, spu); /* Step 24. */
resume_mfc_queue(prev, spu); /* Step 25. */
}
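/*
* Note: harvest() is also entered with a NULL save area from
* spu_restore() below, so the helpers called here (for example
* disable_interrupts()) are written to tolerate csa == NULL.
*/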
static void restore_lscsa(struct spu_state *next, struct spu *spu)
{
/*
* Perform steps 26-40 of SPU context restore sequence,
* which restores regions of the local store and register
* file.
*/
set_watchdog_timer(next, spu); /* Step 26. */
setup_spu_status_part1(next, spu); /* Step 27. */
setup_spu_status_part2(next, spu); /* Step 28. */
restore_mfc_rag(next, spu); /* Step 29. */
/* Step 30. */
setup_mfc_slbs(next, spu, spu_restore_code, sizeof(spu_restore_code));
set_spu_npc(next, spu); /* Step 31. */
set_signot1(next, spu); /* Step 32. */
set_signot2(next, spu); /* Step 33. */
setup_decr(next, spu); /* Step 34. */
setup_ppu_mb(next, spu); /* Step 35. */
setup_ppuint_mb(next, spu); /* Step 36. */
send_restore_code(next, spu); /* Step 37. */
set_ppu_querymask(next, spu); /* Step 38. */
wait_tag_complete(next, spu); /* Step 39. */
wait_spu_stopped(next, spu); /* Step 40. */
}
static void restore_csa(struct spu_state *next, struct spu *spu)
{
/*
* Combine steps 41-76 of SPU context restore sequence, which
* restore regions of the privileged & problem state areas.
*/
restore_spu_privcntl(next, spu); /* Step 41. */
restore_status_part1(next, spu); /* Step 42. */
restore_status_part2(next, spu); /* Step 43. */
restore_ls_16kb(next, spu); /* Step 44. */
wait_tag_complete(next, spu); /* Step 45. */
suspend_mfc(next, spu); /* Step 46. */
wait_suspend_mfc_complete(next, spu); /* Step 47. */
issue_mfc_tlbie(next, spu); /* Step 48. */
clear_interrupts(next, spu); /* Step 49. */
restore_mfc_queues(next, spu); /* Step 50. */
restore_ppu_querymask(next, spu); /* Step 51. */
restore_ppu_querytype(next, spu); /* Step 52. */
restore_mfc_csr_tsq(next, spu); /* Step 53. */
restore_mfc_csr_cmd(next, spu); /* Step 54. */
restore_mfc_csr_ato(next, spu); /* Step 55. */
restore_mfc_tclass_id(next, spu); /* Step 56. */
set_llr_event(next, spu); /* Step 57. */
restore_decr_wrapped(next, spu); /* Step 58. */
restore_ch_part1(next, spu); /* Step 59. */
restore_ch_part2(next, spu); /* Step 60. */
restore_spu_lslr(next, spu); /* Step 61. */
restore_spu_cfg(next, spu); /* Step 62. */
restore_pm_trace(next, spu); /* Step 63. */
restore_spu_npc(next, spu); /* Step 64. */
restore_spu_mb(next, spu); /* Step 65. */
check_ppu_mb_stat(next, spu); /* Step 66. */
check_ppuint_mb_stat(next, spu); /* Step 67. */
spu_invalidate_slbs(spu); /* Modified Step 68. */
restore_mfc_sr1(next, spu); /* Step 69. */
set_int_route(next, spu); /* NEW */
restore_other_spu_access(next, spu); /* Step 70. */
restore_spu_runcntl(next, spu); /* Step 71. */
restore_mfc_cntl(next, spu); /* Step 72. */
enable_user_access(next, spu); /* Step 73. */
reset_switch_active(next, spu); /* Step 74. */
reenable_interrupts(next, spu); /* Step 75. */
}
static int __do_spu_save(struct spu_state *prev, struct spu *spu)
{
int rc;
/*
* SPU context save can be broken into three phases:
*
* (a) quiesce [steps 2-16].
* (b) save of CSA, performed by PPE [steps 17-42]
* (c) save of LSCSA, mostly performed by SPU [steps 43-52].
*
* Returns 0 on success.
* 2,6 if failed to quiesce SPU
* 53 if SPU-side of save failed.
*/
rc = quiece_spu(prev, spu); /* Steps 2-16. */
switch (rc) {
default:
case 2:
case 6:
harvest(prev, spu);
return rc;
break;
case 0:
break;
}
save_csa(prev, spu); /* Steps 17-43. */
save_lscsa(prev, spu); /* Steps 44-53. */
return check_save_status(prev, spu); /* Step 54. */
}
static int __do_spu_restore(struct spu_state *next, struct spu *spu)
{
int rc;
/*
* SPU context restore can be broken into three phases:
*
* (a) harvest (or reset) SPU [steps 2-24].
* (b) restore LSCSA [steps 25-40], mostly performed by SPU.
* (c) restore CSA [steps 41-76], performed by PPE.
*
* The 'harvest' step is not performed here; spu_restore()
* invokes it before calling this function.
*/
restore_lscsa(next, spu); /* Steps 24-39. */
rc = check_restore_status(next, spu); /* Step 40. */
switch (rc) {
default:
/* Failed. Return now. */
return rc;
break;
case 0:
/* Fall through to next step. */
break;
}
restore_csa(next, spu);
return 0;
}
/**
* spu_save - SPU context save, with locking.
* @prev: pointer to SPU context save area, to be saved.
* @spu: pointer to SPU iomem structure.
*
* Acquire locks, perform the save operation then return.
*/
int spu_save(struct spu_state *prev, struct spu *spu)
{
int rc;
acquire_spu_lock(spu); /* Step 1. */
rc = __do_spu_save(prev, spu); /* Steps 2-53. */
release_spu_lock(spu);
if (rc != 0 && rc != 2 && rc != 6) {
panic("%s failed on SPU[%d], rc=%d.\n",
__func__, spu->number, rc);
}
return 0;
}
EXPORT_SYMBOL_GPL(spu_save);
/**
* spu_restore - SPU context restore, with harvest and locking.
* @new: pointer to SPU context save area, to be restored.
* @spu: pointer to SPU iomem structure.
*
* Perform harvest + restore, as we may not be coming
* from a previous successful save operation, and the
* hardware state is unknown.
*/
int spu_restore(struct spu_state *new, struct spu *spu)
{
int rc;
acquire_spu_lock(spu);
harvest(NULL, spu);
spu->slb_replace = 0;
rc = __do_spu_restore(new, spu);
release_spu_lock(spu);
if (rc) {
panic("%s failed on SPU[%d] rc=%d.\n",
__func__, spu->number, rc);
}
return rc;
}
EXPORT_SYMBOL_GPL(spu_restore);
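/*
* Usage sketch for the two exported entry points above (illustrative
* only; the actual call sites live in the spufs scheduler, not in this
* file): a scheduler owning a context save area is expected to pair
* them around unloading and reloading a context, roughly
*
*	spu_save(&ctx->csa, spu);	(context is being unbound)
*	...
*	spu_restore(&ctx->csa, spu);	(context is bound again)
*
* where 'csa' was prepared with spu_init_csa() below and is torn down
* with spu_fini_csa().
*/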
static void init_prob(struct spu_state *csa)
{
csa->spu_chnlcnt_RW[9] = 1;
csa->spu_chnlcnt_RW[21] = 16;
csa->spu_chnlcnt_RW[23] = 1;
csa->spu_chnlcnt_RW[28] = 1;
csa->spu_chnlcnt_RW[30] = 1;
csa->prob.spu_runcntl_RW = SPU_RUNCNTL_STOP;
csa->prob.mb_stat_R = 0x000400;
}
static void init_priv1(struct spu_state *csa)
{
/* Enable decode, relocate, tlbie response, master runcntl. */
csa->priv1.mfc_sr1_RW = MFC_STATE1_LOCAL_STORAGE_DECODE_MASK |
MFC_STATE1_MASTER_RUN_CONTROL_MASK |
MFC_STATE1_PROBLEM_STATE_MASK |
MFC_STATE1_RELOCATE_MASK | MFC_STATE1_BUS_TLBIE_MASK;
/* Enable OS-specific set of interrupts. */
csa->priv1.int_mask_class0_RW = CLASS0_ENABLE_DMA_ALIGNMENT_INTR |
CLASS0_ENABLE_INVALID_DMA_COMMAND_INTR |
CLASS0_ENABLE_SPU_ERROR_INTR;
csa->priv1.int_mask_class1_RW = CLASS1_ENABLE_SEGMENT_FAULT_INTR |
CLASS1_ENABLE_STORAGE_FAULT_INTR;
csa->priv1.int_mask_class2_RW = CLASS2_ENABLE_SPU_STOP_INTR |
CLASS2_ENABLE_SPU_HALT_INTR |
CLASS2_ENABLE_SPU_DMA_TAG_GROUP_COMPLETE_INTR;
}
static void init_priv2(struct spu_state *csa)
{
csa->priv2.spu_lslr_RW = LS_ADDR_MASK;
csa->priv2.mfc_control_RW = MFC_CNTL_RESUME_DMA_QUEUE |
MFC_CNTL_NORMAL_DMA_QUEUE_OPERATION |
MFC_CNTL_DMA_QUEUES_EMPTY_MASK;
}
/**
* spu_init_csa - allocate and initialize an SPU context save area.
*
* Allocate and initialize the contents of an SPU context save area.
* This includes enabling address translation, interrupt masks, etc.,
* as appropriate for the given OS environment.
*
* Note that storage for the 'lscsa' is allocated separately,
* as it is by far the largest of the context save regions,
* and may need to be pinned or otherwise specially aligned.
*/
int spu_init_csa(struct spu_state *csa)
{
int rc;
if (!csa)
return -EINVAL;
memset(csa, 0, sizeof(struct spu_state));
rc = spu_alloc_lscsa(csa);
if (rc)
return rc;
spin_lock_init(&csa->register_lock);
init_prob(csa);
init_priv1(csa);
init_priv2(csa);
return 0;
}
void spu_fini_csa(struct spu_state *csa)
{
spu_free_lscsa(csa);
}
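/*
* Minimal lifecycle sketch for the two helpers above (illustrative; the
* error value is simply whatever spu_alloc_lscsa() returned):
*
*	struct spu_state csa;
*	int ret;
*
*	ret = spu_init_csa(&csa);
*	if (ret)
*		return ret;
*	...
*	spu_fini_csa(&csa);
*/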
| gpl-2.0 |
SlimDev/kernel_samsung_msm8660-common | arch/powerpc/platforms/cell/spufs/switch.c | 4525 | 64573 | /*
* spu_switch.c
*
* (C) Copyright IBM Corp. 2005
*
* Author: Mark Nutter <mnutter@us.ibm.com>
*
* Host-side part of SPU context switch sequence outlined in
* Synergistic Processor Element, Book IV.
*
* A fully preemptive switch of an SPE is very expensive in terms
* of time and system resources. SPE Book IV indicates that SPE
* allocation should follow a "serially reusable device" model,
* in which the SPE is assigned a task until it completes. When
* this is not possible, this sequence may be used to preemptively
* save, and then later (optionally) restore the context of a
* program executing on an SPE.
*
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2, or (at your option)
* any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/hardirq.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/smp.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <asm/io.h>
#include <asm/spu.h>
#include <asm/spu_priv1.h>
#include <asm/spu_csa.h>
#include <asm/mmu_context.h>
#include "spufs.h"
#include "spu_save_dump.h"
#include "spu_restore_dump.h"
#if 0
#define POLL_WHILE_TRUE(_c) { \
do { \
} while (_c); \
}
#else
#define RELAX_SPIN_COUNT 1000
#define POLL_WHILE_TRUE(_c) { \
do { \
int _i; \
for (_i=0; _i<RELAX_SPIN_COUNT && (_c); _i++) { \
cpu_relax(); \
} \
if (unlikely(_c)) yield(); \
else break; \
} while (_c); \
}
#endif /* debug */
#define POLL_WHILE_FALSE(_c) POLL_WHILE_TRUE(!(_c))
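/*
* In the non-debug variant above, both polling helpers spin for up to
* RELAX_SPIN_COUNT iterations with cpu_relax() and then yield() before
* re-testing, so long waits periodically give up the CPU instead of
* spinning continuously. Typical use in this file:
*
*	POLL_WHILE_TRUE(in_be32(&prob->spu_status_R) & SPU_STATUS_RUNNING);
*/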
static inline void acquire_spu_lock(struct spu *spu)
{
/* Save, Step 1:
* Restore, Step 1:
* Acquire SPU-specific mutual exclusion lock.
* TBD.
*/
}
static inline void release_spu_lock(struct spu *spu)
{
/* Restore, Step 76:
* Release SPU-specific mutual exclusion lock.
* TBD.
*/
}
static inline int check_spu_isolate(struct spu_state *csa, struct spu *spu)
{
struct spu_problem __iomem *prob = spu->problem;
u32 isolate_state;
/* Save, Step 2:
* Save, Step 6:
* If SPU_Status[E,L,IS] any field is '1', this
* SPU is in isolate state and cannot be context
* saved at this time.
*/
isolate_state = SPU_STATUS_ISOLATED_STATE |
SPU_STATUS_ISOLATED_LOAD_STATUS | SPU_STATUS_ISOLATED_EXIT_STATUS;
return (in_be32(&prob->spu_status_R) & isolate_state) ? 1 : 0;
}
static inline void disable_interrupts(struct spu_state *csa, struct spu *spu)
{
/* Save, Step 3:
* Restore, Step 2:
* Save INT_Mask_class0 in CSA.
* Write INT_MASK_class0 with value of 0.
* Save INT_Mask_class1 in CSA.
* Write INT_MASK_class1 with value of 0.
* Save INT_Mask_class2 in CSA.
* Write INT_MASK_class2 with value of 0.
* Synchronize all three interrupts to be sure
* we no longer execute a handler on another CPU.
*/
spin_lock_irq(&spu->register_lock);
if (csa) {
csa->priv1.int_mask_class0_RW = spu_int_mask_get(spu, 0);
csa->priv1.int_mask_class1_RW = spu_int_mask_get(spu, 1);
csa->priv1.int_mask_class2_RW = spu_int_mask_get(spu, 2);
}
spu_int_mask_set(spu, 0, 0ul);
spu_int_mask_set(spu, 1, 0ul);
spu_int_mask_set(spu, 2, 0ul);
eieio();
spin_unlock_irq(&spu->register_lock);
/*
* This flag needs to be set before calling synchronize_irq so
* that the update will be visible to the relevant handlers
* via a simple load.
*/
set_bit(SPU_CONTEXT_SWITCH_PENDING, &spu->flags);
clear_bit(SPU_CONTEXT_FAULT_PENDING, &spu->flags);
synchronize_irq(spu->irqs[0]);
synchronize_irq(spu->irqs[1]);
synchronize_irq(spu->irqs[2]);
}
static inline void set_watchdog_timer(struct spu_state *csa, struct spu *spu)
{
/* Save, Step 4:
* Restore, Step 25.
* Set a software watchdog timer, which specifies the
* maximum allowable time for a context save sequence.
*
* For the present, this implementation will not set a global
* watchdog timer, as virtualization & variable system load
* may cause unpredictable execution times.
*/
}
static inline void inhibit_user_access(struct spu_state *csa, struct spu *spu)
{
/* Save, Step 5:
* Restore, Step 3:
* Inhibit user-space access (if provided) to this
* SPU by unmapping the virtual pages assigned to
* the SPU memory-mapped I/O (MMIO) for problem
* state. TBD.
*/
}
static inline void set_switch_pending(struct spu_state *csa, struct spu *spu)
{
/* Save, Step 7:
* Restore, Step 5:
* Set a software context switch pending flag.
* Done above in Step 3 - disable_interrupts().
*/
}
static inline void save_mfc_cntl(struct spu_state *csa, struct spu *spu)
{
struct spu_priv2 __iomem *priv2 = spu->priv2;
/* Save, Step 8:
* Suspend DMA and save MFC_CNTL.
*/
switch (in_be64(&priv2->mfc_control_RW) &
MFC_CNTL_SUSPEND_DMA_STATUS_MASK) {
case MFC_CNTL_SUSPEND_IN_PROGRESS:
POLL_WHILE_FALSE((in_be64(&priv2->mfc_control_RW) &
MFC_CNTL_SUSPEND_DMA_STATUS_MASK) ==
MFC_CNTL_SUSPEND_COMPLETE);
/* fall through */
case MFC_CNTL_SUSPEND_COMPLETE:
if (csa)
csa->priv2.mfc_control_RW =
in_be64(&priv2->mfc_control_RW) |
MFC_CNTL_SUSPEND_DMA_QUEUE;
break;
case MFC_CNTL_NORMAL_DMA_QUEUE_OPERATION:
out_be64(&priv2->mfc_control_RW, MFC_CNTL_SUSPEND_DMA_QUEUE);
POLL_WHILE_FALSE((in_be64(&priv2->mfc_control_RW) &
MFC_CNTL_SUSPEND_DMA_STATUS_MASK) ==
MFC_CNTL_SUSPEND_COMPLETE);
if (csa)
csa->priv2.mfc_control_RW =
in_be64(&priv2->mfc_control_RW) &
~MFC_CNTL_SUSPEND_DMA_QUEUE &
~MFC_CNTL_SUSPEND_MASK;
break;
}
}
static inline void save_spu_runcntl(struct spu_state *csa, struct spu *spu)
{
struct spu_problem __iomem *prob = spu->problem;
/* Save, Step 9:
* Save SPU_Runcntl in the CSA. This value contains
* the "Application Desired State".
*/
csa->prob.spu_runcntl_RW = in_be32(&prob->spu_runcntl_RW);
}
static inline void save_mfc_sr1(struct spu_state *csa, struct spu *spu)
{
/* Save, Step 10:
* Save MFC_SR1 in the CSA.
*/
csa->priv1.mfc_sr1_RW = spu_mfc_sr1_get(spu);
}
static inline void save_spu_status(struct spu_state *csa, struct spu *spu)
{
struct spu_problem __iomem *prob = spu->problem;
/* Save, Step 11:
* Read SPU_Status[R], and save to CSA.
*/
if ((in_be32(&prob->spu_status_R) & SPU_STATUS_RUNNING) == 0) {
csa->prob.spu_status_R = in_be32(&prob->spu_status_R);
} else {
u32 stopped;
out_be32(&prob->spu_runcntl_RW, SPU_RUNCNTL_STOP);
eieio();
POLL_WHILE_TRUE(in_be32(&prob->spu_status_R) &
SPU_STATUS_RUNNING);
stopped =
SPU_STATUS_INVALID_INSTR | SPU_STATUS_SINGLE_STEP |
SPU_STATUS_STOPPED_BY_HALT | SPU_STATUS_STOPPED_BY_STOP;
if ((in_be32(&prob->spu_status_R) & stopped) == 0)
csa->prob.spu_status_R = SPU_STATUS_RUNNING;
else
csa->prob.spu_status_R = in_be32(&prob->spu_status_R);
}
}
static inline void save_mfc_stopped_status(struct spu_state *csa,
struct spu *spu)
{
struct spu_priv2 __iomem *priv2 = spu->priv2;
const u64 mask = MFC_CNTL_DECREMENTER_RUNNING |
MFC_CNTL_DMA_QUEUES_EMPTY;
/* Save, Step 12:
* Read MFC_CNTL[Ds]. Update saved copy of
* CSA.MFC_CNTL[Ds].
*
* update: do the same with MFC_CNTL[Q].
*/
csa->priv2.mfc_control_RW &= ~mask;
csa->priv2.mfc_control_RW |= in_be64(&priv2->mfc_control_RW) & mask;
}
static inline void halt_mfc_decr(struct spu_state *csa, struct spu *spu)
{
struct spu_priv2 __iomem *priv2 = spu->priv2;
/* Save, Step 13:
* Write MFC_CNTL[Dh] set to a '1' to halt
* the decrementer.
*/
out_be64(&priv2->mfc_control_RW,
MFC_CNTL_DECREMENTER_HALTED | MFC_CNTL_SUSPEND_MASK);
eieio();
}
static inline void save_timebase(struct spu_state *csa, struct spu *spu)
{
/* Save, Step 14:
* Read PPE Timebase High and Timebase low registers
* and save in CSA. TBD.
*/
csa->suspend_time = get_cycles();
}
static inline void remove_other_spu_access(struct spu_state *csa,
struct spu *spu)
{
/* Save, Step 15:
* Remove other SPU access to this SPU by unmapping
* this SPU's pages from their address space. TBD.
*/
}
static inline void do_mfc_mssync(struct spu_state *csa, struct spu *spu)
{
struct spu_problem __iomem *prob = spu->problem;
/* Save, Step 16:
* Restore, Step 11.
* Write SPU_MSSync register. Poll SPU_MSSync[P]
* for a value of 0.
*/
out_be64(&prob->spc_mssync_RW, 1UL);
POLL_WHILE_TRUE(in_be64(&prob->spc_mssync_RW) & MS_SYNC_PENDING);
}
static inline void issue_mfc_tlbie(struct spu_state *csa, struct spu *spu)
{
/* Save, Step 17:
* Restore, Step 12.
* Restore, Step 48.
* Write TLB_Invalidate_Entry[IS,VPN,L,Lp]=0 register.
* Then issue a PPE sync instruction.
*/
spu_tlb_invalidate(spu);
mb();
}
static inline void handle_pending_interrupts(struct spu_state *csa,
struct spu *spu)
{
/* Save, Step 18:
* Handle any pending interrupts from this SPU
* here. This is OS or hypervisor specific. One
* option is to re-enable interrupts to handle any
* pending interrupts, with the interrupt handlers
* recognizing the software Context Switch Pending
* flag, to ensure the SPU execution or MFC command
* queue is not restarted. TBD.
*/
}
static inline void save_mfc_queues(struct spu_state *csa, struct spu *spu)
{
struct spu_priv2 __iomem *priv2 = spu->priv2;
int i;
/* Save, Step 19:
* If MFC_Cntl[Se]=0 then save
* MFC command queues.
*/
if ((in_be64(&priv2->mfc_control_RW) & MFC_CNTL_DMA_QUEUES_EMPTY) == 0) {
for (i = 0; i < 8; i++) {
csa->priv2.puq[i].mfc_cq_data0_RW =
in_be64(&priv2->puq[i].mfc_cq_data0_RW);
csa->priv2.puq[i].mfc_cq_data1_RW =
in_be64(&priv2->puq[i].mfc_cq_data1_RW);
csa->priv2.puq[i].mfc_cq_data2_RW =
in_be64(&priv2->puq[i].mfc_cq_data2_RW);
csa->priv2.puq[i].mfc_cq_data3_RW =
in_be64(&priv2->puq[i].mfc_cq_data3_RW);
}
for (i = 0; i < 16; i++) {
csa->priv2.spuq[i].mfc_cq_data0_RW =
in_be64(&priv2->spuq[i].mfc_cq_data0_RW);
csa->priv2.spuq[i].mfc_cq_data1_RW =
in_be64(&priv2->spuq[i].mfc_cq_data1_RW);
csa->priv2.spuq[i].mfc_cq_data2_RW =
in_be64(&priv2->spuq[i].mfc_cq_data2_RW);
csa->priv2.spuq[i].mfc_cq_data3_RW =
in_be64(&priv2->spuq[i].mfc_cq_data3_RW);
}
}
}
static inline void save_ppu_querymask(struct spu_state *csa, struct spu *spu)
{
struct spu_problem __iomem *prob = spu->problem;
/* Save, Step 20:
* Save the PPU_QueryMask register
* in the CSA.
*/
csa->prob.dma_querymask_RW = in_be32(&prob->dma_querymask_RW);
}
static inline void save_ppu_querytype(struct spu_state *csa, struct spu *spu)
{
struct spu_problem __iomem *prob = spu->problem;
/* Save, Step 21:
* Save the PPU_QueryType register
* in the CSA.
*/
csa->prob.dma_querytype_RW = in_be32(&prob->dma_querytype_RW);
}
static inline void save_ppu_tagstatus(struct spu_state *csa, struct spu *spu)
{
struct spu_problem __iomem *prob = spu->problem;
/* Save the Prxy_TagStatus register in the CSA.
*
* It is unnecessary to restore dma_tagstatus_R; however,
* dma_tagstatus_R in the CSA is accessed via backing_ops, so
* we must save it.
*/
csa->prob.dma_tagstatus_R = in_be32(&prob->dma_tagstatus_R);
}
static inline void save_mfc_csr_tsq(struct spu_state *csa, struct spu *spu)
{
struct spu_priv2 __iomem *priv2 = spu->priv2;
/* Save, Step 22:
* Save the MFC_CSR_TSQ register
* in the LSCSA.
*/
csa->priv2.spu_tag_status_query_RW =
in_be64(&priv2->spu_tag_status_query_RW);
}
static inline void save_mfc_csr_cmd(struct spu_state *csa, struct spu *spu)
{
struct spu_priv2 __iomem *priv2 = spu->priv2;
/* Save, Step 23:
* Save the MFC_CSR_CMD1 and MFC_CSR_CMD2
* registers in the CSA.
*/
csa->priv2.spu_cmd_buf1_RW = in_be64(&priv2->spu_cmd_buf1_RW);
csa->priv2.spu_cmd_buf2_RW = in_be64(&priv2->spu_cmd_buf2_RW);
}
static inline void save_mfc_csr_ato(struct spu_state *csa, struct spu *spu)
{
struct spu_priv2 __iomem *priv2 = spu->priv2;
/* Save, Step 24:
* Save the MFC_CSR_ATO register in
* the CSA.
*/
csa->priv2.spu_atomic_status_RW = in_be64(&priv2->spu_atomic_status_RW);
}
static inline void save_mfc_tclass_id(struct spu_state *csa, struct spu *spu)
{
/* Save, Step 25:
* Save the MFC_TCLASS_ID register in
* the CSA.
*/
csa->priv1.mfc_tclass_id_RW = spu_mfc_tclass_id_get(spu);
}
static inline void set_mfc_tclass_id(struct spu_state *csa, struct spu *spu)
{
/* Save, Step 26:
* Restore, Step 23.
* Write the MFC_TCLASS_ID register with
* the value 0x10000000.
*/
spu_mfc_tclass_id_set(spu, 0x10000000);
eieio();
}
static inline void purge_mfc_queue(struct spu_state *csa, struct spu *spu)
{
struct spu_priv2 __iomem *priv2 = spu->priv2;
/* Save, Step 27:
* Restore, Step 14.
* Write MFC_CNTL[Pc]=1 (purge queue).
*/
out_be64(&priv2->mfc_control_RW,
MFC_CNTL_PURGE_DMA_REQUEST |
MFC_CNTL_SUSPEND_MASK);
eieio();
}
static inline void wait_purge_complete(struct spu_state *csa, struct spu *spu)
{
struct spu_priv2 __iomem *priv2 = spu->priv2;
/* Save, Step 28:
* Poll MFC_CNTL[Ps] until value '11' is read
* (purge complete).
*/
POLL_WHILE_FALSE((in_be64(&priv2->mfc_control_RW) &
MFC_CNTL_PURGE_DMA_STATUS_MASK) ==
MFC_CNTL_PURGE_DMA_COMPLETE);
}
static inline void setup_mfc_sr1(struct spu_state *csa, struct spu *spu)
{
/* Save, Step 30:
* Restore, Step 18:
* Write MFC_SR1 with MFC_SR1[D=0,S=1] and
* MFC_SR1[TL,R,Pr,T] set correctly for the
* OS specific environment.
*
* Implementation note: The SPU-side code
* for save/restore is privileged, so the
* MFC_SR1[Pr] bit is not set.
*
*/
spu_mfc_sr1_set(spu, (MFC_STATE1_MASTER_RUN_CONTROL_MASK |
MFC_STATE1_RELOCATE_MASK |
MFC_STATE1_BUS_TLBIE_MASK));
}
static inline void save_spu_npc(struct spu_state *csa, struct spu *spu)
{
struct spu_problem __iomem *prob = spu->problem;
/* Save, Step 31:
* Save SPU_NPC in the CSA.
*/
csa->prob.spu_npc_RW = in_be32(&prob->spu_npc_RW);
}
static inline void save_spu_privcntl(struct spu_state *csa, struct spu *spu)
{
struct spu_priv2 __iomem *priv2 = spu->priv2;
/* Save, Step 32:
* Save SPU_PrivCntl in the CSA.
*/
csa->priv2.spu_privcntl_RW = in_be64(&priv2->spu_privcntl_RW);
}
static inline void reset_spu_privcntl(struct spu_state *csa, struct spu *spu)
{
struct spu_priv2 __iomem *priv2 = spu->priv2;
/* Save, Step 33:
* Restore, Step 16:
* Write SPU_PrivCntl[S,Le,A] fields reset to 0.
*/
out_be64(&priv2->spu_privcntl_RW, 0UL);
eieio();
}
static inline void save_spu_lslr(struct spu_state *csa, struct spu *spu)
{
struct spu_priv2 __iomem *priv2 = spu->priv2;
/* Save, Step 34:
* Save SPU_LSLR in the CSA.
*/
csa->priv2.spu_lslr_RW = in_be64(&priv2->spu_lslr_RW);
}
static inline void reset_spu_lslr(struct spu_state *csa, struct spu *spu)
{
struct spu_priv2 __iomem *priv2 = spu->priv2;
/* Save, Step 35:
* Restore, Step 17.
* Reset SPU_LSLR.
*/
out_be64(&priv2->spu_lslr_RW, LS_ADDR_MASK);
eieio();
}
static inline void save_spu_cfg(struct spu_state *csa, struct spu *spu)
{
struct spu_priv2 __iomem *priv2 = spu->priv2;
/* Save, Step 36:
* Save SPU_Cfg in the CSA.
*/
csa->priv2.spu_cfg_RW = in_be64(&priv2->spu_cfg_RW);
}
static inline void save_pm_trace(struct spu_state *csa, struct spu *spu)
{
/* Save, Step 37:
* Save PM_Trace_Tag_Wait_Mask in the CSA.
* Not performed by this implementation.
*/
}
static inline void save_mfc_rag(struct spu_state *csa, struct spu *spu)
{
/* Save, Step 38:
* Save RA_GROUP_ID register and the
* RA_ENABLE register in the CSA.
*/
csa->priv1.resource_allocation_groupID_RW =
spu_resource_allocation_groupID_get(spu);
csa->priv1.resource_allocation_enable_RW =
spu_resource_allocation_enable_get(spu);
}
static inline void save_ppu_mb_stat(struct spu_state *csa, struct spu *spu)
{
struct spu_problem __iomem *prob = spu->problem;
/* Save, Step 39:
* Save MB_Stat register in the CSA.
*/
csa->prob.mb_stat_R = in_be32(&prob->mb_stat_R);
}
static inline void save_ppu_mb(struct spu_state *csa, struct spu *spu)
{
struct spu_problem __iomem *prob = spu->problem;
/* Save, Step 40:
* Save the PPU_MB register in the CSA.
*/
csa->prob.pu_mb_R = in_be32(&prob->pu_mb_R);
}
static inline void save_ppuint_mb(struct spu_state *csa, struct spu *spu)
{
struct spu_priv2 __iomem *priv2 = spu->priv2;
/* Save, Step 41:
* Save the PPUINT_MB register in the CSA.
*/
csa->priv2.puint_mb_R = in_be64(&priv2->puint_mb_R);
}
static inline void save_ch_part1(struct spu_state *csa, struct spu *spu)
{
struct spu_priv2 __iomem *priv2 = spu->priv2;
u64 idx, ch_indices[] = { 0UL, 3UL, 4UL, 24UL, 25UL, 27UL };
int i;
/* Save, Step 42:
*/
/* Save CH 1, without channel count */
out_be64(&priv2->spu_chnlcntptr_RW, 1);
csa->spu_chnldata_RW[1] = in_be64(&priv2->spu_chnldata_RW);
/* Save the following CH: [0,3,4,24,25,27] */
for (i = 0; i < ARRAY_SIZE(ch_indices); i++) {
idx = ch_indices[i];
out_be64(&priv2->spu_chnlcntptr_RW, idx);
eieio();
csa->spu_chnldata_RW[idx] = in_be64(&priv2->spu_chnldata_RW);
csa->spu_chnlcnt_RW[idx] = in_be64(&priv2->spu_chnlcnt_RW);
out_be64(&priv2->spu_chnldata_RW, 0UL);
out_be64(&priv2->spu_chnlcnt_RW, 0UL);
eieio();
}
}
static inline void save_spu_mb(struct spu_state *csa, struct spu *spu)
{
struct spu_priv2 __iomem *priv2 = spu->priv2;
int i;
/* Save, Step 43:
* Save SPU Read Mailbox Channel.
*/
out_be64(&priv2->spu_chnlcntptr_RW, 29UL);
eieio();
csa->spu_chnlcnt_RW[29] = in_be64(&priv2->spu_chnlcnt_RW);
for (i = 0; i < 4; i++) {
csa->spu_mailbox_data[i] = in_be64(&priv2->spu_chnldata_RW);
}
out_be64(&priv2->spu_chnlcnt_RW, 0UL);
eieio();
}
static inline void save_mfc_cmd(struct spu_state *csa, struct spu *spu)
{
struct spu_priv2 __iomem *priv2 = spu->priv2;
/* Save, Step 44:
* Save MFC_CMD Channel.
*/
out_be64(&priv2->spu_chnlcntptr_RW, 21UL);
eieio();
csa->spu_chnlcnt_RW[21] = in_be64(&priv2->spu_chnlcnt_RW);
eieio();
}
static inline void reset_ch(struct spu_state *csa, struct spu *spu)
{
struct spu_priv2 __iomem *priv2 = spu->priv2;
u64 ch_indices[4] = { 21UL, 23UL, 28UL, 30UL };
u64 ch_counts[4] = { 16UL, 1UL, 1UL, 1UL };
u64 idx;
int i;
/* Save, Step 45:
* Reset the following CH: [21, 23, 28, 30]
*/
for (i = 0; i < 4; i++) {
idx = ch_indices[i];
out_be64(&priv2->spu_chnlcntptr_RW, idx);
eieio();
out_be64(&priv2->spu_chnlcnt_RW, ch_counts[i]);
eieio();
}
}
static inline void resume_mfc_queue(struct spu_state *csa, struct spu *spu)
{
struct spu_priv2 __iomem *priv2 = spu->priv2;
/* Save, Step 46:
* Restore, Step 25.
* Write MFC_CNTL[Sc]=0 (resume queue processing).
*/
out_be64(&priv2->mfc_control_RW, MFC_CNTL_RESUME_DMA_QUEUE);
}
static inline void setup_mfc_slbs(struct spu_state *csa, struct spu *spu,
unsigned int *code, int code_size)
{
/* Save, Step 47:
* Restore, Step 30.
* If MFC_SR1[R]=1, write 0 to SLB_Invalidate_All
* register, then initialize SLB_VSID and SLB_ESID
* to provide access to SPU context save code and
* LSCSA.
*
* This implementation places both the context
* switch code and LSCSA in kernel address space.
*
* Further, this implementation assumes that
* MFC_SR1[R]=1 (in other words, that translation
* is desired by the OS environment).
*/
spu_invalidate_slbs(spu);
spu_setup_kernel_slbs(spu, csa->lscsa, code, code_size);
}
static inline void set_switch_active(struct spu_state *csa, struct spu *spu)
{
/* Save, Step 48:
* Restore, Step 23.
* Change the software context switch pending flag
* to context switch active. This implementation does
* not use a switch active flag.
*
* Now that we have saved the mfc in the csa, we can add in the
* restart command if an exception occurred.
*/
if (test_bit(SPU_CONTEXT_FAULT_PENDING, &spu->flags))
csa->priv2.mfc_control_RW |= MFC_CNTL_RESTART_DMA_COMMAND;
clear_bit(SPU_CONTEXT_SWITCH_PENDING, &spu->flags);
mb();
}
static inline void enable_interrupts(struct spu_state *csa, struct spu *spu)
{
unsigned long class1_mask = CLASS1_ENABLE_SEGMENT_FAULT_INTR |
CLASS1_ENABLE_STORAGE_FAULT_INTR;
/* Save, Step 49:
* Restore, Step 22:
* Reset and then enable interrupts, as
* needed by OS.
*
* This implementation enables only class1
* (translation) interrupts.
*/
spin_lock_irq(&spu->register_lock);
spu_int_stat_clear(spu, 0, CLASS0_INTR_MASK);
spu_int_stat_clear(spu, 1, CLASS1_INTR_MASK);
spu_int_stat_clear(spu, 2, CLASS2_INTR_MASK);
spu_int_mask_set(spu, 0, 0ul);
spu_int_mask_set(spu, 1, class1_mask);
spu_int_mask_set(spu, 2, 0ul);
spin_unlock_irq(&spu->register_lock);
}
static inline int send_mfc_dma(struct spu *spu, unsigned long ea,
unsigned int ls_offset, unsigned int size,
unsigned int tag, unsigned int rclass,
unsigned int cmd)
{
struct spu_problem __iomem *prob = spu->problem;
union mfc_tag_size_class_cmd command;
unsigned int transfer_size;
volatile unsigned int status = 0x0;
while (size > 0) {
transfer_size =
(size > MFC_MAX_DMA_SIZE) ? MFC_MAX_DMA_SIZE : size;
command.u.mfc_size = transfer_size;
command.u.mfc_tag = tag;
command.u.mfc_rclassid = rclass;
command.u.mfc_cmd = cmd;
do {
out_be32(&prob->mfc_lsa_W, ls_offset);
out_be64(&prob->mfc_ea_W, ea);
out_be64(&prob->mfc_union_W.all64, command.all64);
status =
in_be32(&prob->mfc_union_W.by32.mfc_class_cmd32);
if (unlikely(status & 0x2)) {
cpu_relax();
}
} while (status & 0x3);
size -= transfer_size;
ea += transfer_size;
ls_offset += transfer_size;
}
return 0;
}
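/*
* A reading of the enqueue loop above (hedged; the exact meaning of the
* status bits comes from the MFC proxy command issue sequence, not from
* this file): the transfer is split into chunks of at most
* MFC_MAX_DMA_SIZE bytes, and each chunk is re-issued for as long as the
* command status read back in the low bits (0x3) indicates that the
* enqueue did not succeed, e.g. because the command queue is full.
*/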
static inline void save_ls_16kb(struct spu_state *csa, struct spu *spu)
{
unsigned long addr = (unsigned long)&csa->lscsa->ls[0];
unsigned int ls_offset = 0x0;
unsigned int size = 16384;
unsigned int tag = 0;
unsigned int rclass = 0;
unsigned int cmd = MFC_PUT_CMD;
/* Save, Step 50:
* Issue a DMA command to copy the first 16K bytes
* of local storage to the CSA.
*/
send_mfc_dma(spu, addr, ls_offset, size, tag, rclass, cmd);
}
static inline void set_spu_npc(struct spu_state *csa, struct spu *spu)
{
struct spu_problem __iomem *prob = spu->problem;
/* Save, Step 51:
* Restore, Step 31.
* Write SPU_NPC[IE]=0 and SPU_NPC[LSA] to entry
* point address of context save code in local
* storage.
*
* This implementation uses SPU-side save/restore
* programs with entry points at LSA of 0.
*/
out_be32(&prob->spu_npc_RW, 0);
eieio();
}
static inline void set_signot1(struct spu_state *csa, struct spu *spu)
{
struct spu_problem __iomem *prob = spu->problem;
union {
u64 ull;
u32 ui[2];
} addr64;
/* Save, Step 52:
* Restore, Step 32:
* Write SPU_Sig_Notify_1 register with upper 32-bits
* of the CSA.LSCSA effective address.
*/
addr64.ull = (u64) csa->lscsa;
out_be32(&prob->signal_notify1, addr64.ui[0]);
eieio();
}
static inline void set_signot2(struct spu_state *csa, struct spu *spu)
{
struct spu_problem __iomem *prob = spu->problem;
union {
u64 ull;
u32 ui[2];
} addr64;
/* Save, Step 53:
* Restore, Step 33:
* Write SPU_Sig_Notify_2 register with lower 32-bits
* of the CSA.LSCSA effective address.
*/
addr64.ull = (u64) csa->lscsa;
out_be32(&prob->signal_notify2, addr64.ui[1]);
eieio();
}
static inline void send_save_code(struct spu_state *csa, struct spu *spu)
{
unsigned long addr = (unsigned long)&spu_save_code[0];
unsigned int ls_offset = 0x0;
unsigned int size = sizeof(spu_save_code);
unsigned int tag = 0;
unsigned int rclass = 0;
unsigned int cmd = MFC_GETFS_CMD;
/* Save, Step 54:
* Issue a DMA command to copy context save code
* to local storage and start SPU.
*/
send_mfc_dma(spu, addr, ls_offset, size, tag, rclass, cmd);
}
static inline void set_ppu_querymask(struct spu_state *csa, struct spu *spu)
{
struct spu_problem __iomem *prob = spu->problem;
/* Save, Step 55:
* Restore, Step 38.
* Write PPU_QueryMask=1 (enable Tag Group 0)
* and issue eieio instruction.
*/
out_be32(&prob->dma_querymask_RW, MFC_TAGID_TO_TAGMASK(0));
eieio();
}
static inline void wait_tag_complete(struct spu_state *csa, struct spu *spu)
{
struct spu_problem __iomem *prob = spu->problem;
u32 mask = MFC_TAGID_TO_TAGMASK(0);
unsigned long flags;
/* Save, Step 56:
* Restore, Step 39.
* Restore, Step 46.
* Poll PPU_TagStatus[gn] until 01 (Tag group 0 complete)
* or write PPU_QueryType[TS]=01 and wait for Tag Group
* Complete Interrupt. Write INT_Stat_Class0 or
* INT_Stat_Class2 with value of 'handled'.
*/
POLL_WHILE_FALSE(in_be32(&prob->dma_tagstatus_R) & mask);
local_irq_save(flags);
spu_int_stat_clear(spu, 0, CLASS0_INTR_MASK);
spu_int_stat_clear(spu, 2, CLASS2_INTR_MASK);
local_irq_restore(flags);
}
static inline void wait_spu_stopped(struct spu_state *csa, struct spu *spu)
{
struct spu_problem __iomem *prob = spu->problem;
unsigned long flags;
/* Save, Step 57:
* Restore, Step 40.
* Poll until SPU_Status[R]=0 or wait for SPU Class 0
* or SPU Class 2 interrupt. Write INT_Stat_class0
* or INT_Stat_class2 with value of handled.
*/
POLL_WHILE_TRUE(in_be32(&prob->spu_status_R) & SPU_STATUS_RUNNING);
local_irq_save(flags);
spu_int_stat_clear(spu, 0, CLASS0_INTR_MASK);
spu_int_stat_clear(spu, 2, CLASS2_INTR_MASK);
local_irq_restore(flags);
}
static inline int check_save_status(struct spu_state *csa, struct spu *spu)
{
struct spu_problem __iomem *prob = spu->problem;
u32 complete;
/* Save, Step 54:
* If SPU_Status[P]=1 and SPU_Status[SC] = "success",
* context save succeeded, otherwise context save
* failed.
*/
complete = ((SPU_SAVE_COMPLETE << SPU_STOP_STATUS_SHIFT) |
SPU_STATUS_STOPPED_BY_STOP);
return (in_be32(&prob->spu_status_R) != complete) ? 1 : 0;
}
static inline void terminate_spu_app(struct spu_state *csa, struct spu *spu)
{
/* Restore, Step 4:
* If required, notify the "using application" that
* the SPU task has been terminated. TBD.
*/
}
static inline void suspend_mfc_and_halt_decr(struct spu_state *csa,
struct spu *spu)
{
struct spu_priv2 __iomem *priv2 = spu->priv2;
/* Restore, Step 7:
* Write MFC_Cntl[Dh,Sc,Sm]='1','1','0' to suspend
* the queue and halt the decrementer.
*/
out_be64(&priv2->mfc_control_RW, MFC_CNTL_SUSPEND_DMA_QUEUE |
MFC_CNTL_DECREMENTER_HALTED);
eieio();
}
static inline void wait_suspend_mfc_complete(struct spu_state *csa,
struct spu *spu)
{
struct spu_priv2 __iomem *priv2 = spu->priv2;
/* Restore, Step 8:
* Restore, Step 47.
* Poll MFC_CNTL[Ss] until 11 is returned.
*/
POLL_WHILE_FALSE((in_be64(&priv2->mfc_control_RW) &
MFC_CNTL_SUSPEND_DMA_STATUS_MASK) ==
MFC_CNTL_SUSPEND_COMPLETE);
}
static inline int suspend_spe(struct spu_state *csa, struct spu *spu)
{
struct spu_problem __iomem *prob = spu->problem;
/* Restore, Step 9:
* If SPU_Status[R]=1, stop SPU execution
* and wait for stop to complete.
*
* Returns 1 if SPU_Status[R]=1 on entry.
* 0 otherwise
*/
if (in_be32(&prob->spu_status_R) & SPU_STATUS_RUNNING) {
if (in_be32(&prob->spu_status_R) &
SPU_STATUS_ISOLATED_EXIT_STATUS) {
POLL_WHILE_TRUE(in_be32(&prob->spu_status_R) &
SPU_STATUS_RUNNING);
}
if ((in_be32(&prob->spu_status_R) &
SPU_STATUS_ISOLATED_LOAD_STATUS)
|| (in_be32(&prob->spu_status_R) &
SPU_STATUS_ISOLATED_STATE)) {
out_be32(&prob->spu_runcntl_RW, SPU_RUNCNTL_STOP);
eieio();
POLL_WHILE_TRUE(in_be32(&prob->spu_status_R) &
SPU_STATUS_RUNNING);
out_be32(&prob->spu_runcntl_RW, 0x2);
eieio();
POLL_WHILE_TRUE(in_be32(&prob->spu_status_R) &
SPU_STATUS_RUNNING);
}
if (in_be32(&prob->spu_status_R) &
SPU_STATUS_WAITING_FOR_CHANNEL) {
out_be32(&prob->spu_runcntl_RW, SPU_RUNCNTL_STOP);
eieio();
POLL_WHILE_TRUE(in_be32(&prob->spu_status_R) &
SPU_STATUS_RUNNING);
}
return 1;
}
return 0;
}
static inline void clear_spu_status(struct spu_state *csa, struct spu *spu)
{
struct spu_problem __iomem *prob = spu->problem;
/* Restore, Step 10:
* If SPU_Status[R]=0 and SPU_Status[E,L,IS]=1,
* release SPU from isolate state.
*/
if (!(in_be32(&prob->spu_status_R) & SPU_STATUS_RUNNING)) {
if (in_be32(&prob->spu_status_R) &
SPU_STATUS_ISOLATED_EXIT_STATUS) {
spu_mfc_sr1_set(spu,
MFC_STATE1_MASTER_RUN_CONTROL_MASK);
eieio();
out_be32(&prob->spu_runcntl_RW, SPU_RUNCNTL_RUNNABLE);
eieio();
POLL_WHILE_TRUE(in_be32(&prob->spu_status_R) &
SPU_STATUS_RUNNING);
}
if ((in_be32(&prob->spu_status_R) &
SPU_STATUS_ISOLATED_LOAD_STATUS)
|| (in_be32(&prob->spu_status_R) &
SPU_STATUS_ISOLATED_STATE)) {
spu_mfc_sr1_set(spu,
MFC_STATE1_MASTER_RUN_CONTROL_MASK);
eieio();
out_be32(&prob->spu_runcntl_RW, 0x2);
eieio();
POLL_WHILE_TRUE(in_be32(&prob->spu_status_R) &
SPU_STATUS_RUNNING);
}
}
}
static inline void reset_ch_part1(struct spu_state *csa, struct spu *spu)
{
struct spu_priv2 __iomem *priv2 = spu->priv2;
u64 ch_indices[] = { 0UL, 3UL, 4UL, 24UL, 25UL, 27UL };
u64 idx;
int i;
/* Restore, Step 20:
*/
/* Reset CH 1 */
out_be64(&priv2->spu_chnlcntptr_RW, 1);
out_be64(&priv2->spu_chnldata_RW, 0UL);
/* Reset the following CH: [0,3,4,24,25,27] */
for (i = 0; i < ARRAY_SIZE(ch_indices); i++) {
idx = ch_indices[i];
out_be64(&priv2->spu_chnlcntptr_RW, idx);
eieio();
out_be64(&priv2->spu_chnldata_RW, 0UL);
out_be64(&priv2->spu_chnlcnt_RW, 0UL);
eieio();
}
}
static inline void reset_ch_part2(struct spu_state *csa, struct spu *spu)
{
struct spu_priv2 __iomem *priv2 = spu->priv2;
u64 ch_indices[5] = { 21UL, 23UL, 28UL, 29UL, 30UL };
u64 ch_counts[5] = { 16UL, 1UL, 1UL, 0UL, 1UL };
u64 idx;
int i;
/* Restore, Step 21:
* Reset the following CH: [21, 23, 28, 29, 30]
*/
for (i = 0; i < 5; i++) {
idx = ch_indices[i];
out_be64(&priv2->spu_chnlcntptr_RW, idx);
eieio();
out_be64(&priv2->spu_chnlcnt_RW, ch_counts[i]);
eieio();
}
}
static inline void setup_spu_status_part1(struct spu_state *csa,
struct spu *spu)
{
u32 status_P = SPU_STATUS_STOPPED_BY_STOP;
u32 status_I = SPU_STATUS_INVALID_INSTR;
u32 status_H = SPU_STATUS_STOPPED_BY_HALT;
u32 status_S = SPU_STATUS_SINGLE_STEP;
u32 status_S_I = SPU_STATUS_SINGLE_STEP | SPU_STATUS_INVALID_INSTR;
u32 status_S_P = SPU_STATUS_SINGLE_STEP | SPU_STATUS_STOPPED_BY_STOP;
u32 status_P_H = SPU_STATUS_STOPPED_BY_HALT | SPU_STATUS_STOPPED_BY_STOP;
u32 status_P_I = SPU_STATUS_STOPPED_BY_STOP | SPU_STATUS_INVALID_INSTR;
u32 status_code;
/* Restore, Step 27:
* If the CSA.SPU_Status[I,S,H,P]=1 then add the correct
* instruction sequence to the end of the SPU based restore
* code (after the "context restored" stop and signal) to
* restore the correct SPU status.
*
* NOTE: Rather than modifying the SPU executable, we
* instead add a new 'stopped_status' field to the
* LSCSA. The SPU-side restore reads this field and
* takes the appropriate action when exiting.
*/
status_code =
(csa->prob.spu_status_R >> SPU_STOP_STATUS_SHIFT) & 0xFFFF;
if ((csa->prob.spu_status_R & status_P_I) == status_P_I) {
/* SPU_Status[P,I]=1 - Illegal Instruction followed
* by Stop and Signal instruction, followed by 'br -4'.
*
*/
csa->lscsa->stopped_status.slot[0] = SPU_STOPPED_STATUS_P_I;
csa->lscsa->stopped_status.slot[1] = status_code;
} else if ((csa->prob.spu_status_R & status_P_H) == status_P_H) {
/* SPU_Status[P,H]=1 - Halt Conditional, followed
* by Stop and Signal instruction, followed by
* 'br -4'.
*/
csa->lscsa->stopped_status.slot[0] = SPU_STOPPED_STATUS_P_H;
csa->lscsa->stopped_status.slot[1] = status_code;
} else if ((csa->prob.spu_status_R & status_S_P) == status_S_P) {
/* SPU_Status[S,P]=1 - Stop and Signal instruction
* followed by 'br -4'.
*/
csa->lscsa->stopped_status.slot[0] = SPU_STOPPED_STATUS_S_P;
csa->lscsa->stopped_status.slot[1] = status_code;
} else if ((csa->prob.spu_status_R & status_S_I) == status_S_I) {
/* SPU_Status[S,I]=1 - Illegal instruction followed
* by 'br -4'.
*/
csa->lscsa->stopped_status.slot[0] = SPU_STOPPED_STATUS_S_I;
csa->lscsa->stopped_status.slot[1] = status_code;
} else if ((csa->prob.spu_status_R & status_P) == status_P) {
/* SPU_Status[P]=1 - Stop and Signal instruction
* followed by 'br -4'.
*/
csa->lscsa->stopped_status.slot[0] = SPU_STOPPED_STATUS_P;
csa->lscsa->stopped_status.slot[1] = status_code;
} else if ((csa->prob.spu_status_R & status_H) == status_H) {
/* SPU_Status[H]=1 - Halt Conditional, followed
* by 'br -4'.
*/
csa->lscsa->stopped_status.slot[0] = SPU_STOPPED_STATUS_H;
} else if ((csa->prob.spu_status_R & status_S) == status_S) {
/* SPU_Status[S]=1 - Two nop instructions.
*/
csa->lscsa->stopped_status.slot[0] = SPU_STOPPED_STATUS_S;
} else if ((csa->prob.spu_status_R & status_I) == status_I) {
/* SPU_Status[I]=1 - Illegal instruction followed
* by 'br -4'.
*/
csa->lscsa->stopped_status.slot[0] = SPU_STOPPED_STATUS_I;
}
}
static inline void setup_spu_status_part2(struct spu_state *csa,
struct spu *spu)
{
u32 mask;
/* Restore, Step 28:
* If the CSA.SPU_Status[I,S,H,P,R]=0 then
* add a 'br *' instruction to the end of
* the SPU based restore code.
*
* NOTE: Rather than modifying the SPU executable, we
* instead add a new 'stopped_status' field to the
* LSCSA. The SPU-side restore reads this field and
* takes the appropriate action when exiting.
*/
mask = SPU_STATUS_INVALID_INSTR |
SPU_STATUS_SINGLE_STEP |
SPU_STATUS_STOPPED_BY_HALT |
SPU_STATUS_STOPPED_BY_STOP | SPU_STATUS_RUNNING;
if (!(csa->prob.spu_status_R & mask)) {
csa->lscsa->stopped_status.slot[0] = SPU_STOPPED_STATUS_R;
}
}
static inline void restore_mfc_rag(struct spu_state *csa, struct spu *spu)
{
/* Restore, Step 29:
* Restore RA_GROUP_ID register and the
* RA_ENABLE register from the CSA.
*/
spu_resource_allocation_groupID_set(spu,
csa->priv1.resource_allocation_groupID_RW);
spu_resource_allocation_enable_set(spu,
csa->priv1.resource_allocation_enable_RW);
}
static inline void send_restore_code(struct spu_state *csa, struct spu *spu)
{
unsigned long addr = (unsigned long)&spu_restore_code[0];
unsigned int ls_offset = 0x0;
unsigned int size = sizeof(spu_restore_code);
unsigned int tag = 0;
unsigned int rclass = 0;
unsigned int cmd = MFC_GETFS_CMD;
/* Restore, Step 37:
* Issue MFC DMA command to copy context
* restore code to local storage.
*/
send_mfc_dma(spu, addr, ls_offset, size, tag, rclass, cmd);
}
static inline void setup_decr(struct spu_state *csa, struct spu *spu)
{
/* Restore, Step 34:
* If CSA.MFC_CNTL[Ds]=1 (decrementer was
* running) then adjust decrementer, set
* decrementer running status in LSCSA,
* and set decrementer "wrapped" status
* in LSCSA.
*/
if (csa->priv2.mfc_control_RW & MFC_CNTL_DECREMENTER_RUNNING) {
cycles_t resume_time = get_cycles();
cycles_t delta_time = resume_time - csa->suspend_time;
csa->lscsa->decr_status.slot[0] = SPU_DECR_STATUS_RUNNING;
if (csa->lscsa->decr.slot[0] < delta_time) {
csa->lscsa->decr_status.slot[0] |=
SPU_DECR_STATUS_WRAPPED;
}
csa->lscsa->decr.slot[0] -= delta_time;
} else {
csa->lscsa->decr_status.slot[0] = 0;
}
}
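/*
* The adjustment above implicitly assumes that the PPE timebase read by
* get_cycles() and the SPU decrementer advance at the same rate, so the
* timebase ticks elapsed since csa->suspend_time can be subtracted from
* the saved decrementer value directly; if the subtraction would
* underflow, the "wrapped" status is recorded for restore_decr_wrapped()
* to act on during the CSA restore.
*/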
static inline void setup_ppu_mb(struct spu_state *csa, struct spu *spu)
{
/* Restore, Step 35:
* Copy the CSA.PU_MB data into the LSCSA.
*/
csa->lscsa->ppu_mb.slot[0] = csa->prob.pu_mb_R;
}
static inline void setup_ppuint_mb(struct spu_state *csa, struct spu *spu)
{
/* Restore, Step 36:
* Copy the CSA.PUINT_MB data into the LSCSA.
*/
csa->lscsa->ppuint_mb.slot[0] = csa->priv2.puint_mb_R;
}
static inline int check_restore_status(struct spu_state *csa, struct spu *spu)
{
struct spu_problem __iomem *prob = spu->problem;
u32 complete;
/* Restore, Step 40:
* If SPU_Status[P]=1 and SPU_Status[SC] = "success",
* context restore succeeded, otherwise context restore
* failed.
*/
complete = ((SPU_RESTORE_COMPLETE << SPU_STOP_STATUS_SHIFT) |
SPU_STATUS_STOPPED_BY_STOP);
return (in_be32(&prob->spu_status_R) != complete) ? 1 : 0;
}
static inline void restore_spu_privcntl(struct spu_state *csa, struct spu *spu)
{
struct spu_priv2 __iomem *priv2 = spu->priv2;
/* Restore, Step 41:
* Restore SPU_PrivCntl from the CSA.
*/
out_be64(&priv2->spu_privcntl_RW, csa->priv2.spu_privcntl_RW);
eieio();
}
static inline void restore_status_part1(struct spu_state *csa, struct spu *spu)
{
struct spu_problem __iomem *prob = spu->problem;
u32 mask;
/* Restore, Step 42:
* If any CSA.SPU_Status[I,S,H,P]=1, then
* restore the error or single step state.
*/
mask = SPU_STATUS_INVALID_INSTR |
SPU_STATUS_SINGLE_STEP |
SPU_STATUS_STOPPED_BY_HALT | SPU_STATUS_STOPPED_BY_STOP;
if (csa->prob.spu_status_R & mask) {
out_be32(&prob->spu_runcntl_RW, SPU_RUNCNTL_RUNNABLE);
eieio();
POLL_WHILE_TRUE(in_be32(&prob->spu_status_R) &
SPU_STATUS_RUNNING);
}
}
static inline void restore_status_part2(struct spu_state *csa, struct spu *spu)
{
struct spu_problem __iomem *prob = spu->problem;
u32 mask;
/* Restore, Step 43:
* If all CSA.SPU_Status[I,S,H,P,R]=0 then write
* SPU_RunCntl[R0R1]='01', wait for SPU_Status[R]=1,
* then write '00' to SPU_RunCntl[R0R1] and wait
* for SPU_Status[R]=0.
*/
mask = SPU_STATUS_INVALID_INSTR |
SPU_STATUS_SINGLE_STEP |
SPU_STATUS_STOPPED_BY_HALT |
SPU_STATUS_STOPPED_BY_STOP | SPU_STATUS_RUNNING;
if (!(csa->prob.spu_status_R & mask)) {
out_be32(&prob->spu_runcntl_RW, SPU_RUNCNTL_RUNNABLE);
eieio();
POLL_WHILE_FALSE(in_be32(&prob->spu_status_R) &
SPU_STATUS_RUNNING);
out_be32(&prob->spu_runcntl_RW, SPU_RUNCNTL_STOP);
eieio();
POLL_WHILE_TRUE(in_be32(&prob->spu_status_R) &
SPU_STATUS_RUNNING);
}
}
static inline void restore_ls_16kb(struct spu_state *csa, struct spu *spu)
{
unsigned long addr = (unsigned long)&csa->lscsa->ls[0];
unsigned int ls_offset = 0x0;
unsigned int size = 16384;
unsigned int tag = 0;
unsigned int rclass = 0;
unsigned int cmd = MFC_GET_CMD;
/* Restore, Step 44:
* Issue a DMA command to restore the first
* 16kb of local storage from CSA.
*/
send_mfc_dma(spu, addr, ls_offset, size, tag, rclass, cmd);
}
static inline void suspend_mfc(struct spu_state *csa, struct spu *spu)
{
struct spu_priv2 __iomem *priv2 = spu->priv2;
/* Restore, Step 47.
* Write MFC_Cntl[Sc,Sm]='1','0' to suspend
* the queue.
*/
out_be64(&priv2->mfc_control_RW, MFC_CNTL_SUSPEND_DMA_QUEUE);
eieio();
}
static inline void clear_interrupts(struct spu_state *csa, struct spu *spu)
{
/* Restore, Step 49:
* Write INT_MASK_class0 with value of 0.
* Write INT_MASK_class1 with value of 0.
* Write INT_MASK_class2 with value of 0.
* Write INT_STAT_class0 with value of -1.
* Write INT_STAT_class1 with value of -1.
* Write INT_STAT_class2 with value of -1.
*/
spin_lock_irq(&spu->register_lock);
spu_int_mask_set(spu, 0, 0ul);
spu_int_mask_set(spu, 1, 0ul);
spu_int_mask_set(spu, 2, 0ul);
spu_int_stat_clear(spu, 0, CLASS0_INTR_MASK);
spu_int_stat_clear(spu, 1, CLASS1_INTR_MASK);
spu_int_stat_clear(spu, 2, CLASS2_INTR_MASK);
spin_unlock_irq(&spu->register_lock);
}
static inline void restore_mfc_queues(struct spu_state *csa, struct spu *spu)
{
struct spu_priv2 __iomem *priv2 = spu->priv2;
int i;
/* Restore, Step 50:
* If MFC_Cntl[Se]!=0 then restore
* MFC command queues.
*/
if ((csa->priv2.mfc_control_RW & MFC_CNTL_DMA_QUEUES_EMPTY_MASK) == 0) {
for (i = 0; i < 8; i++) {
out_be64(&priv2->puq[i].mfc_cq_data0_RW,
csa->priv2.puq[i].mfc_cq_data0_RW);
out_be64(&priv2->puq[i].mfc_cq_data1_RW,
csa->priv2.puq[i].mfc_cq_data1_RW);
out_be64(&priv2->puq[i].mfc_cq_data2_RW,
csa->priv2.puq[i].mfc_cq_data2_RW);
out_be64(&priv2->puq[i].mfc_cq_data3_RW,
csa->priv2.puq[i].mfc_cq_data3_RW);
}
for (i = 0; i < 16; i++) {
out_be64(&priv2->spuq[i].mfc_cq_data0_RW,
csa->priv2.spuq[i].mfc_cq_data0_RW);
out_be64(&priv2->spuq[i].mfc_cq_data1_RW,
csa->priv2.spuq[i].mfc_cq_data1_RW);
out_be64(&priv2->spuq[i].mfc_cq_data2_RW,
csa->priv2.spuq[i].mfc_cq_data2_RW);
out_be64(&priv2->spuq[i].mfc_cq_data3_RW,
csa->priv2.spuq[i].mfc_cq_data3_RW);
}
}
eieio();
}
static inline void restore_ppu_querymask(struct spu_state *csa, struct spu *spu)
{
struct spu_problem __iomem *prob = spu->problem;
/* Restore, Step 51:
* Restore the PPU_QueryMask register from CSA.
*/
out_be32(&prob->dma_querymask_RW, csa->prob.dma_querymask_RW);
eieio();
}
static inline void restore_ppu_querytype(struct spu_state *csa, struct spu *spu)
{
struct spu_problem __iomem *prob = spu->problem;
/* Restore, Step 52:
* Restore the PPU_QueryType register from CSA.
*/
out_be32(&prob->dma_querytype_RW, csa->prob.dma_querytype_RW);
eieio();
}
static inline void restore_mfc_csr_tsq(struct spu_state *csa, struct spu *spu)
{
struct spu_priv2 __iomem *priv2 = spu->priv2;
/* Restore, Step 53:
* Restore the MFC_CSR_TSQ register from CSA.
*/
out_be64(&priv2->spu_tag_status_query_RW,
csa->priv2.spu_tag_status_query_RW);
eieio();
}
static inline void restore_mfc_csr_cmd(struct spu_state *csa, struct spu *spu)
{
struct spu_priv2 __iomem *priv2 = spu->priv2;
/* Restore, Step 54:
* Restore the MFC_CSR_CMD1 and MFC_CSR_CMD2
* registers from CSA.
*/
out_be64(&priv2->spu_cmd_buf1_RW, csa->priv2.spu_cmd_buf1_RW);
out_be64(&priv2->spu_cmd_buf2_RW, csa->priv2.spu_cmd_buf2_RW);
eieio();
}
static inline void restore_mfc_csr_ato(struct spu_state *csa, struct spu *spu)
{
struct spu_priv2 __iomem *priv2 = spu->priv2;
/* Restore, Step 55:
* Restore the MFC_CSR_ATO register from CSA.
*/
out_be64(&priv2->spu_atomic_status_RW, csa->priv2.spu_atomic_status_RW);
}
static inline void restore_mfc_tclass_id(struct spu_state *csa, struct spu *spu)
{
/* Restore, Step 56:
* Restore the MFC_TCLASS_ID register from CSA.
*/
spu_mfc_tclass_id_set(spu, csa->priv1.mfc_tclass_id_RW);
eieio();
}
static inline void set_llr_event(struct spu_state *csa, struct spu *spu)
{
u64 ch0_cnt, ch0_data;
u64 ch1_data;
/* Restore, Step 57:
* Set the Lock Line Reservation Lost Event by:
* 1. OR CSA.SPU_Event_Status with bit 21 (Lr) set to 1.
* 2. If CSA.SPU_Channel_0_Count=0 and
* CSA.SPU_Wr_Event_Mask[Lr]=1 and
* CSA.SPU_Event_Status[Lr]=0 then set
* CSA.SPU_Event_Status_Count=1.
*/
ch0_cnt = csa->spu_chnlcnt_RW[0];
ch0_data = csa->spu_chnldata_RW[0];
ch1_data = csa->spu_chnldata_RW[1];
csa->spu_chnldata_RW[0] |= MFC_LLR_LOST_EVENT;
if ((ch0_cnt == 0) && !(ch0_data & MFC_LLR_LOST_EVENT) &&
(ch1_data & MFC_LLR_LOST_EVENT)) {
csa->spu_chnlcnt_RW[0] = 1;
}
}
static inline void restore_decr_wrapped(struct spu_state *csa, struct spu *spu)
{
/* Restore, Step 58:
* If the status of the CSA software decrementer
* "wrapped" flag is set, OR in a '1' to
* CSA.SPU_Event_Status[Tm].
*/
if (!(csa->lscsa->decr_status.slot[0] & SPU_DECR_STATUS_WRAPPED))
return;
if ((csa->spu_chnlcnt_RW[0] == 0) &&
(csa->spu_chnldata_RW[1] & 0x20) &&
!(csa->spu_chnldata_RW[0] & 0x20))
csa->spu_chnlcnt_RW[0] = 1;
csa->spu_chnldata_RW[0] |= 0x20;
}
static inline void restore_ch_part1(struct spu_state *csa, struct spu *spu)
{
struct spu_priv2 __iomem *priv2 = spu->priv2;
u64 idx, ch_indices[] = { 0UL, 3UL, 4UL, 24UL, 25UL, 27UL };
int i;
/* Restore, Step 59:
* Restore the following CH: [0,3,4,24,25,27]
*/
for (i = 0; i < ARRAY_SIZE(ch_indices); i++) {
idx = ch_indices[i];
out_be64(&priv2->spu_chnlcntptr_RW, idx);
eieio();
out_be64(&priv2->spu_chnldata_RW, csa->spu_chnldata_RW[idx]);
out_be64(&priv2->spu_chnlcnt_RW, csa->spu_chnlcnt_RW[idx]);
eieio();
}
}
static inline void restore_ch_part2(struct spu_state *csa, struct spu *spu)
{
struct spu_priv2 __iomem *priv2 = spu->priv2;
u64 ch_indices[3] = { 9UL, 21UL, 23UL };
u64 ch_counts[3] = { 1UL, 16UL, 1UL };
u64 idx;
int i;
/* Restore, Step 60:
* Restore the following CH: [9,21,23].
*/
ch_counts[0] = 1UL;
ch_counts[1] = csa->spu_chnlcnt_RW[21];
ch_counts[2] = 1UL;
for (i = 0; i < 3; i++) {
idx = ch_indices[i];
out_be64(&priv2->spu_chnlcntptr_RW, idx);
eieio();
out_be64(&priv2->spu_chnlcnt_RW, ch_counts[i]);
eieio();
}
}
static inline void restore_spu_lslr(struct spu_state *csa, struct spu *spu)
{
struct spu_priv2 __iomem *priv2 = spu->priv2;
/* Restore, Step 61:
* Restore the SPU_LSLR register from CSA.
*/
out_be64(&priv2->spu_lslr_RW, csa->priv2.spu_lslr_RW);
eieio();
}
static inline void restore_spu_cfg(struct spu_state *csa, struct spu *spu)
{
struct spu_priv2 __iomem *priv2 = spu->priv2;
/* Restore, Step 62:
* Restore the SPU_Cfg register from CSA.
*/
out_be64(&priv2->spu_cfg_RW, csa->priv2.spu_cfg_RW);
eieio();
}
static inline void restore_pm_trace(struct spu_state *csa, struct spu *spu)
{
/* Restore, Step 63:
* Restore PM_Trace_Tag_Wait_Mask from CSA.
* Not performed by this implementation.
*/
}
static inline void restore_spu_npc(struct spu_state *csa, struct spu *spu)
{
struct spu_problem __iomem *prob = spu->problem;
/* Restore, Step 64:
* Restore SPU_NPC from CSA.
*/
out_be32(&prob->spu_npc_RW, csa->prob.spu_npc_RW);
eieio();
}
static inline void restore_spu_mb(struct spu_state *csa, struct spu *spu)
{
struct spu_priv2 __iomem *priv2 = spu->priv2;
int i;
/* Restore, Step 65:
* Restore MFC_RdSPU_MB from CSA.
*/
out_be64(&priv2->spu_chnlcntptr_RW, 29UL);
eieio();
out_be64(&priv2->spu_chnlcnt_RW, csa->spu_chnlcnt_RW[29]);
for (i = 0; i < 4; i++) {
out_be64(&priv2->spu_chnldata_RW, csa->spu_mailbox_data[i]);
}
eieio();
}
static inline void check_ppu_mb_stat(struct spu_state *csa, struct spu *spu)
{
struct spu_problem __iomem *prob = spu->problem;
u32 dummy = 0;
/* Restore, Step 66:
* If CSA.MB_Stat[P]=0 (mailbox empty) then
* read from the PPU_MB register.
*/
if ((csa->prob.mb_stat_R & 0xFF) == 0) {
dummy = in_be32(&prob->pu_mb_R);
eieio();
}
}
static inline void check_ppuint_mb_stat(struct spu_state *csa, struct spu *spu)
{
struct spu_priv2 __iomem *priv2 = spu->priv2;
u64 dummy = 0UL;
/* Restore, Step 67:
* If CSA.MB_Stat[I]=0 (mailbox empty) then
* read from the PPUINT_MB register.
*/
if ((csa->prob.mb_stat_R & 0xFF0000) == 0) {
dummy = in_be64(&priv2->puint_mb_R);
eieio();
spu_int_stat_clear(spu, 2, CLASS2_ENABLE_MAILBOX_INTR);
eieio();
}
}
static inline void restore_mfc_sr1(struct spu_state *csa, struct spu *spu)
{
/* Restore, Step 69:
* Restore the MFC_SR1 register from CSA.
*/
spu_mfc_sr1_set(spu, csa->priv1.mfc_sr1_RW);
eieio();
}
static inline void set_int_route(struct spu_state *csa, struct spu *spu)
{
struct spu_context *ctx = spu->ctx;
spu_cpu_affinity_set(spu, ctx->last_ran);
}
static inline void restore_other_spu_access(struct spu_state *csa,
struct spu *spu)
{
/* Restore, Step 70:
* Restore other SPU mappings to this SPU. TBD.
*/
}
static inline void restore_spu_runcntl(struct spu_state *csa, struct spu *spu)
{
struct spu_problem __iomem *prob = spu->problem;
/* Restore, Step 71:
* If CSA.SPU_Status[R]=1 then write
* SPU_RunCntl[R0R1]='01'.
*/
if (csa->prob.spu_status_R & SPU_STATUS_RUNNING) {
out_be32(&prob->spu_runcntl_RW, SPU_RUNCNTL_RUNNABLE);
eieio();
}
}
static inline void restore_mfc_cntl(struct spu_state *csa, struct spu *spu)
{
struct spu_priv2 __iomem *priv2 = spu->priv2;
/* Restore, Step 72:
* Restore the MFC_CNTL register for the CSA.
*/
out_be64(&priv2->mfc_control_RW, csa->priv2.mfc_control_RW);
eieio();
/*
* The queue is put back into the same state that was evident prior to
* the context switch. The suspend flag is added to the saved state in
* the csa, if the operational state was suspending or suspended. In
* this case, the code that suspended the mfc is responsible for
* continuing it. Note that SPE faults do not change the operational
* state of the spu.
*/
}
static inline void enable_user_access(struct spu_state *csa, struct spu *spu)
{
/* Restore, Step 73:
* Enable user-space access (if provided) to this
* SPU by mapping the virtual pages assigned to
* the SPU memory-mapped I/O (MMIO) for problem
* state. TBD.
*/
}
static inline void reset_switch_active(struct spu_state *csa, struct spu *spu)
{
/* Restore, Step 74:
* Reset the "context switch active" flag.
* Not performed by this implementation.
*/
}
static inline void reenable_interrupts(struct spu_state *csa, struct spu *spu)
{
/* Restore, Step 75:
* Re-enable SPU interrupts.
*/
spin_lock_irq(&spu->register_lock);
spu_int_mask_set(spu, 0, csa->priv1.int_mask_class0_RW);
spu_int_mask_set(spu, 1, csa->priv1.int_mask_class1_RW);
spu_int_mask_set(spu, 2, csa->priv1.int_mask_class2_RW);
spin_unlock_irq(&spu->register_lock);
}
static int quiece_spu(struct spu_state *prev, struct spu *spu)
{
/*
* Combined steps 2-18 of SPU context save sequence, which
* quiesce the SPU state (disable SPU execution, MFC command
* queues, decrementer, SPU interrupts, etc.).
*
* Returns 0 on success.
* 2 if failed step 2.
* 6 if failed step 6.
*/
if (check_spu_isolate(prev, spu)) { /* Step 2. */
return 2;
}
disable_interrupts(prev, spu); /* Step 3. */
set_watchdog_timer(prev, spu); /* Step 4. */
inhibit_user_access(prev, spu); /* Step 5. */
if (check_spu_isolate(prev, spu)) { /* Step 6. */
return 6;
}
set_switch_pending(prev, spu); /* Step 7. */
save_mfc_cntl(prev, spu); /* Step 8. */
save_spu_runcntl(prev, spu); /* Step 9. */
save_mfc_sr1(prev, spu); /* Step 10. */
save_spu_status(prev, spu); /* Step 11. */
save_mfc_stopped_status(prev, spu); /* Step 12. */
halt_mfc_decr(prev, spu); /* Step 13. */
save_timebase(prev, spu); /* Step 14. */
remove_other_spu_access(prev, spu); /* Step 15. */
do_mfc_mssync(prev, spu); /* Step 16. */
issue_mfc_tlbie(prev, spu); /* Step 17. */
handle_pending_interrupts(prev, spu); /* Step 18. */
return 0;
}
static void save_csa(struct spu_state *prev, struct spu *spu)
{
/*
* Combine steps 19-44 of SPU context save sequence, which
* save regions of the privileged & problem state areas.
*/
save_mfc_queues(prev, spu); /* Step 19. */
save_ppu_querymask(prev, spu); /* Step 20. */
save_ppu_querytype(prev, spu); /* Step 21. */
save_ppu_tagstatus(prev, spu); /* NEW. */
save_mfc_csr_tsq(prev, spu); /* Step 22. */
save_mfc_csr_cmd(prev, spu); /* Step 23. */
save_mfc_csr_ato(prev, spu); /* Step 24. */
save_mfc_tclass_id(prev, spu); /* Step 25. */
set_mfc_tclass_id(prev, spu); /* Step 26. */
save_mfc_cmd(prev, spu); /* Step 26a - moved from 44. */
purge_mfc_queue(prev, spu); /* Step 27. */
wait_purge_complete(prev, spu); /* Step 28. */
setup_mfc_sr1(prev, spu); /* Step 30. */
save_spu_npc(prev, spu); /* Step 31. */
save_spu_privcntl(prev, spu); /* Step 32. */
reset_spu_privcntl(prev, spu); /* Step 33. */
save_spu_lslr(prev, spu); /* Step 34. */
reset_spu_lslr(prev, spu); /* Step 35. */
save_spu_cfg(prev, spu); /* Step 36. */
save_pm_trace(prev, spu); /* Step 37. */
save_mfc_rag(prev, spu); /* Step 38. */
save_ppu_mb_stat(prev, spu); /* Step 39. */
save_ppu_mb(prev, spu); /* Step 40. */
save_ppuint_mb(prev, spu); /* Step 41. */
save_ch_part1(prev, spu); /* Step 42. */
save_spu_mb(prev, spu); /* Step 43. */
reset_ch(prev, spu); /* Step 45. */
}
static void save_lscsa(struct spu_state *prev, struct spu *spu)
{
/*
* Perform steps 46-57 of SPU context save sequence,
* which save regions of the local store and register
* file.
*/
resume_mfc_queue(prev, spu); /* Step 46. */
/* Step 47. */
setup_mfc_slbs(prev, spu, spu_save_code, sizeof(spu_save_code));
set_switch_active(prev, spu); /* Step 48. */
enable_interrupts(prev, spu); /* Step 49. */
save_ls_16kb(prev, spu); /* Step 50. */
set_spu_npc(prev, spu); /* Step 51. */
set_signot1(prev, spu); /* Step 52. */
set_signot2(prev, spu); /* Step 53. */
send_save_code(prev, spu); /* Step 54. */
set_ppu_querymask(prev, spu); /* Step 55. */
wait_tag_complete(prev, spu); /* Step 56. */
wait_spu_stopped(prev, spu); /* Step 57. */
}
static void force_spu_isolate_exit(struct spu *spu)
{
struct spu_problem __iomem *prob = spu->problem;
struct spu_priv2 __iomem *priv2 = spu->priv2;
/* Stop SPE execution and wait for completion. */
out_be32(&prob->spu_runcntl_RW, SPU_RUNCNTL_STOP);
iobarrier_rw();
POLL_WHILE_TRUE(in_be32(&prob->spu_status_R) & SPU_STATUS_RUNNING);
/* Restart SPE master runcntl. */
spu_mfc_sr1_set(spu, MFC_STATE1_MASTER_RUN_CONTROL_MASK);
iobarrier_w();
/* Initiate isolate exit request and wait for completion. */
out_be64(&priv2->spu_privcntl_RW, 4LL);
iobarrier_w();
out_be32(&prob->spu_runcntl_RW, 2);
iobarrier_rw();
POLL_WHILE_FALSE((in_be32(&prob->spu_status_R)
& SPU_STATUS_STOPPED_BY_STOP));
/* Reset load request to normal. */
out_be64(&priv2->spu_privcntl_RW, SPU_PRIVCNT_LOAD_REQUEST_NORMAL);
iobarrier_w();
}
/**
* stop_spu_isolate
* Check SPU run-control state and force isolated
* exit function as necessary.
*/
static void stop_spu_isolate(struct spu *spu)
{
struct spu_problem __iomem *prob = spu->problem;
if (in_be32(&prob->spu_status_R) & SPU_STATUS_ISOLATED_STATE) {
/* The SPU is in isolated state; the only way
* to get it out is to perform an isolated
* exit (clean) operation.
*/
force_spu_isolate_exit(spu);
}
}
static void harvest(struct spu_state *prev, struct spu *spu)
{
/*
* Perform steps 2-25 of SPU context restore sequence,
* which resets an SPU either after a failed save, or
* when using SPU for first time.
*/
disable_interrupts(prev, spu); /* Step 2. */
inhibit_user_access(prev, spu); /* Step 3. */
terminate_spu_app(prev, spu); /* Step 4. */
set_switch_pending(prev, spu); /* Step 5. */
stop_spu_isolate(spu); /* NEW. */
remove_other_spu_access(prev, spu); /* Step 6. */
suspend_mfc_and_halt_decr(prev, spu); /* Step 7. */
wait_suspend_mfc_complete(prev, spu); /* Step 8. */
if (!suspend_spe(prev, spu)) /* Step 9. */
clear_spu_status(prev, spu); /* Step 10. */
do_mfc_mssync(prev, spu); /* Step 11. */
issue_mfc_tlbie(prev, spu); /* Step 12. */
handle_pending_interrupts(prev, spu); /* Step 13. */
purge_mfc_queue(prev, spu); /* Step 14. */
wait_purge_complete(prev, spu); /* Step 15. */
reset_spu_privcntl(prev, spu); /* Step 16. */
reset_spu_lslr(prev, spu); /* Step 17. */
setup_mfc_sr1(prev, spu); /* Step 18. */
spu_invalidate_slbs(spu); /* Step 19. */
reset_ch_part1(prev, spu); /* Step 20. */
reset_ch_part2(prev, spu); /* Step 21. */
enable_interrupts(prev, spu); /* Step 22. */
set_switch_active(prev, spu); /* Step 23. */
set_mfc_tclass_id(prev, spu); /* Step 24. */
resume_mfc_queue(prev, spu); /* Step 25. */
}
static void restore_lscsa(struct spu_state *next, struct spu *spu)
{
/*
* Perform steps 26-40 of SPU context restore sequence,
* which restores regions of the local store and register
* file.
*/
set_watchdog_timer(next, spu); /* Step 26. */
setup_spu_status_part1(next, spu); /* Step 27. */
setup_spu_status_part2(next, spu); /* Step 28. */
restore_mfc_rag(next, spu); /* Step 29. */
/* Step 30. */
setup_mfc_slbs(next, spu, spu_restore_code, sizeof(spu_restore_code));
set_spu_npc(next, spu); /* Step 31. */
set_signot1(next, spu); /* Step 32. */
set_signot2(next, spu); /* Step 33. */
setup_decr(next, spu); /* Step 34. */
setup_ppu_mb(next, spu); /* Step 35. */
setup_ppuint_mb(next, spu); /* Step 36. */
send_restore_code(next, spu); /* Step 37. */
set_ppu_querymask(next, spu); /* Step 38. */
wait_tag_complete(next, spu); /* Step 39. */
wait_spu_stopped(next, spu); /* Step 40. */
}
static void restore_csa(struct spu_state *next, struct spu *spu)
{
/*
* Combine steps 41-76 of SPU context restore sequence, which
* restore regions of the privileged & problem state areas.
*/
restore_spu_privcntl(next, spu); /* Step 41. */
restore_status_part1(next, spu); /* Step 42. */
restore_status_part2(next, spu); /* Step 43. */
restore_ls_16kb(next, spu); /* Step 44. */
wait_tag_complete(next, spu); /* Step 45. */
suspend_mfc(next, spu); /* Step 46. */
wait_suspend_mfc_complete(next, spu); /* Step 47. */
issue_mfc_tlbie(next, spu); /* Step 48. */
clear_interrupts(next, spu); /* Step 49. */
restore_mfc_queues(next, spu); /* Step 50. */
restore_ppu_querymask(next, spu); /* Step 51. */
restore_ppu_querytype(next, spu); /* Step 52. */
restore_mfc_csr_tsq(next, spu); /* Step 53. */
restore_mfc_csr_cmd(next, spu); /* Step 54. */
restore_mfc_csr_ato(next, spu); /* Step 55. */
restore_mfc_tclass_id(next, spu); /* Step 56. */
set_llr_event(next, spu); /* Step 57. */
restore_decr_wrapped(next, spu); /* Step 58. */
restore_ch_part1(next, spu); /* Step 59. */
restore_ch_part2(next, spu); /* Step 60. */
restore_spu_lslr(next, spu); /* Step 61. */
restore_spu_cfg(next, spu); /* Step 62. */
restore_pm_trace(next, spu); /* Step 63. */
restore_spu_npc(next, spu); /* Step 64. */
restore_spu_mb(next, spu); /* Step 65. */
check_ppu_mb_stat(next, spu); /* Step 66. */
check_ppuint_mb_stat(next, spu); /* Step 67. */
spu_invalidate_slbs(spu); /* Modified Step 68. */
restore_mfc_sr1(next, spu); /* Step 69. */
set_int_route(next, spu); /* NEW */
restore_other_spu_access(next, spu); /* Step 70. */
restore_spu_runcntl(next, spu); /* Step 71. */
restore_mfc_cntl(next, spu); /* Step 72. */
enable_user_access(next, spu); /* Step 73. */
reset_switch_active(next, spu); /* Step 74. */
reenable_interrupts(next, spu); /* Step 75. */
}
static int __do_spu_save(struct spu_state *prev, struct spu *spu)
{
int rc;
/*
* SPU context save can be broken into three phases:
*
* (a) quiesce [steps 2-16].
* (b) save of CSA, performed by PPE [steps 17-42]
* (c) save of LSCSA, mostly performed by SPU [steps 43-52].
*
* Returns 0 on success.
* 2,6 if failed to quiesce SPU
* 53 if SPU-side of save failed.
*/
rc = quiece_spu(prev, spu); /* Steps 2-16. */
switch (rc) {
default:
case 2:
case 6:
harvest(prev, spu);
return rc;
break;
case 0:
break;
}
save_csa(prev, spu); /* Steps 17-43. */
save_lscsa(prev, spu); /* Steps 44-53. */
return check_save_status(prev, spu); /* Step 54. */
}
static int __do_spu_restore(struct spu_state *next, struct spu *spu)
{
int rc;
/*
* SPU context restore can be broken into three phases:
*
* (a) harvest (or reset) SPU [steps 2-24].
* (b) restore LSCSA [steps 25-40], mostly performed by SPU.
* (c) restore CSA [steps 41-76], performed by PPE.
*
* The 'harvest' step is not performed here, but rather
* as needed below.
*/
restore_lscsa(next, spu); /* Steps 24-39. */
rc = check_restore_status(next, spu); /* Step 40. */
switch (rc) {
default:
/* Failed. Return now. */
return rc;
break;
case 0:
/* Fall through to next step. */
break;
}
restore_csa(next, spu);
return 0;
}
/**
* spu_save - SPU context save, with locking.
* @prev: pointer to SPU context save area, to be saved.
* @spu: pointer to SPU iomem structure.
*
* Acquire locks, perform the save operation then return.
*/
int spu_save(struct spu_state *prev, struct spu *spu)
{
int rc;
acquire_spu_lock(spu); /* Step 1. */
rc = __do_spu_save(prev, spu); /* Steps 2-53. */
release_spu_lock(spu);
if (rc != 0 && rc != 2 && rc != 6) {
panic("%s failed on SPU[%d], rc=%d.\n",
__func__, spu->number, rc);
}
return 0;
}
EXPORT_SYMBOL_GPL(spu_save);
/**
* spu_restore - SPU context restore, with harvest and locking.
* @new: pointer to SPU context save area, to be restored.
* @spu: pointer to SPU iomem structure.
*
* Perform harvest + restore, as we may not be coming
* from a previous successful save operation, and the
* hardware state is unknown.
*/
int spu_restore(struct spu_state *new, struct spu *spu)
{
int rc;
acquire_spu_lock(spu);
harvest(NULL, spu);
spu->slb_replace = 0;
rc = __do_spu_restore(new, spu);
release_spu_lock(spu);
if (rc) {
panic("%s failed on SPU[%d] rc=%d.\n",
__func__, spu->number, rc);
}
return rc;
}
EXPORT_SYMBOL_GPL(spu_restore);
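/*
 * Illustrative sketch, not part of the original file: the two exported
 * entry points above are meant to be paired by a scheduler roughly as
 * below. The wrapper function and its arguments are hypothetical; real
 * callers live in the spufs scheduler.
 */
static void example_context_switch(struct spu_state *prev_csa,
struct spu_state *next_csa, struct spu *spu)
{
spu_save(prev_csa, spu); /* checkpoint the outgoing context */
spu_restore(next_csa, spu); /* harvest, then load the incoming one */
}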
static void init_prob(struct spu_state *csa)
{
csa->spu_chnlcnt_RW[9] = 1;
csa->spu_chnlcnt_RW[21] = 16;
csa->spu_chnlcnt_RW[23] = 1;
csa->spu_chnlcnt_RW[28] = 1;
csa->spu_chnlcnt_RW[30] = 1;
csa->prob.spu_runcntl_RW = SPU_RUNCNTL_STOP;
csa->prob.mb_stat_R = 0x000400;
}
static void init_priv1(struct spu_state *csa)
{
/* Enable decode, relocate, tlbie response, master runcntl. */
csa->priv1.mfc_sr1_RW = MFC_STATE1_LOCAL_STORAGE_DECODE_MASK |
MFC_STATE1_MASTER_RUN_CONTROL_MASK |
MFC_STATE1_PROBLEM_STATE_MASK |
MFC_STATE1_RELOCATE_MASK | MFC_STATE1_BUS_TLBIE_MASK;
/* Enable OS-specific set of interrupts. */
csa->priv1.int_mask_class0_RW = CLASS0_ENABLE_DMA_ALIGNMENT_INTR |
CLASS0_ENABLE_INVALID_DMA_COMMAND_INTR |
CLASS0_ENABLE_SPU_ERROR_INTR;
csa->priv1.int_mask_class1_RW = CLASS1_ENABLE_SEGMENT_FAULT_INTR |
CLASS1_ENABLE_STORAGE_FAULT_INTR;
csa->priv1.int_mask_class2_RW = CLASS2_ENABLE_SPU_STOP_INTR |
CLASS2_ENABLE_SPU_HALT_INTR |
CLASS2_ENABLE_SPU_DMA_TAG_GROUP_COMPLETE_INTR;
}
static void init_priv2(struct spu_state *csa)
{
csa->priv2.spu_lslr_RW = LS_ADDR_MASK;
csa->priv2.mfc_control_RW = MFC_CNTL_RESUME_DMA_QUEUE |
MFC_CNTL_NORMAL_DMA_QUEUE_OPERATION |
MFC_CNTL_DMA_QUEUES_EMPTY_MASK;
}
/**
* spu_init_csa - allocate and initialize an SPU context save area.
*
* Allocate and initialize the contents of an SPU context save area.
* This includes enabling address translation, interrupt masks, etc.,
* as appropriate for the given OS environment.
*
* Note that storage for the 'lscsa' is allocated separately,
* as it is by far the largest of the context save regions,
* and may need to be pinned or otherwise specially aligned.
*/
int spu_init_csa(struct spu_state *csa)
{
int rc;
if (!csa)
return -EINVAL;
memset(csa, 0, sizeof(struct spu_state));
rc = spu_alloc_lscsa(csa);
if (rc)
return rc;
spin_lock_init(&csa->register_lock);
init_prob(csa);
init_priv1(csa);
init_priv2(csa);
return 0;
}
void spu_fini_csa(struct spu_state *csa)
{
spu_free_lscsa(csa);
}
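/*
 * Illustrative sketch, not part of the original file: the expected life
 * cycle of a context save area. The wrapper function is hypothetical;
 * real users embed the csa in their per-context structure.
 */
static int example_csa_lifecycle(struct spu_state *csa)
{
int rc;
rc = spu_init_csa(csa); /* zero the csa, allocate the LSCSA, set defaults */
if (rc)
return rc;
/* ... hand the csa to spu_save()/spu_restore() as needed ... */
spu_fini_csa(csa); /* free the separately allocated LSCSA */
return 0;
}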
| gpl-2.0 |
szezso/android_kernel_samsung_expressltexx | tools/perf/builtin-top.c | 4781 | 34913 | /*
* builtin-top.c
*
* Builtin top command: Display a continuously updated profile of
* any workload, CPU or specific PID.
*
* Copyright (C) 2008, Red Hat Inc, Ingo Molnar <mingo@redhat.com>
* 2011, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
*
* Improvements and fixes by:
*
* Arjan van de Ven <arjan@linux.intel.com>
* Yanmin Zhang <yanmin.zhang@intel.com>
* Wu Fengguang <fengguang.wu@intel.com>
* Mike Galbraith <efault@gmx.de>
* Paul Mackerras <paulus@samba.org>
*
* Released under the GPL v2. (and only v2, not any later version)
*/
#include "builtin.h"
#include "perf.h"
#include "util/annotate.h"
#include "util/cache.h"
#include "util/color.h"
#include "util/evlist.h"
#include "util/evsel.h"
#include "util/session.h"
#include "util/symbol.h"
#include "util/thread.h"
#include "util/thread_map.h"
#include "util/top.h"
#include "util/util.h"
#include <linux/rbtree.h>
#include "util/parse-options.h"
#include "util/parse-events.h"
#include "util/cpumap.h"
#include "util/xyarray.h"
#include "util/sort.h"
#include "util/debug.h"
#include <assert.h>
#include <elf.h>
#include <fcntl.h>
#include <stdio.h>
#include <termios.h>
#include <unistd.h>
#include <inttypes.h>
#include <errno.h>
#include <time.h>
#include <sched.h>
#include <sys/syscall.h>
#include <sys/ioctl.h>
#include <sys/poll.h>
#include <sys/prctl.h>
#include <sys/wait.h>
#include <sys/uio.h>
#include <sys/utsname.h>
#include <sys/mman.h>
#include <linux/unistd.h>
#include <linux/types.h>
void get_term_dimensions(struct winsize *ws)
{
char *s = getenv("LINES");
if (s != NULL) {
ws->ws_row = atoi(s);
s = getenv("COLUMNS");
if (s != NULL) {
ws->ws_col = atoi(s);
if (ws->ws_row && ws->ws_col)
return;
}
}
#ifdef TIOCGWINSZ
if (ioctl(1, TIOCGWINSZ, ws) == 0 &&
ws->ws_row && ws->ws_col)
return;
#endif
ws->ws_row = 25;
ws->ws_col = 80;
}
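/*
 * Illustrative note, not part of the original file: the LINES/COLUMNS
 * environment variables win over TIOCGWINSZ, so e.g.
 * "LINES=50 COLUMNS=120 perf top" forces a 50x120 layout, and 25x80 is
 * the last-resort fallback when neither source yields a usable size.
 */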
static void perf_top__update_print_entries(struct perf_top *top)
{
if (top->print_entries > 9)
top->print_entries -= 9;
}
static void perf_top__sig_winch(int sig __used, siginfo_t *info __used, void *arg)
{
struct perf_top *top = arg;
get_term_dimensions(&top->winsize);
if (!top->print_entries
|| (top->print_entries+4) > top->winsize.ws_row) {
top->print_entries = top->winsize.ws_row;
} else {
top->print_entries += 4;
top->winsize.ws_row = top->print_entries;
}
perf_top__update_print_entries(top);
}
static int perf_top__parse_source(struct perf_top *top, struct hist_entry *he)
{
struct symbol *sym;
struct annotation *notes;
struct map *map;
int err = -1;
if (!he || !he->ms.sym)
return -1;
sym = he->ms.sym;
map = he->ms.map;
/*
* We can't annotate with just /proc/kallsyms
*/
if (map->dso->symtab_type == SYMTAB__KALLSYMS) {
pr_err("Can't annotate %s: No vmlinux file was found in the "
"path\n", sym->name);
sleep(1);
return -1;
}
notes = symbol__annotation(sym);
if (notes->src != NULL) {
pthread_mutex_lock(¬es->lock);
goto out_assign;
}
pthread_mutex_lock(¬es->lock);
if (symbol__alloc_hist(sym) < 0) {
pthread_mutex_unlock(¬es->lock);
pr_err("Not enough memory for annotating '%s' symbol!\n",
sym->name);
sleep(1);
return err;
}
err = symbol__annotate(sym, map, 0);
if (err == 0) {
out_assign:
top->sym_filter_entry = he;
}
pthread_mutex_unlock(¬es->lock);
return err;
}
static void __zero_source_counters(struct hist_entry *he)
{
struct symbol *sym = he->ms.sym;
symbol__annotate_zero_histograms(sym);
}
static void ui__warn_map_erange(struct map *map, struct symbol *sym, u64 ip)
{
struct utsname uts;
int err = uname(&uts);
ui__warning("Out of bounds address found:\n\n"
"Addr: %" PRIx64 "\n"
"DSO: %s %c\n"
"Map: %" PRIx64 "-%" PRIx64 "\n"
"Symbol: %" PRIx64 "-%" PRIx64 " %c %s\n"
"Arch: %s\n"
"Kernel: %s\n"
"Tools: %s\n\n"
"Not all samples will be on the annotation output.\n\n"
"Please report to linux-kernel@vger.kernel.org\n",
ip, map->dso->long_name, dso__symtab_origin(map->dso),
map->start, map->end, sym->start, sym->end,
sym->binding == STB_GLOBAL ? 'g' :
sym->binding == STB_LOCAL ? 'l' : 'w', sym->name,
err ? "[unknown]" : uts.machine,
err ? "[unknown]" : uts.release, perf_version_string);
if (use_browser <= 0)
sleep(5);
map->erange_warned = true;
}
static void perf_top__record_precise_ip(struct perf_top *top,
struct hist_entry *he,
int counter, u64 ip)
{
struct annotation *notes;
struct symbol *sym;
int err;
if (he == NULL || he->ms.sym == NULL ||
((top->sym_filter_entry == NULL ||
top->sym_filter_entry->ms.sym != he->ms.sym) && use_browser != 1))
return;
sym = he->ms.sym;
notes = symbol__annotation(sym);
if (pthread_mutex_trylock(¬es->lock))
return;
if (notes->src == NULL && symbol__alloc_hist(sym) < 0) {
pthread_mutex_unlock(¬es->lock);
pr_err("Not enough memory for annotating '%s' symbol!\n",
sym->name);
sleep(1);
return;
}
ip = he->ms.map->map_ip(he->ms.map, ip);
err = symbol__inc_addr_samples(sym, he->ms.map, counter, ip);
pthread_mutex_unlock(¬es->lock);
if (err == -ERANGE && !he->ms.map->erange_warned)
ui__warn_map_erange(he->ms.map, sym, ip);
}
static void perf_top__show_details(struct perf_top *top)
{
struct hist_entry *he = top->sym_filter_entry;
struct annotation *notes;
struct symbol *symbol;
int more;
if (!he)
return;
symbol = he->ms.sym;
notes = symbol__annotation(symbol);
pthread_mutex_lock(¬es->lock);
if (notes->src == NULL)
goto out_unlock;
printf("Showing %s for %s\n", event_name(top->sym_evsel), symbol->name);
printf(" Events Pcnt (>=%d%%)\n", top->sym_pcnt_filter);
more = symbol__annotate_printf(symbol, he->ms.map, top->sym_evsel->idx,
0, top->sym_pcnt_filter, top->print_entries, 4);
if (top->zero)
symbol__annotate_zero_histogram(symbol, top->sym_evsel->idx);
else
symbol__annotate_decay_histogram(symbol, top->sym_evsel->idx);
if (more != 0)
printf("%d lines not displayed, maybe increase display entries [e]\n", more);
out_unlock:
pthread_mutex_unlock(¬es->lock);
}
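/* ANSI escape sequence: move the cursor home, then clear the screen. */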
static const char CONSOLE_CLEAR[] = "\033[H\033[2J";
static struct hist_entry *perf_evsel__add_hist_entry(struct perf_evsel *evsel,
struct addr_location *al,
struct perf_sample *sample)
{
struct hist_entry *he;
he = __hists__add_entry(&evsel->hists, al, NULL, sample->period);
if (he == NULL)
return NULL;
hists__inc_nr_events(&evsel->hists, PERF_RECORD_SAMPLE);
return he;
}
static void perf_top__print_sym_table(struct perf_top *top)
{
char bf[160];
int printed = 0;
const int win_width = top->winsize.ws_col - 1;
puts(CONSOLE_CLEAR);
perf_top__header_snprintf(top, bf, sizeof(bf));
printf("%s\n", bf);
perf_top__reset_sample_counters(top);
printf("%-*.*s\n", win_width, win_width, graph_dotted_line);
if (top->sym_evsel->hists.stats.nr_lost_warned !=
top->sym_evsel->hists.stats.nr_events[PERF_RECORD_LOST]) {
top->sym_evsel->hists.stats.nr_lost_warned =
top->sym_evsel->hists.stats.nr_events[PERF_RECORD_LOST];
color_fprintf(stdout, PERF_COLOR_RED,
"WARNING: LOST %d chunks, Check IO/CPU overload",
top->sym_evsel->hists.stats.nr_lost_warned);
++printed;
}
if (top->sym_filter_entry) {
perf_top__show_details(top);
return;
}
hists__collapse_resort_threaded(&top->sym_evsel->hists);
hists__output_resort_threaded(&top->sym_evsel->hists);
hists__decay_entries_threaded(&top->sym_evsel->hists,
top->hide_user_symbols,
top->hide_kernel_symbols);
hists__output_recalc_col_len(&top->sym_evsel->hists,
top->winsize.ws_row - 3);
putchar('\n');
hists__fprintf(&top->sym_evsel->hists, NULL, false, false,
top->winsize.ws_row - 4 - printed, win_width, stdout);
}
static void prompt_integer(int *target, const char *msg)
{
char *buf = malloc(0), *p;
size_t dummy = 0;
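/* getline() reallocates buf as needed; dummy just tracks the size it hands back. */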
int tmp;
fprintf(stdout, "\n%s: ", msg);
if (getline(&buf, &dummy, stdin) < 0)
return;
p = strchr(buf, '\n');
if (p)
*p = 0;
p = buf;
while(*p) {
if (!isdigit(*p))
goto out_free;
p++;
}
tmp = strtoul(buf, NULL, 10);
*target = tmp;
out_free:
free(buf);
}
static void prompt_percent(int *target, const char *msg)
{
int tmp = 0;
prompt_integer(&tmp, msg);
if (tmp >= 0 && tmp <= 100)
*target = tmp;
}
static void perf_top__prompt_symbol(struct perf_top *top, const char *msg)
{
char *buf = malloc(0), *p;
struct hist_entry *syme = top->sym_filter_entry, *n, *found = NULL;
struct rb_node *next;
size_t dummy = 0;
/* zero counters of active symbol */
if (syme) {
__zero_source_counters(syme);
top->sym_filter_entry = NULL;
}
fprintf(stdout, "\n%s: ", msg);
if (getline(&buf, &dummy, stdin) < 0)
goto out_free;
p = strchr(buf, '\n');
if (p)
*p = 0;
next = rb_first(&top->sym_evsel->hists.entries);
while (next) {
n = rb_entry(next, struct hist_entry, rb_node);
if (n->ms.sym && !strcmp(buf, n->ms.sym->name)) {
found = n;
break;
}
next = rb_next(&n->rb_node);
}
if (!found) {
fprintf(stderr, "Sorry, %s is not active.\n", buf);
sleep(1);
} else
perf_top__parse_source(top, found);
out_free:
free(buf);
}
static void perf_top__print_mapped_keys(struct perf_top *top)
{
char *name = NULL;
if (top->sym_filter_entry) {
struct symbol *sym = top->sym_filter_entry->ms.sym;
name = sym->name;
}
fprintf(stdout, "\nMapped keys:\n");
fprintf(stdout, "\t[d] display refresh delay. \t(%d)\n", top->delay_secs);
fprintf(stdout, "\t[e] display entries (lines). \t(%d)\n", top->print_entries);
if (top->evlist->nr_entries > 1)
fprintf(stdout, "\t[E] active event counter. \t(%s)\n", event_name(top->sym_evsel));
fprintf(stdout, "\t[f] profile display filter (count). \t(%d)\n", top->count_filter);
fprintf(stdout, "\t[F] annotate display filter (percent). \t(%d%%)\n", top->sym_pcnt_filter);
fprintf(stdout, "\t[s] annotate symbol. \t(%s)\n", name?: "NULL");
fprintf(stdout, "\t[S] stop annotation.\n");
fprintf(stdout,
"\t[K] hide kernel_symbols symbols. \t(%s)\n",
top->hide_kernel_symbols ? "yes" : "no");
fprintf(stdout,
"\t[U] hide user symbols. \t(%s)\n",
top->hide_user_symbols ? "yes" : "no");
fprintf(stdout, "\t[z] toggle sample zeroing. \t(%d)\n", top->zero ? 1 : 0);
fprintf(stdout, "\t[qQ] quit.\n");
}
static int perf_top__key_mapped(struct perf_top *top, int c)
{
switch (c) {
case 'd':
case 'e':
case 'f':
case 'z':
case 'q':
case 'Q':
case 'K':
case 'U':
case 'F':
case 's':
case 'S':
return 1;
case 'E':
return top->evlist->nr_entries > 1 ? 1 : 0;
default:
break;
}
return 0;
}
static void perf_top__handle_keypress(struct perf_top *top, int c)
{
if (!perf_top__key_mapped(top, c)) {
struct pollfd stdin_poll = { .fd = 0, .events = POLLIN };
struct termios tc, save;
perf_top__print_mapped_keys(top);
fprintf(stdout, "\nEnter selection, or unmapped key to continue: ");
fflush(stdout);
tcgetattr(0, &save);
tc = save;
tc.c_lflag &= ~(ICANON | ECHO);
tc.c_cc[VMIN] = 0;
tc.c_cc[VTIME] = 0;
tcsetattr(0, TCSANOW, &tc);
poll(&stdin_poll, 1, -1);
c = getc(stdin);
tcsetattr(0, TCSAFLUSH, &save);
if (!perf_top__key_mapped(top, c))
return;
}
switch (c) {
case 'd':
prompt_integer(&top->delay_secs, "Enter display delay");
if (top->delay_secs < 1)
top->delay_secs = 1;
break;
case 'e':
prompt_integer(&top->print_entries, "Enter display entries (lines)");
if (top->print_entries == 0) {
struct sigaction act = {
.sa_sigaction = perf_top__sig_winch,
.sa_flags = SA_SIGINFO,
};
perf_top__sig_winch(SIGWINCH, NULL, top);
sigaction(SIGWINCH, &act, NULL);
} else {
perf_top__sig_winch(SIGWINCH, NULL, top);
signal(SIGWINCH, SIG_DFL);
}
break;
case 'E':
if (top->evlist->nr_entries > 1) {
/* Select 0 as the default event: */
int counter = 0;
fprintf(stderr, "\nAvailable events:");
list_for_each_entry(top->sym_evsel, &top->evlist->entries, node)
fprintf(stderr, "\n\t%d %s", top->sym_evsel->idx, event_name(top->sym_evsel));
prompt_integer(&counter, "Enter details event counter");
if (counter >= top->evlist->nr_entries) {
top->sym_evsel = list_entry(top->evlist->entries.next, struct perf_evsel, node);
fprintf(stderr, "Sorry, no such event, using %s.\n", event_name(top->sym_evsel));
sleep(1);
break;
}
list_for_each_entry(top->sym_evsel, &top->evlist->entries, node)
if (top->sym_evsel->idx == counter)
break;
} else
top->sym_evsel = list_entry(top->evlist->entries.next, struct perf_evsel, node);
break;
case 'f':
prompt_integer(&top->count_filter, "Enter display event count filter");
break;
case 'F':
prompt_percent(&top->sym_pcnt_filter,
"Enter details display event filter (percent)");
break;
case 'K':
top->hide_kernel_symbols = !top->hide_kernel_symbols;
break;
case 'q':
case 'Q':
printf("exiting.\n");
if (top->dump_symtab)
perf_session__fprintf_dsos(top->session, stderr);
exit(0);
case 's':
perf_top__prompt_symbol(top, "Enter details symbol");
break;
case 'S':
if (!top->sym_filter_entry)
break;
else {
struct hist_entry *syme = top->sym_filter_entry;
top->sym_filter_entry = NULL;
__zero_source_counters(syme);
}
break;
case 'U':
top->hide_user_symbols = !top->hide_user_symbols;
break;
case 'z':
top->zero = !top->zero;
break;
default:
break;
}
}
static void perf_top__sort_new_samples(void *arg)
{
struct perf_top *t = arg;
perf_top__reset_sample_counters(t);
if (t->evlist->selected != NULL)
t->sym_evsel = t->evlist->selected;
hists__collapse_resort_threaded(&t->sym_evsel->hists);
hists__output_resort_threaded(&t->sym_evsel->hists);
hists__decay_entries_threaded(&t->sym_evsel->hists,
t->hide_user_symbols,
t->hide_kernel_symbols);
}
static void *display_thread_tui(void *arg)
{
struct perf_evsel *pos;
struct perf_top *top = arg;
const char *help = "For a higher level overview, try: perf top --sort comm,dso";
perf_top__sort_new_samples(top);
/*
* Initialize the uid_filter_str; in the future the TUI will allow
* zooming in/out of UIDs. For now just use whatever the user passed
* via --uid.
*/
list_for_each_entry(pos, &top->evlist->entries, node)
pos->hists.uid_filter_str = top->uid_str;
perf_evlist__tui_browse_hists(top->evlist, help,
perf_top__sort_new_samples,
top, top->delay_secs);
exit_browser(0);
exit(0);
return NULL;
}
static void *display_thread(void *arg)
{
struct pollfd stdin_poll = { .fd = 0, .events = POLLIN };
struct termios tc, save;
struct perf_top *top = arg;
int delay_msecs, c;
tcgetattr(0, &save);
tc = save;
tc.c_lflag &= ~(ICANON | ECHO);
tc.c_cc[VMIN] = 0;
tc.c_cc[VTIME] = 0;
pthread__unblock_sigwinch();
repeat:
delay_msecs = top->delay_secs * 1000;
tcsetattr(0, TCSANOW, &tc);
/* discard the pending return key */
getc(stdin);
while (1) {
perf_top__print_sym_table(top);
/*
* Either timeout expired or we got an EINTR due to SIGWINCH,
* refresh screen in both cases.
*/
switch (poll(&stdin_poll, 1, delay_msecs)) {
case 0:
continue;
case -1:
if (errno == EINTR)
continue;
/* Fall thru */
default:
goto process_hotkey;
}
}
process_hotkey:
c = getc(stdin);
tcsetattr(0, TCSAFLUSH, &save);
perf_top__handle_keypress(top, c);
goto repeat;
return NULL;
}
/* Tag samples to be skipped. */
static const char *skip_symbols[] = {
"intel_idle",
"default_idle",
"native_safe_halt",
"cpu_idle",
"enter_idle",
"exit_idle",
"mwait_idle",
"mwait_idle_with_hints",
"poll_idle",
"ppc64_runlatch_off",
"pseries_dedicated_idle_sleep",
NULL
};
static int symbol_filter(struct map *map __used, struct symbol *sym)
{
const char *name = sym->name;
int i;
/*
* ppc64 uses function descriptors and prepends a '.' to the
* name of every text symbol. Remove it.
*/
if (name[0] == '.')
name++;
if (!strcmp(name, "_text") ||
!strcmp(name, "_etext") ||
!strcmp(name, "_sinittext") ||
!strncmp("init_module", name, 11) ||
!strncmp("cleanup_module", name, 14) ||
strstr(name, "_text_start") ||
strstr(name, "_text_end"))
return 1;
for (i = 0; skip_symbols[i]; i++) {
if (!strcmp(skip_symbols[i], name)) {
sym->ignore = true;
break;
}
}
return 0;
}
static void perf_event__process_sample(struct perf_tool *tool,
const union perf_event *event,
struct perf_evsel *evsel,
struct perf_sample *sample,
struct machine *machine)
{
struct perf_top *top = container_of(tool, struct perf_top, tool);
struct symbol *parent = NULL;
u64 ip = event->ip.ip;
struct addr_location al;
int err;
if (!machine && perf_guest) {
pr_err("Can't find guest [%d]'s kernel information\n",
event->ip.pid);
return;
}
if (!machine) {
pr_err("%u unprocessable samples recorded.",
top->session->hists.stats.nr_unprocessable_samples++);
return;
}
if (event->header.misc & PERF_RECORD_MISC_EXACT_IP)
top->exact_samples++;
if (perf_event__preprocess_sample(event, machine, &al, sample,
symbol_filter) < 0 ||
al.filtered)
return;
if (!top->kptr_restrict_warned &&
symbol_conf.kptr_restrict &&
al.cpumode == PERF_RECORD_MISC_KERNEL) {
ui__warning(
"Kernel address maps (/proc/{kallsyms,modules}) are restricted.\n\n"
"Check /proc/sys/kernel/kptr_restrict.\n\n"
"Kernel%s samples will not be resolved.\n",
!RB_EMPTY_ROOT(&al.map->dso->symbols[MAP__FUNCTION]) ?
" modules" : "");
if (use_browser <= 0)
sleep(5);
top->kptr_restrict_warned = true;
}
if (al.sym == NULL) {
const char *msg = "Kernel samples will not be resolved.\n";
/*
* As we do lazy loading of symtabs we only will know if the
* specified vmlinux file is invalid when we actually have a
* hit in kernel space and then try to load it. So if we get
* here and there are _no_ symbols in the DSO backing the
* kernel map, bail out.
*
* We may never get here, for instance, if we use -K/
* --hide-kernel-symbols, even if the user specifies an
* invalid --vmlinux ;-)
*/
if (!top->kptr_restrict_warned && !top->vmlinux_warned &&
al.map == machine->vmlinux_maps[MAP__FUNCTION] &&
RB_EMPTY_ROOT(&al.map->dso->symbols[MAP__FUNCTION])) {
if (symbol_conf.vmlinux_name) {
ui__warning("The %s file can't be used.\n%s",
symbol_conf.vmlinux_name, msg);
} else {
ui__warning("A vmlinux file was not found.\n%s",
msg);
}
if (use_browser <= 0)
sleep(5);
top->vmlinux_warned = true;
}
}
if (al.sym == NULL || !al.sym->ignore) {
struct hist_entry *he;
if ((sort__has_parent || symbol_conf.use_callchain) &&
sample->callchain) {
err = machine__resolve_callchain(machine, evsel, al.thread,
sample->callchain, &parent);
if (err)
return;
}
he = perf_evsel__add_hist_entry(evsel, &al, sample);
if (he == NULL) {
pr_err("Problem incrementing symbol period, skipping event\n");
return;
}
if (symbol_conf.use_callchain) {
err = callchain_append(he->callchain, &evsel->hists.callchain_cursor,
sample->period);
if (err)
return;
}
if (top->sort_has_symbols)
perf_top__record_precise_ip(top, he, evsel->idx, ip);
}
return;
}
static void perf_top__mmap_read_idx(struct perf_top *top, int idx)
{
struct perf_sample sample;
struct perf_evsel *evsel;
struct perf_session *session = top->session;
union perf_event *event;
struct machine *machine;
u8 origin;
int ret;
while ((event = perf_evlist__mmap_read(top->evlist, idx)) != NULL) {
ret = perf_session__parse_sample(session, event, &sample);
if (ret) {
pr_err("Can't parse sample, err = %d\n", ret);
continue;
}
evsel = perf_evlist__id2evsel(session->evlist, sample.id);
assert(evsel != NULL);
origin = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
if (event->header.type == PERF_RECORD_SAMPLE)
++top->samples;
switch (origin) {
case PERF_RECORD_MISC_USER:
++top->us_samples;
if (top->hide_user_symbols)
continue;
machine = perf_session__find_host_machine(session);
break;
case PERF_RECORD_MISC_KERNEL:
++top->kernel_samples;
if (top->hide_kernel_symbols)
continue;
machine = perf_session__find_host_machine(session);
break;
case PERF_RECORD_MISC_GUEST_KERNEL:
++top->guest_kernel_samples;
machine = perf_session__find_machine(session, event->ip.pid);
break;
case PERF_RECORD_MISC_GUEST_USER:
++top->guest_us_samples;
/*
* TODO: we don't process guest user from host side
* except simple counting.
*/
/* Fall thru */
default:
continue;
}
if (event->header.type == PERF_RECORD_SAMPLE) {
perf_event__process_sample(&top->tool, event, evsel,
&sample, machine);
} else if (event->header.type < PERF_RECORD_MAX) {
hists__inc_nr_events(&evsel->hists, event->header.type);
perf_event__process(&top->tool, event, &sample, machine);
} else
++session->hists.stats.nr_unknown_events;
}
}
static void perf_top__mmap_read(struct perf_top *top)
{
int i;
for (i = 0; i < top->evlist->nr_mmaps; i++)
perf_top__mmap_read_idx(top, i);
}
static void perf_top__start_counters(struct perf_top *top)
{
struct perf_evsel *counter, *first;
struct perf_evlist *evlist = top->evlist;
first = list_entry(evlist->entries.next, struct perf_evsel, node);
list_for_each_entry(counter, &evlist->entries, node) {
struct perf_event_attr *attr = &counter->attr;
struct xyarray *group_fd = NULL;
if (top->group && counter != first)
group_fd = first->fd;
attr->sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_TID;
if (top->freq) {
attr->sample_type |= PERF_SAMPLE_PERIOD;
attr->freq = 1;
attr->sample_freq = top->freq;
}
if (evlist->nr_entries > 1) {
attr->sample_type |= PERF_SAMPLE_ID;
attr->read_format |= PERF_FORMAT_ID;
}
if (symbol_conf.use_callchain)
attr->sample_type |= PERF_SAMPLE_CALLCHAIN;
attr->mmap = 1;
attr->comm = 1;
attr->inherit = top->inherit;
fallback_missing_features:
if (top->exclude_guest_missing)
attr->exclude_guest = attr->exclude_host = 0;
retry_sample_id:
attr->sample_id_all = top->sample_id_all_missing ? 0 : 1;
try_again:
if (perf_evsel__open(counter, top->evlist->cpus,
top->evlist->threads, top->group,
group_fd) < 0) {
int err = errno;
if (err == EPERM || err == EACCES) {
ui__error_paranoid();
goto out_err;
} else if (err == EINVAL) {
if (!top->exclude_guest_missing &&
(attr->exclude_guest || attr->exclude_host)) {
pr_debug("Old kernel, cannot exclude "
"guest or host samples.\n");
top->exclude_guest_missing = true;
goto fallback_missing_features;
} else if (!top->sample_id_all_missing) {
/*
* Old kernel, no attr->sample_id_type_all field
*/
top->sample_id_all_missing = true;
goto retry_sample_id;
}
}
/*
* If it's cycles then fall back to hrtimer
* based cpu-clock-tick sw counter, which
* is always available even if no PMU support:
*/
if (attr->type == PERF_TYPE_HARDWARE &&
attr->config == PERF_COUNT_HW_CPU_CYCLES) {
if (verbose)
ui__warning("Cycles event not supported,\n"
"trying to fall back to cpu-clock-ticks\n");
attr->type = PERF_TYPE_SOFTWARE;
attr->config = PERF_COUNT_SW_CPU_CLOCK;
goto try_again;
}
if (err == ENOENT) {
ui__warning("The %s event is not supported.\n",
event_name(counter));
goto out_err;
} else if (err == EMFILE) {
ui__warning("Too many events are opened.\n"
"Try again after reducing the number of events\n");
goto out_err;
}
ui__warning("The sys_perf_event_open() syscall "
"returned with %d (%s). /bin/dmesg "
"may provide additional information.\n"
"No CONFIG_PERF_EVENTS=y kernel support "
"configured?\n", err, strerror(err));
goto out_err;
}
}
if (perf_evlist__mmap(evlist, top->mmap_pages, false) < 0) {
ui__warning("Failed to mmap with %d (%s)\n",
errno, strerror(errno));
goto out_err;
}
return;
out_err:
exit_browser(0);
exit(0);
}
static int perf_top__setup_sample_type(struct perf_top *top)
{
if (!top->sort_has_symbols) {
if (symbol_conf.use_callchain) {
ui__warning("Selected -g but \"sym\" not present in --sort/-s.");
return -EINVAL;
}
} else if (!top->dont_use_callchains && callchain_param.mode != CHAIN_NONE) {
if (callchain_register_param(&callchain_param) < 0) {
ui__warning("Can't register callchain params.\n");
return -EINVAL;
}
}
return 0;
}
static int __cmd_top(struct perf_top *top)
{
pthread_t thread;
int ret;
/*
* FIXME: perf_session__new should allow passing a O_MMAP, so that all this
* mmap reading, etc is encapsulated in it. Use O_WRONLY for now.
*/
top->session = perf_session__new(NULL, O_WRONLY, false, false, NULL);
if (top->session == NULL)
return -ENOMEM;
ret = perf_top__setup_sample_type(top);
if (ret)
goto out_delete;
if (top->target_tid || top->uid != UINT_MAX)
perf_event__synthesize_thread_map(&top->tool, top->evlist->threads,
perf_event__process,
&top->session->host_machine);
else
perf_event__synthesize_threads(&top->tool, perf_event__process,
&top->session->host_machine);
perf_top__start_counters(top);
top->session->evlist = top->evlist;
perf_session__update_sample_type(top->session);
/* Wait for a minimal set of events before starting the snapshot */
poll(top->evlist->pollfd, top->evlist->nr_fds, 100);
perf_top__mmap_read(top);
if (pthread_create(&thread, NULL, (use_browser > 0 ? display_thread_tui :
display_thread), top)) {
printf("Could not create display thread.\n");
exit(-1);
}
if (top->realtime_prio) {
struct sched_param param;
param.sched_priority = top->realtime_prio;
if (sched_setscheduler(0, SCHED_FIFO, ¶m)) {
printf("Could not set realtime priority.\n");
exit(-1);
}
}
while (1) {
u64 hits = top->samples;
perf_top__mmap_read(top);
if (hits == top->samples)
ret = poll(top->evlist->pollfd, top->evlist->nr_fds, 100);
}
out_delete:
perf_session__delete(top->session);
top->session = NULL;
return 0;
}
static int
parse_callchain_opt(const struct option *opt, const char *arg, int unset)
{
struct perf_top *top = (struct perf_top *)opt->value;
char *tok, *tok2;
char *endptr;
/*
* --no-call-graph
*/
if (unset) {
top->dont_use_callchains = true;
return 0;
}
symbol_conf.use_callchain = true;
if (!arg)
return 0;
tok = strtok((char *)arg, ",");
if (!tok)
return -1;
/* get the output mode */
if (!strncmp(tok, "graph", strlen(arg)))
callchain_param.mode = CHAIN_GRAPH_ABS;
else if (!strncmp(tok, "flat", strlen(arg)))
callchain_param.mode = CHAIN_FLAT;
else if (!strncmp(tok, "fractal", strlen(arg)))
callchain_param.mode = CHAIN_GRAPH_REL;
else if (!strncmp(tok, "none", strlen(arg))) {
callchain_param.mode = CHAIN_NONE;
symbol_conf.use_callchain = false;
return 0;
} else
return -1;
/* get the min percentage */
tok = strtok(NULL, ",");
if (!tok)
goto setup;
callchain_param.min_percent = strtod(tok, &endptr);
if (tok == endptr)
return -1;
/* get the print limit */
tok2 = strtok(NULL, ",");
if (!tok2)
goto setup;
if (tok2[0] != 'c') {
callchain_param.print_limit = strtod(tok2, &endptr);
tok2 = strtok(NULL, ",");
if (!tok2)
goto setup;
}
/* get the call chain order */
if (!strcmp(tok2, "caller"))
callchain_param.order = ORDER_CALLER;
else if (!strcmp(tok2, "callee"))
callchain_param.order = ORDER_CALLEE;
else
return -1;
setup:
if (callchain_register_param(&callchain_param) < 0) {
fprintf(stderr, "Can't register callchain params\n");
return -1;
}
return 0;
}
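/*
 * Illustrative examples, not part of the original file, of strings the
 * parser above accepts for -G/--call-graph (see the option table in
 * cmd_top() below); the format is mode[,min_percent[,print_limit][,order]]:
 *
 *   "graph"               absolute graph output
 *   "fractal,0.5"         relative graph, hiding chains below 0.5%
 *   "fractal,0.5,callee"  the built-in default
 *   "graph,2,caller"      caller-ordered chains with a 2% threshold
 */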
static const char * const top_usage[] = {
"perf top [<options>]",
NULL
};
int cmd_top(int argc, const char **argv, const char *prefix __used)
{
struct perf_evsel *pos;
int status = -ENOMEM;
struct perf_top top = {
.count_filter = 5,
.delay_secs = 2,
.uid = UINT_MAX,
.freq = 1000, /* 1 KHz */
.mmap_pages = 128,
.sym_pcnt_filter = 5,
};
char callchain_default_opt[] = "fractal,0.5,callee";
const struct option options[] = {
OPT_CALLBACK('e', "event", &top.evlist, "event",
"event selector. use 'perf list' to list available events",
parse_events_option),
OPT_INTEGER('c', "count", &top.default_interval,
"event period to sample"),
OPT_STRING('p', "pid", &top.target_pid, "pid",
"profile events on existing process id"),
OPT_STRING('t', "tid", &top.target_tid, "tid",
"profile events on existing thread id"),
OPT_BOOLEAN('a', "all-cpus", &top.system_wide,
"system-wide collection from all CPUs"),
OPT_STRING('C', "cpu", &top.cpu_list, "cpu",
"list of cpus to monitor"),
OPT_STRING('k', "vmlinux", &symbol_conf.vmlinux_name,
"file", "vmlinux pathname"),
OPT_BOOLEAN('K', "hide_kernel_symbols", &top.hide_kernel_symbols,
"hide kernel symbols"),
OPT_UINTEGER('m', "mmap-pages", &top.mmap_pages, "number of mmap data pages"),
OPT_INTEGER('r', "realtime", &top.realtime_prio,
"collect data with this RT SCHED_FIFO priority"),
OPT_INTEGER('d', "delay", &top.delay_secs,
"number of seconds to delay between refreshes"),
OPT_BOOLEAN('D', "dump-symtab", &top.dump_symtab,
"dump the symbol table used for profiling"),
OPT_INTEGER('f', "count-filter", &top.count_filter,
"only display functions with more events than this"),
OPT_BOOLEAN('g', "group", &top.group,
"put the counters into a counter group"),
OPT_BOOLEAN('i', "inherit", &top.inherit,
"child tasks inherit counters"),
OPT_STRING(0, "sym-annotate", &top.sym_filter, "symbol name",
"symbol to annotate"),
OPT_BOOLEAN('z', "zero", &top.zero,
"zero history across updates"),
OPT_INTEGER('F', "freq", &top.freq,
"profile at this frequency"),
OPT_INTEGER('E', "entries", &top.print_entries,
"display this many functions"),
OPT_BOOLEAN('U', "hide_user_symbols", &top.hide_user_symbols,
"hide user symbols"),
OPT_BOOLEAN(0, "tui", &top.use_tui, "Use the TUI interface"),
OPT_BOOLEAN(0, "stdio", &top.use_stdio, "Use the stdio interface"),
OPT_INCR('v', "verbose", &verbose,
"be more verbose (show counter open errors, etc)"),
OPT_STRING('s', "sort", &sort_order, "key[,key2...]",
"sort by key(s): pid, comm, dso, symbol, parent"),
OPT_BOOLEAN('n', "show-nr-samples", &symbol_conf.show_nr_samples,
"Show a column with the number of samples"),
OPT_CALLBACK_DEFAULT('G', "call-graph", &top, "output_type,min_percent, call_order",
"Display callchains using output_type (graph, flat, fractal, or none), min percent threshold and callchain order. "
"Default: fractal,0.5,callee", &parse_callchain_opt,
callchain_default_opt),
OPT_BOOLEAN(0, "show-total-period", &symbol_conf.show_total_period,
"Show a column with the sum of periods"),
OPT_STRING(0, "dsos", &symbol_conf.dso_list_str, "dso[,dso...]",
"only consider symbols in these dsos"),
OPT_STRING(0, "comms", &symbol_conf.comm_list_str, "comm[,comm...]",
"only consider symbols in these comms"),
OPT_STRING(0, "symbols", &symbol_conf.sym_list_str, "symbol[,symbol...]",
"only consider these symbols"),
OPT_BOOLEAN(0, "source", &symbol_conf.annotate_src,
"Interleave source code with assembly code (default)"),
OPT_BOOLEAN(0, "asm-raw", &symbol_conf.annotate_asm_raw,
"Display raw encoding of assembly instructions (default)"),
OPT_STRING('M', "disassembler-style", &disassembler_style, "disassembler style",
"Specify disassembler style (e.g. -M intel for intel syntax)"),
OPT_STRING('u', "uid", &top.uid_str, "user", "user to profile"),
OPT_END()
};
top.evlist = perf_evlist__new(NULL, NULL);
if (top.evlist == NULL)
return -ENOMEM;
symbol_conf.exclude_other = false;
argc = parse_options(argc, argv, options, top_usage, 0);
if (argc)
usage_with_options(top_usage, options);
if (sort_order == default_sort_order)
sort_order = "dso,symbol";
setup_sorting(top_usage, options);
if (top.use_stdio)
use_browser = 0;
else if (top.use_tui)
use_browser = 1;
setup_browser(false);
top.uid = parse_target_uid(top.uid_str, top.target_tid, top.target_pid);
if (top.uid_str != NULL && top.uid == UINT_MAX - 1)
goto out_delete_evlist;
/* CPU and PID are mutually exclusive */
if (top.target_tid && top.cpu_list) {
printf("WARNING: PID switch overriding CPU\n");
sleep(1);
top.cpu_list = NULL;
}
if (top.target_pid)
top.target_tid = top.target_pid;
if (perf_evlist__create_maps(top.evlist, top.target_pid,
top.target_tid, top.uid, top.cpu_list) < 0)
usage_with_options(top_usage, options);
if (!top.evlist->nr_entries &&
perf_evlist__add_default(top.evlist) < 0) {
pr_err("Not enough memory for event selector list\n");
return -ENOMEM;
}
symbol_conf.nr_events = top.evlist->nr_entries;
if (top.delay_secs < 1)
top.delay_secs = 1;
/*
* User specified count overrides default frequency.
*/
if (top.default_interval)
top.freq = 0;
else if (top.freq) {
top.default_interval = top.freq;
} else {
fprintf(stderr, "frequency and count are zero, aborting\n");
exit(EXIT_FAILURE);
}
list_for_each_entry(pos, &top.evlist->entries, node) {
/*
* Fill in the ones not specifically initialized via -c:
*/
if (!pos->attr.sample_period)
pos->attr.sample_period = top.default_interval;
}
top.sym_evsel = list_entry(top.evlist->entries.next, struct perf_evsel, node);
symbol_conf.priv_size = sizeof(struct annotation);
symbol_conf.try_vmlinux_path = (symbol_conf.vmlinux_name == NULL);
if (symbol__init() < 0)
return -1;
sort_entry__setup_elide(&sort_dso, symbol_conf.dso_list, "dso", stdout);
sort_entry__setup_elide(&sort_comm, symbol_conf.comm_list, "comm", stdout);
sort_entry__setup_elide(&sort_sym, symbol_conf.sym_list, "symbol", stdout);
/*
* Avoid annotation data structures overhead when symbols aren't on the
* sort list.
*/
top.sort_has_symbols = sort_sym.list.next != NULL;
get_term_dimensions(&top.winsize);
if (top.print_entries == 0) {
struct sigaction act = {
.sa_sigaction = perf_top__sig_winch,
.sa_flags = SA_SIGINFO,
};
perf_top__update_print_entries(&top);
sigaction(SIGWINCH, &act, NULL);
}
status = __cmd_top(&top);
out_delete_evlist:
perf_evlist__delete(top.evlist);
return status;
}
| gpl-2.0 |
faux123/NX403A | tools/perf/util/session.c | 4781 | 39745 | #define _FILE_OFFSET_BITS 64
#include <linux/kernel.h>
#include <byteswap.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/mman.h>
#include "evlist.h"
#include "evsel.h"
#include "session.h"
#include "tool.h"
#include "sort.h"
#include "util.h"
#include "cpumap.h"
static int perf_session__open(struct perf_session *self, bool force)
{
struct stat input_stat;
if (!strcmp(self->filename, "-")) {
self->fd_pipe = true;
self->fd = STDIN_FILENO;
if (perf_session__read_header(self, self->fd) < 0)
pr_err("incompatible file format (rerun with -v to learn more)");
return 0;
}
self->fd = open(self->filename, O_RDONLY);
if (self->fd < 0) {
int err = errno;
pr_err("failed to open %s: %s", self->filename, strerror(err));
if (err == ENOENT && !strcmp(self->filename, "perf.data"))
pr_err(" (try 'perf record' first)");
pr_err("\n");
return -errno;
}
if (fstat(self->fd, &input_stat) < 0)
goto out_close;
if (!force && input_stat.st_uid && (input_stat.st_uid != geteuid())) {
pr_err("file %s not owned by current user or root\n",
self->filename);
goto out_close;
}
if (!input_stat.st_size) {
pr_info("zero-sized file (%s), nothing to do!\n",
self->filename);
goto out_close;
}
if (perf_session__read_header(self, self->fd) < 0) {
pr_err("incompatible file format (rerun with -v to learn more)");
goto out_close;
}
if (!perf_evlist__valid_sample_type(self->evlist)) {
pr_err("non matching sample_type");
goto out_close;
}
if (!perf_evlist__valid_sample_id_all(self->evlist)) {
pr_err("non matching sample_id_all");
goto out_close;
}
self->size = input_stat.st_size;
return 0;
out_close:
close(self->fd);
self->fd = -1;
return -1;
}
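/*
* Cache the evlist-wide sample layout (sample_type, sample size,
* sample_id_all, id header size) on the session so records can be parsed
* without re-deriving it for every event.
*/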
void perf_session__update_sample_type(struct perf_session *self)
{
self->sample_type = perf_evlist__sample_type(self->evlist);
self->sample_size = __perf_evsel__sample_size(self->sample_type);
self->sample_id_all = perf_evlist__sample_id_all(self->evlist);
self->id_hdr_size = perf_evlist__id_hdr_size(self->evlist);
self->host_machine.id_hdr_size = self->id_hdr_size;
}
int perf_session__create_kernel_maps(struct perf_session *self)
{
int ret = machine__create_kernel_maps(&self->host_machine);
if (ret >= 0)
ret = machines__create_guest_kernel_maps(&self->machines);
return ret;
}
static void perf_session__destroy_kernel_maps(struct perf_session *self)
{
machine__destroy_kernel_maps(&self->host_machine);
machines__destroy_guest_kernel_maps(&self->machines);
}
struct perf_session *perf_session__new(const char *filename, int mode,
bool force, bool repipe,
struct perf_tool *tool)
{
struct perf_session *self;
struct stat st;
size_t len;
if (!filename || !strlen(filename)) {
if (!fstat(STDIN_FILENO, &st) && S_ISFIFO(st.st_mode))
filename = "-";
else
filename = "perf.data";
}
len = strlen(filename);
self = zalloc(sizeof(*self) + len);
if (self == NULL)
goto out;
memcpy(self->filename, filename, len);
/*
* On 64bit we can mmap the data file in one go. No need for tiny mmap
* slices. On 32bit we use 32MB.
*/
#if BITS_PER_LONG == 64
self->mmap_window = ULLONG_MAX;
#else
self->mmap_window = 32 * 1024 * 1024ULL;
#endif
self->machines = RB_ROOT;
self->repipe = repipe;
INIT_LIST_HEAD(&self->ordered_samples.samples);
INIT_LIST_HEAD(&self->ordered_samples.sample_cache);
INIT_LIST_HEAD(&self->ordered_samples.to_free);
machine__init(&self->host_machine, "", HOST_KERNEL_ID);
hists__init(&self->hists);
if (mode == O_RDONLY) {
if (perf_session__open(self, force) < 0)
goto out_delete;
perf_session__update_sample_type(self);
} else if (mode == O_WRONLY) {
/*
* In O_RDONLY mode this will be performed when reading the
* kernel MMAP event, in perf_event__process_mmap().
*/
if (perf_session__create_kernel_maps(self) < 0)
goto out_delete;
}
if (tool && tool->ordering_requires_timestamps &&
tool->ordered_samples && !self->sample_id_all) {
dump_printf("WARNING: No sample_id_all support, falling back to unordered processing\n");
tool->ordered_samples = false;
}
out:
return self;
out_delete:
perf_session__delete(self);
return NULL;
}
static void machine__delete_dead_threads(struct machine *machine)
{
struct thread *n, *t;
list_for_each_entry_safe(t, n, &machine->dead_threads, node) {
list_del(&t->node);
thread__delete(t);
}
}
static void perf_session__delete_dead_threads(struct perf_session *session)
{
machine__delete_dead_threads(&session->host_machine);
}
static void machine__delete_threads(struct machine *self)
{
struct rb_node *nd = rb_first(&self->threads);
while (nd) {
struct thread *t = rb_entry(nd, struct thread, rb_node);
rb_erase(&t->rb_node, &self->threads);
nd = rb_next(nd);
thread__delete(t);
}
}
static void perf_session__delete_threads(struct perf_session *session)
{
machine__delete_threads(&session->host_machine);
}
void perf_session__delete(struct perf_session *self)
{
perf_session__destroy_kernel_maps(self);
perf_session__delete_dead_threads(self);
perf_session__delete_threads(self);
machine__exit(&self->host_machine);
close(self->fd);
free(self);
}
void machine__remove_thread(struct machine *self, struct thread *th)
{
self->last_match = NULL;
rb_erase(&th->rb_node, &self->threads);
/*
* We may have references to this thread, for instance in some hist_entry
* instances, so just move them to a separate list.
*/
list_add_tail(&th->node, &self->dead_threads);
}
static bool symbol__match_parent_regex(struct symbol *sym)
{
if (sym->name && !regexec(&parent_regex, sym->name, 0, NULL, 0))
return 1;
return 0;
}
static const u8 cpumodes[] = {
PERF_RECORD_MISC_USER,
PERF_RECORD_MISC_KERNEL,
PERF_RECORD_MISC_GUEST_USER,
PERF_RECORD_MISC_GUEST_KERNEL
};
#define NCPUMODES (sizeof(cpumodes)/sizeof(u8))
static void ip__resolve_ams(struct machine *self, struct thread *thread,
struct addr_map_symbol *ams,
u64 ip)
{
struct addr_location al;
size_t i;
u8 m;
memset(&al, 0, sizeof(al));
for (i = 0; i < NCPUMODES; i++) {
m = cpumodes[i];
/*
* We cannot use the header.misc hint to determine whether a
* branch stack address is user, kernel, guest, hypervisor.
* Branches may straddle the kernel/user/hypervisor boundaries.
* Thus, we have to try each mode in turn until we find a match;
* otherwise the symbol remains unknown.
*/
thread__find_addr_location(thread, self, m, MAP__FUNCTION,
ip, &al, NULL);
if (al.sym)
goto found;
}
found:
ams->addr = ip;
ams->al_addr = al.addr;
ams->sym = al.sym;
ams->map = al.map;
}
struct branch_info *machine__resolve_bstack(struct machine *self,
struct thread *thr,
struct branch_stack *bs)
{
struct branch_info *bi;
unsigned int i;
bi = calloc(bs->nr, sizeof(struct branch_info));
if (!bi)
return NULL;
for (i = 0; i < bs->nr; i++) {
ip__resolve_ams(self, thr, &bi[i].to, bs->entries[i].to);
ip__resolve_ams(self, thr, &bi[i].from, bs->entries[i].from);
bi[i].flags = bs->entries[i].flags;
}
return bi;
}
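/*
* Walk the recorded callchain in the order requested by callchain_param,
* switching cpumode whenever a PERF_CONTEXT_* marker is seen, and append
* each resolved ip/map/symbol to the evsel's callchain cursor.
*/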
int machine__resolve_callchain(struct machine *self, struct perf_evsel *evsel,
struct thread *thread,
struct ip_callchain *chain,
struct symbol **parent)
{
u8 cpumode = PERF_RECORD_MISC_USER;
unsigned int i;
int err;
callchain_cursor_reset(&evsel->hists.callchain_cursor);
for (i = 0; i < chain->nr; i++) {
u64 ip;
struct addr_location al;
if (callchain_param.order == ORDER_CALLEE)
ip = chain->ips[i];
else
ip = chain->ips[chain->nr - i - 1];
if (ip >= PERF_CONTEXT_MAX) {
switch (ip) {
case PERF_CONTEXT_HV:
cpumode = PERF_RECORD_MISC_HYPERVISOR; break;
case PERF_CONTEXT_KERNEL:
cpumode = PERF_RECORD_MISC_KERNEL; break;
case PERF_CONTEXT_USER:
cpumode = PERF_RECORD_MISC_USER; break;
default:
break;
}
continue;
}
al.filtered = false;
thread__find_addr_location(thread, self, cpumode,
MAP__FUNCTION, ip, &al, NULL);
if (al.sym != NULL) {
if (sort__has_parent && !*parent &&
symbol__match_parent_regex(al.sym))
*parent = al.sym;
if (!symbol_conf.use_callchain)
break;
}
err = callchain_cursor_append(&evsel->hists.callchain_cursor,
ip, al.map, al.sym);
if (err)
return err;
}
return 0;
}
static int process_event_synth_tracing_data_stub(union perf_event *event __used,
struct perf_session *session __used)
{
dump_printf(": unhandled!\n");
return 0;
}
static int process_event_synth_attr_stub(union perf_event *event __used,
struct perf_evlist **pevlist __used)
{
dump_printf(": unhandled!\n");
return 0;
}
static int process_event_sample_stub(struct perf_tool *tool __used,
union perf_event *event __used,
struct perf_sample *sample __used,
struct perf_evsel *evsel __used,
struct machine *machine __used)
{
dump_printf(": unhandled!\n");
return 0;
}
static int process_event_stub(struct perf_tool *tool __used,
union perf_event *event __used,
struct perf_sample *sample __used,
struct machine *machine __used)
{
dump_printf(": unhandled!\n");
return 0;
}
static int process_finished_round_stub(struct perf_tool *tool __used,
union perf_event *event __used,
struct perf_session *perf_session __used)
{
dump_printf(": unhandled!\n");
return 0;
}
static int process_event_type_stub(struct perf_tool *tool __used,
union perf_event *event __used)
{
dump_printf(": unhandled!\n");
return 0;
}
static int process_finished_round(struct perf_tool *tool,
union perf_event *event,
struct perf_session *session);
static void perf_tool__fill_defaults(struct perf_tool *tool)
{
if (tool->sample == NULL)
tool->sample = process_event_sample_stub;
if (tool->mmap == NULL)
tool->mmap = process_event_stub;
if (tool->comm == NULL)
tool->comm = process_event_stub;
if (tool->fork == NULL)
tool->fork = process_event_stub;
if (tool->exit == NULL)
tool->exit = process_event_stub;
if (tool->lost == NULL)
tool->lost = perf_event__process_lost;
if (tool->read == NULL)
tool->read = process_event_sample_stub;
if (tool->throttle == NULL)
tool->throttle = process_event_stub;
if (tool->unthrottle == NULL)
tool->unthrottle = process_event_stub;
if (tool->attr == NULL)
tool->attr = process_event_synth_attr_stub;
if (tool->event_type == NULL)
tool->event_type = process_event_type_stub;
if (tool->tracing_data == NULL)
tool->tracing_data = process_event_synth_tracing_data_stub;
if (tool->build_id == NULL)
tool->build_id = process_finished_round_stub;
if (tool->finished_round == NULL) {
if (tool->ordered_samples)
tool->finished_round = process_finished_round;
else
tool->finished_round = process_finished_round_stub;
}
}
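/*
* Byte-swap a buffer as an array of u64s; byte_size is expected to be a
* multiple of sizeof(u64).
*/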
void mem_bswap_64(void *src, int byte_size)
{
u64 *m = src;
while (byte_size > 0) {
*m = bswap_64(*m);
byte_size -= sizeof(u64);
++m;
}
}
static void perf_event__all64_swap(union perf_event *event)
{
struct perf_event_header *hdr = &event->header;
mem_bswap_64(hdr + 1, event->header.size - sizeof(*hdr));
}
static void perf_event__comm_swap(union perf_event *event)
{
event->comm.pid = bswap_32(event->comm.pid);
event->comm.tid = bswap_32(event->comm.tid);
}
static void perf_event__mmap_swap(union perf_event *event)
{
event->mmap.pid = bswap_32(event->mmap.pid);
event->mmap.tid = bswap_32(event->mmap.tid);
event->mmap.start = bswap_64(event->mmap.start);
event->mmap.len = bswap_64(event->mmap.len);
event->mmap.pgoff = bswap_64(event->mmap.pgoff);
}
static void perf_event__task_swap(union perf_event *event)
{
event->fork.pid = bswap_32(event->fork.pid);
event->fork.tid = bswap_32(event->fork.tid);
event->fork.ppid = bswap_32(event->fork.ppid);
event->fork.ptid = bswap_32(event->fork.ptid);
event->fork.time = bswap_64(event->fork.time);
}
static void perf_event__read_swap(union perf_event *event)
{
event->read.pid = bswap_32(event->read.pid);
event->read.tid = bswap_32(event->read.tid);
event->read.value = bswap_64(event->read.value);
event->read.time_enabled = bswap_64(event->read.time_enabled);
event->read.time_running = bswap_64(event->read.time_running);
event->read.id = bswap_64(event->read.id);
}
/* exported for swapping attributes in file header */
void perf_event__attr_swap(struct perf_event_attr *attr)
{
attr->type = bswap_32(attr->type);
attr->size = bswap_32(attr->size);
attr->config = bswap_64(attr->config);
attr->sample_period = bswap_64(attr->sample_period);
attr->sample_type = bswap_64(attr->sample_type);
attr->read_format = bswap_64(attr->read_format);
attr->wakeup_events = bswap_32(attr->wakeup_events);
attr->bp_type = bswap_32(attr->bp_type);
attr->bp_addr = bswap_64(attr->bp_addr);
attr->bp_len = bswap_64(attr->bp_len);
}
static void perf_event__hdr_attr_swap(union perf_event *event)
{
size_t size;
perf_event__attr_swap(&event->attr.attr);
size = event->header.size;
size -= (void *)&event->attr.id - (void *)event;
mem_bswap_64(event->attr.id, size);
}
static void perf_event__event_type_swap(union perf_event *event)
{
event->event_type.event_type.event_id =
bswap_64(event->event_type.event_type.event_id);
}
static void perf_event__tracing_data_swap(union perf_event *event)
{
event->tracing_data.size = bswap_32(event->tracing_data.size);
}
typedef void (*perf_event__swap_op)(union perf_event *event);
static perf_event__swap_op perf_event__swap_ops[] = {
[PERF_RECORD_MMAP] = perf_event__mmap_swap,
[PERF_RECORD_COMM] = perf_event__comm_swap,
[PERF_RECORD_FORK] = perf_event__task_swap,
[PERF_RECORD_EXIT] = perf_event__task_swap,
[PERF_RECORD_LOST] = perf_event__all64_swap,
[PERF_RECORD_READ] = perf_event__read_swap,
[PERF_RECORD_SAMPLE] = perf_event__all64_swap,
[PERF_RECORD_HEADER_ATTR] = perf_event__hdr_attr_swap,
[PERF_RECORD_HEADER_EVENT_TYPE] = perf_event__event_type_swap,
[PERF_RECORD_HEADER_TRACING_DATA] = perf_event__tracing_data_swap,
[PERF_RECORD_HEADER_BUILD_ID] = NULL,
[PERF_RECORD_HEADER_MAX] = NULL,
};
struct sample_queue {
u64 timestamp;
u64 file_offset;
union perf_event *event;
struct list_head list;
};
static void perf_session_free_sample_buffers(struct perf_session *session)
{
struct ordered_samples *os = &session->ordered_samples;
while (!list_empty(&os->to_free)) {
struct sample_queue *sq;
sq = list_entry(os->to_free.next, struct sample_queue, list);
list_del(&sq->list);
free(sq);
}
}
static int perf_session_deliver_event(struct perf_session *session,
union perf_event *event,
struct perf_sample *sample,
struct perf_tool *tool,
u64 file_offset);
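/*
* Deliver every queued event with a timestamp up to next_flush, in
* timestamp order, then recycle the used sample_queue entries onto the
* sample_cache list for later rounds.
*/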
static void flush_sample_queue(struct perf_session *s,
struct perf_tool *tool)
{
struct ordered_samples *os = &s->ordered_samples;
struct list_head *head = &os->samples;
struct sample_queue *tmp, *iter;
struct perf_sample sample;
u64 limit = os->next_flush;
u64 last_ts = os->last_sample ? os->last_sample->timestamp : 0ULL;
unsigned idx = 0, progress_next = os->nr_samples / 16;
int ret;
if (!tool->ordered_samples || !limit)
return;
list_for_each_entry_safe(iter, tmp, head, list) {
if (iter->timestamp > limit)
break;
ret = perf_session__parse_sample(s, iter->event, &sample);
if (ret)
pr_err("Can't parse sample, err = %d\n", ret);
else
perf_session_deliver_event(s, iter->event, &sample, tool,
iter->file_offset);
os->last_flush = iter->timestamp;
list_del(&iter->list);
list_add(&iter->list, &os->sample_cache);
if (++idx >= progress_next) {
progress_next += os->nr_samples / 16;
ui_progress__update(idx, os->nr_samples,
"Processing time ordered events...");
}
}
if (list_empty(head)) {
os->last_sample = NULL;
} else if (last_ts <= limit) {
os->last_sample =
list_entry(head->prev, struct sample_queue, list);
}
os->nr_samples = 0;
}
/*
* When perf record finishes a pass over every buffer, it records this pseudo
* event.
* We record the max timestamp t found in pass n.
* Assuming these timestamps are monotonic across cpus, we know that if
* a buffer still has events with timestamps below t, they will all be
* available and read in pass n + 1.
* Hence, when we start to read pass n + 2, we can safely flush all
* events with timestamps below t.
*
* ============ PASS n =================
* CPU 0 | CPU 1
* |
* cnt1 timestamps | cnt2 timestamps
* 1 | 2
* 2 | 3
* - | 4 <--- max recorded
*
* ============ PASS n + 1 ==============
* CPU 0 | CPU 1
* |
* cnt1 timestamps | cnt2 timestamps
* 3 | 5
* 4 | 6
* 5 | 7 <---- max recorded
*
* Flush all events below timestamp 4
*
* ============ PASS n + 2 ==============
* CPU 0 | CPU 1
* |
* cnt1 timestamps | cnt2 timestamps
* 6 | 8
* 7 | 9
* - | 10
*
* Flush all events below timestamp 7
* etc...
*/
static int process_finished_round(struct perf_tool *tool,
union perf_event *event __used,
struct perf_session *session)
{
flush_sample_queue(session, tool);
session->ordered_samples.next_flush = session->ordered_samples.max_timestamp;
return 0;
}
/* The queue is ordered by time */
static void __queue_event(struct sample_queue *new, struct perf_session *s)
{
struct ordered_samples *os = &s->ordered_samples;
struct sample_queue *sample = os->last_sample;
u64 timestamp = new->timestamp;
struct list_head *p;
++os->nr_samples;
os->last_sample = new;
if (!sample) {
list_add(&new->list, &os->samples);
os->max_timestamp = timestamp;
return;
}
/*
* last_sample might point to some random place in the list as it's
* the last queued event. We expect that the new event is close to
* this.
*/
if (sample->timestamp <= timestamp) {
while (sample->timestamp <= timestamp) {
p = sample->list.next;
if (p == &os->samples) {
list_add_tail(&new->list, &os->samples);
os->max_timestamp = timestamp;
return;
}
sample = list_entry(p, struct sample_queue, list);
}
list_add_tail(&new->list, &sample->list);
} else {
while (sample->timestamp > timestamp) {
p = sample->list.prev;
if (p == &os->samples) {
list_add(&new->list, &os->samples);
return;
}
sample = list_entry(p, struct sample_queue, list);
}
list_add(&new->list, &sample->list);
}
}
#define MAX_SAMPLE_BUFFER (64 * 1024 / sizeof(struct sample_queue))
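/*
* Queue one event for time-ordered processing. Entries are taken from the
* sample_cache free list when possible, otherwise from the current
* MAX_SAMPLE_BUFFER-sized slab; a fresh slab is allocated (and tracked on
* to_free) when the current one is exhausted. Events without a timestamp,
* or older than the last flush, are rejected.
*/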
static int perf_session_queue_event(struct perf_session *s, union perf_event *event,
struct perf_sample *sample, u64 file_offset)
{
struct ordered_samples *os = &s->ordered_samples;
struct list_head *sc = &os->sample_cache;
u64 timestamp = sample->time;
struct sample_queue *new;
if (!timestamp || timestamp == ~0ULL)
return -ETIME;
if (timestamp < s->ordered_samples.last_flush) {
printf("Warning: Timestamp below last timeslice flush\n");
return -EINVAL;
}
if (!list_empty(sc)) {
new = list_entry(sc->next, struct sample_queue, list);
list_del(&new->list);
} else if (os->sample_buffer) {
new = os->sample_buffer + os->sample_buffer_idx;
if (++os->sample_buffer_idx == MAX_SAMPLE_BUFFER)
os->sample_buffer = NULL;
} else {
os->sample_buffer = malloc(MAX_SAMPLE_BUFFER * sizeof(*new));
if (!os->sample_buffer)
return -ENOMEM;
list_add(&os->sample_buffer->list, &os->to_free);
os->sample_buffer_idx = 2;
new = os->sample_buffer + 1;
}
new->timestamp = timestamp;
new->file_offset = file_offset;
new->event = event;
__queue_event(new, s);
return 0;
}
static void callchain__printf(struct perf_sample *sample)
{
unsigned int i;
printf("... chain: nr:%" PRIu64 "\n", sample->callchain->nr);
for (i = 0; i < sample->callchain->nr; i++)
printf("..... %2d: %016" PRIx64 "\n",
i, sample->callchain->ips[i]);
}
static void branch_stack__printf(struct perf_sample *sample)
{
uint64_t i;
printf("... branch stack: nr:%" PRIu64 "\n", sample->branch_stack->nr);
for (i = 0; i < sample->branch_stack->nr; i++)
printf("..... %2"PRIu64": %016" PRIx64 " -> %016" PRIx64 "\n",
i, sample->branch_stack->entries[i].from,
sample->branch_stack->entries[i].to);
}
static void perf_session__print_tstamp(struct perf_session *session,
union perf_event *event,
struct perf_sample *sample)
{
if (event->header.type != PERF_RECORD_SAMPLE &&
!session->sample_id_all) {
fputs("-1 -1 ", stdout);
return;
}
if ((session->sample_type & PERF_SAMPLE_CPU))
printf("%u ", sample->cpu);
if (session->sample_type & PERF_SAMPLE_TIME)
printf("%" PRIu64 " ", sample->time);
}
static void dump_event(struct perf_session *session, union perf_event *event,
u64 file_offset, struct perf_sample *sample)
{
if (!dump_trace)
return;
printf("\n%#" PRIx64 " [%#x]: event: %d\n",
file_offset, event->header.size, event->header.type);
trace_event(event);
if (sample)
perf_session__print_tstamp(session, event, sample);
printf("%#" PRIx64 " [%#x]: PERF_RECORD_%s", file_offset,
event->header.size, perf_event__name(event->header.type));
}
static void dump_sample(struct perf_session *session, union perf_event *event,
struct perf_sample *sample)
{
if (!dump_trace)
return;
printf("(IP, %d): %d/%d: %#" PRIx64 " period: %" PRIu64 " addr: %#" PRIx64 "\n",
event->header.misc, sample->pid, sample->tid, sample->ip,
sample->period, sample->addr);
if (session->sample_type & PERF_SAMPLE_CALLCHAIN)
callchain__printf(sample);
if (session->sample_type & PERF_SAMPLE_BRANCH_STACK)
branch_stack__printf(sample);
}
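/*
* Guest kernel samples are attributed to the guest machine looked up by pid;
* everything else is attributed to the host machine.
*/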
static struct machine *
perf_session__find_machine_for_cpumode(struct perf_session *session,
union perf_event *event)
{
const u8 cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
if (cpumode == PERF_RECORD_MISC_GUEST_KERNEL && perf_guest) {
u32 pid;
if (event->header.type == PERF_RECORD_MMAP)
pid = event->mmap.pid;
else
pid = event->ip.pid;
return perf_session__find_machine(session, pid);
}
return perf_session__find_host_machine(session);
}
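/*
* Dispatch one kernel-generated event to the tool callback matching its
* record type, after resolving the evsel it belongs to (by sample id) and
* the machine it should be attributed to.
*/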
static int perf_session_deliver_event(struct perf_session *session,
union perf_event *event,
struct perf_sample *sample,
struct perf_tool *tool,
u64 file_offset)
{
struct perf_evsel *evsel;
struct machine *machine;
dump_event(session, event, file_offset, sample);
evsel = perf_evlist__id2evsel(session->evlist, sample->id);
if (evsel != NULL && event->header.type != PERF_RECORD_SAMPLE) {
/*
* XXX We're leaving PERF_RECORD_SAMPLE unaccounted here
* because the tools right now may apply filters, discarding
* some of the samples. For consistency, in the future we
* should have something like nr_filtered_samples and remove
* the sample->period from total_sample_period, etc. KISS for
* now, though.
*
* Also testing against NULL allows us to handle files without
* attr.sample_id_all and/or without PERF_SAMPLE_ID. In the
* future probably it'll be a good idea to restrict event
* processing via perf_session to files with both set.
*/
hists__inc_nr_events(&evsel->hists, event->header.type);
}
machine = perf_session__find_machine_for_cpumode(session, event);
switch (event->header.type) {
case PERF_RECORD_SAMPLE:
dump_sample(session, event, sample);
if (evsel == NULL) {
++session->hists.stats.nr_unknown_id;
return 0;
}
if (machine == NULL) {
++session->hists.stats.nr_unprocessable_samples;
return 0;
}
return tool->sample(tool, event, sample, evsel, machine);
case PERF_RECORD_MMAP:
return tool->mmap(tool, event, sample, machine);
case PERF_RECORD_COMM:
return tool->comm(tool, event, sample, machine);
case PERF_RECORD_FORK:
return tool->fork(tool, event, sample, machine);
case PERF_RECORD_EXIT:
return tool->exit(tool, event, sample, machine);
case PERF_RECORD_LOST:
if (tool->lost == perf_event__process_lost)
session->hists.stats.total_lost += event->lost.lost;
return tool->lost(tool, event, sample, machine);
case PERF_RECORD_READ:
return tool->read(tool, event, sample, evsel, machine);
case PERF_RECORD_THROTTLE:
return tool->throttle(tool, event, sample, machine);
case PERF_RECORD_UNTHROTTLE:
return tool->unthrottle(tool, event, sample, machine);
default:
++session->hists.stats.nr_unknown_events;
return -1;
}
}
static int perf_session__preprocess_sample(struct perf_session *session,
union perf_event *event, struct perf_sample *sample)
{
if (event->header.type != PERF_RECORD_SAMPLE ||
!(session->sample_type & PERF_SAMPLE_CALLCHAIN))
return 0;
if (!ip_callchain__valid(sample->callchain, event)) {
pr_debug("call-chain problem with event, skipping it.\n");
++session->hists.stats.nr_invalid_chains;
session->hists.stats.total_invalid_chains += sample->period;
return -EINVAL;
}
return 0;
}
static int perf_session__process_user_event(struct perf_session *session, union perf_event *event,
struct perf_tool *tool, u64 file_offset)
{
int err;
dump_event(session, event, file_offset, NULL);
/* These events are processed right away */
switch (event->header.type) {
case PERF_RECORD_HEADER_ATTR:
err = tool->attr(event, &session->evlist);
if (err == 0)
perf_session__update_sample_type(session);
return err;
case PERF_RECORD_HEADER_EVENT_TYPE:
return tool->event_type(tool, event);
case PERF_RECORD_HEADER_TRACING_DATA:
/* setup for reading amidst mmap */
lseek(session->fd, file_offset, SEEK_SET);
return tool->tracing_data(event, session);
case PERF_RECORD_HEADER_BUILD_ID:
return tool->build_id(tool, event, session);
case PERF_RECORD_FINISHED_ROUND:
return tool->finished_round(tool, event, session);
default:
return -EINVAL;
}
}
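/*
* Main event entry point: byte-swap the record if the file was written with
* the opposite endianness, account it, hand user/synthesized records off
* immediately, and for kernel records parse the sample and either queue it
* (ordered mode) or deliver it straight away.
*/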
static int perf_session__process_event(struct perf_session *session,
union perf_event *event,
struct perf_tool *tool,
u64 file_offset)
{
struct perf_sample sample;
int ret;
if (session->header.needs_swap &&
perf_event__swap_ops[event->header.type])
perf_event__swap_ops[event->header.type](event);
if (event->header.type >= PERF_RECORD_HEADER_MAX)
return -EINVAL;
hists__inc_nr_events(&session->hists, event->header.type);
if (event->header.type >= PERF_RECORD_USER_TYPE_START)
return perf_session__process_user_event(session, event, tool, file_offset);
/*
* For all kernel events we get the sample data
*/
ret = perf_session__parse_sample(session, event, &sample);
if (ret)
return ret;
/* Preprocess sample records - precheck callchains */
if (perf_session__preprocess_sample(session, event, &sample))
return 0;
if (tool->ordered_samples) {
ret = perf_session_queue_event(session, event, &sample,
file_offset);
if (ret != -ETIME)
return ret;
}
return perf_session_deliver_event(session, event, &sample, tool,
file_offset);
}
void perf_event_header__bswap(struct perf_event_header *self)
{
self->type = bswap_32(self->type);
self->misc = bswap_16(self->misc);
self->size = bswap_16(self->size);
}
struct thread *perf_session__findnew(struct perf_session *session, pid_t pid)
{
return machine__findnew_thread(&session->host_machine, pid);
}
static struct thread *perf_session__register_idle_thread(struct perf_session *self)
{
struct thread *thread = perf_session__findnew(self, 0);
if (thread == NULL || thread__set_comm(thread, "swapper")) {
pr_err("problem inserting idle task.\n");
thread = NULL;
}
return thread;
}
static void perf_session__warn_about_errors(const struct perf_session *session,
const struct perf_tool *tool)
{
if (tool->lost == perf_event__process_lost &&
session->hists.stats.nr_events[PERF_RECORD_LOST] != 0) {
ui__warning("Processed %d events and lost %d chunks!\n\n"
"Check IO/CPU overload!\n\n",
session->hists.stats.nr_events[0],
session->hists.stats.nr_events[PERF_RECORD_LOST]);
}
if (session->hists.stats.nr_unknown_events != 0) {
ui__warning("Found %u unknown events!\n\n"
"Is this an older tool processing a perf.data "
"file generated by a more recent tool?\n\n"
"If that is not the case, consider "
"reporting to linux-kernel@vger.kernel.org.\n\n",
session->hists.stats.nr_unknown_events);
}
if (session->hists.stats.nr_unknown_id != 0) {
ui__warning("%u samples with id not present in the header\n",
session->hists.stats.nr_unknown_id);
}
if (session->hists.stats.nr_invalid_chains != 0) {
ui__warning("Found invalid callchains!\n\n"
"%u out of %u events were discarded for this reason.\n\n"
"Consider reporting to linux-kernel@vger.kernel.org.\n\n",
session->hists.stats.nr_invalid_chains,
session->hists.stats.nr_events[PERF_RECORD_SAMPLE]);
}
if (session->hists.stats.nr_unprocessable_samples != 0) {
ui__warning("%u unprocessable samples recorded.\n"
"Do you have a KVM guest running and not using 'perf kvm'?\n",
session->hists.stats.nr_unprocessable_samples);
}
}
#define session_done() (*(volatile int *)(&session_done))
volatile int session_done;
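/*
* Process events arriving over a pipe (e.g. stdin): read one event header at
* a time followed by its payload, since a pipe cannot be mmapped or lseek()'d.
*/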
static int __perf_session__process_pipe_events(struct perf_session *self,
struct perf_tool *tool)
{
union perf_event event;
uint32_t size;
int skip = 0;
u64 head;
int err;
void *p;
perf_tool__fill_defaults(tool);
head = 0;
more:
err = readn(self->fd, &event, sizeof(struct perf_event_header));
if (err <= 0) {
if (err == 0)
goto done;
pr_err("failed to read event header\n");
goto out_err;
}
if (self->header.needs_swap)
perf_event_header__bswap(&event.header);
size = event.header.size;
if (size == 0)
size = 8;
p = &event;
p += sizeof(struct perf_event_header);
if (size - sizeof(struct perf_event_header)) {
err = readn(self->fd, p, size - sizeof(struct perf_event_header));
if (err <= 0) {
if (err == 0) {
pr_err("unexpected end of event stream\n");
goto done;
}
pr_err("failed to read event data\n");
goto out_err;
}
}
if ((skip = perf_session__process_event(self, &event, tool, head)) < 0) {
dump_printf("%#" PRIx64 " [%#x]: skipping unknown header type: %d\n",
head, event.header.size, event.header.type);
/*
* assume we lost track of the stream, check alignment, and
* increment a single u64 in the hope to catch on again 'soon'.
*/
if (unlikely(head & 7))
head &= ~7ULL;
size = 8;
}
head += size;
if (skip > 0)
head += skip;
if (!session_done())
goto more;
done:
err = 0;
out_err:
perf_session__warn_about_errors(self, tool);
perf_session_free_sample_buffers(self);
return err;
}
static union perf_event *
fetch_mmaped_event(struct perf_session *session,
u64 head, size_t mmap_size, char *buf)
{
union perf_event *event;
/*
* Ensure we have enough space remaining to read
* the size of the event in the headers.
*/
if (head + sizeof(event->header) > mmap_size)
return NULL;
event = (union perf_event *)(buf + head);
if (session->header.needs_swap)
perf_event_header__bswap(&event->header);
if (head + event->header.size > mmap_size)
return NULL;
return event;
}
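/*
* Process an on-disk perf.data file by walking its data section through a
* sliding mmap window of mmap_window bytes, remapping whenever the next
* event would cross the end of the current mapping.
*/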
int __perf_session__process_events(struct perf_session *session,
u64 data_offset, u64 data_size,
u64 file_size, struct perf_tool *tool)
{
u64 head, page_offset, file_offset, file_pos, progress_next;
int err, mmap_prot, mmap_flags, map_idx = 0;
size_t page_size, mmap_size;
char *buf, *mmaps[8];
union perf_event *event;
uint32_t size;
perf_tool__fill_defaults(tool);
page_size = sysconf(_SC_PAGESIZE);
page_offset = page_size * (data_offset / page_size);
file_offset = page_offset;
head = data_offset - page_offset;
if (data_offset + data_size < file_size)
file_size = data_offset + data_size;
progress_next = file_size / 16;
mmap_size = session->mmap_window;
if (mmap_size > file_size)
mmap_size = file_size;
memset(mmaps, 0, sizeof(mmaps));
mmap_prot = PROT_READ;
mmap_flags = MAP_SHARED;
if (session->header.needs_swap) {
mmap_prot |= PROT_WRITE;
mmap_flags = MAP_PRIVATE;
}
remap:
buf = mmap(NULL, mmap_size, mmap_prot, mmap_flags, session->fd,
file_offset);
if (buf == MAP_FAILED) {
pr_err("failed to mmap file\n");
err = -errno;
goto out_err;
}
mmaps[map_idx] = buf;
map_idx = (map_idx + 1) & (ARRAY_SIZE(mmaps) - 1);
file_pos = file_offset + head;
more:
event = fetch_mmaped_event(session, head, mmap_size, buf);
if (!event) {
if (mmaps[map_idx]) {
munmap(mmaps[map_idx], mmap_size);
mmaps[map_idx] = NULL;
}
page_offset = page_size * (head / page_size);
file_offset += page_offset;
head -= page_offset;
goto remap;
}
size = event->header.size;
if (size == 0 ||
perf_session__process_event(session, event, tool, file_pos) < 0) {
dump_printf("%#" PRIx64 " [%#x]: skipping unknown header type: %d\n",
file_offset + head, event->header.size,
event->header.type);
/*
* assume we lost track of the stream, check alignment, and
* increment a single u64 in the hope to catch on again 'soon'.
*/
if (unlikely(head & 7))
head &= ~7ULL;
size = 8;
}
head += size;
file_pos += size;
if (file_pos >= progress_next) {
progress_next += file_size / 16;
ui_progress__update(file_pos, file_size,
"Processing events...");
}
if (file_pos < file_size)
goto more;
err = 0;
/* do the final flush for ordered samples */
session->ordered_samples.next_flush = ULLONG_MAX;
flush_sample_queue(session, tool);
out_err:
perf_session__warn_about_errors(session, tool);
perf_session_free_sample_buffers(session);
return err;
}
int perf_session__process_events(struct perf_session *self,
struct perf_tool *tool)
{
int err;
if (perf_session__register_idle_thread(self) == NULL)
return -ENOMEM;
if (!self->fd_pipe)
err = __perf_session__process_events(self,
self->header.data_offset,
self->header.data_size,
self->size, tool);
else
err = __perf_session__process_pipe_events(self, tool);
return err;
}
bool perf_session__has_traces(struct perf_session *self, const char *msg)
{
if (!(self->sample_type & PERF_SAMPLE_RAW)) {
pr_err("No trace sample to read. Did you call 'perf %s'?\n", msg);
return false;
}
return true;
}
int maps__set_kallsyms_ref_reloc_sym(struct map **maps,
const char *symbol_name, u64 addr)
{
char *bracket;
enum map_type i;
struct ref_reloc_sym *ref;
ref = zalloc(sizeof(struct ref_reloc_sym));
if (ref == NULL)
return -ENOMEM;
ref->name = strdup(symbol_name);
if (ref->name == NULL) {
free(ref);
return -ENOMEM;
}
bracket = strchr(ref->name, ']');
if (bracket)
*bracket = '\0';
ref->addr = addr;
for (i = 0; i < MAP__NR_TYPES; ++i) {
struct kmap *kmap = map__kmap(maps[i]);
kmap->ref_reloc_sym = ref;
}
return 0;
}
size_t perf_session__fprintf_dsos(struct perf_session *self, FILE *fp)
{
return __dsos__fprintf(&self->host_machine.kernel_dsos, fp) +
__dsos__fprintf(&self->host_machine.user_dsos, fp) +
machines__fprintf_dsos(&self->machines, fp);
}
size_t perf_session__fprintf_dsos_buildid(struct perf_session *self, FILE *fp,
bool with_hits)
{
size_t ret = machine__fprintf_dsos_buildid(&self->host_machine, fp, with_hits);
return ret + machines__fprintf_dsos_buildid(&self->machines, fp, with_hits);
}
size_t perf_session__fprintf_nr_events(struct perf_session *session, FILE *fp)
{
struct perf_evsel *pos;
size_t ret = fprintf(fp, "Aggregated stats:\n");
ret += hists__fprintf_nr_events(&session->hists, fp);
list_for_each_entry(pos, &session->evlist->entries, node) {
ret += fprintf(fp, "%s stats:\n", event_name(pos));
ret += hists__fprintf_nr_events(&pos->hists, fp);
}
return ret;
}
size_t perf_session__fprintf(struct perf_session *session, FILE *fp)
{
/*
* FIXME: Here we have to actually print all the machines in this
* session, not just the host...
*/
return machine__fprintf(&session->host_machine, fp);
}
void perf_session__remove_thread(struct perf_session *session,
struct thread *th)
{
/*
* FIXME: This one makes no sense, we need to remove the thread from
* the machine it belongs to, perf_session can have many machines, so
* doing it always on ->host_machine is wrong. Fix when auditing all
* the 'perf kvm' code.
*/
machine__remove_thread(&session->host_machine, th);
}
struct perf_evsel *perf_session__find_first_evtype(struct perf_session *session,
unsigned int type)
{
struct perf_evsel *pos;
list_for_each_entry(pos, &session->evlist->entries, node) {
if (pos->attr.type == type)
return pos;
}
return NULL;
}
void perf_event__print_ip(union perf_event *event, struct perf_sample *sample,
struct machine *machine, struct perf_evsel *evsel,
int print_sym, int print_dso, int print_symoffset)
{
struct addr_location al;
struct callchain_cursor *cursor = &evsel->hists.callchain_cursor;
struct callchain_cursor_node *node;
if (perf_event__preprocess_sample(event, machine, &al, sample,
NULL) < 0) {
error("problem processing %d event, skipping it.\n",
event->header.type);
return;
}
if (symbol_conf.use_callchain && sample->callchain) {
if (machine__resolve_callchain(machine, evsel, al.thread,
sample->callchain, NULL) != 0) {
if (verbose)
error("Failed to resolve callchain. Skipping\n");
return;
}
callchain_cursor_commit(cursor);
while (1) {
node = callchain_cursor_current(cursor);
if (!node)
break;
printf("\t%16" PRIx64, node->ip);
if (print_sym) {
printf(" ");
symbol__fprintf_symname(node->sym, stdout);
}
if (print_dso) {
printf(" (");
map__fprintf_dsoname(al.map, stdout);
printf(")");
}
printf("\n");
callchain_cursor_advance(cursor);
}
} else {
printf("%16" PRIx64, sample->ip);
if (print_sym) {
printf(" ");
if (print_symoffset)
symbol__fprintf_symname_offs(al.sym, &al,
stdout);
else
symbol__fprintf_symname(al.sym, stdout);
}
if (print_dso) {
printf(" (");
map__fprintf_dsoname(al.map, stdout);
printf(")");
}
}
}
int perf_session__cpu_bitmap(struct perf_session *session,
const char *cpu_list, unsigned long *cpu_bitmap)
{
int i;
struct cpu_map *map;
for (i = 0; i < PERF_TYPE_MAX; ++i) {
struct perf_evsel *evsel;
evsel = perf_session__find_first_evtype(session, i);
if (!evsel)
continue;
if (!(evsel->attr.sample_type & PERF_SAMPLE_CPU)) {
pr_err("File does not contain CPU events. "
"Remove -c option to proceed.\n");
return -1;
}
}
map = cpu_map__new(cpu_list);
if (map == NULL) {
pr_err("Invalid cpu_list\n");
return -1;
}
for (i = 0; i < map->nr; i++) {
int cpu = map->map[i];
if (cpu >= MAX_NR_CPUS) {
pr_err("Requested CPU %d too large. "
"Consider raising MAX_NR_CPUS\n", cpu);
return -1;
}
set_bit(cpu, cpu_bitmap);
}
return 0;
}
void perf_session__fprintf_info(struct perf_session *session, FILE *fp,
bool full)
{
struct stat st;
int ret;
if (session == NULL || fp == NULL)
return;
ret = fstat(session->fd, &st);
if (ret == -1)
return;
fprintf(fp, "# ========\n");
fprintf(fp, "# captured on: %s", ctime(&st.st_ctime));
perf_header__fprintf_info(session, fp, full);
fprintf(fp, "# ========\n#\n");
}
| gpl-2.0 |
WhiteNeo-/NeoKernel | drivers/dma/dmatest.c | 5037 | 18033 | /*
* DMA Engine test module
*
* Copyright (C) 2007 Atmel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/freezer.h>
#include <linux/init.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/wait.h>
static unsigned int test_buf_size = 16384;
module_param(test_buf_size, uint, S_IRUGO);
MODULE_PARM_DESC(test_buf_size, "Size of the memcpy test buffer");
static char test_channel[20];
module_param_string(channel, test_channel, sizeof(test_channel), S_IRUGO);
MODULE_PARM_DESC(channel, "Bus ID of the channel to test (default: any)");
static char test_device[20];
module_param_string(device, test_device, sizeof(test_device), S_IRUGO);
MODULE_PARM_DESC(device, "Bus ID of the DMA Engine to test (default: any)");
static unsigned int threads_per_chan = 1;
module_param(threads_per_chan, uint, S_IRUGO);
MODULE_PARM_DESC(threads_per_chan,
"Number of threads to start per channel (default: 1)");
static unsigned int max_channels;
module_param(max_channels, uint, S_IRUGO);
MODULE_PARM_DESC(max_channels,
"Maximum number of channels to use (default: all)");
static unsigned int iterations;
module_param(iterations, uint, S_IRUGO);
MODULE_PARM_DESC(iterations,
"Iterations before stopping test (default: infinite)");
static unsigned int xor_sources = 3;
module_param(xor_sources, uint, S_IRUGO);
MODULE_PARM_DESC(xor_sources,
"Number of xor source buffers (default: 3)");
static unsigned int pq_sources = 3;
module_param(pq_sources, uint, S_IRUGO);
MODULE_PARM_DESC(pq_sources,
"Number of p+q source buffers (default: 3)");
static int timeout = 3000;
module_param(timeout, uint, S_IRUGO);
MODULE_PARM_DESC(timeout, "Transfer Timeout in msec (default: 3000), "
"Pass -1 for infinite timeout");
/*
* Initialization patterns. All bytes in the source buffer have bit 7
* set, all bytes in the destination buffer have bit 7 cleared.
*
* Bit 6 is set for all bytes which are to be copied by the DMA
* engine. Bit 5 is set for all bytes which are to be overwritten by
* the DMA engine.
*
* The remaining bits are the inverse of a counter which increments by
* one for each byte address.
*/
#define PATTERN_SRC 0x80
#define PATTERN_DST 0x00
#define PATTERN_COPY 0x40
#define PATTERN_OVERWRITE 0x20
#define PATTERN_COUNT_MASK 0x1f
struct dmatest_thread {
struct list_head node;
struct task_struct *task;
struct dma_chan *chan;
u8 **srcs;
u8 **dsts;
enum dma_transaction_type type;
};
struct dmatest_chan {
struct list_head node;
struct dma_chan *chan;
struct list_head threads;
};
/*
* These are protected by dma_list_mutex since they're only used by
* the DMA filter function callback
*/
static LIST_HEAD(dmatest_channels);
static unsigned int nr_channels;
static bool dmatest_match_channel(struct dma_chan *chan)
{
if (test_channel[0] == '\0')
return true;
return strcmp(dma_chan_name(chan), test_channel) == 0;
}
static bool dmatest_match_device(struct dma_device *device)
{
if (test_device[0] == '\0')
return true;
return strcmp(dev_name(device->dev), test_device) == 0;
}
static unsigned long dmatest_random(void)
{
unsigned long buf;
get_random_bytes(&buf, sizeof(buf));
return buf;
}
static void dmatest_init_srcs(u8 **bufs, unsigned int start, unsigned int len)
{
unsigned int i;
u8 *buf;
for (; (buf = *bufs); bufs++) {
for (i = 0; i < start; i++)
buf[i] = PATTERN_SRC | (~i & PATTERN_COUNT_MASK);
for ( ; i < start + len; i++)
buf[i] = PATTERN_SRC | PATTERN_COPY
| (~i & PATTERN_COUNT_MASK);
for ( ; i < test_buf_size; i++)
buf[i] = PATTERN_SRC | (~i & PATTERN_COUNT_MASK);
buf++;
}
}
static void dmatest_init_dsts(u8 **bufs, unsigned int start, unsigned int len)
{
unsigned int i;
u8 *buf;
for (; (buf = *bufs); bufs++) {
for (i = 0; i < start; i++)
buf[i] = PATTERN_DST | (~i & PATTERN_COUNT_MASK);
for ( ; i < start + len; i++)
buf[i] = PATTERN_DST | PATTERN_OVERWRITE
| (~i & PATTERN_COUNT_MASK);
for ( ; i < test_buf_size; i++)
buf[i] = PATTERN_DST | (~i & PATTERN_COUNT_MASK);
}
}
static void dmatest_mismatch(u8 actual, u8 pattern, unsigned int index,
unsigned int counter, bool is_srcbuf)
{
u8 diff = actual ^ pattern;
u8 expected = pattern | (~counter & PATTERN_COUNT_MASK);
const char *thread_name = current->comm;
if (is_srcbuf)
pr_warning("%s: srcbuf[0x%x] overwritten!"
" Expected %02x, got %02x\n",
thread_name, index, expected, actual);
else if ((pattern & PATTERN_COPY)
&& (diff & (PATTERN_COPY | PATTERN_OVERWRITE)))
pr_warning("%s: dstbuf[0x%x] not copied!"
" Expected %02x, got %02x\n",
thread_name, index, expected, actual);
else if (diff & PATTERN_SRC)
pr_warning("%s: dstbuf[0x%x] was copied!"
" Expected %02x, got %02x\n",
thread_name, index, expected, actual);
else
pr_warning("%s: dstbuf[0x%x] mismatch!"
" Expected %02x, got %02x\n",
thread_name, index, expected, actual);
}
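/*
* Compare the [start, end) region of each buffer against the expected
* pattern; the first 32 mismatches are reported in detail, and the total
* error count is returned.
*/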
static unsigned int dmatest_verify(u8 **bufs, unsigned int start,
unsigned int end, unsigned int counter, u8 pattern,
bool is_srcbuf)
{
unsigned int i;
unsigned int error_count = 0;
u8 actual;
u8 expected;
u8 *buf;
unsigned int counter_orig = counter;
for (; (buf = *bufs); bufs++) {
counter = counter_orig;
for (i = start; i < end; i++) {
actual = buf[i];
expected = pattern | (~counter & PATTERN_COUNT_MASK);
if (actual != expected) {
if (error_count < 32)
dmatest_mismatch(actual, pattern, i,
counter, is_srcbuf);
error_count++;
}
counter++;
}
}
if (error_count > 32)
pr_warning("%s: %u errors suppressed\n",
current->comm, error_count - 32);
return error_count;
}
/* poor man's completion - we want to use wait_event_freezable() on it */
struct dmatest_done {
bool done;
wait_queue_head_t *wait;
};
static void dmatest_callback(void *arg)
{
struct dmatest_done *done = arg;
done->done = true;
wake_up_all(done->wait);
}
/*
* This function repeatedly tests DMA transfers of various lengths and
* offsets for a given operation type until it is told to exit by
* kthread_stop(). There may be multiple threads running this function
* in parallel for a single channel, and there may be multiple channels
* being tested in parallel.
*
* Before each test, the source and destination buffers are initialized
* with a known pattern. This pattern is different depending on
* whether it's in an area which is supposed to be copied or
* overwritten, and different in the source and destination buffers.
* So if the DMA engine doesn't copy exactly what we tell it to copy,
* we'll notice.
*/
static int dmatest_func(void *data)
{
DECLARE_WAIT_QUEUE_HEAD_ONSTACK(done_wait);
struct dmatest_thread *thread = data;
struct dmatest_done done = { .wait = &done_wait };
struct dma_chan *chan;
const char *thread_name;
unsigned int src_off, dst_off, len;
unsigned int error_count;
unsigned int failed_tests = 0;
unsigned int total_tests = 0;
dma_cookie_t cookie;
enum dma_status status;
enum dma_ctrl_flags flags;
u8 pq_coefs[pq_sources + 1];
int ret;
int src_cnt;
int dst_cnt;
int i;
thread_name = current->comm;
set_freezable();
ret = -ENOMEM;
smp_rmb();
chan = thread->chan;
if (thread->type == DMA_MEMCPY)
src_cnt = dst_cnt = 1;
else if (thread->type == DMA_XOR) {
src_cnt = xor_sources | 1; /* force odd to ensure dst = src */
dst_cnt = 1;
} else if (thread->type == DMA_PQ) {
src_cnt = pq_sources | 1; /* force odd to ensure dst = src */
dst_cnt = 2;
for (i = 0; i < src_cnt; i++)
pq_coefs[i] = 1;
} else
goto err_srcs;
thread->srcs = kcalloc(src_cnt+1, sizeof(u8 *), GFP_KERNEL);
if (!thread->srcs)
goto err_srcs;
for (i = 0; i < src_cnt; i++) {
thread->srcs[i] = kmalloc(test_buf_size, GFP_KERNEL);
if (!thread->srcs[i])
goto err_srcbuf;
}
thread->srcs[i] = NULL;
thread->dsts = kcalloc(dst_cnt+1, sizeof(u8 *), GFP_KERNEL);
if (!thread->dsts)
goto err_dsts;
for (i = 0; i < dst_cnt; i++) {
thread->dsts[i] = kmalloc(test_buf_size, GFP_KERNEL);
if (!thread->dsts[i])
goto err_dstbuf;
}
thread->dsts[i] = NULL;
set_user_nice(current, 10);
/*
* src buffers are freed by the DMAEngine code with dma_unmap_single();
* dst buffers are freed by ourselves below.
*/
flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT
| DMA_COMPL_SKIP_DEST_UNMAP | DMA_COMPL_SRC_UNMAP_SINGLE;
while (!kthread_should_stop()
&& !(iterations && total_tests >= iterations)) {
struct dma_device *dev = chan->device;
struct dma_async_tx_descriptor *tx = NULL;
dma_addr_t dma_srcs[src_cnt];
dma_addr_t dma_dsts[dst_cnt];
u8 align = 0;
total_tests++;
/* honor alignment restrictions */
if (thread->type == DMA_MEMCPY)
align = dev->copy_align;
else if (thread->type == DMA_XOR)
align = dev->xor_align;
else if (thread->type == DMA_PQ)
align = dev->pq_align;
if (1 << align > test_buf_size) {
pr_err("%u-byte buffer too small for %d-byte alignment\n",
test_buf_size, 1 << align);
break;
}
len = dmatest_random() % test_buf_size + 1;
len = (len >> align) << align;
if (!len)
len = 1 << align;
src_off = dmatest_random() % (test_buf_size - len + 1);
dst_off = dmatest_random() % (test_buf_size - len + 1);
src_off = (src_off >> align) << align;
dst_off = (dst_off >> align) << align;
dmatest_init_srcs(thread->srcs, src_off, len);
dmatest_init_dsts(thread->dsts, dst_off, len);
for (i = 0; i < src_cnt; i++) {
u8 *buf = thread->srcs[i] + src_off;
dma_srcs[i] = dma_map_single(dev->dev, buf, len,
DMA_TO_DEVICE);
}
/* map with DMA_BIDIRECTIONAL to force writeback/invalidate */
for (i = 0; i < dst_cnt; i++) {
dma_dsts[i] = dma_map_single(dev->dev, thread->dsts[i],
test_buf_size,
DMA_BIDIRECTIONAL);
}
if (thread->type == DMA_MEMCPY)
tx = dev->device_prep_dma_memcpy(chan,
dma_dsts[0] + dst_off,
dma_srcs[0], len,
flags);
else if (thread->type == DMA_XOR)
tx = dev->device_prep_dma_xor(chan,
dma_dsts[0] + dst_off,
dma_srcs, src_cnt,
len, flags);
else if (thread->type == DMA_PQ) {
dma_addr_t dma_pq[dst_cnt];
for (i = 0; i < dst_cnt; i++)
dma_pq[i] = dma_dsts[i] + dst_off;
tx = dev->device_prep_dma_pq(chan, dma_pq, dma_srcs,
src_cnt, pq_coefs,
len, flags);
}
if (!tx) {
for (i = 0; i < src_cnt; i++)
dma_unmap_single(dev->dev, dma_srcs[i], len,
DMA_TO_DEVICE);
for (i = 0; i < dst_cnt; i++)
dma_unmap_single(dev->dev, dma_dsts[i],
test_buf_size,
DMA_BIDIRECTIONAL);
pr_warning("%s: #%u: prep error with src_off=0x%x "
"dst_off=0x%x len=0x%x\n",
thread_name, total_tests - 1,
src_off, dst_off, len);
msleep(100);
failed_tests++;
continue;
}
done.done = false;
tx->callback = dmatest_callback;
tx->callback_param = &done;
cookie = tx->tx_submit(tx);
if (dma_submit_error(cookie)) {
pr_warning("%s: #%u: submit error %d with src_off=0x%x "
"dst_off=0x%x len=0x%x\n",
thread_name, total_tests - 1, cookie,
src_off, dst_off, len);
msleep(100);
failed_tests++;
continue;
}
dma_async_issue_pending(chan);
wait_event_freezable_timeout(done_wait, done.done,
msecs_to_jiffies(timeout));
status = dma_async_is_tx_complete(chan, cookie, NULL, NULL);
if (!done.done) {
/*
* We're leaving the timed-out dma operation with a
* dangling pointer to done_wait. To make this
* correct, we'll need to allocate wait_done for
* each test iteration and perform "who's gonna
* free it this time?" dancing. For now, just
* leave it dangling.
*/
pr_warning("%s: #%u: test timed out\n",
thread_name, total_tests - 1);
failed_tests++;
continue;
} else if (status != DMA_SUCCESS) {
pr_warning("%s: #%u: got completion callback,"
" but status is \'%s\'\n",
thread_name, total_tests - 1,
status == DMA_ERROR ? "error" : "in progress");
failed_tests++;
continue;
}
/* Unmap by myself (see DMA_COMPL_SKIP_DEST_UNMAP above) */
for (i = 0; i < dst_cnt; i++)
dma_unmap_single(dev->dev, dma_dsts[i], test_buf_size,
DMA_BIDIRECTIONAL);
error_count = 0;
pr_debug("%s: verifying source buffer...\n", thread_name);
error_count += dmatest_verify(thread->srcs, 0, src_off,
0, PATTERN_SRC, true);
error_count += dmatest_verify(thread->srcs, src_off,
src_off + len, src_off,
PATTERN_SRC | PATTERN_COPY, true);
error_count += dmatest_verify(thread->srcs, src_off + len,
test_buf_size, src_off + len,
PATTERN_SRC, true);
pr_debug("%s: verifying dest buffer...\n",
thread->task->comm);
error_count += dmatest_verify(thread->dsts, 0, dst_off,
0, PATTERN_DST, false);
error_count += dmatest_verify(thread->dsts, dst_off,
dst_off + len, src_off,
PATTERN_SRC | PATTERN_COPY, false);
error_count += dmatest_verify(thread->dsts, dst_off + len,
test_buf_size, dst_off + len,
PATTERN_DST, false);
if (error_count) {
pr_warning("%s: #%u: %u errors with "
"src_off=0x%x dst_off=0x%x len=0x%x\n",
thread_name, total_tests - 1, error_count,
src_off, dst_off, len);
failed_tests++;
} else {
pr_debug("%s: #%u: No errors with "
"src_off=0x%x dst_off=0x%x len=0x%x\n",
thread_name, total_tests - 1,
src_off, dst_off, len);
}
}
ret = 0;
for (i = 0; thread->dsts[i]; i++)
kfree(thread->dsts[i]);
err_dstbuf:
kfree(thread->dsts);
err_dsts:
for (i = 0; thread->srcs[i]; i++)
kfree(thread->srcs[i]);
err_srcbuf:
kfree(thread->srcs);
err_srcs:
pr_notice("%s: terminating after %u tests, %u failures (status %d)\n",
thread_name, total_tests, failed_tests, ret);
/* terminate all transfers on specified channels */
chan->device->device_control(chan, DMA_TERMINATE_ALL, 0);
if (iterations > 0)
while (!kthread_should_stop()) {
DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wait_dmatest_exit);
interruptible_sleep_on(&wait_dmatest_exit);
}
return ret;
}
static void dmatest_cleanup_channel(struct dmatest_chan *dtc)
{
struct dmatest_thread *thread;
struct dmatest_thread *_thread;
int ret;
list_for_each_entry_safe(thread, _thread, &dtc->threads, node) {
ret = kthread_stop(thread->task);
pr_debug("dmatest: thread %s exited with status %d\n",
thread->task->comm, ret);
list_del(&thread->node);
kfree(thread);
}
/* terminate all transfers on specified channels */
dtc->chan->device->device_control(dtc->chan, DMA_TERMINATE_ALL, 0);
kfree(dtc);
}
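/*
* Start threads_per_chan test threads of the given transaction type on the
* channel; returns the number of threads actually started.
*/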
static int dmatest_add_threads(struct dmatest_chan *dtc, enum dma_transaction_type type)
{
struct dmatest_thread *thread;
struct dma_chan *chan = dtc->chan;
char *op;
unsigned int i;
if (type == DMA_MEMCPY)
op = "copy";
else if (type == DMA_XOR)
op = "xor";
else if (type == DMA_PQ)
op = "pq";
else
return -EINVAL;
for (i = 0; i < threads_per_chan; i++) {
thread = kzalloc(sizeof(struct dmatest_thread), GFP_KERNEL);
if (!thread) {
pr_warning("dmatest: No memory for %s-%s%u\n",
dma_chan_name(chan), op, i);
break;
}
thread->chan = dtc->chan;
thread->type = type;
smp_wmb();
thread->task = kthread_run(dmatest_func, thread, "%s-%s%u",
dma_chan_name(chan), op, i);
if (IS_ERR(thread->task)) {
pr_warning("dmatest: Failed to run thread %s-%s%u\n",
dma_chan_name(chan), op, i);
kfree(thread);
break;
}
/* srcbuf and dstbuf are allocated by the thread itself */
list_add_tail(&thread->node, &dtc->threads);
}
return i;
}
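/*
* Register a channel with the test and spawn threads for every capability
* (memcpy, xor, pq) the backing DMA device advertises.
*/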
static int dmatest_add_channel(struct dma_chan *chan)
{
struct dmatest_chan *dtc;
struct dma_device *dma_dev = chan->device;
unsigned int thread_count = 0;
int cnt;
dtc = kmalloc(sizeof(struct dmatest_chan), GFP_KERNEL);
if (!dtc) {
pr_warning("dmatest: No memory for %s\n", dma_chan_name(chan));
return -ENOMEM;
}
dtc->chan = chan;
INIT_LIST_HEAD(&dtc->threads);
if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask)) {
cnt = dmatest_add_threads(dtc, DMA_MEMCPY);
thread_count += cnt > 0 ? cnt : 0;
}
if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) {
cnt = dmatest_add_threads(dtc, DMA_XOR);
thread_count += cnt > 0 ? cnt : 0;
}
if (dma_has_cap(DMA_PQ, dma_dev->cap_mask)) {
cnt = dmatest_add_threads(dtc, DMA_PQ);
thread_count += cnt > 0 ? cnt : 0;
}
pr_info("dmatest: Started %u threads using %s\n",
thread_count, dma_chan_name(chan));
list_add_tail(&dtc->node, &dmatest_channels);
nr_channels++;
return 0;
}
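/*
* Channel filter passed to dma_request_channel(): accept only channels and
* devices matching the module parameters.
*/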
static bool filter(struct dma_chan *chan, void *param)
{
if (!dmatest_match_channel(chan) || !dmatest_match_device(chan->device))
return false;
else
return true;
}
static int __init dmatest_init(void)
{
dma_cap_mask_t mask;
struct dma_chan *chan;
int err = 0;
dma_cap_zero(mask);
dma_cap_set(DMA_MEMCPY, mask);
for (;;) {
chan = dma_request_channel(mask, filter, NULL);
if (chan) {
err = dmatest_add_channel(chan);
if (err) {
dma_release_channel(chan);
break; /* add_channel failed, punt */
}
} else
break; /* no more channels available */
if (max_channels && nr_channels >= max_channels)
break; /* we have all we need */
}
return err;
}
/* when compiled in, wait for drivers to load first */
late_initcall(dmatest_init);
static void __exit dmatest_exit(void)
{
struct dmatest_chan *dtc, *_dtc;
struct dma_chan *chan;
list_for_each_entry_safe(dtc, _dtc, &dmatest_channels, node) {
list_del(&dtc->node);
chan = dtc->chan;
dmatest_cleanup_channel(dtc);
pr_debug("dmatest: dropped channel %s\n",
dma_chan_name(chan));
dma_release_channel(chan);
}
}
module_exit(dmatest_exit);
MODULE_AUTHOR("Haavard Skinnemoen (Atmel)");
MODULE_LICENSE("GPL v2");
| gpl-2.0 |
DevriesL/SM-G9208_ImageBreaker | arch/arm/mach-rpc/dma.c | 11693 | 8767 | /*
* linux/arch/arm/mach-rpc/dma.c
*
* Copyright (C) 1998 Russell King
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* DMA functions specific to RiscPC architecture
*/
#include <linux/mman.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <asm/page.h>
#include <asm/dma.h>
#include <asm/fiq.h>
#include <asm/irq.h>
#include <mach/hardware.h>
#include <asm/uaccess.h>
#include <asm/mach/dma.h>
#include <asm/hardware/iomd.h>
struct iomd_dma {
struct dma_struct dma;
unsigned int state;
unsigned long base; /* Controller base address */
int irq; /* Controller IRQ */
struct scatterlist cur_sg; /* Current controller buffer */
dma_addr_t dma_addr;
unsigned int dma_len;
};
#if 0
typedef enum {
dma_size_8 = 1,
dma_size_16 = 2,
dma_size_32 = 4,
dma_size_128 = 16
} dma_size_t;
#endif
#define TRANSFER_SIZE 2
#define CURA (0)
#define ENDA (IOMD_IO0ENDA - IOMD_IO0CURA)
#define CURB (IOMD_IO0CURB - IOMD_IO0CURA)
#define ENDB (IOMD_IO0ENDB - IOMD_IO0CURA)
#define CR (IOMD_IO0CR - IOMD_IO0CURA)
#define ST (IOMD_IO0ST - IOMD_IO0CURA)
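/*
* Produce the next chunk of the current scatterlist for the controller,
* never letting a programmed transfer cross a page boundary, and fold the
* DMA_END_L/DMA_END_S completion flags into the length word as the buffer
* and the overall transfer run out.
*/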
static void iomd_get_next_sg(struct scatterlist *sg, struct iomd_dma *idma)
{
unsigned long end, offset, flags = 0;
if (idma->dma.sg) {
sg->dma_address = idma->dma_addr;
offset = sg->dma_address & ~PAGE_MASK;
end = offset + idma->dma_len;
if (end > PAGE_SIZE)
end = PAGE_SIZE;
if (offset + TRANSFER_SIZE >= end)
flags |= DMA_END_L;
sg->length = end - TRANSFER_SIZE;
idma->dma_len -= end - offset;
idma->dma_addr += end - offset;
if (idma->dma_len == 0) {
if (idma->dma.sgcount > 1) {
idma->dma.sg = sg_next(idma->dma.sg);
idma->dma_addr = idma->dma.sg->dma_address;
idma->dma_len = idma->dma.sg->length;
idma->dma.sgcount--;
} else {
idma->dma.sg = NULL;
flags |= DMA_END_S;
}
}
} else {
flags = DMA_END_S | DMA_END_L;
sg->dma_address = 0;
sg->length = 0;
}
sg->length |= flags;
}
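/*
* DMA interrupt handler: the IOMD has two buffer register sets (A and B);
* whichever one just completed is reloaded with the next scatter-gather
* chunk, ping-ponging until the list is exhausted.
*/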
static irqreturn_t iomd_dma_handle(int irq, void *dev_id)
{
struct iomd_dma *idma = dev_id;
unsigned long base = idma->base;
do {
unsigned int status;
status = iomd_readb(base + ST);
if (!(status & DMA_ST_INT))
return IRQ_HANDLED;
if ((idma->state ^ status) & DMA_ST_AB)
iomd_get_next_sg(&idma->cur_sg, idma);
switch (status & (DMA_ST_OFL | DMA_ST_AB)) {
case DMA_ST_OFL: /* OIA */
case DMA_ST_AB: /* .IB */
iomd_writel(idma->cur_sg.dma_address, base + CURA);
iomd_writel(idma->cur_sg.length, base + ENDA);
idma->state = DMA_ST_AB;
break;
case DMA_ST_OFL | DMA_ST_AB: /* OIB */
case 0: /* .IA */
iomd_writel(idma->cur_sg.dma_address, base + CURB);
iomd_writel(idma->cur_sg.length, base + ENDB);
idma->state = 0;
break;
}
if (status & DMA_ST_OFL &&
idma->cur_sg.length == (DMA_END_S|DMA_END_L))
break;
} while (1);
idma->state = ~DMA_ST_AB;
disable_irq(irq);
return IRQ_HANDLED;
}
static int iomd_request_dma(unsigned int chan, dma_t *dma)
{
struct iomd_dma *idma = container_of(dma, struct iomd_dma, dma);
return request_irq(idma->irq, iomd_dma_handle,
IRQF_DISABLED, idma->dma.device_id, idma);
}
static void iomd_free_dma(unsigned int chan, dma_t *dma)
{
struct iomd_dma *idma = container_of(dma, struct iomd_dma, dma);
free_irq(idma->irq, idma);
}
static void iomd_enable_dma(unsigned int chan, dma_t *dma)
{
struct iomd_dma *idma = container_of(dma, struct iomd_dma, dma);
unsigned long dma_base = idma->base;
unsigned int ctrl = TRANSFER_SIZE | DMA_CR_E;
if (idma->dma.invalid) {
idma->dma.invalid = 0;
/*
* Cope with ISA-style drivers which expect cache
* coherence.
*/
if (!idma->dma.sg) {
idma->dma.sg = &idma->dma.buf;
idma->dma.sgcount = 1;
idma->dma.buf.length = idma->dma.count;
idma->dma.buf.dma_address = dma_map_single(NULL,
idma->dma.addr, idma->dma.count,
idma->dma.dma_mode == DMA_MODE_READ ?
DMA_FROM_DEVICE : DMA_TO_DEVICE);
}
iomd_writeb(DMA_CR_C, dma_base + CR);
idma->state = DMA_ST_AB;
}
if (idma->dma.dma_mode == DMA_MODE_READ)
ctrl |= DMA_CR_D;
iomd_writeb(ctrl, dma_base + CR);
enable_irq(idma->irq);
}
static void iomd_disable_dma(unsigned int chan, dma_t *dma)
{
struct iomd_dma *idma = container_of(dma, struct iomd_dma, dma);
unsigned long dma_base = idma->base;
unsigned long flags;
local_irq_save(flags);
if (idma->state != ~DMA_ST_AB)
disable_irq(idma->irq);
iomd_writeb(0, dma_base + CR);
local_irq_restore(flags);
}
static int iomd_set_dma_speed(unsigned int chan, dma_t *dma, int cycle)
{
int tcr, speed;
if (cycle < 188)
speed = 3;
else if (cycle <= 250)
speed = 2;
else if (cycle < 438)
speed = 1;
else
speed = 0;
tcr = iomd_readb(IOMD_DMATCR);
speed &= 3;
switch (chan) {
case DMA_0:
tcr = (tcr & ~0x03) | speed;
break;
case DMA_1:
tcr = (tcr & ~0x0c) | (speed << 2);
break;
case DMA_2:
tcr = (tcr & ~0x30) | (speed << 4);
break;
case DMA_3:
tcr = (tcr & ~0xc0) | (speed << 6);
break;
default:
break;
}
iomd_writeb(tcr, IOMD_DMATCR);
return speed;
}
static struct dma_ops iomd_dma_ops = {
.type = "IOMD",
.request = iomd_request_dma,
.free = iomd_free_dma,
.enable = iomd_enable_dma,
.disable = iomd_disable_dma,
.setspeed = iomd_set_dma_speed,
};
static struct fiq_handler fh = {
.name = "floppydma"
};
struct floppy_dma {
struct dma_struct dma;
unsigned int fiq;
};
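/*
 * Floppy data is not moved by the IOMD DMA engine at all; a small
 * hand-written FIQ handler does the transfer instead.  Its working
 * registers (byte count in r9, buffer pointer in r10, FLOPPYDMA_BASE in fp)
 * are preloaded below via set_fiq_regs() before the FIQ is enabled.
 */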
static void floppy_enable_dma(unsigned int chan, dma_t *dma)
{
struct floppy_dma *fdma = container_of(dma, struct floppy_dma, dma);
void *fiqhandler_start;
unsigned int fiqhandler_length;
struct pt_regs regs;
if (fdma->dma.sg)
BUG();
if (fdma->dma.dma_mode == DMA_MODE_READ) {
extern unsigned char floppy_fiqin_start, floppy_fiqin_end;
fiqhandler_start = &floppy_fiqin_start;
fiqhandler_length = &floppy_fiqin_end - &floppy_fiqin_start;
} else {
extern unsigned char floppy_fiqout_start, floppy_fiqout_end;
fiqhandler_start = &floppy_fiqout_start;
fiqhandler_length = &floppy_fiqout_end - &floppy_fiqout_start;
}
regs.ARM_r9 = fdma->dma.count;
regs.ARM_r10 = (unsigned long)fdma->dma.addr;
regs.ARM_fp = (unsigned long)FLOPPYDMA_BASE;
if (claim_fiq(&fh)) {
printk("floppydma: couldn't claim FIQ.\n");
return;
}
set_fiq_handler(fiqhandler_start, fiqhandler_length);
	set_fiq_regs(&regs);
enable_fiq(fdma->fiq);
}
static void floppy_disable_dma(unsigned int chan, dma_t *dma)
{
struct floppy_dma *fdma = container_of(dma, struct floppy_dma, dma);
disable_fiq(fdma->fiq);
release_fiq(&fh);
}
static int floppy_get_residue(unsigned int chan, dma_t *dma)
{
struct pt_regs regs;
	get_fiq_regs(&regs);
return regs.ARM_r9;
}
static struct dma_ops floppy_dma_ops = {
.type = "FIQDMA",
.enable = floppy_enable_dma,
.disable = floppy_disable_dma,
.residue = floppy_get_residue,
};
/*
* This is virtual DMA - we don't need anything here.
*/
static void sound_enable_disable_dma(unsigned int chan, dma_t *dma)
{
}
static struct dma_ops sound_dma_ops = {
.type = "VIRTUAL",
.enable = sound_enable_disable_dma,
.disable = sound_enable_disable_dma,
};
static struct iomd_dma iomd_dma[6];
static struct floppy_dma floppy_dma = {
.dma = {
.d_ops = &floppy_dma_ops,
},
.fiq = FIQ_FLOPPYDATA,
};
static dma_t sound_dma = {
.d_ops = &sound_dma_ops,
};
static int __init rpc_dma_init(void)
{
unsigned int i;
int ret;
iomd_writeb(0, IOMD_IO0CR);
iomd_writeb(0, IOMD_IO1CR);
iomd_writeb(0, IOMD_IO2CR);
iomd_writeb(0, IOMD_IO3CR);
iomd_writeb(0xa0, IOMD_DMATCR);
/*
* Setup DMA channels 2,3 to be for podules
* and channels 0,1 for internal devices
*/
iomd_writeb(DMA_EXT_IO3|DMA_EXT_IO2, IOMD_DMAEXT);
iomd_dma[DMA_0].base = IOMD_IO0CURA;
iomd_dma[DMA_0].irq = IRQ_DMA0;
iomd_dma[DMA_1].base = IOMD_IO1CURA;
iomd_dma[DMA_1].irq = IRQ_DMA1;
iomd_dma[DMA_2].base = IOMD_IO2CURA;
iomd_dma[DMA_2].irq = IRQ_DMA2;
iomd_dma[DMA_3].base = IOMD_IO3CURA;
iomd_dma[DMA_3].irq = IRQ_DMA3;
iomd_dma[DMA_S0].base = IOMD_SD0CURA;
iomd_dma[DMA_S0].irq = IRQ_DMAS0;
iomd_dma[DMA_S1].base = IOMD_SD1CURA;
iomd_dma[DMA_S1].irq = IRQ_DMAS1;
for (i = DMA_0; i <= DMA_S1; i++) {
iomd_dma[i].dma.d_ops = &iomd_dma_ops;
ret = isa_dma_add(i, &iomd_dma[i].dma);
if (ret)
printk("IOMDDMA%u: unable to register: %d\n", i, ret);
}
ret = isa_dma_add(DMA_VIRTUAL_FLOPPY, &floppy_dma.dma);
if (ret)
printk("IOMDFLOPPY: unable to register: %d\n", ret);
ret = isa_dma_add(DMA_VIRTUAL_SOUND, &sound_dma);
if (ret)
printk("IOMDSOUND: unable to register: %d\n", ret);
return 0;
}
core_initcall(rpc_dma_init);
| gpl-2.0 |
illyah/samsung_kernel_volans | drivers/usb/gadget/dummy_hcd.c | 174 | 50634 | /*
* dummy_hcd.c -- Dummy/Loopback USB host and device emulator driver.
*
* Maintainer: Alan Stern <stern@rowland.harvard.edu>
*
* Copyright (C) 2003 David Brownell
* Copyright (C) 2003-2005 Alan Stern
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
/*
* This exposes a device side "USB gadget" API, driven by requests to a
* Linux-USB host controller driver. USB traffic is simulated; there's
* no need for USB hardware. Use this with two other drivers:
*
* - Gadget driver, responding to requests (slave);
* - Host-side device driver, as already familiar in Linux.
*
* Having this all in one kernel can help some stages of development,
* bypassing some hardware (and driver) issues. UML could help too.
*/
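/*
 * One convenient way to use this (a sketch, not a requirement): load this
 * module together with a gadget driver such as g_zero, then exercise the
 * resulting emulated device from the host side, for example with the
 * usbtest driver.  Everything runs inside one kernel, so no cabling or
 * USB hardware is involved.
 */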
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/timer.h>
#include <linux/list.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/usb.h>
#include <linux/usb/gadget.h>
#include <asm/byteorder.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/system.h>
#include <asm/unaligned.h>
#include "../core/hcd.h"
#define DRIVER_DESC "USB Host+Gadget Emulator"
#define DRIVER_VERSION "02 May 2005"
#define POWER_BUDGET 500 /* in mA; use 8 for low-power port testing */
static const char driver_name [] = "dummy_hcd";
static const char driver_desc [] = "USB Host+Gadget Emulator";
static const char gadget_name [] = "dummy_udc";
MODULE_DESCRIPTION (DRIVER_DESC);
MODULE_AUTHOR ("David Brownell");
MODULE_LICENSE ("GPL");
/*-------------------------------------------------------------------------*/
/* gadget side driver data structures */
struct dummy_ep {
struct list_head queue;
unsigned long last_io; /* jiffies timestamp */
struct usb_gadget *gadget;
const struct usb_endpoint_descriptor *desc;
struct usb_ep ep;
unsigned halted : 1;
unsigned wedged : 1;
unsigned already_seen : 1;
unsigned setup_stage : 1;
};
struct dummy_request {
struct list_head queue; /* ep's requests */
struct usb_request req;
};
static inline struct dummy_ep *usb_ep_to_dummy_ep (struct usb_ep *_ep)
{
return container_of (_ep, struct dummy_ep, ep);
}
static inline struct dummy_request *usb_request_to_dummy_request
(struct usb_request *_req)
{
return container_of (_req, struct dummy_request, req);
}
/*-------------------------------------------------------------------------*/
/*
* Every device has ep0 for control requests, plus up to 30 more endpoints,
* in one of two types:
*
* - Configurable: direction (in/out), type (bulk, iso, etc), and endpoint
* number can be changed. Names like "ep-a" are used for this type.
*
* - Fixed Function: in other cases. some characteristics may be mutable;
* that'd be hardware-specific. Names like "ep12out-bulk" are used.
*
* Gadget drivers are responsible for not setting up conflicting endpoint
* configurations, illegal or unsupported packet lengths, and so on.
*/
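/*
 * The ep_name[] table below simply lists all of these naming styles in one
 * array, so this one emulated device can stand in for several kinds of real
 * controller at once.
 */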
static const char ep0name [] = "ep0";
static const char *const ep_name [] = {
ep0name, /* everyone has ep0 */
/* act like a net2280: high speed, six configurable endpoints */
"ep-a", "ep-b", "ep-c", "ep-d", "ep-e", "ep-f",
/* or like pxa250: fifteen fixed function endpoints */
"ep1in-bulk", "ep2out-bulk", "ep3in-iso", "ep4out-iso", "ep5in-int",
"ep6in-bulk", "ep7out-bulk", "ep8in-iso", "ep9out-iso", "ep10in-int",
"ep11in-bulk", "ep12out-bulk", "ep13in-iso", "ep14out-iso",
"ep15in-int",
/* or like sa1100: two fixed function endpoints */
"ep1out-bulk", "ep2in-bulk",
};
#define DUMMY_ENDPOINTS ARRAY_SIZE(ep_name)
/*-------------------------------------------------------------------------*/
#define FIFO_SIZE 64
struct urbp {
struct urb *urb;
struct list_head urbp_list;
};
enum dummy_rh_state {
DUMMY_RH_RESET,
DUMMY_RH_SUSPENDED,
DUMMY_RH_RUNNING
};
struct dummy {
spinlock_t lock;
/*
* SLAVE/GADGET side support
*/
struct dummy_ep ep [DUMMY_ENDPOINTS];
int address;
struct usb_gadget gadget;
struct usb_gadget_driver *driver;
struct dummy_request fifo_req;
u8 fifo_buf [FIFO_SIZE];
u16 devstatus;
unsigned udc_suspended:1;
unsigned pullup:1;
unsigned active:1;
unsigned old_active:1;
/*
* MASTER/HOST side support
*/
enum dummy_rh_state rh_state;
struct timer_list timer;
u32 port_status;
u32 old_status;
unsigned resuming:1;
unsigned long re_timeout;
struct usb_device *udev;
struct list_head urbp_list;
};
static inline struct dummy *hcd_to_dummy (struct usb_hcd *hcd)
{
return (struct dummy *) (hcd->hcd_priv);
}
static inline struct usb_hcd *dummy_to_hcd (struct dummy *dum)
{
return container_of((void *) dum, struct usb_hcd, hcd_priv);
}
static inline struct device *dummy_dev (struct dummy *dum)
{
return dummy_to_hcd(dum)->self.controller;
}
static inline struct device *udc_dev (struct dummy *dum)
{
return dum->gadget.dev.parent;
}
static inline struct dummy *ep_to_dummy (struct dummy_ep *ep)
{
return container_of (ep->gadget, struct dummy, gadget);
}
static inline struct dummy *gadget_to_dummy (struct usb_gadget *gadget)
{
return container_of (gadget, struct dummy, gadget);
}
static inline struct dummy *gadget_dev_to_dummy (struct device *dev)
{
return container_of (dev, struct dummy, gadget.dev);
}
static struct dummy *the_controller;
/*-------------------------------------------------------------------------*/
/* SLAVE/GADGET SIDE UTILITY ROUTINES */
/* called with spinlock held */
static void nuke (struct dummy *dum, struct dummy_ep *ep)
{
while (!list_empty (&ep->queue)) {
struct dummy_request *req;
req = list_entry (ep->queue.next, struct dummy_request, queue);
list_del_init (&req->queue);
req->req.status = -ESHUTDOWN;
spin_unlock (&dum->lock);
req->req.complete (&ep->ep, &req->req);
spin_lock (&dum->lock);
}
}
/* caller must hold lock */
static void
stop_activity (struct dummy *dum)
{
struct dummy_ep *ep;
/* prevent any more requests */
dum->address = 0;
/* The timer is left running so that outstanding URBs can fail */
/* nuke any pending requests first, so driver i/o is quiesced */
list_for_each_entry (ep, &dum->gadget.ep_list, ep.ep_list)
nuke (dum, ep);
/* driver now does any non-usb quiescing necessary */
}
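/*
 * set_link_state() recomputes the emulated link: it derives the root-hub
 * port status bits and the "active" flag from the gadget pullup, the port
 * power/suspend/reset bits and the UDC suspend state, and calls the gadget
 * driver's disconnect/suspend/resume hooks when the derived state changes.
 */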
/* caller must hold lock */
static void
set_link_state (struct dummy *dum)
{
dum->active = 0;
if ((dum->port_status & USB_PORT_STAT_POWER) == 0)
dum->port_status = 0;
/* UDC suspend must cause a disconnect */
else if (!dum->pullup || dum->udc_suspended) {
dum->port_status &= ~(USB_PORT_STAT_CONNECTION |
USB_PORT_STAT_ENABLE |
USB_PORT_STAT_LOW_SPEED |
USB_PORT_STAT_HIGH_SPEED |
USB_PORT_STAT_SUSPEND);
if ((dum->old_status & USB_PORT_STAT_CONNECTION) != 0)
dum->port_status |= (USB_PORT_STAT_C_CONNECTION << 16);
} else {
dum->port_status |= USB_PORT_STAT_CONNECTION;
if ((dum->old_status & USB_PORT_STAT_CONNECTION) == 0)
dum->port_status |= (USB_PORT_STAT_C_CONNECTION << 16);
if ((dum->port_status & USB_PORT_STAT_ENABLE) == 0)
dum->port_status &= ~USB_PORT_STAT_SUSPEND;
else if ((dum->port_status & USB_PORT_STAT_SUSPEND) == 0 &&
dum->rh_state != DUMMY_RH_SUSPENDED)
dum->active = 1;
}
if ((dum->port_status & USB_PORT_STAT_ENABLE) == 0 || dum->active)
dum->resuming = 0;
if ((dum->port_status & USB_PORT_STAT_CONNECTION) == 0 ||
(dum->port_status & USB_PORT_STAT_RESET) != 0) {
if ((dum->old_status & USB_PORT_STAT_CONNECTION) != 0 &&
(dum->old_status & USB_PORT_STAT_RESET) == 0 &&
dum->driver) {
stop_activity (dum);
spin_unlock (&dum->lock);
dum->driver->disconnect (&dum->gadget);
spin_lock (&dum->lock);
}
} else if (dum->active != dum->old_active) {
if (dum->old_active && dum->driver->suspend) {
spin_unlock (&dum->lock);
dum->driver->suspend (&dum->gadget);
spin_lock (&dum->lock);
} else if (!dum->old_active && dum->driver->resume) {
spin_unlock (&dum->lock);
dum->driver->resume (&dum->gadget);
spin_lock (&dum->lock);
}
}
dum->old_status = dum->port_status;
dum->old_active = dum->active;
}
/*-------------------------------------------------------------------------*/
/* SLAVE/GADGET SIDE DRIVER
*
* This only tracks gadget state. All the work is done when the host
* side tries some (emulated) i/o operation. Real device controller
* drivers would do real i/o using dma, fifos, irqs, timers, etc.
*/
#define is_enabled(dum) \
(dum->port_status & USB_PORT_STAT_ENABLE)
static int
dummy_enable (struct usb_ep *_ep, const struct usb_endpoint_descriptor *desc)
{
struct dummy *dum;
struct dummy_ep *ep;
unsigned max;
int retval;
ep = usb_ep_to_dummy_ep (_ep);
if (!_ep || !desc || ep->desc || _ep->name == ep0name
|| desc->bDescriptorType != USB_DT_ENDPOINT)
return -EINVAL;
dum = ep_to_dummy (ep);
if (!dum->driver || !is_enabled (dum))
return -ESHUTDOWN;
max = le16_to_cpu(desc->wMaxPacketSize) & 0x3ff;
/* drivers must not request bad settings, since lower levels
* (hardware or its drivers) may not check. some endpoints
* can't do iso, many have maxpacket limitations, etc.
*
* since this "hardware" driver is here to help debugging, we
* have some extra sanity checks. (there could be more though,
* especially for "ep9out" style fixed function ones.)
*/
retval = -EINVAL;
switch (desc->bmAttributes & 0x03) {
case USB_ENDPOINT_XFER_BULK:
if (strstr (ep->ep.name, "-iso")
|| strstr (ep->ep.name, "-int")) {
goto done;
}
switch (dum->gadget.speed) {
case USB_SPEED_HIGH:
if (max == 512)
break;
goto done;
case USB_SPEED_FULL:
if (max == 8 || max == 16 || max == 32 || max == 64)
/* we'll fake any legal size */
break;
/* save a return statement */
default:
goto done;
}
break;
case USB_ENDPOINT_XFER_INT:
if (strstr (ep->ep.name, "-iso")) /* bulk is ok */
goto done;
/* real hardware might not handle all packet sizes */
switch (dum->gadget.speed) {
case USB_SPEED_HIGH:
if (max <= 1024)
break;
/* save a return statement */
case USB_SPEED_FULL:
if (max <= 64)
break;
/* save a return statement */
default:
if (max <= 8)
break;
goto done;
}
break;
case USB_ENDPOINT_XFER_ISOC:
if (strstr (ep->ep.name, "-bulk")
|| strstr (ep->ep.name, "-int"))
goto done;
/* real hardware might not handle all packet sizes */
switch (dum->gadget.speed) {
case USB_SPEED_HIGH:
if (max <= 1024)
break;
/* save a return statement */
case USB_SPEED_FULL:
if (max <= 1023)
break;
/* save a return statement */
default:
goto done;
}
break;
default:
/* few chips support control except on ep0 */
goto done;
}
_ep->maxpacket = max;
ep->desc = desc;
dev_dbg (udc_dev(dum), "enabled %s (ep%d%s-%s) maxpacket %d\n",
_ep->name,
desc->bEndpointAddress & 0x0f,
(desc->bEndpointAddress & USB_DIR_IN) ? "in" : "out",
({ char *val;
switch (desc->bmAttributes & 0x03) {
case USB_ENDPOINT_XFER_BULK: val = "bulk"; break;
case USB_ENDPOINT_XFER_ISOC: val = "iso"; break;
case USB_ENDPOINT_XFER_INT: val = "intr"; break;
default: val = "ctrl"; break;
}; val; }),
max);
/* at this point real hardware should be NAKing transfers
* to that endpoint, until a buffer is queued to it.
*/
ep->halted = ep->wedged = 0;
retval = 0;
done:
return retval;
}
static int dummy_disable (struct usb_ep *_ep)
{
struct dummy_ep *ep;
struct dummy *dum;
unsigned long flags;
int retval;
ep = usb_ep_to_dummy_ep (_ep);
if (!_ep || !ep->desc || _ep->name == ep0name)
return -EINVAL;
dum = ep_to_dummy (ep);
spin_lock_irqsave (&dum->lock, flags);
ep->desc = NULL;
retval = 0;
nuke (dum, ep);
spin_unlock_irqrestore (&dum->lock, flags);
dev_dbg (udc_dev(dum), "disabled %s\n", _ep->name);
return retval;
}
static struct usb_request *
dummy_alloc_request (struct usb_ep *_ep, gfp_t mem_flags)
{
struct dummy_ep *ep;
struct dummy_request *req;
if (!_ep)
return NULL;
ep = usb_ep_to_dummy_ep (_ep);
req = kzalloc(sizeof(*req), mem_flags);
if (!req)
return NULL;
INIT_LIST_HEAD (&req->queue);
return &req->req;
}
static void
dummy_free_request (struct usb_ep *_ep, struct usb_request *_req)
{
struct dummy_ep *ep;
struct dummy_request *req;
ep = usb_ep_to_dummy_ep (_ep);
if (!ep || !_req || (!ep->desc && _ep->name != ep0name))
return;
req = usb_request_to_dummy_request (_req);
WARN_ON (!list_empty (&req->queue));
kfree (req);
}
static void
fifo_complete (struct usb_ep *ep, struct usb_request *req)
{
}
static int
dummy_queue (struct usb_ep *_ep, struct usb_request *_req,
gfp_t mem_flags)
{
struct dummy_ep *ep;
struct dummy_request *req;
struct dummy *dum;
unsigned long flags;
req = usb_request_to_dummy_request (_req);
if (!_req || !list_empty (&req->queue) || !_req->complete)
return -EINVAL;
ep = usb_ep_to_dummy_ep (_ep);
if (!_ep || (!ep->desc && _ep->name != ep0name))
return -EINVAL;
dum = ep_to_dummy (ep);
if (!dum->driver || !is_enabled (dum))
return -ESHUTDOWN;
#if 0
dev_dbg (udc_dev(dum), "ep %p queue req %p to %s, len %d buf %p\n",
ep, _req, _ep->name, _req->length, _req->buf);
#endif
_req->status = -EINPROGRESS;
_req->actual = 0;
spin_lock_irqsave (&dum->lock, flags);
/* implement an emulated single-request FIFO */
if (ep->desc && (ep->desc->bEndpointAddress & USB_DIR_IN) &&
list_empty (&dum->fifo_req.queue) &&
list_empty (&ep->queue) &&
_req->length <= FIFO_SIZE) {
req = &dum->fifo_req;
req->req = *_req;
req->req.buf = dum->fifo_buf;
memcpy (dum->fifo_buf, _req->buf, _req->length);
req->req.context = dum;
req->req.complete = fifo_complete;
list_add_tail(&req->queue, &ep->queue);
spin_unlock (&dum->lock);
_req->actual = _req->length;
_req->status = 0;
_req->complete (_ep, _req);
spin_lock (&dum->lock);
} else
list_add_tail(&req->queue, &ep->queue);
spin_unlock_irqrestore (&dum->lock, flags);
/* real hardware would likely enable transfers here, in case
* it'd been left NAKing.
*/
return 0;
}
static int dummy_dequeue (struct usb_ep *_ep, struct usb_request *_req)
{
struct dummy_ep *ep;
struct dummy *dum;
int retval = -EINVAL;
unsigned long flags;
struct dummy_request *req = NULL;
if (!_ep || !_req)
return retval;
ep = usb_ep_to_dummy_ep (_ep);
dum = ep_to_dummy (ep);
if (!dum->driver)
return -ESHUTDOWN;
local_irq_save (flags);
spin_lock (&dum->lock);
list_for_each_entry (req, &ep->queue, queue) {
if (&req->req == _req) {
list_del_init (&req->queue);
_req->status = -ECONNRESET;
retval = 0;
break;
}
}
spin_unlock (&dum->lock);
if (retval == 0) {
dev_dbg (udc_dev(dum),
"dequeued req %p from %s, len %d buf %p\n",
req, _ep->name, _req->length, _req->buf);
_req->complete (_ep, _req);
}
local_irq_restore (flags);
return retval;
}
static int
dummy_set_halt_and_wedge(struct usb_ep *_ep, int value, int wedged)
{
struct dummy_ep *ep;
struct dummy *dum;
if (!_ep)
return -EINVAL;
ep = usb_ep_to_dummy_ep (_ep);
dum = ep_to_dummy (ep);
if (!dum->driver)
return -ESHUTDOWN;
if (!value)
ep->halted = ep->wedged = 0;
else if (ep->desc && (ep->desc->bEndpointAddress & USB_DIR_IN) &&
!list_empty (&ep->queue))
return -EAGAIN;
else {
ep->halted = 1;
if (wedged)
ep->wedged = 1;
}
/* FIXME clear emulated data toggle too */
return 0;
}
static int
dummy_set_halt(struct usb_ep *_ep, int value)
{
return dummy_set_halt_and_wedge(_ep, value, 0);
}
static int dummy_set_wedge(struct usb_ep *_ep)
{
if (!_ep || _ep->name == ep0name)
return -EINVAL;
return dummy_set_halt_and_wedge(_ep, 1, 1);
}
static const struct usb_ep_ops dummy_ep_ops = {
.enable = dummy_enable,
.disable = dummy_disable,
.alloc_request = dummy_alloc_request,
.free_request = dummy_free_request,
.queue = dummy_queue,
.dequeue = dummy_dequeue,
.set_halt = dummy_set_halt,
.set_wedge = dummy_set_wedge,
};
/*-------------------------------------------------------------------------*/
/* there are both host and device side versions of this call ... */
static int dummy_g_get_frame (struct usb_gadget *_gadget)
{
struct timeval tv;
do_gettimeofday (&tv);
return tv.tv_usec / 1000;
}
static int dummy_wakeup (struct usb_gadget *_gadget)
{
struct dummy *dum;
dum = gadget_to_dummy (_gadget);
if (!(dum->devstatus & ( (1 << USB_DEVICE_B_HNP_ENABLE)
| (1 << USB_DEVICE_REMOTE_WAKEUP))))
return -EINVAL;
if ((dum->port_status & USB_PORT_STAT_CONNECTION) == 0)
return -ENOLINK;
if ((dum->port_status & USB_PORT_STAT_SUSPEND) == 0 &&
dum->rh_state != DUMMY_RH_SUSPENDED)
return -EIO;
/* FIXME: What if the root hub is suspended but the port isn't? */
/* hub notices our request, issues downstream resume, etc */
dum->resuming = 1;
dum->re_timeout = jiffies + msecs_to_jiffies(20);
mod_timer (&dummy_to_hcd (dum)->rh_timer, dum->re_timeout);
return 0;
}
static int dummy_set_selfpowered (struct usb_gadget *_gadget, int value)
{
struct dummy *dum;
dum = gadget_to_dummy (_gadget);
if (value)
dum->devstatus |= (1 << USB_DEVICE_SELF_POWERED);
else
dum->devstatus &= ~(1 << USB_DEVICE_SELF_POWERED);
return 0;
}
static int dummy_pullup (struct usb_gadget *_gadget, int value)
{
struct dummy *dum;
unsigned long flags;
dum = gadget_to_dummy (_gadget);
spin_lock_irqsave (&dum->lock, flags);
dum->pullup = (value != 0);
set_link_state (dum);
spin_unlock_irqrestore (&dum->lock, flags);
usb_hcd_poll_rh_status (dummy_to_hcd (dum));
return 0;
}
static const struct usb_gadget_ops dummy_ops = {
.get_frame = dummy_g_get_frame,
.wakeup = dummy_wakeup,
.set_selfpowered = dummy_set_selfpowered,
.pullup = dummy_pullup,
};
/*-------------------------------------------------------------------------*/
/* "function" sysfs attribute */
static ssize_t
show_function (struct device *dev, struct device_attribute *attr, char *buf)
{
struct dummy *dum = gadget_dev_to_dummy (dev);
if (!dum->driver || !dum->driver->function)
return 0;
return scnprintf (buf, PAGE_SIZE, "%s\n", dum->driver->function);
}
static DEVICE_ATTR (function, S_IRUGO, show_function, NULL);
/*-------------------------------------------------------------------------*/
/*
* Driver registration/unregistration.
*
* This is basically hardware-specific; there's usually only one real USB
* device (not host) controller since that's how USB devices are intended
* to work. So most implementations of these api calls will rely on the
* fact that only one driver will ever bind to the hardware. But curious
* hardware can be built with discrete components, so the gadget API doesn't
* require that assumption.
*
* For this emulator, it might be convenient to create a usb slave device
* for each driver that registers: just add to a big root hub.
*/
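/*
 * For reference, a gadget driver binds to this emulated UDC exactly as it
 * would to real hardware.  A minimal, purely hypothetical caller might look
 * like:
 *
 *	static struct usb_gadget_driver my_driver = {
 *		.speed	= USB_SPEED_HIGH,
 *		.bind	= my_bind,
 *		.unbind	= my_unbind,
 *		.setup	= my_setup,
 *		.driver	= { .name = "my_gadget" },
 *	};
 *
 *	retval = usb_gadget_register_driver(&my_driver);
 *
 * usb_gadget_register_driver() below rejects drivers without bind/setup or
 * with an unknown speed, and usb_gadget_unregister_driver() requires unbind.
 */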
int
usb_gadget_register_driver (struct usb_gadget_driver *driver)
{
struct dummy *dum = the_controller;
int retval, i;
if (!dum)
return -EINVAL;
if (dum->driver)
return -EBUSY;
if (!driver->bind || !driver->setup
|| driver->speed == USB_SPEED_UNKNOWN)
return -EINVAL;
/*
* SLAVE side init ... the layer above hardware, which
* can't enumerate without help from the driver we're binding.
*/
dum->devstatus = 0;
INIT_LIST_HEAD (&dum->gadget.ep_list);
for (i = 0; i < DUMMY_ENDPOINTS; i++) {
struct dummy_ep *ep = &dum->ep [i];
if (!ep_name [i])
break;
ep->ep.name = ep_name [i];
ep->ep.ops = &dummy_ep_ops;
list_add_tail (&ep->ep.ep_list, &dum->gadget.ep_list);
ep->halted = ep->wedged = ep->already_seen =
ep->setup_stage = 0;
ep->ep.maxpacket = ~0;
ep->last_io = jiffies;
ep->gadget = &dum->gadget;
ep->desc = NULL;
INIT_LIST_HEAD (&ep->queue);
}
dum->gadget.ep0 = &dum->ep [0].ep;
dum->ep [0].ep.maxpacket = 64;
list_del_init (&dum->ep [0].ep.ep_list);
INIT_LIST_HEAD(&dum->fifo_req.queue);
driver->driver.bus = NULL;
dum->driver = driver;
dum->gadget.dev.driver = &driver->driver;
dev_dbg (udc_dev(dum), "binding gadget driver '%s'\n",
driver->driver.name);
retval = driver->bind(&dum->gadget);
if (retval) {
dum->driver = NULL;
dum->gadget.dev.driver = NULL;
return retval;
}
/* khubd will enumerate this in a while */
spin_lock_irq (&dum->lock);
dum->pullup = 1;
set_link_state (dum);
spin_unlock_irq (&dum->lock);
usb_hcd_poll_rh_status (dummy_to_hcd (dum));
return 0;
}
EXPORT_SYMBOL (usb_gadget_register_driver);
int
usb_gadget_unregister_driver (struct usb_gadget_driver *driver)
{
struct dummy *dum = the_controller;
unsigned long flags;
if (!dum)
return -ENODEV;
if (!driver || driver != dum->driver || !driver->unbind)
return -EINVAL;
dev_dbg (udc_dev(dum), "unregister gadget driver '%s'\n",
driver->driver.name);
spin_lock_irqsave (&dum->lock, flags);
dum->pullup = 0;
set_link_state (dum);
spin_unlock_irqrestore (&dum->lock, flags);
driver->unbind (&dum->gadget);
dum->gadget.dev.driver = NULL;
dum->driver = NULL;
spin_lock_irqsave (&dum->lock, flags);
dum->pullup = 0;
set_link_state (dum);
spin_unlock_irqrestore (&dum->lock, flags);
usb_hcd_poll_rh_status (dummy_to_hcd (dum));
return 0;
}
EXPORT_SYMBOL (usb_gadget_unregister_driver);
#undef is_enabled
/* just declare this in any driver that really needs it */
extern int net2280_set_fifo_mode (struct usb_gadget *gadget, int mode);
int net2280_set_fifo_mode (struct usb_gadget *gadget, int mode)
{
return -ENOSYS;
}
EXPORT_SYMBOL (net2280_set_fifo_mode);
/* The gadget structure is stored inside the hcd structure and will be
* released along with it. */
static void
dummy_gadget_release (struct device *dev)
{
struct dummy *dum = gadget_dev_to_dummy (dev);
usb_put_hcd (dummy_to_hcd (dum));
}
static int dummy_udc_probe (struct platform_device *pdev)
{
struct dummy *dum = the_controller;
int rc;
dum->gadget.name = gadget_name;
dum->gadget.ops = &dummy_ops;
dum->gadget.is_dualspeed = 1;
/* maybe claim OTG support, though we won't complete HNP */
dum->gadget.is_otg = (dummy_to_hcd(dum)->self.otg_port != 0);
dev_set_name(&dum->gadget.dev, "gadget");
dum->gadget.dev.parent = &pdev->dev;
dum->gadget.dev.release = dummy_gadget_release;
rc = device_register (&dum->gadget.dev);
if (rc < 0)
return rc;
usb_get_hcd (dummy_to_hcd (dum));
platform_set_drvdata (pdev, dum);
rc = device_create_file (&dum->gadget.dev, &dev_attr_function);
if (rc < 0)
device_unregister (&dum->gadget.dev);
return rc;
}
static int dummy_udc_remove (struct platform_device *pdev)
{
struct dummy *dum = platform_get_drvdata (pdev);
platform_set_drvdata (pdev, NULL);
device_remove_file (&dum->gadget.dev, &dev_attr_function);
device_unregister (&dum->gadget.dev);
return 0;
}
static int dummy_udc_suspend (struct platform_device *pdev, pm_message_t state)
{
struct dummy *dum = platform_get_drvdata(pdev);
dev_dbg (&pdev->dev, "%s\n", __func__);
spin_lock_irq (&dum->lock);
dum->udc_suspended = 1;
set_link_state (dum);
spin_unlock_irq (&dum->lock);
usb_hcd_poll_rh_status (dummy_to_hcd (dum));
return 0;
}
static int dummy_udc_resume (struct platform_device *pdev)
{
struct dummy *dum = platform_get_drvdata(pdev);
dev_dbg (&pdev->dev, "%s\n", __func__);
spin_lock_irq (&dum->lock);
dum->udc_suspended = 0;
set_link_state (dum);
spin_unlock_irq (&dum->lock);
usb_hcd_poll_rh_status (dummy_to_hcd (dum));
return 0;
}
static struct platform_driver dummy_udc_driver = {
.probe = dummy_udc_probe,
.remove = dummy_udc_remove,
.suspend = dummy_udc_suspend,
.resume = dummy_udc_resume,
.driver = {
.name = (char *) gadget_name,
.owner = THIS_MODULE,
},
};
/*-------------------------------------------------------------------------*/
/* MASTER/HOST SIDE DRIVER
*
* this uses the hcd framework to hook up to host side drivers.
* its root hub will only have one device, otherwise it acts like
* a normal host controller.
*
* when urbs are queued, they're just stuck on a list that we
* scan in a timer callback. that callback connects writes from
* the host with reads from the device, and so on, based on the
* usb 2.0 rules.
*/
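/*
 * Concretely: dummy_urb_enqueue() below just appends each urb to
 * dum->urbp_list and arms dum->timer; dummy_timer() then runs roughly once
 * per millisecond (one emulated frame) and moves up to a frame's worth of
 * bytes between each queued urb and the matching gadget-side request.
 */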
static int dummy_urb_enqueue (
struct usb_hcd *hcd,
struct urb *urb,
gfp_t mem_flags
) {
struct dummy *dum;
struct urbp *urbp;
unsigned long flags;
int rc;
if (!urb->transfer_buffer && urb->transfer_buffer_length)
return -EINVAL;
urbp = kmalloc (sizeof *urbp, mem_flags);
if (!urbp)
return -ENOMEM;
urbp->urb = urb;
dum = hcd_to_dummy (hcd);
spin_lock_irqsave (&dum->lock, flags);
rc = usb_hcd_link_urb_to_ep(hcd, urb);
if (rc) {
kfree(urbp);
goto done;
}
if (!dum->udev) {
dum->udev = urb->dev;
usb_get_dev (dum->udev);
} else if (unlikely (dum->udev != urb->dev))
dev_err (dummy_dev(dum), "usb_device address has changed!\n");
list_add_tail (&urbp->urbp_list, &dum->urbp_list);
urb->hcpriv = urbp;
if (usb_pipetype (urb->pipe) == PIPE_CONTROL)
urb->error_count = 1; /* mark as a new urb */
/* kick the scheduler, it'll do the rest */
if (!timer_pending (&dum->timer))
mod_timer (&dum->timer, jiffies + 1);
done:
spin_unlock_irqrestore(&dum->lock, flags);
return rc;
}
static int dummy_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
{
struct dummy *dum;
unsigned long flags;
int rc;
/* giveback happens automatically in timer callback,
* so make sure the callback happens */
dum = hcd_to_dummy (hcd);
spin_lock_irqsave (&dum->lock, flags);
rc = usb_hcd_check_unlink_urb(hcd, urb, status);
if (!rc && dum->rh_state != DUMMY_RH_RUNNING &&
!list_empty(&dum->urbp_list))
mod_timer (&dum->timer, jiffies);
spin_unlock_irqrestore (&dum->lock, flags);
return rc;
}
/* transfer up to a frame's worth; caller must own lock */
static int
transfer(struct dummy *dum, struct urb *urb, struct dummy_ep *ep, int limit,
int *status)
{
struct dummy_request *req;
top:
/* if there's no request queued, the device is NAKing; return */
list_for_each_entry (req, &ep->queue, queue) {
unsigned host_len, dev_len, len;
int is_short, to_host;
int rescan = 0;
/* 1..N packets of ep->ep.maxpacket each ... the last one
* may be short (including zero length).
*
* writer can send a zlp explicitly (length 0) or implicitly
* (length mod maxpacket zero, and 'zero' flag); they always
* terminate reads.
*/
host_len = urb->transfer_buffer_length - urb->actual_length;
dev_len = req->req.length - req->req.actual;
len = min (host_len, dev_len);
/* FIXME update emulated data toggle too */
to_host = usb_pipein (urb->pipe);
if (unlikely (len == 0))
is_short = 1;
else {
char *ubuf, *rbuf;
/* not enough bandwidth left? */
if (limit < ep->ep.maxpacket && limit < len)
break;
len = min (len, (unsigned) limit);
if (len == 0)
break;
/* use an extra pass for the final short packet */
if (len > ep->ep.maxpacket) {
rescan = 1;
len -= (len % ep->ep.maxpacket);
}
is_short = (len % ep->ep.maxpacket) != 0;
/* else transfer packet(s) */
ubuf = urb->transfer_buffer + urb->actual_length;
rbuf = req->req.buf + req->req.actual;
if (to_host)
memcpy (ubuf, rbuf, len);
else
memcpy (rbuf, ubuf, len);
ep->last_io = jiffies;
limit -= len;
urb->actual_length += len;
req->req.actual += len;
}
/* short packets terminate, maybe with overflow/underflow.
* it's only really an error to write too much.
*
* partially filling a buffer optionally blocks queue advances
* (so completion handlers can clean up the queue) but we don't
* need to emulate such data-in-flight.
*/
if (is_short) {
if (host_len == dev_len) {
req->req.status = 0;
*status = 0;
} else if (to_host) {
req->req.status = 0;
if (dev_len > host_len)
*status = -EOVERFLOW;
else
*status = 0;
} else if (!to_host) {
*status = 0;
if (host_len > dev_len)
req->req.status = -EOVERFLOW;
else
req->req.status = 0;
}
/* many requests terminate without a short packet */
} else {
if (req->req.length == req->req.actual
&& !req->req.zero)
req->req.status = 0;
if (urb->transfer_buffer_length == urb->actual_length
&& !(urb->transfer_flags
& URB_ZERO_PACKET))
*status = 0;
}
/* device side completion --> continuable */
if (req->req.status != -EINPROGRESS) {
list_del_init (&req->queue);
spin_unlock (&dum->lock);
req->req.complete (&ep->ep, &req->req);
spin_lock (&dum->lock);
/* requests might have been unlinked... */
rescan = 1;
}
/* host side completion --> terminate */
if (*status != -EINPROGRESS)
break;
/* rescan to continue with any other queued i/o */
if (rescan)
goto top;
}
return limit;
}
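/*
 * Worked example for periodic_bytes() below: a high-speed endpoint with
 * maxpacket 1024 and the "two additional transactions per microframe" bits
 * set in wMaxPacketSize gives tmp = 2 * 8 = 16, so the per-frame limit
 * becomes 1024 + 1024 * 16 = 17408 bytes.
 */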
static int periodic_bytes (struct dummy *dum, struct dummy_ep *ep)
{
int limit = ep->ep.maxpacket;
if (dum->gadget.speed == USB_SPEED_HIGH) {
int tmp;
/* high bandwidth mode */
tmp = le16_to_cpu(ep->desc->wMaxPacketSize);
tmp = (tmp >> 11) & 0x03;
tmp *= 8 /* applies to entire frame */;
limit += limit * tmp;
}
return limit;
}
#define is_active(dum) ((dum->port_status & \
(USB_PORT_STAT_CONNECTION | USB_PORT_STAT_ENABLE | \
USB_PORT_STAT_SUSPEND)) \
== (USB_PORT_STAT_CONNECTION | USB_PORT_STAT_ENABLE))
static struct dummy_ep *find_endpoint (struct dummy *dum, u8 address)
{
int i;
if (!is_active (dum))
return NULL;
if ((address & ~USB_DIR_IN) == 0)
return &dum->ep [0];
for (i = 1; i < DUMMY_ENDPOINTS; i++) {
struct dummy_ep *ep = &dum->ep [i];
if (!ep->desc)
continue;
if (ep->desc->bEndpointAddress == address)
return ep;
}
return NULL;
}
#undef is_active
#define Dev_Request (USB_TYPE_STANDARD | USB_RECIP_DEVICE)
#define Dev_InRequest (Dev_Request | USB_DIR_IN)
#define Intf_Request (USB_TYPE_STANDARD | USB_RECIP_INTERFACE)
#define Intf_InRequest (Intf_Request | USB_DIR_IN)
#define Ep_Request (USB_TYPE_STANDARD | USB_RECIP_ENDPOINT)
#define Ep_InRequest (Ep_Request | USB_DIR_IN)
/* drive both sides of the transfers; looks like irq handlers to
* both drivers except the callbacks aren't in_irq().
*/
static void dummy_timer (unsigned long _dum)
{
struct dummy *dum = (struct dummy *) _dum;
struct urbp *urbp, *tmp;
unsigned long flags;
int limit, total;
int i;
/* simplistic model for one frame's bandwidth */
switch (dum->gadget.speed) {
case USB_SPEED_LOW:
total = 8/*bytes*/ * 12/*packets*/;
break;
case USB_SPEED_FULL:
total = 64/*bytes*/ * 19/*packets*/;
break;
case USB_SPEED_HIGH:
total = 512/*bytes*/ * 13/*packets*/ * 8/*uframes*/;
break;
default:
dev_err (dummy_dev(dum), "bogus device speed\n");
return;
}
/* FIXME if HZ != 1000 this will probably misbehave ... */
/* look at each urb queued by the host side driver */
spin_lock_irqsave (&dum->lock, flags);
if (!dum->udev) {
dev_err (dummy_dev(dum),
"timer fired with no URBs pending?\n");
spin_unlock_irqrestore (&dum->lock, flags);
return;
}
for (i = 0; i < DUMMY_ENDPOINTS; i++) {
if (!ep_name [i])
break;
dum->ep [i].already_seen = 0;
}
restart:
list_for_each_entry_safe (urbp, tmp, &dum->urbp_list, urbp_list) {
struct urb *urb;
struct dummy_request *req;
u8 address;
struct dummy_ep *ep = NULL;
int type;
int status = -EINPROGRESS;
urb = urbp->urb;
if (urb->unlinked)
goto return_urb;
else if (dum->rh_state != DUMMY_RH_RUNNING)
continue;
type = usb_pipetype (urb->pipe);
/* used up this frame's non-periodic bandwidth?
* FIXME there's infinite bandwidth for control and
* periodic transfers ... unrealistic.
*/
if (total <= 0 && type == PIPE_BULK)
continue;
/* find the gadget's ep for this request (if configured) */
address = usb_pipeendpoint (urb->pipe);
if (usb_pipein (urb->pipe))
address |= USB_DIR_IN;
ep = find_endpoint(dum, address);
if (!ep) {
/* set_configuration() disagreement */
dev_dbg (dummy_dev(dum),
"no ep configured for urb %p\n",
urb);
status = -EPROTO;
goto return_urb;
}
if (ep->already_seen)
continue;
ep->already_seen = 1;
if (ep == &dum->ep [0] && urb->error_count) {
ep->setup_stage = 1; /* a new urb */
urb->error_count = 0;
}
if (ep->halted && !ep->setup_stage) {
/* NOTE: must not be iso! */
dev_dbg (dummy_dev(dum), "ep %s halted, urb %p\n",
ep->ep.name, urb);
status = -EPIPE;
goto return_urb;
}
/* FIXME make sure both ends agree on maxpacket */
/* handle control requests */
if (ep == &dum->ep [0] && ep->setup_stage) {
struct usb_ctrlrequest setup;
int value = 1;
struct dummy_ep *ep2;
unsigned w_index;
unsigned w_value;
setup = *(struct usb_ctrlrequest*) urb->setup_packet;
w_index = le16_to_cpu(setup.wIndex);
w_value = le16_to_cpu(setup.wValue);
if (le16_to_cpu(setup.wLength) !=
urb->transfer_buffer_length) {
status = -EOVERFLOW;
goto return_urb;
}
/* paranoia, in case of stale queued data */
list_for_each_entry (req, &ep->queue, queue) {
list_del_init (&req->queue);
req->req.status = -EOVERFLOW;
dev_dbg (udc_dev(dum), "stale req = %p\n",
req);
spin_unlock (&dum->lock);
req->req.complete (&ep->ep, &req->req);
spin_lock (&dum->lock);
ep->already_seen = 0;
goto restart;
}
/* gadget driver never sees set_address or operations
* on standard feature flags. some hardware doesn't
* even expose them.
*/
ep->last_io = jiffies;
ep->setup_stage = 0;
ep->halted = 0;
switch (setup.bRequest) {
case USB_REQ_SET_ADDRESS:
if (setup.bRequestType != Dev_Request)
break;
dum->address = w_value;
status = 0;
dev_dbg (udc_dev(dum), "set_address = %d\n",
w_value);
value = 0;
break;
case USB_REQ_SET_FEATURE:
if (setup.bRequestType == Dev_Request) {
value = 0;
switch (w_value) {
case USB_DEVICE_REMOTE_WAKEUP:
break;
case USB_DEVICE_B_HNP_ENABLE:
dum->gadget.b_hnp_enable = 1;
break;
case USB_DEVICE_A_HNP_SUPPORT:
dum->gadget.a_hnp_support = 1;
break;
case USB_DEVICE_A_ALT_HNP_SUPPORT:
dum->gadget.a_alt_hnp_support
= 1;
break;
default:
value = -EOPNOTSUPP;
}
if (value == 0) {
dum->devstatus |=
(1 << w_value);
status = 0;
}
} else if (setup.bRequestType == Ep_Request) {
// endpoint halt
ep2 = find_endpoint (dum, w_index);
if (!ep2 || ep2->ep.name == ep0name) {
value = -EOPNOTSUPP;
break;
}
ep2->halted = 1;
value = 0;
status = 0;
}
break;
case USB_REQ_CLEAR_FEATURE:
if (setup.bRequestType == Dev_Request) {
switch (w_value) {
case USB_DEVICE_REMOTE_WAKEUP:
dum->devstatus &= ~(1 <<
USB_DEVICE_REMOTE_WAKEUP);
value = 0;
status = 0;
break;
default:
value = -EOPNOTSUPP;
break;
}
} else if (setup.bRequestType == Ep_Request) {
// endpoint halt
ep2 = find_endpoint (dum, w_index);
if (!ep2) {
value = -EOPNOTSUPP;
break;
}
if (!ep2->wedged)
ep2->halted = 0;
value = 0;
status = 0;
}
break;
case USB_REQ_GET_STATUS:
if (setup.bRequestType == Dev_InRequest
|| setup.bRequestType
== Intf_InRequest
|| setup.bRequestType
== Ep_InRequest
) {
char *buf;
// device: remote wakeup, selfpowered
// interface: nothing
// endpoint: halt
buf = (char *)urb->transfer_buffer;
if (urb->transfer_buffer_length > 0) {
if (setup.bRequestType ==
Ep_InRequest) {
ep2 = find_endpoint (dum, w_index);
if (!ep2) {
value = -EOPNOTSUPP;
break;
}
buf [0] = ep2->halted;
} else if (setup.bRequestType ==
Dev_InRequest) {
buf [0] = (u8)
dum->devstatus;
} else
buf [0] = 0;
}
if (urb->transfer_buffer_length > 1)
buf [1] = 0;
urb->actual_length = min (2,
urb->transfer_buffer_length);
value = 0;
status = 0;
}
break;
}
/* gadget driver handles all other requests. block
* until setup() returns; no reentrancy issues etc.
*/
if (value > 0) {
spin_unlock (&dum->lock);
value = dum->driver->setup (&dum->gadget,
&setup);
spin_lock (&dum->lock);
if (value >= 0) {
/* no delays (max 64KB data stage) */
limit = 64*1024;
goto treat_control_like_bulk;
}
/* error, see below */
}
if (value < 0) {
if (value != -EOPNOTSUPP)
dev_dbg (udc_dev(dum),
"setup --> %d\n",
value);
status = -EPIPE;
urb->actual_length = 0;
}
goto return_urb;
}
/* non-control requests */
limit = total;
switch (usb_pipetype (urb->pipe)) {
case PIPE_ISOCHRONOUS:
/* FIXME is it urb->interval since the last xfer?
* use urb->iso_frame_desc[i].
* complete whether or not ep has requests queued.
* report random errors, to debug drivers.
*/
limit = max (limit, periodic_bytes (dum, ep));
status = -ENOSYS;
break;
case PIPE_INTERRUPT:
/* FIXME is it urb->interval since the last xfer?
* this almost certainly polls too fast.
*/
limit = max (limit, periodic_bytes (dum, ep));
/* FALLTHROUGH */
// case PIPE_BULK: case PIPE_CONTROL:
default:
treat_control_like_bulk:
ep->last_io = jiffies;
total = transfer(dum, urb, ep, limit, &status);
break;
}
/* incomplete transfer? */
if (status == -EINPROGRESS)
continue;
return_urb:
list_del (&urbp->urbp_list);
kfree (urbp);
if (ep)
ep->already_seen = ep->setup_stage = 0;
usb_hcd_unlink_urb_from_ep(dummy_to_hcd(dum), urb);
spin_unlock (&dum->lock);
usb_hcd_giveback_urb(dummy_to_hcd(dum), urb, status);
spin_lock (&dum->lock);
goto restart;
}
if (list_empty (&dum->urbp_list)) {
usb_put_dev (dum->udev);
dum->udev = NULL;
} else if (dum->rh_state == DUMMY_RH_RUNNING) {
/* want a 1 msec delay here */
mod_timer (&dum->timer, jiffies + msecs_to_jiffies(1));
}
spin_unlock_irqrestore (&dum->lock, flags);
}
/*-------------------------------------------------------------------------*/
#define PORT_C_MASK \
((USB_PORT_STAT_C_CONNECTION \
| USB_PORT_STAT_C_ENABLE \
| USB_PORT_STAT_C_SUSPEND \
| USB_PORT_STAT_C_OVERCURRENT \
| USB_PORT_STAT_C_RESET) << 16)
static int dummy_hub_status (struct usb_hcd *hcd, char *buf)
{
struct dummy *dum;
unsigned long flags;
int retval = 0;
dum = hcd_to_dummy (hcd);
spin_lock_irqsave (&dum->lock, flags);
if (!test_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags))
goto done;
if (dum->resuming && time_after_eq (jiffies, dum->re_timeout)) {
dum->port_status |= (USB_PORT_STAT_C_SUSPEND << 16);
dum->port_status &= ~USB_PORT_STAT_SUSPEND;
set_link_state (dum);
}
if ((dum->port_status & PORT_C_MASK) != 0) {
*buf = (1 << 1);
dev_dbg (dummy_dev(dum), "port status 0x%08x has changes\n",
dum->port_status);
retval = 1;
if (dum->rh_state == DUMMY_RH_SUSPENDED)
usb_hcd_resume_root_hub (hcd);
}
done:
spin_unlock_irqrestore (&dum->lock, flags);
return retval;
}
static inline void
hub_descriptor (struct usb_hub_descriptor *desc)
{
memset (desc, 0, sizeof *desc);
desc->bDescriptorType = 0x29;
desc->bDescLength = 9;
desc->wHubCharacteristics = cpu_to_le16(0x0001);
desc->bNbrPorts = 1;
desc->bitmap [0] = 0xff;
desc->bitmap [1] = 0xff;
}
static int dummy_hub_control (
struct usb_hcd *hcd,
u16 typeReq,
u16 wValue,
u16 wIndex,
char *buf,
u16 wLength
) {
struct dummy *dum;
int retval = 0;
unsigned long flags;
if (!test_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags))
return -ETIMEDOUT;
dum = hcd_to_dummy (hcd);
spin_lock_irqsave (&dum->lock, flags);
switch (typeReq) {
case ClearHubFeature:
break;
case ClearPortFeature:
switch (wValue) {
case USB_PORT_FEAT_SUSPEND:
if (dum->port_status & USB_PORT_STAT_SUSPEND) {
/* 20msec resume signaling */
dum->resuming = 1;
dum->re_timeout = jiffies +
msecs_to_jiffies(20);
}
break;
case USB_PORT_FEAT_POWER:
if (dum->port_status & USB_PORT_STAT_POWER)
dev_dbg (dummy_dev(dum), "power-off\n");
/* FALLS THROUGH */
default:
dum->port_status &= ~(1 << wValue);
set_link_state (dum);
}
break;
case GetHubDescriptor:
hub_descriptor ((struct usb_hub_descriptor *) buf);
break;
case GetHubStatus:
*(__le32 *) buf = __constant_cpu_to_le32 (0);
break;
case GetPortStatus:
if (wIndex != 1)
retval = -EPIPE;
/* whoever resets or resumes must GetPortStatus to
* complete it!!
*/
if (dum->resuming &&
time_after_eq (jiffies, dum->re_timeout)) {
dum->port_status |= (USB_PORT_STAT_C_SUSPEND << 16);
dum->port_status &= ~USB_PORT_STAT_SUSPEND;
}
if ((dum->port_status & USB_PORT_STAT_RESET) != 0 &&
time_after_eq (jiffies, dum->re_timeout)) {
dum->port_status |= (USB_PORT_STAT_C_RESET << 16);
dum->port_status &= ~USB_PORT_STAT_RESET;
if (dum->pullup) {
dum->port_status |= USB_PORT_STAT_ENABLE;
/* give it the best speed we agree on */
dum->gadget.speed = dum->driver->speed;
dum->gadget.ep0->maxpacket = 64;
switch (dum->gadget.speed) {
case USB_SPEED_HIGH:
dum->port_status |=
USB_PORT_STAT_HIGH_SPEED;
break;
case USB_SPEED_LOW:
dum->gadget.ep0->maxpacket = 8;
dum->port_status |=
USB_PORT_STAT_LOW_SPEED;
break;
default:
dum->gadget.speed = USB_SPEED_FULL;
break;
}
}
}
set_link_state (dum);
((__le16 *) buf)[0] = cpu_to_le16 (dum->port_status);
((__le16 *) buf)[1] = cpu_to_le16 (dum->port_status >> 16);
break;
case SetHubFeature:
retval = -EPIPE;
break;
case SetPortFeature:
switch (wValue) {
case USB_PORT_FEAT_SUSPEND:
if (dum->active) {
dum->port_status |= USB_PORT_STAT_SUSPEND;
/* HNP would happen here; for now we
* assume b_bus_req is always true.
*/
set_link_state (dum);
if (((1 << USB_DEVICE_B_HNP_ENABLE)
& dum->devstatus) != 0)
dev_dbg (dummy_dev(dum),
"no HNP yet!\n");
}
break;
case USB_PORT_FEAT_POWER:
dum->port_status |= USB_PORT_STAT_POWER;
set_link_state (dum);
break;
case USB_PORT_FEAT_RESET:
/* if it's already enabled, disable */
dum->port_status &= ~(USB_PORT_STAT_ENABLE
| USB_PORT_STAT_LOW_SPEED
| USB_PORT_STAT_HIGH_SPEED);
dum->devstatus = 0;
/* 50msec reset signaling */
dum->re_timeout = jiffies + msecs_to_jiffies(50);
/* FALLS THROUGH */
default:
if ((dum->port_status & USB_PORT_STAT_POWER) != 0) {
dum->port_status |= (1 << wValue);
set_link_state (dum);
}
}
break;
default:
dev_dbg (dummy_dev(dum),
"hub control req%04x v%04x i%04x l%d\n",
typeReq, wValue, wIndex, wLength);
/* "protocol stall" on error */
retval = -EPIPE;
}
spin_unlock_irqrestore (&dum->lock, flags);
if ((dum->port_status & PORT_C_MASK) != 0)
usb_hcd_poll_rh_status (hcd);
return retval;
}
static int dummy_bus_suspend (struct usb_hcd *hcd)
{
struct dummy *dum = hcd_to_dummy (hcd);
dev_dbg (&hcd->self.root_hub->dev, "%s\n", __func__);
spin_lock_irq (&dum->lock);
dum->rh_state = DUMMY_RH_SUSPENDED;
set_link_state (dum);
hcd->state = HC_STATE_SUSPENDED;
spin_unlock_irq (&dum->lock);
return 0;
}
static int dummy_bus_resume (struct usb_hcd *hcd)
{
struct dummy *dum = hcd_to_dummy (hcd);
int rc = 0;
dev_dbg (&hcd->self.root_hub->dev, "%s\n", __func__);
spin_lock_irq (&dum->lock);
if (!test_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags)) {
rc = -ESHUTDOWN;
} else {
dum->rh_state = DUMMY_RH_RUNNING;
set_link_state (dum);
if (!list_empty(&dum->urbp_list))
mod_timer (&dum->timer, jiffies);
hcd->state = HC_STATE_RUNNING;
}
spin_unlock_irq (&dum->lock);
return rc;
}
/*-------------------------------------------------------------------------*/
static inline ssize_t
show_urb (char *buf, size_t size, struct urb *urb)
{
int ep = usb_pipeendpoint (urb->pipe);
return snprintf (buf, size,
"urb/%p %s ep%d%s%s len %d/%d\n",
urb,
({ char *s;
switch (urb->dev->speed) {
case USB_SPEED_LOW: s = "ls"; break;
case USB_SPEED_FULL: s = "fs"; break;
case USB_SPEED_HIGH: s = "hs"; break;
default: s = "?"; break;
}; s; }),
ep, ep ? (usb_pipein (urb->pipe) ? "in" : "out") : "",
({ char *s; \
switch (usb_pipetype (urb->pipe)) { \
case PIPE_CONTROL: s = ""; break; \
case PIPE_BULK: s = "-bulk"; break; \
case PIPE_INTERRUPT: s = "-int"; break; \
default: s = "-iso"; break; \
}; s;}),
urb->actual_length, urb->transfer_buffer_length);
}
static ssize_t
show_urbs (struct device *dev, struct device_attribute *attr, char *buf)
{
struct usb_hcd *hcd = dev_get_drvdata (dev);
struct dummy *dum = hcd_to_dummy (hcd);
struct urbp *urbp;
size_t size = 0;
unsigned long flags;
spin_lock_irqsave (&dum->lock, flags);
list_for_each_entry (urbp, &dum->urbp_list, urbp_list) {
size_t temp;
temp = show_urb (buf, PAGE_SIZE - size, urbp->urb);
buf += temp;
size += temp;
}
spin_unlock_irqrestore (&dum->lock, flags);
return size;
}
static DEVICE_ATTR (urbs, S_IRUGO, show_urbs, NULL);
static int dummy_start (struct usb_hcd *hcd)
{
struct dummy *dum;
dum = hcd_to_dummy (hcd);
/*
* MASTER side init ... we emulate a root hub that'll only ever
* talk to one device (the slave side). Also appears in sysfs,
* just like more familiar pci-based HCDs.
*/
spin_lock_init (&dum->lock);
init_timer (&dum->timer);
dum->timer.function = dummy_timer;
dum->timer.data = (unsigned long) dum;
dum->rh_state = DUMMY_RH_RUNNING;
INIT_LIST_HEAD (&dum->urbp_list);
hcd->power_budget = POWER_BUDGET;
hcd->state = HC_STATE_RUNNING;
hcd->uses_new_polling = 1;
#ifdef CONFIG_USB_OTG
hcd->self.otg_port = 1;
#endif
/* FIXME 'urbs' should be a per-device thing, maybe in usbcore */
return device_create_file (dummy_dev(dum), &dev_attr_urbs);
}
static void dummy_stop (struct usb_hcd *hcd)
{
struct dummy *dum;
dum = hcd_to_dummy (hcd);
device_remove_file (dummy_dev(dum), &dev_attr_urbs);
usb_gadget_unregister_driver (dum->driver);
dev_info (dummy_dev(dum), "stopped\n");
}
/*-------------------------------------------------------------------------*/
static int dummy_h_get_frame (struct usb_hcd *hcd)
{
return dummy_g_get_frame (NULL);
}
static const struct hc_driver dummy_hcd = {
.description = (char *) driver_name,
.product_desc = "Dummy host controller",
.hcd_priv_size = sizeof(struct dummy),
.flags = HCD_USB2,
.start = dummy_start,
.stop = dummy_stop,
.urb_enqueue = dummy_urb_enqueue,
.urb_dequeue = dummy_urb_dequeue,
.get_frame_number = dummy_h_get_frame,
.hub_status_data = dummy_hub_status,
.hub_control = dummy_hub_control,
.bus_suspend = dummy_bus_suspend,
.bus_resume = dummy_bus_resume,
};
static int dummy_hcd_probe(struct platform_device *pdev)
{
struct usb_hcd *hcd;
int retval;
dev_info(&pdev->dev, "%s, driver " DRIVER_VERSION "\n", driver_desc);
hcd = usb_create_hcd(&dummy_hcd, &pdev->dev, dev_name(&pdev->dev));
if (!hcd)
return -ENOMEM;
the_controller = hcd_to_dummy (hcd);
retval = usb_add_hcd(hcd, 0, 0);
if (retval != 0) {
usb_put_hcd (hcd);
the_controller = NULL;
}
return retval;
}
static int dummy_hcd_remove (struct platform_device *pdev)
{
struct usb_hcd *hcd;
hcd = platform_get_drvdata (pdev);
usb_remove_hcd (hcd);
usb_put_hcd (hcd);
the_controller = NULL;
return 0;
}
static int dummy_hcd_suspend (struct platform_device *pdev, pm_message_t state)
{
struct usb_hcd *hcd;
struct dummy *dum;
int rc = 0;
dev_dbg (&pdev->dev, "%s\n", __func__);
hcd = platform_get_drvdata (pdev);
dum = hcd_to_dummy (hcd);
if (dum->rh_state == DUMMY_RH_RUNNING) {
dev_warn(&pdev->dev, "Root hub isn't suspended!\n");
rc = -EBUSY;
} else
clear_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
return rc;
}
static int dummy_hcd_resume (struct platform_device *pdev)
{
struct usb_hcd *hcd;
dev_dbg (&pdev->dev, "%s\n", __func__);
hcd = platform_get_drvdata (pdev);
set_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
usb_hcd_poll_rh_status (hcd);
return 0;
}
static struct platform_driver dummy_hcd_driver = {
.probe = dummy_hcd_probe,
.remove = dummy_hcd_remove,
.suspend = dummy_hcd_suspend,
.resume = dummy_hcd_resume,
.driver = {
.name = (char *) driver_name,
.owner = THIS_MODULE,
},
};
/*-------------------------------------------------------------------------*/
static struct platform_device *the_udc_pdev;
static struct platform_device *the_hcd_pdev;
static int __init init (void)
{
int retval = -ENOMEM;
if (usb_disabled ())
return -ENODEV;
the_hcd_pdev = platform_device_alloc(driver_name, -1);
if (!the_hcd_pdev)
return retval;
the_udc_pdev = platform_device_alloc(gadget_name, -1);
if (!the_udc_pdev)
goto err_alloc_udc;
retval = platform_driver_register(&dummy_hcd_driver);
if (retval < 0)
goto err_register_hcd_driver;
retval = platform_driver_register(&dummy_udc_driver);
if (retval < 0)
goto err_register_udc_driver;
retval = platform_device_add(the_hcd_pdev);
if (retval < 0)
goto err_add_hcd;
retval = platform_device_add(the_udc_pdev);
if (retval < 0)
goto err_add_udc;
return retval;
err_add_udc:
platform_device_del(the_hcd_pdev);
err_add_hcd:
platform_driver_unregister(&dummy_udc_driver);
err_register_udc_driver:
platform_driver_unregister(&dummy_hcd_driver);
err_register_hcd_driver:
platform_device_put(the_udc_pdev);
err_alloc_udc:
platform_device_put(the_hcd_pdev);
return retval;
}
module_init (init);
static void __exit cleanup (void)
{
platform_device_unregister(the_udc_pdev);
platform_device_unregister(the_hcd_pdev);
platform_driver_unregister(&dummy_udc_driver);
platform_driver_unregister(&dummy_hcd_driver);
}
module_exit (cleanup);
| gpl-2.0 |
jthornber/linux-2.6 | net/ipv4/tcp_timer.c | 174 | 18849 | /*
* INET An implementation of the TCP/IP protocol suite for the LINUX
* operating system. INET is implemented using the BSD Socket
* interface as the means of communication with the user level.
*
* Implementation of the Transmission Control Protocol(TCP).
*
* Authors: Ross Biro
* Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
* Mark Evans, <evansmp@uhura.aston.ac.uk>
* Corey Minyard <wf-rch!minyard@relay.EU.net>
* Florian La Roche, <flla@stud.uni-sb.de>
* Charles Hedrick, <hedrick@klinzhai.rutgers.edu>
* Linus Torvalds, <torvalds@cs.helsinki.fi>
* Alan Cox, <gw4pts@gw4pts.ampr.org>
* Matthew Dillon, <dillon@apollo.west.oic.com>
* Arnt Gulbrandsen, <agulbra@nvg.unit.no>
* Jorge Cwik, <jorge@laser.satlink.net>
*/
#include <linux/module.h>
#include <linux/gfp.h>
#include <net/tcp.h>
int sysctl_tcp_syn_retries __read_mostly = TCP_SYN_RETRIES;
int sysctl_tcp_synack_retries __read_mostly = TCP_SYNACK_RETRIES;
int sysctl_tcp_keepalive_time __read_mostly = TCP_KEEPALIVE_TIME;
int sysctl_tcp_keepalive_probes __read_mostly = TCP_KEEPALIVE_PROBES;
int sysctl_tcp_keepalive_intvl __read_mostly = TCP_KEEPALIVE_INTVL;
int sysctl_tcp_retries1 __read_mostly = TCP_RETR1;
int sysctl_tcp_retries2 __read_mostly = TCP_RETR2;
int sysctl_tcp_orphan_retries __read_mostly;
int sysctl_tcp_thin_linear_timeouts __read_mostly;
static void tcp_write_err(struct sock *sk)
{
sk->sk_err = sk->sk_err_soft ? : ETIMEDOUT;
sk->sk_error_report(sk);
tcp_done(sk);
NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPABORTONTIMEOUT);
}
/* Do not allow orphaned sockets to eat all our resources.
 * This is a direct violation of the TCP specs, but it is required
 * to prevent DoS attacks. It is called when a retransmission timeout
 * or zero probe timeout occurs on an orphaned socket.
 *
 * The criteria are still not confirmed experimentally and may change.
* We kill the socket, if:
* 1. If number of orphaned sockets exceeds an administratively configured
* limit.
* 2. If we have strong memory pressure.
*/
static int tcp_out_of_resources(struct sock *sk, bool do_reset)
{
struct tcp_sock *tp = tcp_sk(sk);
int shift = 0;
	/* If the peer has not opened its window for a long time, or has not
	 * transmitted anything for a long time, penalize it. */
if ((s32)(tcp_time_stamp - tp->lsndtime) > 2*TCP_RTO_MAX || !do_reset)
shift++;
/* If some dubious ICMP arrived, penalize even more. */
if (sk->sk_err_soft)
shift++;
if (tcp_check_oom(sk, shift)) {
/* Catch exceptional cases, when connection requires reset.
* 1. Last segment was sent recently. */
if ((s32)(tcp_time_stamp - tp->lsndtime) <= TCP_TIMEWAIT_LEN ||
/* 2. Window is closed. */
(!tp->snd_wnd && !tp->packets_out))
do_reset = true;
if (do_reset)
tcp_send_active_reset(sk, GFP_ATOMIC);
tcp_done(sk);
NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPABORTONMEMORY);
return 1;
}
return 0;
}
/* Calculate maximal number of retries on an orphaned socket. */
static int tcp_orphan_retries(struct sock *sk, int alive)
{
int retries = sysctl_tcp_orphan_retries; /* May be zero. */
/* We know from an ICMP that something is wrong. */
if (sk->sk_err_soft && !alive)
retries = 0;
	/* However, if the socket sent something recently, select a safe
	 * number of retries. 8 corresponds to >100 seconds with a minimal
	 * RTO of 200msec. */
if (retries == 0 && alive)
retries = 8;
return retries;
}
static void tcp_mtu_probing(struct inet_connection_sock *icsk, struct sock *sk)
{
struct net *net = sock_net(sk);
/* Black hole detection */
if (net->ipv4.sysctl_tcp_mtu_probing) {
if (!icsk->icsk_mtup.enabled) {
icsk->icsk_mtup.enabled = 1;
icsk->icsk_mtup.probe_timestamp = tcp_time_stamp;
tcp_sync_mss(sk, icsk->icsk_pmtu_cookie);
} else {
struct net *net = sock_net(sk);
struct tcp_sock *tp = tcp_sk(sk);
int mss;
mss = tcp_mtu_to_mss(sk, icsk->icsk_mtup.search_low) >> 1;
mss = min(net->ipv4.sysctl_tcp_base_mss, mss);
mss = max(mss, 68 - tp->tcp_header_len);
icsk->icsk_mtup.search_low = tcp_mss_to_mtu(sk, mss);
tcp_sync_mss(sk, icsk->icsk_pmtu_cookie);
}
}
}
/* This function calculates a "timeout" which is equivalent to the timeout of a
* TCP connection after "boundary" unsuccessful, exponentially backed-off
* retransmissions with an initial RTO of TCP_RTO_MIN or TCP_TIMEOUT_INIT if
* syn_set flag is set.
*/
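/*
 * A worked example of the calculation below, assuming HZ=1000 and the
 * default TCP_RTO_MIN of 200ms and TCP_RTO_MAX of 120s: linear_backoff_thresh
 * is ilog2(600) = 9, so a boundary of 3 gives a timeout of
 * ((2 << 3) - 1) * 200ms = 3 seconds, i.e. the sum of the backed-off RTOs
 * 200 + 400 + 800 + 1600 ms.
 */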
static bool retransmits_timed_out(struct sock *sk,
unsigned int boundary,
unsigned int timeout,
bool syn_set)
{
unsigned int linear_backoff_thresh, start_ts;
unsigned int rto_base = syn_set ? TCP_TIMEOUT_INIT : TCP_RTO_MIN;
if (!inet_csk(sk)->icsk_retransmits)
return false;
start_ts = tcp_sk(sk)->retrans_stamp;
if (unlikely(!start_ts))
start_ts = tcp_skb_timestamp(tcp_write_queue_head(sk));
if (likely(timeout == 0)) {
linear_backoff_thresh = ilog2(TCP_RTO_MAX/rto_base);
if (boundary <= linear_backoff_thresh)
timeout = ((2 << boundary) - 1) * rto_base;
else
timeout = ((2 << linear_backoff_thresh) - 1) * rto_base +
(boundary - linear_backoff_thresh) * TCP_RTO_MAX;
}
return (tcp_time_stamp - start_ts) >= timeout;
}
/* A write timeout has occurred. Process the after effects. */
static int tcp_write_timeout(struct sock *sk)
{
struct inet_connection_sock *icsk = inet_csk(sk);
struct tcp_sock *tp = tcp_sk(sk);
int retry_until;
bool do_reset, syn_set = false;
if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) {
if (icsk->icsk_retransmits) {
dst_negative_advice(sk);
if (tp->syn_fastopen || tp->syn_data)
tcp_fastopen_cache_set(sk, 0, NULL, true, 0);
if (tp->syn_data)
NET_INC_STATS_BH(sock_net(sk),
LINUX_MIB_TCPFASTOPENACTIVEFAIL);
}
retry_until = icsk->icsk_syn_retries ? : sysctl_tcp_syn_retries;
syn_set = true;
} else {
if (retransmits_timed_out(sk, sysctl_tcp_retries1, 0, 0)) {
/* Black hole detection */
tcp_mtu_probing(icsk, sk);
dst_negative_advice(sk);
}
retry_until = sysctl_tcp_retries2;
if (sock_flag(sk, SOCK_DEAD)) {
const int alive = icsk->icsk_rto < TCP_RTO_MAX;
retry_until = tcp_orphan_retries(sk, alive);
do_reset = alive ||
!retransmits_timed_out(sk, retry_until, 0, 0);
if (tcp_out_of_resources(sk, do_reset))
return 1;
}
}
if (retransmits_timed_out(sk, retry_until,
syn_set ? 0 : icsk->icsk_user_timeout, syn_set)) {
/* Has it gone just too far? */
tcp_write_err(sk);
return 1;
}
return 0;
}
void tcp_delack_timer_handler(struct sock *sk)
{
struct tcp_sock *tp = tcp_sk(sk);
struct inet_connection_sock *icsk = inet_csk(sk);
sk_mem_reclaim_partial(sk);
if (sk->sk_state == TCP_CLOSE || !(icsk->icsk_ack.pending & ICSK_ACK_TIMER))
goto out;
if (time_after(icsk->icsk_ack.timeout, jiffies)) {
sk_reset_timer(sk, &icsk->icsk_delack_timer, icsk->icsk_ack.timeout);
goto out;
}
icsk->icsk_ack.pending &= ~ICSK_ACK_TIMER;
if (!skb_queue_empty(&tp->ucopy.prequeue)) {
struct sk_buff *skb;
NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPSCHEDULERFAILED);
while ((skb = __skb_dequeue(&tp->ucopy.prequeue)) != NULL)
sk_backlog_rcv(sk, skb);
tp->ucopy.memory = 0;
}
if (inet_csk_ack_scheduled(sk)) {
if (!icsk->icsk_ack.pingpong) {
/* Delayed ACK missed: inflate ATO. */
icsk->icsk_ack.ato = min(icsk->icsk_ack.ato << 1, icsk->icsk_rto);
} else {
/* Delayed ACK missed: leave pingpong mode and
* deflate ATO.
*/
icsk->icsk_ack.pingpong = 0;
icsk->icsk_ack.ato = TCP_ATO_MIN;
}
tcp_send_ack(sk);
NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_DELAYEDACKS);
}
out:
if (sk_under_memory_pressure(sk))
sk_mem_reclaim(sk);
}
static void tcp_delack_timer(unsigned long data)
{
struct sock *sk = (struct sock *)data;
bh_lock_sock(sk);
if (!sock_owned_by_user(sk)) {
tcp_delack_timer_handler(sk);
} else {
inet_csk(sk)->icsk_ack.blocked = 1;
NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_DELAYEDACKLOCKED);
/* delegate our work to tcp_release_cb() */
if (!test_and_set_bit(TCP_DELACK_TIMER_DEFERRED, &tcp_sk(sk)->tsq_flags))
sock_hold(sk);
}
bh_unlock_sock(sk);
sock_put(sk);
}
static void tcp_probe_timer(struct sock *sk)
{
struct inet_connection_sock *icsk = inet_csk(sk);
struct tcp_sock *tp = tcp_sk(sk);
int max_probes;
u32 start_ts;
if (tp->packets_out || !tcp_send_head(sk)) {
icsk->icsk_probes_out = 0;
return;
}
/* RFC 1122 4.2.2.17 requires the sender to stay open indefinitely as
* long as the receiver continues to respond probes. We support this by
* default and reset icsk_probes_out with incoming ACKs. But if the
* socket is orphaned or the user specifies TCP_USER_TIMEOUT, we
* kill the socket when the retry count and the time exceed the
* corresponding system limits. We also implement a similar policy when
* we use RTO to probe window in tcp_retransmit_timer().
*/
start_ts = tcp_skb_timestamp(tcp_send_head(sk));
if (!start_ts)
skb_mstamp_get(&tcp_send_head(sk)->skb_mstamp);
else if (icsk->icsk_user_timeout &&
(s32)(tcp_time_stamp - start_ts) > icsk->icsk_user_timeout)
goto abort;
max_probes = sysctl_tcp_retries2;
if (sock_flag(sk, SOCK_DEAD)) {
const int alive = inet_csk_rto_backoff(icsk, TCP_RTO_MAX) < TCP_RTO_MAX;
max_probes = tcp_orphan_retries(sk, alive);
if (!alive && icsk->icsk_backoff >= max_probes)
goto abort;
if (tcp_out_of_resources(sk, true))
return;
}
if (icsk->icsk_probes_out > max_probes) {
abort: tcp_write_err(sk);
} else {
/* Only send another probe if we didn't close things up. */
tcp_send_probe0(sk);
}
}
/*
* Timer for Fast Open socket to retransmit SYNACK. Note that the
* sk here is the child socket, not the parent (listener) socket.
*/
static void tcp_fastopen_synack_timer(struct sock *sk)
{
struct inet_connection_sock *icsk = inet_csk(sk);
int max_retries = icsk->icsk_syn_retries ? :
sysctl_tcp_synack_retries + 1; /* add one more retry for fastopen */
struct request_sock *req;
req = tcp_sk(sk)->fastopen_rsk;
req->rsk_ops->syn_ack_timeout(req);
if (req->num_timeout >= max_retries) {
tcp_write_err(sk);
return;
}
/* XXX (TFO) - Unlike regular SYN-ACK retransmit, we ignore error
* returned from rtx_syn_ack() to make it more persistent like
* regular retransmit because if the child socket has been accepted
* it's not good to give up too easily.
*/
inet_rtx_syn_ack(sk, req);
req->num_timeout++;
inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
TCP_TIMEOUT_INIT << req->num_timeout, TCP_RTO_MAX);
}
/*
* The TCP retransmit timer.
*/
void tcp_retransmit_timer(struct sock *sk)
{
struct tcp_sock *tp = tcp_sk(sk);
struct inet_connection_sock *icsk = inet_csk(sk);
if (tp->fastopen_rsk) {
WARN_ON_ONCE(sk->sk_state != TCP_SYN_RECV &&
sk->sk_state != TCP_FIN_WAIT1);
tcp_fastopen_synack_timer(sk);
/* Before we receive ACK to our SYN-ACK don't retransmit
* anything else (e.g., data or FIN segments).
*/
return;
}
if (!tp->packets_out)
goto out;
WARN_ON(tcp_write_queue_empty(sk));
tp->tlp_high_seq = 0;
if (!tp->snd_wnd && !sock_flag(sk, SOCK_DEAD) &&
!((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV))) {
/* Receiver dastardly shrinks window. Our retransmits
* become zero probes, but we should not timeout this
* connection. If the socket is an orphan, time it out,
* we cannot allow such beasts to hang infinitely.
*/
struct inet_sock *inet = inet_sk(sk);
if (sk->sk_family == AF_INET) {
net_dbg_ratelimited("Peer %pI4:%u/%u unexpectedly shrunk window %u:%u (repaired)\n",
&inet->inet_daddr,
ntohs(inet->inet_dport),
inet->inet_num,
tp->snd_una, tp->snd_nxt);
}
#if IS_ENABLED(CONFIG_IPV6)
else if (sk->sk_family == AF_INET6) {
net_dbg_ratelimited("Peer %pI6:%u/%u unexpectedly shrunk window %u:%u (repaired)\n",
&sk->sk_v6_daddr,
ntohs(inet->inet_dport),
inet->inet_num,
tp->snd_una, tp->snd_nxt);
}
#endif
if (tcp_time_stamp - tp->rcv_tstamp > TCP_RTO_MAX) {
tcp_write_err(sk);
goto out;
}
tcp_enter_loss(sk);
tcp_retransmit_skb(sk, tcp_write_queue_head(sk));
__sk_dst_reset(sk);
goto out_reset_timer;
}
if (tcp_write_timeout(sk))
goto out;
if (icsk->icsk_retransmits == 0) {
int mib_idx;
if (icsk->icsk_ca_state == TCP_CA_Recovery) {
if (tcp_is_sack(tp))
mib_idx = LINUX_MIB_TCPSACKRECOVERYFAIL;
else
mib_idx = LINUX_MIB_TCPRENORECOVERYFAIL;
} else if (icsk->icsk_ca_state == TCP_CA_Loss) {
mib_idx = LINUX_MIB_TCPLOSSFAILURES;
} else if ((icsk->icsk_ca_state == TCP_CA_Disorder) ||
tp->sacked_out) {
if (tcp_is_sack(tp))
mib_idx = LINUX_MIB_TCPSACKFAILURES;
else
mib_idx = LINUX_MIB_TCPRENOFAILURES;
} else {
mib_idx = LINUX_MIB_TCPTIMEOUTS;
}
NET_INC_STATS_BH(sock_net(sk), mib_idx);
}
tcp_enter_loss(sk);
if (tcp_retransmit_skb(sk, tcp_write_queue_head(sk)) > 0) {
/* Retransmission failed because of local congestion,
* do not backoff.
*/
if (!icsk->icsk_retransmits)
icsk->icsk_retransmits = 1;
inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
min(icsk->icsk_rto, TCP_RESOURCE_PROBE_INTERVAL),
TCP_RTO_MAX);
goto out;
}
/* Increase the timeout each time we retransmit. Note that
* we do not increase the rtt estimate. rto is initialized
* from rtt, but increases here. Jacobson (SIGCOMM 88) suggests
* that doubling rto each time is the least we can get away with.
* In KA9Q, Karn uses this for the first few times, and then
* goes to quadratic. netBSD doubles, but only goes up to *64,
* and clamps at 1 to 64 sec afterwards. Note that 120 sec is
* defined in the protocol as the maximum possible RTT. I guess
* we'll have to use something other than TCP to talk to the
* University of Mars.
*
* PAWS allows us longer timeouts and large windows, so once
* implemented ftp to mars will work nicely. We will have to fix
* the 120 second clamps though!
*/
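/* For illustration, with a 200 msec minimum RTO the doubling sequence is
* 200 msec, 400 msec, 800 msec, ... and reaches the 120 sec TCP_RTO_MAX
* clamp after roughly ten doublings (200 msec * 2^10 > 120 sec). */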
icsk->icsk_backoff++;
icsk->icsk_retransmits++;
out_reset_timer:
/* If stream is thin, use linear timeouts. Since 'icsk_backoff' is
* used to reset timer, set to 0. Recalculate 'icsk_rto' as this
* might be increased if the stream oscillates between thin and thick,
* thus the old value might already be too high compared to the value
* set by 'tcp_set_rto' in tcp_input.c which resets the rto without
* backoff. Limit to TCP_THIN_LINEAR_RETRIES before initiating
* exponential backoff behaviour, to avoid continuing to hammer
* linear-timeout retransmissions into a black hole.
*/
if (sk->sk_state == TCP_ESTABLISHED &&
(tp->thin_lto || sysctl_tcp_thin_linear_timeouts) &&
tcp_stream_is_thin(tp) &&
icsk->icsk_retransmits <= TCP_THIN_LINEAR_RETRIES) {
icsk->icsk_backoff = 0;
icsk->icsk_rto = min(__tcp_set_rto(tp), TCP_RTO_MAX);
} else {
/* Use normal (exponential) backoff */
icsk->icsk_rto = min(icsk->icsk_rto << 1, TCP_RTO_MAX);
}
inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, icsk->icsk_rto, TCP_RTO_MAX);
if (retransmits_timed_out(sk, sysctl_tcp_retries1 + 1, 0, 0))
__sk_dst_reset(sk);
out:;
}
void tcp_write_timer_handler(struct sock *sk)
{
struct inet_connection_sock *icsk = inet_csk(sk);
int event;
if (sk->sk_state == TCP_CLOSE || !icsk->icsk_pending)
goto out;
if (time_after(icsk->icsk_timeout, jiffies)) {
sk_reset_timer(sk, &icsk->icsk_retransmit_timer, icsk->icsk_timeout);
goto out;
}
event = icsk->icsk_pending;
switch (event) {
case ICSK_TIME_EARLY_RETRANS:
tcp_resume_early_retransmit(sk);
break;
case ICSK_TIME_LOSS_PROBE:
tcp_send_loss_probe(sk);
break;
case ICSK_TIME_RETRANS:
icsk->icsk_pending = 0;
tcp_retransmit_timer(sk);
break;
case ICSK_TIME_PROBE0:
icsk->icsk_pending = 0;
tcp_probe_timer(sk);
break;
}
out:
sk_mem_reclaim(sk);
}
static void tcp_write_timer(unsigned long data)
{
struct sock *sk = (struct sock *)data;
bh_lock_sock(sk);
if (!sock_owned_by_user(sk)) {
tcp_write_timer_handler(sk);
} else {
/* delegate our work to tcp_release_cb() */
if (!test_and_set_bit(TCP_WRITE_TIMER_DEFERRED, &tcp_sk(sk)->tsq_flags))
sock_hold(sk);
}
bh_unlock_sock(sk);
sock_put(sk);
}
void tcp_syn_ack_timeout(const struct request_sock *req)
{
struct net *net = read_pnet(&inet_rsk(req)->ireq_net);
NET_INC_STATS_BH(net, LINUX_MIB_TCPTIMEOUTS);
}
EXPORT_SYMBOL(tcp_syn_ack_timeout);
void tcp_set_keepalive(struct sock *sk, int val)
{
if ((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))
return;
if (val && !sock_flag(sk, SOCK_KEEPOPEN))
inet_csk_reset_keepalive_timer(sk, keepalive_time_when(tcp_sk(sk)));
else if (!val)
inet_csk_delete_keepalive_timer(sk);
}
static void tcp_keepalive_timer (unsigned long data)
{
struct sock *sk = (struct sock *) data;
struct inet_connection_sock *icsk = inet_csk(sk);
struct tcp_sock *tp = tcp_sk(sk);
u32 elapsed;
/* Only process if socket is not in use. */
bh_lock_sock(sk);
if (sock_owned_by_user(sk)) {
/* Try again later. */
inet_csk_reset_keepalive_timer (sk, HZ/20);
goto out;
}
if (sk->sk_state == TCP_LISTEN) {
pr_err("Hmm... keepalive on a LISTEN ???\n");
goto out;
}
if (sk->sk_state == TCP_FIN_WAIT2 && sock_flag(sk, SOCK_DEAD)) {
if (tp->linger2 >= 0) {
const int tmo = tcp_fin_time(sk) - TCP_TIMEWAIT_LEN;
if (tmo > 0) {
tcp_time_wait(sk, TCP_FIN_WAIT2, tmo);
goto out;
}
}
tcp_send_active_reset(sk, GFP_ATOMIC);
goto death;
}
if (!sock_flag(sk, SOCK_KEEPOPEN) || sk->sk_state == TCP_CLOSE)
goto out;
elapsed = keepalive_time_when(tp);
/* It is alive without keepalive 8) */
if (tp->packets_out || tcp_send_head(sk))
goto resched;
elapsed = keepalive_time_elapsed(tp);
if (elapsed >= keepalive_time_when(tp)) {
/* If the TCP_USER_TIMEOUT option is enabled, use that
* to determine when to timeout instead.
*/
if ((icsk->icsk_user_timeout != 0 &&
elapsed >= icsk->icsk_user_timeout &&
icsk->icsk_probes_out > 0) ||
(icsk->icsk_user_timeout == 0 &&
icsk->icsk_probes_out >= keepalive_probes(tp))) {
tcp_send_active_reset(sk, GFP_ATOMIC);
tcp_write_err(sk);
goto out;
}
if (tcp_write_wakeup(sk) <= 0) {
icsk->icsk_probes_out++;
elapsed = keepalive_intvl_when(tp);
} else {
/* If keepalive was lost due to local congestion,
* try harder.
*/
elapsed = TCP_RESOURCE_PROBE_INTERVAL;
}
} else {
/* It is tp->rcv_tstamp + keepalive_time_when(tp) */
elapsed = keepalive_time_when(tp) - elapsed;
}
sk_mem_reclaim(sk);
resched:
inet_csk_reset_keepalive_timer (sk, elapsed);
goto out;
death:
tcp_done(sk);
out:
bh_unlock_sock(sk);
sock_put(sk);
}
void tcp_init_xmit_timers(struct sock *sk)
{
inet_csk_init_xmit_timers(sk, &tcp_write_timer, &tcp_delack_timer,
&tcp_keepalive_timer);
}
EXPORT_SYMBOL(tcp_init_xmit_timers);
| gpl-2.0 |
jackfagner/twrp-kernel-sony-kitkami | sound/soc/msm/qdsp6v2/msm-pcm-loopback-v2.c | 686 | 12984 | /* Copyright (c) 2013-2014, The Linux Foundation. All rights reserved.
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include <linux/init.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/dma-mapping.h>
#include <sound/apr_audio-v2.h>
#include <sound/core.h>
#include <sound/soc.h>
#include <sound/q6asm-v2.h>
#include <sound/pcm.h>
#include <sound/initval.h>
#include <sound/control.h>
#include <sound/tlv.h>
#include <asm/dma.h>
#include "msm-pcm-routing-v2.h"
#define LOOPBACK_VOL_MAX_STEPS 0x2000
static const DECLARE_TLV_DB_LINEAR(loopback_rx_vol_gain, 0,
LOOPBACK_VOL_MAX_STEPS);
struct msm_pcm_loopback {
struct snd_pcm_substream *playback_substream;
struct snd_pcm_substream *capture_substream;
int instance;
struct mutex lock;
uint32_t samp_rate;
uint32_t channel_mode;
int playback_start;
int capture_start;
int session_id;
struct audio_client *audio_client;
int volume;
};
static void stop_pcm(struct msm_pcm_loopback *pcm);
static const struct snd_pcm_hardware dummy_pcm_hardware = {
.formats = 0xffffffff,
.channels_min = 1,
.channels_max = UINT_MAX,
/* Random values to keep userspace happy when checking constraints */
.info = SNDRV_PCM_INFO_INTERLEAVED |
SNDRV_PCM_INFO_BLOCK_TRANSFER,
.buffer_bytes_max = 128*1024,
.period_bytes_min = 1024,
.period_bytes_max = 1024*2,
.periods_min = 2,
.periods_max = 128,
};
static void msm_pcm_route_event_handler(enum msm_pcm_routing_event event,
void *priv_data)
{
struct msm_pcm_loopback *pcm = priv_data;
BUG_ON(!pcm);
pr_debug("%s: event 0x%x\n", __func__, event);
switch (event) {
case MSM_PCM_RT_EVT_DEVSWITCH:
q6asm_cmd(pcm->audio_client, CMD_PAUSE);
q6asm_cmd(pcm->audio_client, CMD_FLUSH);
q6asm_run(pcm->audio_client, 0, 0, 0);
break;
default:
pr_err("%s: default event 0x%x\n", __func__, event);
break;
}
}
static void msm_pcm_loopback_event_handler(uint32_t opcode, uint32_t token,
uint32_t *payload, void *priv)
{
pr_debug("%s:\n", __func__);
switch (opcode) {
case APR_BASIC_RSP_RESULT: {
switch (payload[0]) {
default:
break;
}
}
break;
default:
pr_err("%s: Not Supported Event opcode[0x%x]\n",
__func__, opcode);
break;
}
}
static int pcm_loopback_set_volume(struct msm_pcm_loopback *prtd, int volume)
{
int rc = -EINVAL;
pr_debug("%s: Setting volume 0x%x\n", __func__, volume);
if (prtd && prtd->audio_client) {
rc = q6asm_set_volume(prtd->audio_client, volume);
if (rc < 0) {
pr_err("%s: Send Volume command failed rc = %d\n",
__func__, rc);
return rc;
}
prtd->volume = volume;
}
return rc;
}
static int msm_pcm_open(struct snd_pcm_substream *substream)
{
struct snd_pcm_runtime *runtime = substream->runtime;
struct snd_soc_pcm_runtime *rtd = snd_pcm_substream_chip(substream);
struct msm_pcm_loopback *pcm;
int ret = 0;
uint16_t bits_per_sample = 16;
struct msm_pcm_routing_evt event;
struct asm_session_mtmx_strtr_param_window_v2_t asm_mtmx_strtr_window;
uint32_t param_id;
pcm = dev_get_drvdata(rtd->platform->dev);
mutex_lock(&pcm->lock);
snd_soc_set_runtime_hwparams(substream, &dummy_pcm_hardware);
pcm->volume = 0x2000;
if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
pcm->playback_substream = substream;
else if (substream->stream == SNDRV_PCM_STREAM_CAPTURE)
pcm->capture_substream = substream;
pcm->instance++;
dev_dbg(rtd->platform->dev, "%s: pcm out open: %d,%d\n", __func__,
pcm->instance, substream->stream);
if (pcm->instance == 2) {
struct snd_soc_pcm_runtime *soc_pcm_rx =
pcm->playback_substream->private_data;
struct snd_soc_pcm_runtime *soc_pcm_tx =
pcm->capture_substream->private_data;
if (pcm->audio_client != NULL)
stop_pcm(pcm);
pcm->audio_client = q6asm_audio_client_alloc(
(app_cb)msm_pcm_loopback_event_handler, pcm);
if (!pcm->audio_client) {
dev_err(rtd->platform->dev,
"%s: Could not allocate memory\n", __func__);
mutex_unlock(&pcm->lock);
return -ENOMEM;
}
pcm->session_id = pcm->audio_client->session;
pcm->audio_client->perf_mode = false;
ret = q6asm_open_loopback_v2(pcm->audio_client,
bits_per_sample);
if (ret < 0) {
dev_err(rtd->platform->dev,
"%s: pcm out open failed\n", __func__);
q6asm_audio_client_free(pcm->audio_client);
mutex_unlock(&pcm->lock);
return -ENOMEM;
}
event.event_func = msm_pcm_route_event_handler;
event.priv_data = (void *) pcm;
msm_pcm_routing_reg_phy_stream(soc_pcm_tx->dai_link->be_id,
pcm->audio_client->perf_mode,
pcm->session_id, pcm->capture_substream->stream);
msm_pcm_routing_reg_phy_stream_v2(soc_pcm_rx->dai_link->be_id,
pcm->audio_client->perf_mode,
pcm->session_id, pcm->playback_substream->stream,
event);
if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
pcm->playback_substream = substream;
ret = pcm_loopback_set_volume(pcm, pcm->volume);
if (ret < 0)
dev_err(rtd->platform->dev,
"Error %d setting volume", ret);
}
/* Set to largest negative value */
asm_mtmx_strtr_window.window_lsw = 0x00000000;
asm_mtmx_strtr_window.window_msw = 0x80000000;
param_id = ASM_SESSION_MTMX_STRTR_PARAM_RENDER_WINDOW_START_V2;
q6asm_send_mtmx_strtr_window(pcm->audio_client,
&asm_mtmx_strtr_window,
param_id);
/* Set to largest positive value */
asm_mtmx_strtr_window.window_lsw = 0xffffffff;
asm_mtmx_strtr_window.window_msw = 0x7fffffff;
param_id = ASM_SESSION_MTMX_STRTR_PARAM_RENDER_WINDOW_END_V2;
q6asm_send_mtmx_strtr_window(pcm->audio_client,
&asm_mtmx_strtr_window,
param_id);
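/* Note: each msw/lsw pair above presumably forms one signed 64-bit value,
* i.e. 0x8000000000000000 for the window start and 0x7fffffffffffffff for
* the window end, giving an effectively unbounded render window. */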
}
dev_info(rtd->platform->dev, "%s: Instance = %d, Stream ID = %s\n",
__func__ , pcm->instance, substream->pcm->id);
runtime->private_data = pcm;
mutex_unlock(&pcm->lock);
return 0;
}
static void stop_pcm(struct msm_pcm_loopback *pcm)
{
struct snd_soc_pcm_runtime *soc_pcm_rx;
struct snd_soc_pcm_runtime *soc_pcm_tx;
if (pcm->audio_client == NULL)
return;
q6asm_cmd(pcm->audio_client, CMD_CLOSE);
if (pcm->playback_substream != NULL) {
soc_pcm_rx = pcm->playback_substream->private_data;
msm_pcm_routing_dereg_phy_stream(soc_pcm_rx->dai_link->be_id,
SNDRV_PCM_STREAM_PLAYBACK);
}
if (pcm->capture_substream != NULL) {
soc_pcm_tx = pcm->capture_substream->private_data;
msm_pcm_routing_dereg_phy_stream(soc_pcm_tx->dai_link->be_id,
SNDRV_PCM_STREAM_CAPTURE);
}
q6asm_audio_client_free(pcm->audio_client);
pcm->audio_client = NULL;
}
static int msm_pcm_close(struct snd_pcm_substream *substream)
{
struct snd_pcm_runtime *runtime = substream->runtime;
struct msm_pcm_loopback *pcm = runtime->private_data;
struct snd_soc_pcm_runtime *rtd = snd_pcm_substream_chip(substream);
int ret = 0;
mutex_lock(&pcm->lock);
dev_dbg(rtd->platform->dev, "%s: end pcm call:%d\n",
__func__, substream->stream);
if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
pcm->playback_start = 0;
else if (substream->stream == SNDRV_PCM_STREAM_CAPTURE)
pcm->capture_start = 0;
pcm->instance--;
if (!pcm->playback_start || !pcm->capture_start) {
dev_dbg(rtd->platform->dev, "%s: end pcm call\n", __func__);
stop_pcm(pcm);
}
mutex_unlock(&pcm->lock);
return ret;
}
static int msm_pcm_prepare(struct snd_pcm_substream *substream)
{
int ret = 0;
struct snd_pcm_runtime *runtime = substream->runtime;
struct msm_pcm_loopback *pcm = runtime->private_data;
struct snd_soc_pcm_runtime *rtd = snd_pcm_substream_chip(substream);
mutex_lock(&pcm->lock);
dev_dbg(rtd->platform->dev, "%s: ASM loopback stream:%d\n",
__func__, substream->stream);
if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
if (!pcm->playback_start)
pcm->playback_start = 1;
} else if (substream->stream == SNDRV_PCM_STREAM_CAPTURE) {
if (!pcm->capture_start)
pcm->capture_start = 1;
}
mutex_unlock(&pcm->lock);
return ret;
}
static int msm_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
{
struct snd_pcm_runtime *runtime = substream->runtime;
struct msm_pcm_loopback *pcm = runtime->private_data;
struct snd_soc_pcm_runtime *rtd = snd_pcm_substream_chip(substream);
switch (cmd) {
case SNDRV_PCM_TRIGGER_START:
case SNDRV_PCM_TRIGGER_RESUME:
case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
dev_dbg(rtd->platform->dev,
"%s: playback_start:%d,capture_start:%d\n", __func__,
pcm->playback_start, pcm->capture_start);
if (pcm->playback_start && pcm->capture_start)
q6asm_run_nowait(pcm->audio_client, 0, 0, 0);
break;
case SNDRV_PCM_TRIGGER_SUSPEND:
case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
case SNDRV_PCM_TRIGGER_STOP:
dev_dbg(rtd->platform->dev,
"%s:Pause/Stop - playback_start:%d,capture_start:%d\n",
__func__, pcm->playback_start, pcm->capture_start);
if (pcm->playback_start && pcm->capture_start)
q6asm_cmd_nowait(pcm->audio_client, CMD_PAUSE);
break;
default:
pr_err("%s: default cmd %d\n", __func__, cmd);
break;
}
return 0;
}
static int msm_pcm_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params)
{
struct snd_soc_pcm_runtime *rtd = snd_pcm_substream_chip(substream);
dev_dbg(rtd->platform->dev, "%s: ASM loopback\n", __func__);
return snd_pcm_lib_alloc_vmalloc_buffer(substream,
params_buffer_bytes(params));
}
static int msm_pcm_hw_free(struct snd_pcm_substream *substream)
{
return snd_pcm_lib_free_vmalloc_buffer(substream);
}
static struct snd_pcm_ops msm_pcm_ops = {
.open = msm_pcm_open,
.hw_params = msm_pcm_hw_params,
.hw_free = msm_pcm_hw_free,
.close = msm_pcm_close,
.prepare = msm_pcm_prepare,
.trigger = msm_pcm_trigger,
};
static int msm_pcm_volume_ctl_put(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
int rc = 0;
struct snd_pcm_volume *vol = kcontrol->private_data;
struct snd_pcm_substream *substream = vol->pcm->streams[0].substream;
struct msm_pcm_loopback *prtd = substream->runtime->private_data;
int volume = ucontrol->value.integer.value[0];
rc = pcm_loopback_set_volume(prtd, volume);
return rc;
}
static int msm_pcm_add_controls(struct snd_soc_pcm_runtime *rtd)
{
struct snd_pcm *pcm = rtd->pcm->streams[0].pcm;
struct snd_pcm_volume *volume_info;
struct snd_kcontrol *kctl;
int ret = 0;
dev_dbg(rtd->dev, "%s, Volume cntrl add\n", __func__);
ret = snd_pcm_add_volume_ctls(pcm, SNDRV_PCM_STREAM_PLAYBACK,
NULL, 1,
rtd->dai_link->be_id,
&volume_info);
if (ret < 0)
return ret;
kctl = volume_info->kctl;
kctl->put = msm_pcm_volume_ctl_put;
kctl->tlv.p = loopback_rx_vol_gain;
return 0;
}
static int msm_asoc_pcm_new(struct snd_soc_pcm_runtime *rtd)
{
struct snd_card *card = rtd->card->snd_card;
int ret = 0;
if (!card->dev->coherent_dma_mask)
card->dev->coherent_dma_mask = DMA_BIT_MASK(32);
ret = msm_pcm_add_controls(rtd);
if (ret)
dev_err(rtd->dev, "%s, kctl add failed\n", __func__);
return ret;
}
static struct snd_soc_platform_driver msm_soc_platform = {
.ops = &msm_pcm_ops,
.pcm_new = msm_asoc_pcm_new,
};
static int msm_pcm_probe(struct platform_device *pdev)
{
struct msm_pcm_loopback *pcm;
dev_dbg(&pdev->dev, "%s: dev name %s\n",
__func__, dev_name(&pdev->dev));
pcm = kzalloc(sizeof(struct msm_pcm_loopback), GFP_KERNEL);
if (!pcm) {
dev_err(&pdev->dev, "%s Failed to allocate memory for pcm\n",
__func__);
return -ENOMEM;
} else {
mutex_init(&pcm->lock);
dev_set_drvdata(&pdev->dev, pcm);
}
return snd_soc_register_platform(&pdev->dev,
&msm_soc_platform);
}
static int msm_pcm_remove(struct platform_device *pdev)
{
struct msm_pcm_loopback *pcm;
pcm = dev_get_drvdata(&pdev->dev);
mutex_destroy(&pcm->lock);
kfree(pcm);
snd_soc_unregister_platform(&pdev->dev);
return 0;
}
static const struct of_device_id msm_pcm_loopback_dt_match[] = {
{.compatible = "qcom,msm-pcm-loopback"},
{}
};
static struct platform_driver msm_pcm_driver = {
.driver = {
.name = "msm-pcm-loopback",
.owner = THIS_MODULE,
.of_match_table = msm_pcm_loopback_dt_match,
},
.probe = msm_pcm_probe,
.remove = msm_pcm_remove,
};
static int __init msm_soc_platform_init(void)
{
return platform_driver_register(&msm_pcm_driver);
}
module_init(msm_soc_platform_init);
static void __exit msm_soc_platform_exit(void)
{
platform_driver_unregister(&msm_pcm_driver);
}
module_exit(msm_soc_platform_exit);
MODULE_DESCRIPTION("PCM loopback platform driver");
MODULE_LICENSE("GPL v2");
| gpl-2.0 |
xerpi/linux | arch/parisc/kernel/irq.c | 686 | 15513 | /*
* Code to handle x86 style IRQs plus some generic interrupt stuff.
*
* Copyright (C) 1992 Linus Torvalds
* Copyright (C) 1994, 1995, 1996, 1997, 1998 Ralf Baechle
* Copyright (C) 1999 SuSE GmbH (Philipp Rumpf, prumpf@tux.org)
* Copyright (C) 1999-2000 Grant Grundler
* Copyright (c) 2005 Matthew Wilcox
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2, or (at your option)
* any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/bitops.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/seq_file.h>
#include <linux/types.h>
#include <asm/io.h>
#include <asm/smp.h>
#include <asm/ldcw.h>
#undef PARISC_IRQ_CR16_COUNTS
extern irqreturn_t timer_interrupt(int, void *);
extern irqreturn_t ipi_interrupt(int, void *);
#define EIEM_MASK(irq) (1UL<<(CPU_IRQ_MAX - irq))
/* Bits in EIEM correlate with cpu_irq_action[].
** Numbered *Big Endian*! (ie bit 0 is MSB)
*/
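/* Example of that numbering: EIEM_MASK(CPU_IRQ_MAX) is 1UL << 0 (the LSB),
* and each smaller IRQ number shifts the mask one bit further toward the
* MSB, matching "bit 0 is MSB" above. */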
static volatile unsigned long cpu_eiem = 0;
/*
** local ACK bitmap ... habitually set to 1, but reset to zero
** between ->ack() and ->end() of the interrupt to prevent
** re-interruption of a processing interrupt.
*/
static DEFINE_PER_CPU(unsigned long, local_ack_eiem) = ~0UL;
static void cpu_mask_irq(struct irq_data *d)
{
unsigned long eirr_bit = EIEM_MASK(d->irq);
cpu_eiem &= ~eirr_bit;
/* Do nothing on the other CPUs. If they get this interrupt,
* The & cpu_eiem in the do_cpu_irq_mask() ensures they won't
* handle it, and the set_eiem() at the bottom will ensure it
* then gets disabled */
}
static void __cpu_unmask_irq(unsigned int irq)
{
unsigned long eirr_bit = EIEM_MASK(irq);
cpu_eiem |= eirr_bit;
/* This is just a simple NOP IPI. But what it does is cause
* all the other CPUs to do a set_eiem(cpu_eiem) at the end
* of the interrupt handler */
smp_send_all_nop();
}
static void cpu_unmask_irq(struct irq_data *d)
{
__cpu_unmask_irq(d->irq);
}
void cpu_ack_irq(struct irq_data *d)
{
unsigned long mask = EIEM_MASK(d->irq);
int cpu = smp_processor_id();
/* Clear in EIEM so we can no longer process */
per_cpu(local_ack_eiem, cpu) &= ~mask;
/* disable the interrupt */
set_eiem(cpu_eiem & per_cpu(local_ack_eiem, cpu));
/* and now ack it */
mtctl(mask, 23);
}
void cpu_eoi_irq(struct irq_data *d)
{
unsigned long mask = EIEM_MASK(d->irq);
int cpu = smp_processor_id();
/* set it in the eiems---it's no longer in process */
per_cpu(local_ack_eiem, cpu) |= mask;
/* enable the interrupt */
set_eiem(cpu_eiem & per_cpu(local_ack_eiem, cpu));
}
#ifdef CONFIG_SMP
int cpu_check_affinity(struct irq_data *d, const struct cpumask *dest)
{
int cpu_dest;
/* timer and ipi have to always be received on all CPUs */
if (irqd_is_per_cpu(d))
return -EINVAL;
/* whatever mask they set, we just allow one CPU */
cpu_dest = cpumask_first_and(dest, cpu_online_mask);
return cpu_dest;
}
static int cpu_set_affinity_irq(struct irq_data *d, const struct cpumask *dest,
bool force)
{
int cpu_dest;
cpu_dest = cpu_check_affinity(d, dest);
if (cpu_dest < 0)
return -1;
cpumask_copy(irq_data_get_affinity_mask(d), dest);
return 0;
}
#endif
static struct irq_chip cpu_interrupt_type = {
.name = "CPU",
.irq_mask = cpu_mask_irq,
.irq_unmask = cpu_unmask_irq,
.irq_ack = cpu_ack_irq,
.irq_eoi = cpu_eoi_irq,
#ifdef CONFIG_SMP
.irq_set_affinity = cpu_set_affinity_irq,
#endif
/* XXX: Needs to be written. We managed without it so far, but
* we really ought to write it.
*/
.irq_retrigger = NULL,
};
DEFINE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat);
#define irq_stats(x) (&per_cpu(irq_stat, x))
/*
* /proc/interrupts printing for arch specific interrupts
*/
int arch_show_interrupts(struct seq_file *p, int prec)
{
int j;
#ifdef CONFIG_DEBUG_STACKOVERFLOW
seq_printf(p, "%*s: ", prec, "STK");
for_each_online_cpu(j)
seq_printf(p, "%10u ", irq_stats(j)->kernel_stack_usage);
seq_puts(p, " Kernel stack usage\n");
# ifdef CONFIG_IRQSTACKS
seq_printf(p, "%*s: ", prec, "IST");
for_each_online_cpu(j)
seq_printf(p, "%10u ", irq_stats(j)->irq_stack_usage);
seq_puts(p, " Interrupt stack usage\n");
# endif
#endif
#ifdef CONFIG_SMP
seq_printf(p, "%*s: ", prec, "RES");
for_each_online_cpu(j)
seq_printf(p, "%10u ", irq_stats(j)->irq_resched_count);
seq_puts(p, " Rescheduling interrupts\n");
#endif
seq_printf(p, "%*s: ", prec, "UAH");
for_each_online_cpu(j)
seq_printf(p, "%10u ", irq_stats(j)->irq_unaligned_count);
seq_puts(p, " Unaligned access handler traps\n");
seq_printf(p, "%*s: ", prec, "FPA");
for_each_online_cpu(j)
seq_printf(p, "%10u ", irq_stats(j)->irq_fpassist_count);
seq_puts(p, " Floating point assist traps\n");
seq_printf(p, "%*s: ", prec, "TLB");
for_each_online_cpu(j)
seq_printf(p, "%10u ", irq_stats(j)->irq_tlb_count);
seq_puts(p, " TLB shootdowns\n");
return 0;
}
int show_interrupts(struct seq_file *p, void *v)
{
int i = *(loff_t *) v, j;
unsigned long flags;
if (i == 0) {
seq_puts(p, " ");
for_each_online_cpu(j)
seq_printf(p, " CPU%d", j);
#ifdef PARISC_IRQ_CR16_COUNTS
seq_printf(p, " [min/avg/max] (CPU cycle counts)");
#endif
seq_putc(p, '\n');
}
if (i < NR_IRQS) {
struct irq_desc *desc = irq_to_desc(i);
struct irqaction *action;
raw_spin_lock_irqsave(&desc->lock, flags);
action = desc->action;
if (!action)
goto skip;
seq_printf(p, "%3d: ", i);
#ifdef CONFIG_SMP
for_each_online_cpu(j)
seq_printf(p, "%10u ", kstat_irqs_cpu(i, j));
#else
seq_printf(p, "%10u ", kstat_irqs(i));
#endif
seq_printf(p, " %14s", irq_desc_get_chip(desc)->name);
#ifndef PARISC_IRQ_CR16_COUNTS
seq_printf(p, " %s", action->name);
while ((action = action->next))
seq_printf(p, ", %s", action->name);
#else
for ( ;action; action = action->next) {
unsigned int k, avg, min, max;
min = max = action->cr16_hist[0];
for (avg = k = 0; k < PARISC_CR16_HIST_SIZE; k++) {
int hist = action->cr16_hist[k];
if (hist) {
avg += hist;
} else
break;
if (hist > max) max = hist;
if (hist < min) min = hist;
}
avg /= k;
seq_printf(p, " %s[%d/%d/%d]", action->name,
min,avg,max);
}
#endif
seq_putc(p, '\n');
skip:
raw_spin_unlock_irqrestore(&desc->lock, flags);
}
if (i == NR_IRQS)
arch_show_interrupts(p, 3);
return 0;
}
/*
** The following form a "set": Virtual IRQ, Transaction Address, Trans Data.
** Respectively, these map to IRQ region+EIRR, Processor HPA, EIRR bit.
**
** To use txn_XXX() interfaces, get a Virtual IRQ first.
** Then use that to get the Transaction address and data.
*/
int cpu_claim_irq(unsigned int irq, struct irq_chip *type, void *data)
{
if (irq_has_action(irq))
return -EBUSY;
if (irq_get_chip(irq) != &cpu_interrupt_type)
return -EBUSY;
/* for iosapic interrupts */
if (type) {
irq_set_chip_and_handler(irq, type, handle_percpu_irq);
irq_set_chip_data(irq, data);
__cpu_unmask_irq(irq);
}
return 0;
}
int txn_claim_irq(int irq)
{
return cpu_claim_irq(irq, NULL, NULL) ? -1 : irq;
}
/*
* The bits_wide parameter accommodates the limitations of the HW/SW which
* use these bits:
* Legacy PA I/O (GSC/NIO): 5 bits (architected EIM register)
* V-class (EPIC): 6 bits
* N/L/A-class (iosapic): 8 bits
* PCI 2.2 MSI: 16 bits
* Some PCI devices: 32 bits (Symbios SCSI/ATM/HyperFabric)
*
* On the service provider side:
* o PA 1.1 (and PA2.0 narrow mode) 5-bits (width of EIR register)
* o PA 2.0 wide mode 6-bits (per processor)
* o IA64 8-bits (0-256 total)
*
* So a Legacy PA I/O device on a PA 2.0 box can't use all the bits supported
* by the processor...and the N/L-class I/O subsystem supports more bits than
* PA2.0 has. The first case is the problem.
*/
int txn_alloc_irq(unsigned int bits_wide)
{
int irq;
/* never return irq 0 cause that's the interval timer */
for (irq = CPU_IRQ_BASE + 1; irq <= CPU_IRQ_MAX; irq++) {
if (cpu_claim_irq(irq, NULL, NULL) < 0)
continue;
if ((irq - CPU_IRQ_BASE) >= (1 << bits_wide))
continue;
return irq;
}
/* unlikely, but be prepared */
return -1;
}
unsigned long txn_affinity_addr(unsigned int irq, int cpu)
{
#ifdef CONFIG_SMP
struct irq_data *d = irq_get_irq_data(irq);
cpumask_copy(irq_data_get_affinity_mask(d), cpumask_of(cpu));
#endif
return per_cpu(cpu_data, cpu).txn_addr;
}
unsigned long txn_alloc_addr(unsigned int virt_irq)
{
static int next_cpu = -1;
next_cpu++; /* assign to "next" CPU we want this bugger on */
/* validate entry */
while ((next_cpu < nr_cpu_ids) &&
(!per_cpu(cpu_data, next_cpu).txn_addr ||
!cpu_online(next_cpu)))
next_cpu++;
if (next_cpu >= nr_cpu_ids)
next_cpu = 0; /* nothing else, assign monarch */
return txn_affinity_addr(virt_irq, next_cpu);
}
unsigned int txn_alloc_data(unsigned int virt_irq)
{
return virt_irq - CPU_IRQ_BASE;
}
static inline int eirr_to_irq(unsigned long eirr)
{
int bit = fls_long(eirr);
return (BITS_PER_LONG - bit) + TIMER_IRQ;
}
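/* Example (64-bit kernel): if only the most significant EIRR bit is set,
* fls_long() returns 64 and this yields TIMER_IRQ; an eirr whose highest
* set bit is bit 60 (fls_long() == 61) maps to TIMER_IRQ + 3. */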
#ifdef CONFIG_IRQSTACKS
/*
* IRQ STACK - used for irq handler
*/
#define IRQ_STACK_SIZE (4096 << 2) /* 16k irq stack size */
union irq_stack_union {
unsigned long stack[IRQ_STACK_SIZE/sizeof(unsigned long)];
volatile unsigned int slock[4];
volatile unsigned int lock[1];
};
DEFINE_PER_CPU(union irq_stack_union, irq_stack_union) = {
.slock = { 1,1,1,1 },
};
#endif
int sysctl_panic_on_stackoverflow = 1;
static inline void stack_overflow_check(struct pt_regs *regs)
{
#ifdef CONFIG_DEBUG_STACKOVERFLOW
#define STACK_MARGIN (256*6)
/* Our stack starts directly behind the thread_info struct. */
unsigned long stack_start = (unsigned long) current_thread_info();
unsigned long sp = regs->gr[30];
unsigned long stack_usage;
unsigned int *last_usage;
int cpu = smp_processor_id();
/* if sr7 != 0, we interrupted a userspace process which we do not want
* to check for stack overflow. We will only check the kernel stack. */
if (regs->sr[7])
return;
/* calculate kernel stack usage */
stack_usage = sp - stack_start;
#ifdef CONFIG_IRQSTACKS
if (likely(stack_usage <= THREAD_SIZE))
goto check_kernel_stack; /* found kernel stack */
/* check irq stack usage */
stack_start = (unsigned long) &per_cpu(irq_stack_union, cpu).stack;
stack_usage = sp - stack_start;
last_usage = &per_cpu(irq_stat.irq_stack_usage, cpu);
if (unlikely(stack_usage > *last_usage))
*last_usage = stack_usage;
if (likely(stack_usage < (IRQ_STACK_SIZE - STACK_MARGIN)))
return;
pr_emerg("stackcheck: %s will most likely overflow irq stack "
"(sp:%lx, stk bottom-top:%lx-%lx)\n",
current->comm, sp, stack_start, stack_start + IRQ_STACK_SIZE);
goto panic_check;
check_kernel_stack:
#endif
/* check kernel stack usage */
last_usage = &per_cpu(irq_stat.kernel_stack_usage, cpu);
if (unlikely(stack_usage > *last_usage))
*last_usage = stack_usage;
if (likely(stack_usage < (THREAD_SIZE - STACK_MARGIN)))
return;
pr_emerg("stackcheck: %s will most likely overflow kernel stack "
"(sp:%lx, stk bottom-top:%lx-%lx)\n",
current->comm, sp, stack_start, stack_start + THREAD_SIZE);
#ifdef CONFIG_IRQSTACKS
panic_check:
#endif
if (sysctl_panic_on_stackoverflow)
panic("low stack detected by irq handler - check messages\n");
#endif
}
#ifdef CONFIG_IRQSTACKS
/* in entry.S: */
void call_on_stack(unsigned long p1, void *func, unsigned long new_stack);
static void execute_on_irq_stack(void *func, unsigned long param1)
{
union irq_stack_union *union_ptr;
unsigned long irq_stack;
volatile unsigned int *irq_stack_in_use;
union_ptr = &per_cpu(irq_stack_union, smp_processor_id());
irq_stack = (unsigned long) &union_ptr->stack;
irq_stack = ALIGN(irq_stack + sizeof(irq_stack_union.slock),
64); /* align for stack frame usage */
/* We may be called recursively. If we are already using the irq stack,
* just continue to use it. Use spinlocks to serialize
* the irq stack usage.
*/
irq_stack_in_use = (volatile unsigned int *)__ldcw_align(union_ptr);
if (!__ldcw(irq_stack_in_use)) {
void (*direct_call)(unsigned long p1) = func;
/* We are using the IRQ stack already.
* Do direct call on current stack. */
direct_call(param1);
return;
}
/* This is where we switch to the IRQ stack. */
call_on_stack(param1, func, irq_stack);
/* free up irq stack usage. */
*irq_stack_in_use = 1;
}
void do_softirq_own_stack(void)
{
execute_on_irq_stack(__do_softirq, 0);
}
#endif /* CONFIG_IRQSTACKS */
/* ONLY called from entry.S:intr_extint() */
void do_cpu_irq_mask(struct pt_regs *regs)
{
struct pt_regs *old_regs;
unsigned long eirr_val;
int irq, cpu = smp_processor_id();
struct irq_data *irq_data;
#ifdef CONFIG_SMP
cpumask_t dest;
#endif
old_regs = set_irq_regs(regs);
local_irq_disable();
irq_enter();
eirr_val = mfctl(23) & cpu_eiem & per_cpu(local_ack_eiem, cpu);
if (!eirr_val)
goto set_out;
irq = eirr_to_irq(eirr_val);
irq_data = irq_get_irq_data(irq);
/* Filter out spurious interrupts, mostly from serial port at bootup */
if (unlikely(!irq_desc_has_action(irq_data_to_desc(irq_data))))
goto set_out;
#ifdef CONFIG_SMP
cpumask_copy(&dest, irq_data_get_affinity_mask(irq_data));
if (irqd_is_per_cpu(irq_data) &&
!cpumask_test_cpu(smp_processor_id(), &dest)) {
int cpu = cpumask_first(&dest);
printk(KERN_DEBUG "redirecting irq %d from CPU %d to %d\n",
irq, smp_processor_id(), cpu);
gsc_writel(irq + CPU_IRQ_BASE,
per_cpu(cpu_data, cpu).hpa);
goto set_out;
}
#endif
stack_overflow_check(regs);
#ifdef CONFIG_IRQSTACKS
execute_on_irq_stack(&generic_handle_irq, irq);
#else
generic_handle_irq(irq);
#endif /* CONFIG_IRQSTACKS */
out:
irq_exit();
set_irq_regs(old_regs);
return;
set_out:
set_eiem(cpu_eiem & per_cpu(local_ack_eiem, cpu));
goto out;
}
static struct irqaction timer_action = {
.handler = timer_interrupt,
.name = "timer",
.flags = IRQF_TIMER | IRQF_PERCPU | IRQF_IRQPOLL,
};
#ifdef CONFIG_SMP
static struct irqaction ipi_action = {
.handler = ipi_interrupt,
.name = "IPI",
.flags = IRQF_PERCPU,
};
#endif
static void claim_cpu_irqs(void)
{
int i;
for (i = CPU_IRQ_BASE; i <= CPU_IRQ_MAX; i++) {
irq_set_chip_and_handler(i, &cpu_interrupt_type,
handle_percpu_irq);
}
irq_set_handler(TIMER_IRQ, handle_percpu_irq);
setup_irq(TIMER_IRQ, &timer_action);
#ifdef CONFIG_SMP
irq_set_handler(IPI_IRQ, handle_percpu_irq);
setup_irq(IPI_IRQ, &ipi_action);
#endif
}
void __init init_IRQ(void)
{
local_irq_disable(); /* PARANOID - should already be disabled */
mtctl(~0UL, 23); /* EIRR : clear all pending external intr */
#ifdef CONFIG_SMP
if (!cpu_eiem) {
claim_cpu_irqs();
cpu_eiem = EIEM_MASK(IPI_IRQ) | EIEM_MASK(TIMER_IRQ);
}
#else
claim_cpu_irqs();
cpu_eiem = EIEM_MASK(TIMER_IRQ);
#endif
set_eiem(cpu_eiem); /* EIEM : enable all external intr */
}
| gpl-2.0 |
varun10221/linux | tools/lib/traceevent/event-plugin.c | 1198 | 10250 | /*
* Copyright (C) 2009, 2010 Red Hat Inc, Steven Rostedt <srostedt@redhat.com>
*
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation;
* version 2.1 of the License (not later!)
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this program; if not, see <http://www.gnu.org/licenses>
*
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
*/
#include <ctype.h>
#include <stdio.h>
#include <string.h>
#include <dlfcn.h>
#include <stdlib.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <unistd.h>
#include <dirent.h>
#include "event-parse.h"
#include "event-utils.h"
#define LOCAL_PLUGIN_DIR ".traceevent/plugins"
static struct registered_plugin_options {
struct registered_plugin_options *next;
struct pevent_plugin_option *options;
} *registered_options;
static struct trace_plugin_options {
struct trace_plugin_options *next;
char *plugin;
char *option;
char *value;
} *trace_plugin_options;
struct plugin_list {
struct plugin_list *next;
char *name;
void *handle;
};
static void lower_case(char *str)
{
if (!str)
return;
for (; *str; str++)
*str = tolower(*str);
}
static int update_option_value(struct pevent_plugin_option *op, const char *val)
{
char *op_val;
if (!val) {
/* toggle, only if option is boolean */
if (op->value)
/* Warn? */
return 0;
op->set ^= 1;
return 0;
}
/*
* If the option has a value then it takes a string
* otherwise the option is a boolean.
*/
if (op->value) {
op->value = val;
return 0;
}
/* Option is boolean, must be either "1", "0", "true" or "false" */
op_val = strdup(val);
if (!op_val)
return -1;
lower_case(op_val);
if (strcmp(op_val, "1") == 0 || strcmp(op_val, "true") == 0)
op->set = 1;
else if (strcmp(op_val, "0") == 0 || strcmp(op_val, "false") == 0)
op->set = 0;
free(op_val);
return 0;
}
/**
* traceevent_plugin_list_options - get list of plugin options
*
* Returns an array of char strings that list the currently registered
* plugin options in the format of <plugin>:<option>. This list can be
* used by toggling the option.
*
* Returns NULL if there's no options registered. On error it returns
* INVALID_PLUGIN_LIST_OPTION
*
* Must be freed with traceevent_plugin_free_options_list().
*/
char **traceevent_plugin_list_options(void)
{
struct registered_plugin_options *reg;
struct pevent_plugin_option *op;
char **list = NULL;
char *name;
int count = 0;
for (reg = registered_options; reg; reg = reg->next) {
for (op = reg->options; op->name; op++) {
char *alias = op->plugin_alias ? op->plugin_alias : op->file;
char **temp = list;
name = malloc(strlen(op->name) + strlen(alias) + 2);
if (!name)
goto err;
sprintf(name, "%s:%s", alias, op->name);
list = realloc(list, sizeof(char *) * (count + 2));
if (!list) {
list = temp;
free(name);
goto err;
}
list[count++] = name;
list[count] = NULL;
}
}
return list;
err:
while (--count >= 0)
free(list[count]);
free(list);
return INVALID_PLUGIN_LIST_OPTION;
}
void traceevent_plugin_free_options_list(char **list)
{
int i;
if (!list)
return;
if (list == INVALID_PLUGIN_LIST_OPTION)
return;
for (i = 0; list[i]; i++)
free(list[i]);
free(list);
}
static int
update_option(const char *file, struct pevent_plugin_option *option)
{
struct trace_plugin_options *op;
char *plugin;
int ret = 0;
if (option->plugin_alias) {
plugin = strdup(option->plugin_alias);
if (!plugin)
return -1;
} else {
char *p;
plugin = strdup(file);
if (!plugin)
return -1;
p = strstr(plugin, ".");
if (p)
*p = '\0';
}
/* first look for named options */
for (op = trace_plugin_options; op; op = op->next) {
if (!op->plugin)
continue;
if (strcmp(op->plugin, plugin) != 0)
continue;
if (strcmp(op->option, option->name) != 0)
continue;
ret = update_option_value(option, op->value);
if (ret)
goto out;
break;
}
/* first look for unnamed options */
for (op = trace_plugin_options; op; op = op->next) {
if (op->plugin)
continue;
if (strcmp(op->option, option->name) != 0)
continue;
ret = update_option_value(option, op->value);
break;
}
out:
free(plugin);
return ret;
}
/**
* traceevent_plugin_add_options - Add a set of options by a plugin
* @name: The name of the plugin adding the options
* @options: The set of options being loaded
*
* Sets the options with the values that have been added by user.
*/
int traceevent_plugin_add_options(const char *name,
struct pevent_plugin_option *options)
{
struct registered_plugin_options *reg;
reg = malloc(sizeof(*reg));
if (!reg)
return -1;
reg->next = registered_options;
reg->options = options;
registered_options = reg;
while (options->name) {
update_option(name, options);
options++;
}
return 0;
}
/**
* traceevent_plugin_remove_options - remove plugin options that were registered
* @options: Options to removed that were registered with traceevent_plugin_add_options
*/
void traceevent_plugin_remove_options(struct pevent_plugin_option *options)
{
struct registered_plugin_options **last;
struct registered_plugin_options *reg;
for (last = ®istered_options; *last; last = &(*last)->next) {
if ((*last)->options == options) {
reg = *last;
*last = reg->next;
free(reg);
return;
}
}
}
/**
* traceevent_print_plugins - print out the list of plugins loaded
* @s: the trace_seq descriptor to write to
* @prefix: The prefix string to add before each plugin name
* @suffix: The suffix string to append after each plugin name
* @list: The list of plugins (usually returned by traceevent_load_plugins())
*
* Writes to the trace_seq @s the list of plugins (files) that is
* returned by traceevent_load_plugins(). Use @prefix and @suffix for formatting:
* @prefix = " ", @suffix = "\n".
*/
void traceevent_print_plugins(struct trace_seq *s,
const char *prefix, const char *suffix,
const struct plugin_list *list)
{
while (list) {
trace_seq_printf(s, "%s%s%s", prefix, list->name, suffix);
list = list->next;
}
}
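/* Hypothetical usage sketch (not part of this file), assuming a pevent
* handle already exists and using the prefix/suffix from the comment above:
*
* struct plugin_list *plugins = traceevent_load_plugins(pevent);
* struct trace_seq s;
*
* trace_seq_init(&s);
* traceevent_print_plugins(&s, " ", "\n", plugins);
* trace_seq_do_printf(&s);
* trace_seq_destroy(&s);
* traceevent_unload_plugins(plugins, pevent);
*/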
static void
load_plugin(struct pevent *pevent, const char *path,
const char *file, void *data)
{
struct plugin_list **plugin_list = data;
pevent_plugin_load_func func;
struct plugin_list *list;
const char *alias;
char *plugin;
void *handle;
plugin = malloc(strlen(path) + strlen(file) + 2);
if (!plugin) {
warning("could not allocate plugin memory\n");
return;
}
strcpy(plugin, path);
strcat(plugin, "/");
strcat(plugin, file);
handle = dlopen(plugin, RTLD_NOW | RTLD_GLOBAL);
if (!handle) {
warning("could not load plugin '%s'\n%s\n",
plugin, dlerror());
goto out_free;
}
alias = dlsym(handle, PEVENT_PLUGIN_ALIAS_NAME);
if (!alias)
alias = file;
func = dlsym(handle, PEVENT_PLUGIN_LOADER_NAME);
if (!func) {
warning("could not find func '%s' in plugin '%s'\n%s\n",
PEVENT_PLUGIN_LOADER_NAME, plugin, dlerror());
goto out_free;
}
list = malloc(sizeof(*list));
if (!list) {
warning("could not allocate plugin memory\n");
goto out_free;
}
list->next = *plugin_list;
list->handle = handle;
list->name = plugin;
*plugin_list = list;
pr_stat("registering plugin: %s", plugin);
func(pevent);
return;
out_free:
free(plugin);
}
static void
load_plugins_dir(struct pevent *pevent, const char *suffix,
const char *path,
void (*load_plugin)(struct pevent *pevent,
const char *path,
const char *name,
void *data),
void *data)
{
struct dirent *dent;
struct stat st;
DIR *dir;
int ret;
ret = stat(path, &st);
if (ret < 0)
return;
if (!S_ISDIR(st.st_mode))
return;
dir = opendir(path);
if (!dir)
return;
while ((dent = readdir(dir))) {
const char *name = dent->d_name;
if (strcmp(name, ".") == 0 ||
strcmp(name, "..") == 0)
continue;
/* Only load plugins that end in suffix */
if (strcmp(name + (strlen(name) - strlen(suffix)), suffix) != 0)
continue;
load_plugin(pevent, path, name, data);
}
closedir(dir);
}
static void
load_plugins(struct pevent *pevent, const char *suffix,
void (*load_plugin)(struct pevent *pevent,
const char *path,
const char *name,
void *data),
void *data)
{
char *home;
char *path;
char *envdir;
if (pevent->flags & PEVENT_DISABLE_PLUGINS)
return;
/*
* If a system plugin directory was defined,
* check that first.
*/
#ifdef PLUGIN_DIR
if (!(pevent->flags & PEVENT_DISABLE_SYS_PLUGINS))
load_plugins_dir(pevent, suffix, PLUGIN_DIR,
load_plugin, data);
#endif
/*
* Next let the environment-set plugin directory
* override the system defaults.
*/
envdir = getenv("TRACEEVENT_PLUGIN_DIR");
if (envdir)
load_plugins_dir(pevent, suffix, envdir, load_plugin, data);
/*
* Now let the home directory override the environment
* or system defaults.
*/
home = getenv("HOME");
if (!home)
return;
path = malloc(strlen(home) + strlen(LOCAL_PLUGIN_DIR) + 2);
if (!path) {
warning("could not allocate plugin memory\n");
return;
}
strcpy(path, home);
strcat(path, "/");
strcat(path, LOCAL_PLUGIN_DIR);
load_plugins_dir(pevent, suffix, path, load_plugin, data);
free(path);
}
struct plugin_list*
traceevent_load_plugins(struct pevent *pevent)
{
struct plugin_list *list = NULL;
load_plugins(pevent, ".so", load_plugin, &list);
return list;
}
void
traceevent_unload_plugins(struct plugin_list *plugin_list, struct pevent *pevent)
{
pevent_plugin_unload_func func;
struct plugin_list *list;
while (plugin_list) {
list = plugin_list;
plugin_list = list->next;
func = dlsym(list->handle, PEVENT_PLUGIN_UNLOADER_NAME);
if (func)
func(pevent);
dlclose(list->handle);
free(list->name);
free(list);
}
}
| gpl-2.0 |
cnbin/linux | arch/um/drivers/ubd_kern.c | 1454 | 35383 | /*
* Copyright (C) 2000 Jeff Dike (jdike@karaya.com)
* Licensed under the GPL
*/
/* 2001-09-28...2002-04-17
* Partition stuff by James_McMechan@hotmail.com
* old style ubd by setting UBD_SHIFT to 0
* 2002-09-27...2002-10-18 massive tinkering for 2.5
* partitions have changed in 2.5
* 2003-01-29 more tinkering for 2.5.59-1
* This should now address the sysfs problems and has
* the symlink for devfs to allow for booting with
* the common /dev/ubd/discX/... names rather than
* only /dev/ubdN/discN this version also has lots of
* clean ups preparing for ubd-many.
* James McMechan
*/
#define UBD_SHIFT 4
#include <linux/module.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/ata.h>
#include <linux/hdreg.h>
#include <linux/cdrom.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/ctype.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/platform_device.h>
#include <linux/scatterlist.h>
#include <asm/tlbflush.h>
#include <kern_util.h>
#include "mconsole_kern.h"
#include <init.h>
#include <irq_kern.h>
#include "ubd.h"
#include <os.h>
#include "cow.h"
enum ubd_req { UBD_READ, UBD_WRITE, UBD_FLUSH };
struct io_thread_req {
struct request *req;
enum ubd_req op;
int fds[2];
unsigned long offsets[2];
unsigned long long offset;
unsigned long length;
char *buffer;
int sectorsize;
unsigned long sector_mask;
unsigned long long cow_offset;
unsigned long bitmap_words[2];
int error;
};
static inline int ubd_test_bit(__u64 bit, unsigned char *data)
{
__u64 n;
int bits, off;
bits = sizeof(data[0]) * 8;
n = bit / bits;
off = bit % bits;
return (data[n] & (1 << off)) != 0;
}
static inline void ubd_set_bit(__u64 bit, unsigned char *data)
{
__u64 n;
int bits, off;
bits = sizeof(data[0]) * 8;
n = bit / bits;
off = bit % bits;
data[n] |= (1 << off);
}
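/* Example of the arithmetic above: with 8-bit chars, bit 27 lands in
* data[27 / 8] = data[3] at offset 27 % 8 = 3, so ubd_set_bit(27, data)
* does data[3] |= (1 << 3). */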
/*End stuff from ubd_user.h*/
#define DRIVER_NAME "uml-blkdev"
static DEFINE_MUTEX(ubd_lock);
static DEFINE_MUTEX(ubd_mutex); /* replaces BKL, might not be needed */
static int ubd_open(struct block_device *bdev, fmode_t mode);
static void ubd_release(struct gendisk *disk, fmode_t mode);
static int ubd_ioctl(struct block_device *bdev, fmode_t mode,
unsigned int cmd, unsigned long arg);
static int ubd_getgeo(struct block_device *bdev, struct hd_geometry *geo);
#define MAX_DEV (16)
static const struct block_device_operations ubd_blops = {
.owner = THIS_MODULE,
.open = ubd_open,
.release = ubd_release,
.ioctl = ubd_ioctl,
.getgeo = ubd_getgeo,
};
/* Protected by ubd_lock */
static int fake_major = UBD_MAJOR;
static struct gendisk *ubd_gendisk[MAX_DEV];
static struct gendisk *fake_gendisk[MAX_DEV];
#ifdef CONFIG_BLK_DEV_UBD_SYNC
#define OPEN_FLAGS ((struct openflags) { .r = 1, .w = 1, .s = 1, .c = 0, \
.cl = 1 })
#else
#define OPEN_FLAGS ((struct openflags) { .r = 1, .w = 1, .s = 0, .c = 0, \
.cl = 1 })
#endif
static struct openflags global_openflags = OPEN_FLAGS;
struct cow {
/* backing file name */
char *file;
/* backing file fd */
int fd;
unsigned long *bitmap;
unsigned long bitmap_len;
int bitmap_offset;
int data_offset;
};
#define MAX_SG 64
struct ubd {
struct list_head restart;
/* name (and fd, below) of the file opened for writing, either the
* backing or the cow file. */
char *file;
int count;
int fd;
__u64 size;
struct openflags boot_openflags;
struct openflags openflags;
unsigned shared:1;
unsigned no_cow:1;
struct cow cow;
struct platform_device pdev;
struct request_queue *queue;
spinlock_t lock;
struct scatterlist sg[MAX_SG];
struct request *request;
int start_sg, end_sg;
sector_t rq_pos;
};
#define DEFAULT_COW { \
.file = NULL, \
.fd = -1, \
.bitmap = NULL, \
.bitmap_offset = 0, \
.data_offset = 0, \
}
#define DEFAULT_UBD { \
.file = NULL, \
.count = 0, \
.fd = -1, \
.size = -1, \
.boot_openflags = OPEN_FLAGS, \
.openflags = OPEN_FLAGS, \
.no_cow = 0, \
.shared = 0, \
.cow = DEFAULT_COW, \
.lock = __SPIN_LOCK_UNLOCKED(ubd_devs.lock), \
.request = NULL, \
.start_sg = 0, \
.end_sg = 0, \
.rq_pos = 0, \
}
/* Protected by ubd_lock */
static struct ubd ubd_devs[MAX_DEV] = { [0 ... MAX_DEV - 1] = DEFAULT_UBD };
/* Only changed by fake_ide_setup which is a setup */
static int fake_ide = 0;
static struct proc_dir_entry *proc_ide_root = NULL;
static struct proc_dir_entry *proc_ide = NULL;
static void make_proc_ide(void)
{
proc_ide_root = proc_mkdir("ide", NULL);
proc_ide = proc_mkdir("ide0", proc_ide_root);
}
static int fake_ide_media_proc_show(struct seq_file *m, void *v)
{
seq_puts(m, "disk\n");
return 0;
}
static int fake_ide_media_proc_open(struct inode *inode, struct file *file)
{
return single_open(file, fake_ide_media_proc_show, NULL);
}
static const struct file_operations fake_ide_media_proc_fops = {
.owner = THIS_MODULE,
.open = fake_ide_media_proc_open,
.read = seq_read,
.llseek = seq_lseek,
.release = single_release,
};
static void make_ide_entries(const char *dev_name)
{
struct proc_dir_entry *dir, *ent;
char name[64];
if(proc_ide_root == NULL) make_proc_ide();
dir = proc_mkdir(dev_name, proc_ide);
if(!dir) return;
ent = proc_create("media", S_IRUGO, dir, &fake_ide_media_proc_fops);
if(!ent) return;
snprintf(name, sizeof(name), "ide0/%s", dev_name);
proc_symlink(dev_name, proc_ide_root, name);
}
static int fake_ide_setup(char *str)
{
fake_ide = 1;
return 1;
}
__setup("fake_ide", fake_ide_setup);
__uml_help(fake_ide_setup,
"fake_ide\n"
" Create ide0 entries that map onto ubd devices.\n\n"
);
static int parse_unit(char **ptr)
{
char *str = *ptr, *end;
int n = -1;
if(isdigit(*str)) {
n = simple_strtoul(str, &end, 0);
if(end == str)
return -1;
*ptr = end;
}
else if (('a' <= *str) && (*str <= 'z')) {
n = *str - 'a';
str++;
*ptr = str;
}
return n;
}
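/* Example: for a boot parameter such as "ubd0=cow,backing" this function is
* handed "0=cow,backing" and returns 0; "ubda=..." and "ubdc=..." yield 0 and
* 2 respectively, with *ptr advanced past the consumed digit or letter. */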
/* If *index_out == -1 at exit, the passed option was a general one;
* otherwise, the str pointer is used (and owned) inside ubd_devs array, so it
* should not be freed on exit.
*/
static int ubd_setup_common(char *str, int *index_out, char **error_out)
{
struct ubd *ubd_dev;
struct openflags flags = global_openflags;
char *backing_file;
int n, err = 0, i;
if(index_out) *index_out = -1;
n = *str;
if(n == '='){
char *end;
int major;
str++;
if(!strcmp(str, "sync")){
global_openflags = of_sync(global_openflags);
goto out1;
}
err = -EINVAL;
major = simple_strtoul(str, &end, 0);
if((*end != '\0') || (end == str)){
*error_out = "Didn't parse major number";
goto out1;
}
mutex_lock(&ubd_lock);
if (fake_major != UBD_MAJOR) {
*error_out = "Can't assign a fake major twice";
goto out1;
}
fake_major = major;
printk(KERN_INFO "Setting extra ubd major number to %d\n",
major);
err = 0;
out1:
mutex_unlock(&ubd_lock);
return err;
}
n = parse_unit(&str);
if(n < 0){
*error_out = "Couldn't parse device number";
return -EINVAL;
}
if(n >= MAX_DEV){
*error_out = "Device number out of range";
return 1;
}
err = -EBUSY;
mutex_lock(&ubd_lock);
ubd_dev = &ubd_devs[n];
if(ubd_dev->file != NULL){
*error_out = "Device is already configured";
goto out;
}
if (index_out)
*index_out = n;
err = -EINVAL;
for (i = 0; i < sizeof("rscd="); i++) {
switch (*str) {
case 'r':
flags.w = 0;
break;
case 's':
flags.s = 1;
break;
case 'd':
ubd_dev->no_cow = 1;
break;
case 'c':
ubd_dev->shared = 1;
break;
case '=':
str++;
goto break_loop;
default:
*error_out = "Expected '=' or flag letter "
"(r, s, c, or d)";
goto out;
}
str++;
}
if (*str == '=')
*error_out = "Too many flags specified";
else
*error_out = "Missing '='";
goto out;
break_loop:
backing_file = strchr(str, ',');
if (backing_file == NULL)
backing_file = strchr(str, ':');
if(backing_file != NULL){
if(ubd_dev->no_cow){
*error_out = "Can't specify both 'd' and a cow file";
goto out;
}
else {
*backing_file = '\0';
backing_file++;
}
}
err = 0;
ubd_dev->file = str;
ubd_dev->cow.file = backing_file;
ubd_dev->boot_openflags = flags;
out:
mutex_unlock(&ubd_lock);
return err;
}
static int ubd_setup(char *str)
{
char *error;
int err;
err = ubd_setup_common(str, NULL, &error);
if(err)
printk(KERN_ERR "Failed to initialize device with \"%s\" : "
"%s\n", str, error);
return 1;
}
__setup("ubd", ubd_setup);
__uml_help(ubd_setup,
"ubd<n><flags>=<filename>[(:|,)<filename2>]\n"
" This is used to associate a device with a file in the underlying\n"
" filesystem. When specifying two filenames, the first one is the\n"
" COW name and the second is the backing file name. As separator you can\n"
" use either a ':' or a ',': the first one allows writing things like;\n"
" ubd0=~/Uml/root_cow:~/Uml/root_backing_file\n"
" while with a ',' the shell would not expand the 2nd '~'.\n"
" When using only one filename, UML will detect whether to treat it like\n"
" a COW file or a backing file. To override this detection, add the 'd'\n"
" flag:\n"
" ubd0d=BackingFile\n"
" Usually, there is a filesystem in the file, but \n"
" that's not required. Swap devices containing swap files can be\n"
" specified like this. Also, a file which doesn't contain a\n"
" filesystem can have its contents read in the virtual \n"
" machine by running 'dd' on the device. <n> must be in the range\n"
" 0 to 7. Appending an 'r' to the number will cause that device\n"
" to be mounted read-only. For example ubd1r=./ext_fs. Appending\n"
" an 's' will cause data to be written to disk on the host immediately.\n"
" 'c' will cause the device to be treated as being shared between multiple\n"
" UMLs and file locking will be turned off - this is appropriate for a\n"
" cluster filesystem and inappropriate at almost all other times.\n\n"
);
static int udb_setup(char *str)
{
printk("udb%s specified on command line is almost certainly a ubd -> "
"udb TYPO\n", str);
return 1;
}
__setup("udb", udb_setup);
__uml_help(udb_setup,
"udb\n"
" This option is here solely to catch ubd -> udb typos, which can be\n"
" to impossible to catch visually unless you specifically look for\n"
" them. The only result of any option starting with 'udb' is an error\n"
" in the boot output.\n\n"
);
static void do_ubd_request(struct request_queue * q);
/* Only changed by ubd_init, which is an initcall. */
static int thread_fd = -1;
static LIST_HEAD(restart);
/* XXX - move this inside ubd_intr. */
/* Called without dev->lock held, and only in interrupt context. */
static void ubd_handler(void)
{
struct io_thread_req *req;
struct ubd *ubd;
struct list_head *list, *next_ele;
unsigned long flags;
int n;
while(1){
n = os_read_file(thread_fd, &req,
sizeof(struct io_thread_req *));
if(n != sizeof(req)){
if(n == -EAGAIN)
break;
printk(KERN_ERR "spurious interrupt in ubd_handler, "
"err = %d\n", -n);
return;
}
blk_end_request(req->req, 0, req->length);
kfree(req);
}
reactivate_fd(thread_fd, UBD_IRQ);
list_for_each_safe(list, next_ele, &restart){
ubd = container_of(list, struct ubd, restart);
list_del_init(&ubd->restart);
spin_lock_irqsave(&ubd->lock, flags);
do_ubd_request(ubd->queue);
spin_unlock_irqrestore(&ubd->lock, flags);
}
}
static irqreturn_t ubd_intr(int irq, void *dev)
{
ubd_handler();
return IRQ_HANDLED;
}
/* Only changed by ubd_init, which is an initcall. */
static int io_pid = -1;
static void kill_io_thread(void)
{
if(io_pid != -1)
os_kill_process(io_pid, 1);
}
__uml_exitcall(kill_io_thread);
static inline int ubd_file_size(struct ubd *ubd_dev, __u64 *size_out)
{
char *file;
int fd;
int err;
__u32 version;
__u32 align;
char *backing_file;
time_t mtime;
unsigned long long size;
int sector_size;
int bitmap_offset;
if (ubd_dev->file && ubd_dev->cow.file) {
file = ubd_dev->cow.file;
goto out;
}
fd = os_open_file(ubd_dev->file, of_read(OPENFLAGS()), 0);
if (fd < 0)
return fd;
err = read_cow_header(file_reader, &fd, &version, &backing_file, \
&mtime, &size, &sector_size, &align, &bitmap_offset);
os_close_file(fd);
if(err == -EINVAL)
file = ubd_dev->file;
else
file = backing_file;
out:
return os_file_size(file, size_out);
}
static int read_cow_bitmap(int fd, void *buf, int offset, int len)
{
int err;
err = os_seek_file(fd, offset);
if (err < 0)
return err;
err = os_read_file(fd, buf, len);
if (err < 0)
return err;
return 0;
}
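/* Check the backing file against the values recorded in the COW header -
 * both its size and its modification time must match, otherwise the COW
 * file can no longer be trusted.
 */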
static int backing_file_mismatch(char *file, __u64 size, time_t mtime)
{
unsigned long modtime;
unsigned long long actual;
int err;
err = os_file_modtime(file, &modtime);
if (err < 0) {
printk(KERN_ERR "Failed to get modification time of backing "
"file \"%s\", err = %d\n", file, -err);
return err;
}
err = os_file_size(file, &actual);
if (err < 0) {
printk(KERN_ERR "Failed to get size of backing file \"%s\", "
"err = %d\n", file, -err);
return err;
}
if (actual != size) {
/*__u64 can be a long on AMD64 and with %lu GCC complains; so
* the typecast.*/
printk(KERN_ERR "Size mismatch (%llu vs %llu) of COW header "
"vs backing file\n", (unsigned long long) size, actual);
return -EINVAL;
}
if (modtime != mtime) {
printk(KERN_ERR "mtime mismatch (%ld vs %ld) of COW header vs "
"backing file\n", mtime, modtime);
return -EINVAL;
}
return 0;
}
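/* Decide whether the backing file named on the command line is really a
 * different file from the one recorded in the COW header.  Both paths
 * are stat'ed and compared by device and inode, so two names for the
 * same file don't force a switch.
 */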
static int path_requires_switch(char *from_cmdline, char *from_cow, char *cow)
{
struct uml_stat buf1, buf2;
int err;
if (from_cmdline == NULL)
return 0;
if (!strcmp(from_cmdline, from_cow))
return 0;
err = os_stat_file(from_cmdline, &buf1);
if (err < 0) {
printk(KERN_ERR "Couldn't stat '%s', err = %d\n", from_cmdline,
-err);
return 0;
}
err = os_stat_file(from_cow, &buf2);
if (err < 0) {
printk(KERN_ERR "Couldn't stat '%s', err = %d\n", from_cow,
-err);
return 1;
}
if ((buf1.ust_dev == buf2.ust_dev) && (buf1.ust_ino == buf2.ust_ino))
return 0;
printk(KERN_ERR "Backing file mismatch - \"%s\" requested, "
"\"%s\" specified in COW header of \"%s\"\n",
from_cmdline, from_cow, cow);
return 1;
}
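/* Open a ubd image on the host, falling back to read-only if a writable
 * open fails, and lock it unless it is marked shared.  If the caller is
 * interested in COW data, the COW header is read and either validated
 * against, or rewritten to point at, the requested backing file.
 * Returns the host fd or a negative error.
 */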
static int open_ubd_file(char *file, struct openflags *openflags, int shared,
char **backing_file_out, int *bitmap_offset_out,
unsigned long *bitmap_len_out, int *data_offset_out,
int *create_cow_out)
{
time_t mtime;
unsigned long long size;
__u32 version, align;
char *backing_file;
int fd, err, sectorsize, asked_switch, mode = 0644;
fd = os_open_file(file, *openflags, mode);
if (fd < 0) {
if ((fd == -ENOENT) && (create_cow_out != NULL))
*create_cow_out = 1;
if (!openflags->w ||
((fd != -EROFS) && (fd != -EACCES)))
return fd;
openflags->w = 0;
fd = os_open_file(file, *openflags, mode);
if (fd < 0)
return fd;
}
if (shared)
printk(KERN_INFO "Not locking \"%s\" on the host\n", file);
else {
err = os_lock_file(fd, openflags->w);
if (err < 0) {
printk(KERN_ERR "Failed to lock '%s', err = %d\n",
file, -err);
goto out_close;
}
}
/* Successful return case! */
if (backing_file_out == NULL)
return fd;
err = read_cow_header(file_reader, &fd, &version, &backing_file, &mtime,
&size, &sectorsize, &align, bitmap_offset_out);
if (err && (*backing_file_out != NULL)) {
printk(KERN_ERR "Failed to read COW header from COW file "
"\"%s\", errno = %d\n", file, -err);
goto out_close;
}
if (err)
return fd;
asked_switch = path_requires_switch(*backing_file_out, backing_file,
file);
/* Allow switching only if no mismatch. */
if (asked_switch && !backing_file_mismatch(*backing_file_out, size,
mtime)) {
printk(KERN_ERR "Switching backing file to '%s'\n",
*backing_file_out);
err = write_cow_header(file, fd, *backing_file_out,
sectorsize, align, &size);
if (err) {
printk(KERN_ERR "Switch failed, errno = %d\n", -err);
goto out_close;
}
} else {
*backing_file_out = backing_file;
err = backing_file_mismatch(*backing_file_out, size, mtime);
if (err)
goto out_close;
}
cow_sizes(version, size, sectorsize, align, *bitmap_offset_out,
bitmap_len_out, data_offset_out);
return fd;
out_close:
os_close_file(fd);
return err;
}
static int create_cow_file(char *cow_file, char *backing_file,
struct openflags flags,
int sectorsize, int alignment, int *bitmap_offset_out,
unsigned long *bitmap_len_out, int *data_offset_out)
{
int err, fd;
flags.c = 1;
fd = open_ubd_file(cow_file, &flags, 0, NULL, NULL, NULL, NULL, NULL);
if (fd < 0) {
err = fd;
printk(KERN_ERR "Open of COW file '%s' failed, errno = %d\n",
cow_file, -err);
goto out;
}
err = init_cow_file(fd, cow_file, backing_file, sectorsize, alignment,
bitmap_offset_out, bitmap_len_out,
data_offset_out);
if (!err)
return fd;
os_close_file(fd);
out:
return err;
}
static void ubd_close_dev(struct ubd *ubd_dev)
{
os_close_file(ubd_dev->fd);
if(ubd_dev->cow.file == NULL)
return;
os_close_file(ubd_dev->cow.fd);
vfree(ubd_dev->cow.bitmap);
ubd_dev->cow.bitmap = NULL;
}
static int ubd_open_dev(struct ubd *ubd_dev)
{
struct openflags flags;
char **back_ptr;
int err, create_cow, *create_ptr;
int fd;
ubd_dev->openflags = ubd_dev->boot_openflags;
create_cow = 0;
create_ptr = (ubd_dev->cow.file != NULL) ? &create_cow : NULL;
back_ptr = ubd_dev->no_cow ? NULL : &ubd_dev->cow.file;
fd = open_ubd_file(ubd_dev->file, &ubd_dev->openflags, ubd_dev->shared,
back_ptr, &ubd_dev->cow.bitmap_offset,
&ubd_dev->cow.bitmap_len, &ubd_dev->cow.data_offset,
create_ptr);
if((fd == -ENOENT) && create_cow){
fd = create_cow_file(ubd_dev->file, ubd_dev->cow.file,
ubd_dev->openflags, 1 << 9, PAGE_SIZE,
&ubd_dev->cow.bitmap_offset,
&ubd_dev->cow.bitmap_len,
&ubd_dev->cow.data_offset);
if(fd >= 0){
printk(KERN_INFO "Creating \"%s\" as COW file for "
"\"%s\"\n", ubd_dev->file, ubd_dev->cow.file);
}
}
if(fd < 0){
printk("Failed to open '%s', errno = %d\n", ubd_dev->file,
-fd);
return fd;
}
ubd_dev->fd = fd;
if(ubd_dev->cow.file != NULL){
blk_queue_max_hw_sectors(ubd_dev->queue, 8 * sizeof(long));
err = -ENOMEM;
ubd_dev->cow.bitmap = vmalloc(ubd_dev->cow.bitmap_len);
if(ubd_dev->cow.bitmap == NULL){
printk(KERN_ERR "Failed to vmalloc COW bitmap\n");
goto error;
}
flush_tlb_kernel_vm();
err = read_cow_bitmap(ubd_dev->fd, ubd_dev->cow.bitmap,
ubd_dev->cow.bitmap_offset,
ubd_dev->cow.bitmap_len);
if(err < 0)
goto error;
flags = ubd_dev->openflags;
flags.w = 0;
err = open_ubd_file(ubd_dev->cow.file, &flags, ubd_dev->shared, NULL,
NULL, NULL, NULL, NULL);
if(err < 0) goto error;
ubd_dev->cow.fd = err;
}
return 0;
error:
os_close_file(ubd_dev->fd);
return err;
}
static void ubd_device_release(struct device *dev)
{
struct ubd *ubd_dev = dev_get_drvdata(dev);
blk_cleanup_queue(ubd_dev->queue);
*ubd_dev = ((struct ubd) DEFAULT_UBD);
}
static int ubd_disk_register(int major, u64 size, int unit,
struct gendisk **disk_out)
{
struct gendisk *disk;
disk = alloc_disk(1 << UBD_SHIFT);
if(disk == NULL)
return -ENOMEM;
disk->major = major;
disk->first_minor = unit << UBD_SHIFT;
disk->fops = &ubd_blops;
set_capacity(disk, size / 512);
if (major == UBD_MAJOR)
sprintf(disk->disk_name, "ubd%c", 'a' + unit);
else
sprintf(disk->disk_name, "ubd_fake%d", unit);
/* sysfs register (not for ide fake devices) */
if (major == UBD_MAJOR) {
ubd_devs[unit].pdev.id = unit;
ubd_devs[unit].pdev.name = DRIVER_NAME;
ubd_devs[unit].pdev.dev.release = ubd_device_release;
dev_set_drvdata(&ubd_devs[unit].pdev.dev, &ubd_devs[unit]);
platform_device_register(&ubd_devs[unit].pdev);
disk->driverfs_dev = &ubd_devs[unit].pdev.dev;
}
disk->private_data = &ubd_devs[unit];
disk->queue = ubd_devs[unit].queue;
add_disk(disk);
*disk_out = disk;
return 0;
}
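/* Round a byte count up to the next 512-byte sector boundary. */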
#define ROUND_BLOCK(n) ((n + ((1 << 9) - 1)) & (-1 << 9))
static int ubd_add(int n, char **error_out)
{
struct ubd *ubd_dev = &ubd_devs[n];
int err = 0;
if(ubd_dev->file == NULL)
goto out;
err = ubd_file_size(ubd_dev, &ubd_dev->size);
if(err < 0){
*error_out = "Couldn't determine size of device's file";
goto out;
}
ubd_dev->size = ROUND_BLOCK(ubd_dev->size);
INIT_LIST_HEAD(&ubd_dev->restart);
sg_init_table(ubd_dev->sg, MAX_SG);
err = -ENOMEM;
ubd_dev->queue = blk_init_queue(do_ubd_request, &ubd_dev->lock);
if (ubd_dev->queue == NULL) {
*error_out = "Failed to initialize device queue";
goto out;
}
ubd_dev->queue->queuedata = ubd_dev;
blk_queue_flush(ubd_dev->queue, REQ_FLUSH);
blk_queue_max_segments(ubd_dev->queue, MAX_SG);
err = ubd_disk_register(UBD_MAJOR, ubd_dev->size, n, &ubd_gendisk[n]);
if(err){
*error_out = "Failed to register device";
goto out_cleanup;
}
if (fake_major != UBD_MAJOR)
ubd_disk_register(fake_major, ubd_dev->size, n,
&fake_gendisk[n]);
/*
* Perhaps this should also be under the "if (fake_major)" above
* using the fake_disk->disk_name
*/
if (fake_ide)
make_ide_entries(ubd_gendisk[n]->disk_name);
err = 0;
out:
return err;
out_cleanup:
blk_cleanup_queue(ubd_dev->queue);
goto out;
}
static int ubd_config(char *str, char **error_out)
{
int n, ret;
/* This string is possibly broken up and stored, so it's only
* freed if ubd_setup_common fails, or if only general options
* were set.
*/
str = kstrdup(str, GFP_KERNEL);
if (str == NULL) {
*error_out = "Failed to allocate memory";
return -ENOMEM;
}
ret = ubd_setup_common(str, &n, error_out);
if (ret)
goto err_free;
if (n == -1) {
ret = 0;
goto err_free;
}
mutex_lock(&ubd_lock);
ret = ubd_add(n, error_out);
if (ret)
ubd_devs[n].file = NULL;
mutex_unlock(&ubd_lock);
out:
return ret;
err_free:
kfree(str);
goto out;
}
static int ubd_get_config(char *name, char *str, int size, char **error_out)
{
struct ubd *ubd_dev;
int n, len = 0;
n = parse_unit(&name);
if((n >= MAX_DEV) || (n < 0)){
*error_out = "ubd_get_config : device number out of range";
return -1;
}
ubd_dev = &ubd_devs[n];
mutex_lock(&ubd_lock);
if(ubd_dev->file == NULL){
CONFIG_CHUNK(str, size, len, "", 1);
goto out;
}
CONFIG_CHUNK(str, size, len, ubd_dev->file, 0);
if(ubd_dev->cow.file != NULL){
CONFIG_CHUNK(str, size, len, ",", 0);
CONFIG_CHUNK(str, size, len, ubd_dev->cow.file, 1);
}
else CONFIG_CHUNK(str, size, len, "", 1);
out:
mutex_unlock(&ubd_lock);
return len;
}
static int ubd_id(char **str, int *start_out, int *end_out)
{
int n;
n = parse_unit(str);
*start_out = 0;
*end_out = MAX_DEV - 1;
return n;
}
static int ubd_remove(int n, char **error_out)
{
struct gendisk *disk = ubd_gendisk[n];
struct ubd *ubd_dev;
int err = -ENODEV;
mutex_lock(&ubd_lock);
ubd_dev = &ubd_devs[n];
if(ubd_dev->file == NULL)
goto out;
/* you cannot remove an open disk */
err = -EBUSY;
if(ubd_dev->count > 0)
goto out;
ubd_gendisk[n] = NULL;
if(disk != NULL){
del_gendisk(disk);
put_disk(disk);
}
if(fake_gendisk[n] != NULL){
del_gendisk(fake_gendisk[n]);
put_disk(fake_gendisk[n]);
fake_gendisk[n] = NULL;
}
err = 0;
platform_device_unregister(&ubd_dev->pdev);
out:
mutex_unlock(&ubd_lock);
return err;
}
/* All these are called by mconsole in process context and without
* ubd-specific locks. The structure itself is const except for .list.
*/
static struct mc_device ubd_mc = {
.list = LIST_HEAD_INIT(ubd_mc.list),
.name = "ubd",
.config = ubd_config,
.get_config = ubd_get_config,
.id = ubd_id,
.remove = ubd_remove,
};
static int __init ubd_mc_init(void)
{
mconsole_register_dev(&ubd_mc);
return 0;
}
__initcall(ubd_mc_init);
static int __init ubd0_init(void)
{
struct ubd *ubd_dev = &ubd_devs[0];
mutex_lock(&ubd_lock);
if(ubd_dev->file == NULL)
ubd_dev->file = "root_fs";
mutex_unlock(&ubd_lock);
return 0;
}
__initcall(ubd0_init);
/* Used in ubd_init, which is an initcall */
static struct platform_driver ubd_driver = {
.driver = {
.name = DRIVER_NAME,
},
};
static int __init ubd_init(void)
{
char *error;
int i, err;
if (register_blkdev(UBD_MAJOR, "ubd"))
return -1;
if (fake_major != UBD_MAJOR) {
char name[sizeof("ubd_nnn\0")];
snprintf(name, sizeof(name), "ubd_%d", fake_major);
if (register_blkdev(fake_major, "ubd"))
return -1;
}
platform_driver_register(&ubd_driver);
mutex_lock(&ubd_lock);
for (i = 0; i < MAX_DEV; i++){
err = ubd_add(i, &error);
if(err)
printk(KERN_ERR "Failed to initialize ubd device %d :"
"%s\n", i, error);
}
mutex_unlock(&ubd_lock);
return 0;
}
late_initcall(ubd_init);
static int __init ubd_driver_init(void){
unsigned long stack;
int err;
/* Set by CONFIG_BLK_DEV_UBD_SYNC or ubd=sync.*/
if(global_openflags.s){
printk(KERN_INFO "ubd: Synchronous mode\n");
/* Letting ubd=sync be like using ubd#s= instead of ubd#= is
* enough. So use the io thread anyway. */
}
stack = alloc_stack(0, 0);
io_pid = start_io_thread(stack + PAGE_SIZE - sizeof(void *),
&thread_fd);
if(io_pid < 0){
printk(KERN_ERR
"ubd : Failed to start I/O thread (errno = %d) - "
"falling back to synchronous I/O\n", -io_pid);
io_pid = -1;
return 0;
}
err = um_request_irq(UBD_IRQ, thread_fd, IRQ_READ, ubd_intr,
0, "ubd", ubd_devs);
if(err != 0)
printk(KERN_ERR "um_request_irq failed - errno = %d\n", -err);
return 0;
}
device_initcall(ubd_driver_init);
static int ubd_open(struct block_device *bdev, fmode_t mode)
{
struct gendisk *disk = bdev->bd_disk;
struct ubd *ubd_dev = disk->private_data;
int err = 0;
mutex_lock(&ubd_mutex);
if(ubd_dev->count == 0){
err = ubd_open_dev(ubd_dev);
if(err){
printk(KERN_ERR "%s: Can't open \"%s\": errno = %d\n",
disk->disk_name, ubd_dev->file, -err);
goto out;
}
}
ubd_dev->count++;
set_disk_ro(disk, !ubd_dev->openflags.w);
/* This should no longer be needed, and it didn't work anyway to exclude
* read-write remounting of filesystems. */
/*if((mode & FMODE_WRITE) && !ubd_dev->openflags.w){
if(--ubd_dev->count == 0) ubd_close_dev(ubd_dev);
err = -EROFS;
}*/
out:
mutex_unlock(&ubd_mutex);
return err;
}
static void ubd_release(struct gendisk *disk, fmode_t mode)
{
struct ubd *ubd_dev = disk->private_data;
mutex_lock(&ubd_mutex);
if(--ubd_dev->count == 0)
ubd_close_dev(ubd_dev);
mutex_unlock(&ubd_mutex);
}
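/* Mark the sectors covered by a write as valid in the in-memory COW
 * bitmap.  If any bit actually changed, the offset of the affected
 * bitmap words in the COW file and copies of those words are returned
 * through *cow_offset and bitmap_words so the I/O thread can write
 * them back after the data.
 */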
static void cowify_bitmap(__u64 io_offset, int length, unsigned long *cow_mask,
__u64 *cow_offset, unsigned long *bitmap,
__u64 bitmap_offset, unsigned long *bitmap_words,
__u64 bitmap_len)
{
__u64 sector = io_offset >> 9;
int i, update_bitmap = 0;
for(i = 0; i < length >> 9; i++){
if(cow_mask != NULL)
ubd_set_bit(i, (unsigned char *) cow_mask);
if(ubd_test_bit(sector + i, (unsigned char *) bitmap))
continue;
update_bitmap = 1;
ubd_set_bit(sector + i, (unsigned char *) bitmap);
}
if(!update_bitmap)
return;
*cow_offset = sector / (sizeof(unsigned long) * 8);
/* This takes care of the case where we're exactly at the end of the
* device, and *cow_offset + 1 is off the end. So, just back it up
* by one word. Thanks to Lynn Kerby for the fix and James McMechan
* for the original diagnosis.
*/
if (*cow_offset == (DIV_ROUND_UP(bitmap_len,
sizeof(unsigned long)) - 1))
(*cow_offset)--;
bitmap_words[0] = bitmap[*cow_offset];
bitmap_words[1] = bitmap[*cow_offset + 1];
*cow_offset *= sizeof(unsigned long);
*cow_offset += bitmap_offset;
}
static void cowify_req(struct io_thread_req *req, unsigned long *bitmap,
__u64 bitmap_offset, __u64 bitmap_len)
{
__u64 sector = req->offset >> 9;
int i;
if(req->length > (sizeof(req->sector_mask) * 8) << 9)
panic("Operation too long");
if(req->op == UBD_READ) {
for(i = 0; i < req->length >> 9; i++){
if(ubd_test_bit(sector + i, (unsigned char *) bitmap))
ubd_set_bit(i, (unsigned char *)
&req->sector_mask);
}
}
else cowify_bitmap(req->offset, req->length, &req->sector_mask,
&req->cow_offset, bitmap, bitmap_offset,
req->bitmap_words, bitmap_len);
}
/* Called with dev->lock held */
static void prepare_request(struct request *req, struct io_thread_req *io_req,
unsigned long long offset, int page_offset,
int len, struct page *page)
{
struct gendisk *disk = req->rq_disk;
struct ubd *ubd_dev = disk->private_data;
io_req->req = req;
io_req->fds[0] = (ubd_dev->cow.file != NULL) ? ubd_dev->cow.fd :
ubd_dev->fd;
io_req->fds[1] = ubd_dev->fd;
io_req->cow_offset = -1;
io_req->offset = offset;
io_req->length = len;
io_req->error = 0;
io_req->sector_mask = 0;
io_req->op = (rq_data_dir(req) == READ) ? UBD_READ : UBD_WRITE;
io_req->offsets[0] = 0;
io_req->offsets[1] = ubd_dev->cow.data_offset;
io_req->buffer = page_address(page) + page_offset;
io_req->sectorsize = 1 << 9;
if(ubd_dev->cow.file != NULL)
cowify_req(io_req, ubd_dev->cow.bitmap,
ubd_dev->cow.bitmap_offset, ubd_dev->cow.bitmap_len);
}
/* Called with dev->lock held */
static void prepare_flush_request(struct request *req,
struct io_thread_req *io_req)
{
struct gendisk *disk = req->rq_disk;
struct ubd *ubd_dev = disk->private_data;
io_req->req = req;
io_req->fds[0] = (ubd_dev->cow.file != NULL) ? ubd_dev->cow.fd :
ubd_dev->fd;
io_req->op = UBD_FLUSH;
}
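/* Hand one io_thread_req to the I/O thread by writing its pointer down
 * the pipe.  If the pipe is full (-EAGAIN) the device is queued on the
 * restart list so the request queue is retried from ubd_handler; in any
 * failure case the request is freed and false is returned.
 */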
static bool submit_request(struct io_thread_req *io_req, struct ubd *dev)
{
int n = os_write_file(thread_fd, &io_req,
sizeof(io_req));
if (n != sizeof(io_req)) {
if (n != -EAGAIN)
printk("write to io thread failed, "
"errno = %d\n", -n);
else if (list_empty(&dev->restart))
list_add(&dev->restart, &restart);
kfree(io_req);
return false;
}
return true;
}
/* Called with dev->lock held */
static void do_ubd_request(struct request_queue *q)
{
struct io_thread_req *io_req;
struct request *req;
while(1){
struct ubd *dev = q->queuedata;
if(dev->request == NULL){
struct request *req = blk_fetch_request(q);
if(req == NULL)
return;
dev->request = req;
dev->rq_pos = blk_rq_pos(req);
dev->start_sg = 0;
dev->end_sg = blk_rq_map_sg(q, req, dev->sg);
}
req = dev->request;
if (req->cmd_flags & REQ_FLUSH) {
io_req = kmalloc(sizeof(struct io_thread_req),
GFP_ATOMIC);
if (io_req == NULL) {
if (list_empty(&dev->restart))
list_add(&dev->restart, &restart);
return;
}
prepare_flush_request(req, io_req);
if (submit_request(io_req, dev) == false)
return;
}
while(dev->start_sg < dev->end_sg){
struct scatterlist *sg = &dev->sg[dev->start_sg];
io_req = kmalloc(sizeof(struct io_thread_req),
GFP_ATOMIC);
if(io_req == NULL){
if(list_empty(&dev->restart))
list_add(&dev->restart, &restart);
return;
}
prepare_request(req, io_req,
(unsigned long long)dev->rq_pos << 9,
sg->offset, sg->length, sg_page(sg));
if (submit_request(io_req, dev) == false)
return;
dev->rq_pos += sg->length >> 9;
dev->start_sg++;
}
dev->end_sg = 0;
dev->request = NULL;
}
}
static int ubd_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
struct ubd *ubd_dev = bdev->bd_disk->private_data;
geo->heads = 128;
geo->sectors = 32;
geo->cylinders = ubd_dev->size / (128 * 32 * 512);
return 0;
}
static int ubd_ioctl(struct block_device *bdev, fmode_t mode,
unsigned int cmd, unsigned long arg)
{
struct ubd *ubd_dev = bdev->bd_disk->private_data;
u16 ubd_id[ATA_ID_WORDS];
switch (cmd) {
struct cdrom_volctrl volume;
case HDIO_GET_IDENTITY:
memset(&ubd_id, 0, ATA_ID_WORDS * 2);
ubd_id[ATA_ID_CYLS] = ubd_dev->size / (128 * 32 * 512);
ubd_id[ATA_ID_HEADS] = 128;
ubd_id[ATA_ID_SECTORS] = 32;
if(copy_to_user((char __user *) arg, (char *) &ubd_id,
sizeof(ubd_id)))
return -EFAULT;
return 0;
case CDROMVOLREAD:
if(copy_from_user(&volume, (char __user *) arg, sizeof(volume)))
return -EFAULT;
volume.channel0 = 255;
volume.channel1 = 255;
volume.channel2 = 255;
volume.channel3 = 255;
if(copy_to_user((char __user *) arg, &volume, sizeof(volume)))
return -EFAULT;
return 0;
}
return -EINVAL;
}
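/* If the request dirtied the COW bitmap (cow_offset != -1), seek to that
 * offset in the COW file and write the two saved bitmap words back.
 * Returns nonzero on failure so the caller can flag the request as bad.
 */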
static int update_bitmap(struct io_thread_req *req)
{
int n;
if(req->cow_offset == -1)
return 0;
n = os_seek_file(req->fds[1], req->cow_offset);
if(n < 0){
printk("do_io - bitmap lseek failed : err = %d\n", -n);
return 1;
}
n = os_write_file(req->fds[1], &req->bitmap_words,
sizeof(req->bitmap_words));
if(n != sizeof(req->bitmap_words)){
printk("do_io - bitmap update failed, err = %d fd = %d\n", -n,
req->fds[1]);
return 1;
}
return 0;
}
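/* Perform the host I/O for one request, running in the I/O thread.  A
 * flush is just a sync of the data fd.  Otherwise the request is walked
 * in runs of sectors that live entirely in the COW file or entirely in
 * the backing file (according to sector_mask), each run is read or
 * written at the right offset, and finally the on-disk bitmap is updated.
 */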
static void do_io(struct io_thread_req *req)
{
char *buf;
unsigned long len;
int n, nsectors, start, end, bit;
int err;
__u64 off;
if (req->op == UBD_FLUSH) {
/* fds[0] is always either the rw image or our cow file */
n = os_sync_file(req->fds[0]);
if (n != 0) {
printk("do_io - sync failed err = %d "
"fd = %d\n", -n, req->fds[0]);
req->error = 1;
}
return;
}
nsectors = req->length / req->sectorsize;
start = 0;
do {
bit = ubd_test_bit(start, (unsigned char *) &req->sector_mask);
end = start;
while((end < nsectors) &&
(ubd_test_bit(end, (unsigned char *)
&req->sector_mask) == bit))
end++;
off = req->offset + req->offsets[bit] +
start * req->sectorsize;
len = (end - start) * req->sectorsize;
buf = &req->buffer[start * req->sectorsize];
err = os_seek_file(req->fds[bit], off);
if(err < 0){
printk("do_io - lseek failed : err = %d\n", -err);
req->error = 1;
return;
}
if(req->op == UBD_READ){
n = 0;
do {
buf = &buf[n];
len -= n;
n = os_read_file(req->fds[bit], buf, len);
if (n < 0) {
printk("do_io - read failed, err = %d "
"fd = %d\n", -n, req->fds[bit]);
req->error = 1;
return;
}
} while((n < len) && (n != 0));
if (n < len) memset(&buf[n], 0, len - n);
} else {
n = os_write_file(req->fds[bit], buf, len);
if(n != len){
printk("do_io - write failed err = %d "
"fd = %d\n", -n, req->fds[bit]);
req->error = 1;
return;
}
}
start = end;
} while(start < nsectors);
req->error = update_bitmap(req);
}
/* Changed in start_io_thread, which is serialized by being called only
* from ubd_init, which is an initcall.
*/
int kernel_fd = -1;
/* Only changed by the io thread. XXX: currently unused. */
static int io_count = 0;
int io_thread(void *arg)
{
struct io_thread_req *req;
int n;
os_fix_helper_signals();
while(1){
n = os_read_file(kernel_fd, &req,
sizeof(struct io_thread_req *));
if(n != sizeof(struct io_thread_req *)){
if(n < 0)
printk("io_thread - read failed, fd = %d, "
"err = %d\n", kernel_fd, -n);
else {
printk("io_thread - short read, fd = %d, "
"length = %d\n", kernel_fd, n);
}
continue;
}
io_count++;
do_io(req);
n = os_write_file(kernel_fd, &req,
sizeof(struct io_thread_req *));
if(n != sizeof(struct io_thread_req *))
printk("io_thread - write failed, fd = %d, err = %d\n",
kernel_fd, -n);
}
return 0;
}
| gpl-2.0 |
faust93/kernel_meizu_m576 | drivers/gpu/drm/nouveau/core/subdev/i2c/anx9805.c | 2222 | 7414 | /*
* Copyright 2013 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs <bskeggs@redhat.com>
*/
#include <subdev/i2c.h>
struct anx9805_i2c_port {
struct nouveau_i2c_port base;
u32 addr;
u32 ctrl;
};
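/* Program the link rate and lane count into the ANX9805 over the master
 * I2C bus, kick off link training and poll (5 ms steps, ~500 ms total)
 * until the chip reports completion or an error status.
 */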
static int
anx9805_train(struct nouveau_i2c_port *port, int link_nr, int link_bw, bool enh)
{
struct anx9805_i2c_port *chan = (void *)port;
struct nouveau_i2c_port *mast = (void *)nv_object(chan)->parent;
u8 tmp, i;
nv_wri2cr(mast, chan->addr, 0xa0, link_bw);
nv_wri2cr(mast, chan->addr, 0xa1, link_nr | (enh ? 0x80 : 0x00));
nv_wri2cr(mast, chan->addr, 0xa2, 0x01);
nv_wri2cr(mast, chan->addr, 0xa8, 0x01);
i = 0;
while ((tmp = nv_rdi2cr(mast, chan->addr, 0xa8)) & 0x01) {
mdelay(5);
if (i++ == 100) {
nv_error(port, "link training timed out\n");
return -ETIMEDOUT;
}
}
if (tmp & 0x70) {
nv_error(port, "link training failed: 0x%02x\n", tmp);
return -EIO;
}
return 1;
}
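/* Run a single DP AUX transaction through the ANX9805's register
 * interface: reset the AUX channel, load the request type, address and
 * any write data, start the transfer and poll (up to ~160 ms) for
 * completion, then read back the returned bytes on a read request.
 */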
static int
anx9805_aux(struct nouveau_i2c_port *port, u8 type, u32 addr, u8 *data, u8 size)
{
struct anx9805_i2c_port *chan = (void *)port;
struct nouveau_i2c_port *mast = (void *)nv_object(chan)->parent;
int i, ret = -ETIMEDOUT;
u8 tmp;
tmp = nv_rdi2cr(mast, chan->ctrl, 0x07) & ~0x04;
nv_wri2cr(mast, chan->ctrl, 0x07, tmp | 0x04);
nv_wri2cr(mast, chan->ctrl, 0x07, tmp);
nv_wri2cr(mast, chan->ctrl, 0xf7, 0x01);
nv_wri2cr(mast, chan->addr, 0xe4, 0x80);
for (i = 0; !(type & 1) && i < size; i++)
nv_wri2cr(mast, chan->addr, 0xf0 + i, data[i]);
nv_wri2cr(mast, chan->addr, 0xe5, ((size - 1) << 4) | type);
nv_wri2cr(mast, chan->addr, 0xe6, (addr & 0x000ff) >> 0);
nv_wri2cr(mast, chan->addr, 0xe7, (addr & 0x0ff00) >> 8);
nv_wri2cr(mast, chan->addr, 0xe8, (addr & 0xf0000) >> 16);
nv_wri2cr(mast, chan->addr, 0xe9, 0x01);
i = 0;
while ((tmp = nv_rdi2cr(mast, chan->addr, 0xe9)) & 0x01) {
mdelay(5);
if (i++ == 32)
goto done;
}
if ((tmp = nv_rdi2cr(mast, chan->ctrl, 0xf7)) & 0x01) {
ret = -EIO;
goto done;
}
for (i = 0; (type & 1) && i < size; i++)
data[i] = nv_rdi2cr(mast, chan->addr, 0xf0 + i);
ret = 0;
done:
nv_wri2cr(mast, chan->ctrl, 0xf7, 0x01);
return ret;
}
static const struct nouveau_i2c_func
anx9805_aux_func = {
.aux = anx9805_aux,
.lnk_ctl = anx9805_train,
};
static int
anx9805_aux_chan_ctor(struct nouveau_object *parent,
struct nouveau_object *engine,
struct nouveau_oclass *oclass, void *data, u32 index,
struct nouveau_object **pobject)
{
struct nouveau_i2c_port *mast = (void *)parent;
struct anx9805_i2c_port *chan;
int ret;
ret = nouveau_i2c_port_create(parent, engine, oclass, index,
&nouveau_i2c_aux_algo, &chan);
*pobject = nv_object(chan);
if (ret)
return ret;
switch ((oclass->handle & 0xff00) >> 8) {
case 0x0d:
chan->addr = 0x38;
chan->ctrl = 0x39;
break;
case 0x0e:
chan->addr = 0x3c;
chan->ctrl = 0x3b;
break;
default:
BUG_ON(1);
}
if (mast->adapter.algo == &i2c_bit_algo) {
struct i2c_algo_bit_data *algo = mast->adapter.algo_data;
algo->udelay = max(algo->udelay, 40);
}
chan->base.func = &anx9805_aux_func;
return 0;
}
static struct nouveau_ofuncs
anx9805_aux_ofuncs = {
.ctor = anx9805_aux_chan_ctor,
.dtor = _nouveau_i2c_port_dtor,
.init = _nouveau_i2c_port_init,
.fini = _nouveau_i2c_port_fini,
};
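/* I2C (DDC) transfers go through what appears to be the chip's dedicated
 * EDID engine: only reads from address 0x50, plus the one-byte offset
 * (0x50) and segment-pointer (0x30) writes used for EDID, are handled;
 * anything else aborts the transfer.
 */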
static int
anx9805_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
{
struct anx9805_i2c_port *port = adap->algo_data;
struct nouveau_i2c_port *mast = (void *)nv_object(port)->parent;
struct i2c_msg *msg = msgs;
int ret = -ETIMEDOUT;
int i, j, cnt = num;
u8 seg = 0x00, off = 0x00, tmp;
tmp = nv_rdi2cr(mast, port->ctrl, 0x07) & ~0x10;
nv_wri2cr(mast, port->ctrl, 0x07, tmp | 0x10);
nv_wri2cr(mast, port->ctrl, 0x07, tmp);
nv_wri2cr(mast, port->addr, 0x43, 0x05);
mdelay(5);
while (cnt--) {
if ( (msg->flags & I2C_M_RD) && msg->addr == 0x50) {
nv_wri2cr(mast, port->addr, 0x40, msg->addr << 1);
nv_wri2cr(mast, port->addr, 0x41, seg);
nv_wri2cr(mast, port->addr, 0x42, off);
nv_wri2cr(mast, port->addr, 0x44, msg->len);
nv_wri2cr(mast, port->addr, 0x45, 0x00);
nv_wri2cr(mast, port->addr, 0x43, 0x01);
for (i = 0; i < msg->len; i++) {
j = 0;
while (nv_rdi2cr(mast, port->addr, 0x46) & 0x10) {
mdelay(5);
if (j++ == 32)
goto done;
}
msg->buf[i] = nv_rdi2cr(mast, port->addr, 0x47);
}
} else
if (!(msg->flags & I2C_M_RD)) {
if (msg->addr == 0x50 && msg->len == 0x01) {
off = msg->buf[0];
} else
if (msg->addr == 0x30 && msg->len == 0x01) {
seg = msg->buf[0];
} else
goto done;
} else {
goto done;
}
msg++;
}
ret = num;
done:
nv_wri2cr(mast, port->addr, 0x43, 0x00);
return ret;
}
static u32
anx9805_func(struct i2c_adapter *adap)
{
return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
}
static const struct i2c_algorithm
anx9805_i2c_algo = {
.master_xfer = anx9805_xfer,
.functionality = anx9805_func
};
static const struct nouveau_i2c_func
anx9805_i2c_func = {
};
static int
anx9805_ddc_port_ctor(struct nouveau_object *parent,
struct nouveau_object *engine,
struct nouveau_oclass *oclass, void *data, u32 index,
struct nouveau_object **pobject)
{
struct nouveau_i2c_port *mast = (void *)parent;
struct anx9805_i2c_port *port;
int ret;
ret = nouveau_i2c_port_create(parent, engine, oclass, index,
&anx9805_i2c_algo, &port);
*pobject = nv_object(port);
if (ret)
return ret;
switch ((oclass->handle & 0xff00) >> 8) {
case 0x0d:
port->addr = 0x3d;
port->ctrl = 0x39;
break;
case 0x0e:
port->addr = 0x3f;
port->ctrl = 0x3b;
break;
default:
BUG_ON(1);
}
if (mast->adapter.algo == &i2c_bit_algo) {
struct i2c_algo_bit_data *algo = mast->adapter.algo_data;
algo->udelay = max(algo->udelay, 40);
}
port->base.func = &anx9805_i2c_func;
return 0;
}
static struct nouveau_ofuncs
anx9805_ddc_ofuncs = {
.ctor = anx9805_ddc_port_ctor,
.dtor = _nouveau_i2c_port_dtor,
.init = _nouveau_i2c_port_init,
.fini = _nouveau_i2c_port_fini,
};
struct nouveau_oclass
nouveau_anx9805_sclass[] = {
{ .handle = NV_I2C_TYPE_EXTDDC(0x0d), .ofuncs = &anx9805_ddc_ofuncs },
{ .handle = NV_I2C_TYPE_EXTAUX(0x0d), .ofuncs = &anx9805_aux_ofuncs },
{ .handle = NV_I2C_TYPE_EXTDDC(0x0e), .ofuncs = &anx9805_ddc_ofuncs },
{ .handle = NV_I2C_TYPE_EXTAUX(0x0e), .ofuncs = &anx9805_aux_ofuncs },
{}
};
| gpl-2.0 |
skeller/linux | sound/soc/codecs/wm8988.c | 2478 | 27373 | /*
* wm8988.c -- WM8988 ALSA SoC audio driver
*
* Copyright 2009 Wolfson Microelectronics plc
* Copyright 2005 Openedhand Ltd.
*
* Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/pm.h>
#include <linux/i2c.h>
#include <linux/spi/spi.h>
#include <linux/slab.h>
#include <sound/core.h>
#include <sound/pcm.h>
#include <sound/pcm_params.h>
#include <sound/tlv.h>
#include <sound/soc.h>
#include <sound/initval.h>
#include "wm8988.h"
/*
* wm8988 register cache
* We can't read the WM8988 register space when we
* are using 2 wire for device control, so we cache them instead.
*/
static const struct reg_default wm8988_reg_defaults[] = {
{ 0, 0x0097 },
{ 1, 0x0097 },
{ 2, 0x0079 },
{ 3, 0x0079 },
{ 5, 0x0008 },
{ 7, 0x000a },
{ 8, 0x0000 },
{ 10, 0x00ff },
{ 11, 0x00ff },
{ 12, 0x000f },
{ 13, 0x000f },
{ 16, 0x0000 },
{ 17, 0x007b },
{ 18, 0x0000 },
{ 19, 0x0032 },
{ 20, 0x0000 },
{ 21, 0x00c3 },
{ 22, 0x00c3 },
{ 23, 0x00c0 },
{ 24, 0x0000 },
{ 25, 0x0000 },
{ 26, 0x0000 },
{ 27, 0x0000 },
{ 31, 0x0000 },
{ 32, 0x0000 },
{ 33, 0x0000 },
{ 34, 0x0050 },
{ 35, 0x0050 },
{ 36, 0x0050 },
{ 37, 0x0050 },
{ 40, 0x0079 },
{ 41, 0x0079 },
{ 42, 0x0079 },
};
static bool wm8988_writeable(struct device *dev, unsigned int reg)
{
switch (reg) {
case WM8988_LINVOL:
case WM8988_RINVOL:
case WM8988_LOUT1V:
case WM8988_ROUT1V:
case WM8988_ADCDAC:
case WM8988_IFACE:
case WM8988_SRATE:
case WM8988_LDAC:
case WM8988_RDAC:
case WM8988_BASS:
case WM8988_TREBLE:
case WM8988_RESET:
case WM8988_3D:
case WM8988_ALC1:
case WM8988_ALC2:
case WM8988_ALC3:
case WM8988_NGATE:
case WM8988_LADC:
case WM8988_RADC:
case WM8988_ADCTL1:
case WM8988_ADCTL2:
case WM8988_PWR1:
case WM8988_PWR2:
case WM8988_ADCTL3:
case WM8988_ADCIN:
case WM8988_LADCIN:
case WM8988_RADCIN:
case WM8988_LOUTM1:
case WM8988_LOUTM2:
case WM8988_ROUTM1:
case WM8988_ROUTM2:
case WM8988_LOUT2V:
case WM8988_ROUT2V:
case WM8988_LPPB:
return true;
default:
return false;
}
}
/* codec private data */
struct wm8988_priv {
struct regmap *regmap;
unsigned int sysclk;
struct snd_pcm_hw_constraint_list *sysclk_constraints;
};
#define wm8988_reset(c) snd_soc_write(c, WM8988_RESET, 0)
/*
* WM8988 Controls
*/
static const char *bass_boost_txt[] = {"Linear Control", "Adaptive Boost"};
static const struct soc_enum bass_boost =
SOC_ENUM_SINGLE(WM8988_BASS, 7, 2, bass_boost_txt);
static const char *bass_filter_txt[] = { "130Hz @ 48kHz", "200Hz @ 48kHz" };
static const struct soc_enum bass_filter =
SOC_ENUM_SINGLE(WM8988_BASS, 6, 2, bass_filter_txt);
static const char *treble_txt[] = {"8kHz", "4kHz"};
static const struct soc_enum treble =
SOC_ENUM_SINGLE(WM8988_TREBLE, 6, 2, treble_txt);
static const char *stereo_3d_lc_txt[] = {"200Hz", "500Hz"};
static const struct soc_enum stereo_3d_lc =
SOC_ENUM_SINGLE(WM8988_3D, 5, 2, stereo_3d_lc_txt);
static const char *stereo_3d_uc_txt[] = {"2.2kHz", "1.5kHz"};
static const struct soc_enum stereo_3d_uc =
SOC_ENUM_SINGLE(WM8988_3D, 6, 2, stereo_3d_uc_txt);
static const char *stereo_3d_func_txt[] = {"Capture", "Playback"};
static const struct soc_enum stereo_3d_func =
SOC_ENUM_SINGLE(WM8988_3D, 7, 2, stereo_3d_func_txt);
static const char *alc_func_txt[] = {"Off", "Right", "Left", "Stereo"};
static const struct soc_enum alc_func =
SOC_ENUM_SINGLE(WM8988_ALC1, 7, 4, alc_func_txt);
static const char *ng_type_txt[] = {"Constant PGA Gain",
"Mute ADC Output"};
static const struct soc_enum ng_type =
SOC_ENUM_SINGLE(WM8988_NGATE, 1, 2, ng_type_txt);
static const char *deemph_txt[] = {"None", "32Khz", "44.1Khz", "48Khz"};
static const struct soc_enum deemph =
SOC_ENUM_SINGLE(WM8988_ADCDAC, 1, 4, deemph_txt);
static const char *adcpol_txt[] = {"Normal", "L Invert", "R Invert",
"L + R Invert"};
static const struct soc_enum adcpol =
SOC_ENUM_SINGLE(WM8988_ADCDAC, 5, 4, adcpol_txt);
static const DECLARE_TLV_DB_SCALE(pga_tlv, -1725, 75, 0);
static const DECLARE_TLV_DB_SCALE(adc_tlv, -9750, 50, 1);
static const DECLARE_TLV_DB_SCALE(dac_tlv, -12750, 50, 1);
static const DECLARE_TLV_DB_SCALE(out_tlv, -12100, 100, 1);
static const DECLARE_TLV_DB_SCALE(bypass_tlv, -1500, 300, 0);
static const struct snd_kcontrol_new wm8988_snd_controls[] = {
SOC_ENUM("Bass Boost", bass_boost),
SOC_ENUM("Bass Filter", bass_filter),
SOC_SINGLE("Bass Volume", WM8988_BASS, 0, 15, 1),
SOC_SINGLE("Treble Volume", WM8988_TREBLE, 0, 15, 0),
SOC_ENUM("Treble Cut-off", treble),
SOC_SINGLE("3D Switch", WM8988_3D, 0, 1, 0),
SOC_SINGLE("3D Volume", WM8988_3D, 1, 15, 0),
SOC_ENUM("3D Lower Cut-off", stereo_3d_lc),
SOC_ENUM("3D Upper Cut-off", stereo_3d_uc),
SOC_ENUM("3D Mode", stereo_3d_func),
SOC_SINGLE("ALC Capture Target Volume", WM8988_ALC1, 0, 7, 0),
SOC_SINGLE("ALC Capture Max Volume", WM8988_ALC1, 4, 7, 0),
SOC_ENUM("ALC Capture Function", alc_func),
SOC_SINGLE("ALC Capture ZC Switch", WM8988_ALC2, 7, 1, 0),
SOC_SINGLE("ALC Capture Hold Time", WM8988_ALC2, 0, 15, 0),
SOC_SINGLE("ALC Capture Decay Time", WM8988_ALC3, 4, 15, 0),
SOC_SINGLE("ALC Capture Attack Time", WM8988_ALC3, 0, 15, 0),
SOC_SINGLE("ALC Capture NG Threshold", WM8988_NGATE, 3, 31, 0),
SOC_ENUM("ALC Capture NG Type", ng_type),
SOC_SINGLE("ALC Capture NG Switch", WM8988_NGATE, 0, 1, 0),
SOC_SINGLE("ZC Timeout Switch", WM8988_ADCTL1, 0, 1, 0),
SOC_DOUBLE_R_TLV("Capture Digital Volume", WM8988_LADC, WM8988_RADC,
0, 255, 0, adc_tlv),
SOC_DOUBLE_R_TLV("Capture Volume", WM8988_LINVOL, WM8988_RINVOL,
0, 63, 0, pga_tlv),
SOC_DOUBLE_R("Capture ZC Switch", WM8988_LINVOL, WM8988_RINVOL, 6, 1, 0),
SOC_DOUBLE_R("Capture Switch", WM8988_LINVOL, WM8988_RINVOL, 7, 1, 1),
SOC_ENUM("Playback De-emphasis", deemph),
SOC_ENUM("Capture Polarity", adcpol),
SOC_SINGLE("Playback 6dB Attenuate", WM8988_ADCDAC, 7, 1, 0),
SOC_SINGLE("Capture 6dB Attenuate", WM8988_ADCDAC, 8, 1, 0),
SOC_DOUBLE_R_TLV("PCM Volume", WM8988_LDAC, WM8988_RDAC, 0, 255, 0, dac_tlv),
SOC_SINGLE_TLV("Left Mixer Left Bypass Volume", WM8988_LOUTM1, 4, 7, 1,
bypass_tlv),
SOC_SINGLE_TLV("Left Mixer Right Bypass Volume", WM8988_LOUTM2, 4, 7, 1,
bypass_tlv),
SOC_SINGLE_TLV("Right Mixer Left Bypass Volume", WM8988_ROUTM1, 4, 7, 1,
bypass_tlv),
SOC_SINGLE_TLV("Right Mixer Right Bypass Volume", WM8988_ROUTM2, 4, 7, 1,
bypass_tlv),
SOC_DOUBLE_R("Output 1 Playback ZC Switch", WM8988_LOUT1V,
WM8988_ROUT1V, 7, 1, 0),
SOC_DOUBLE_R_TLV("Output 1 Playback Volume", WM8988_LOUT1V, WM8988_ROUT1V,
0, 127, 0, out_tlv),
SOC_DOUBLE_R("Output 2 Playback ZC Switch", WM8988_LOUT2V,
WM8988_ROUT2V, 7, 1, 0),
SOC_DOUBLE_R_TLV("Output 2 Playback Volume", WM8988_LOUT2V, WM8988_ROUT2V,
0, 127, 0, out_tlv),
};
/*
* DAPM Controls
*/
static int wm8988_lrc_control(struct snd_soc_dapm_widget *w,
struct snd_kcontrol *kcontrol, int event)
{
struct snd_soc_codec *codec = w->codec;
u16 adctl2 = snd_soc_read(codec, WM8988_ADCTL2);
/* Use the DAC to gate LRC if active, otherwise use ADC */
if (snd_soc_read(codec, WM8988_PWR2) & 0x180)
adctl2 &= ~0x4;
else
adctl2 |= 0x4;
return snd_soc_write(codec, WM8988_ADCTL2, adctl2);
}
static const char *wm8988_line_texts[] = {
"Line 1", "Line 2", "PGA", "Differential"};
static const unsigned int wm8988_line_values[] = {
0, 1, 3, 4};
static const struct soc_enum wm8988_lline_enum =
SOC_VALUE_ENUM_SINGLE(WM8988_LOUTM1, 0, 7,
ARRAY_SIZE(wm8988_line_texts),
wm8988_line_texts,
wm8988_line_values);
static const struct snd_kcontrol_new wm8988_left_line_controls =
SOC_DAPM_VALUE_ENUM("Route", wm8988_lline_enum);
static const struct soc_enum wm8988_rline_enum =
SOC_VALUE_ENUM_SINGLE(WM8988_ROUTM1, 0, 7,
ARRAY_SIZE(wm8988_line_texts),
wm8988_line_texts,
wm8988_line_values);
static const struct snd_kcontrol_new wm8988_right_line_controls =
SOC_DAPM_VALUE_ENUM("Route", wm8988_lline_enum);
/* Left Mixer */
static const struct snd_kcontrol_new wm8988_left_mixer_controls[] = {
SOC_DAPM_SINGLE("Playback Switch", WM8988_LOUTM1, 8, 1, 0),
SOC_DAPM_SINGLE("Left Bypass Switch", WM8988_LOUTM1, 7, 1, 0),
SOC_DAPM_SINGLE("Right Playback Switch", WM8988_LOUTM2, 8, 1, 0),
SOC_DAPM_SINGLE("Right Bypass Switch", WM8988_LOUTM2, 7, 1, 0),
};
/* Right Mixer */
static const struct snd_kcontrol_new wm8988_right_mixer_controls[] = {
SOC_DAPM_SINGLE("Left Playback Switch", WM8988_ROUTM1, 8, 1, 0),
SOC_DAPM_SINGLE("Left Bypass Switch", WM8988_ROUTM1, 7, 1, 0),
SOC_DAPM_SINGLE("Playback Switch", WM8988_ROUTM2, 8, 1, 0),
SOC_DAPM_SINGLE("Right Bypass Switch", WM8988_ROUTM2, 7, 1, 0),
};
static const char *wm8988_pga_sel[] = {"Line 1", "Line 2", "Differential"};
static const unsigned int wm8988_pga_val[] = { 0, 1, 3 };
/* Left PGA Mux */
static const struct soc_enum wm8988_lpga_enum =
SOC_VALUE_ENUM_SINGLE(WM8988_LADCIN, 6, 3,
ARRAY_SIZE(wm8988_pga_sel),
wm8988_pga_sel,
wm8988_pga_val);
static const struct snd_kcontrol_new wm8988_left_pga_controls =
SOC_DAPM_VALUE_ENUM("Route", wm8988_lpga_enum);
/* Right PGA Mux */
static const struct soc_enum wm8988_rpga_enum =
SOC_VALUE_ENUM_SINGLE(WM8988_RADCIN, 6, 3,
ARRAY_SIZE(wm8988_pga_sel),
wm8988_pga_sel,
wm8988_pga_val);
static const struct snd_kcontrol_new wm8988_right_pga_controls =
SOC_DAPM_VALUE_ENUM("Route", wm8988_rpga_enum);
/* Differential Mux */
static const char *wm8988_diff_sel[] = {"Line 1", "Line 2"};
static const struct soc_enum diffmux =
SOC_ENUM_SINGLE(WM8988_ADCIN, 8, 2, wm8988_diff_sel);
static const struct snd_kcontrol_new wm8988_diffmux_controls =
SOC_DAPM_ENUM("Route", diffmux);
/* Mono ADC Mux */
static const char *wm8988_mono_mux[] = {"Stereo", "Mono (Left)",
"Mono (Right)", "Digital Mono"};
static const struct soc_enum monomux =
SOC_ENUM_SINGLE(WM8988_ADCIN, 6, 4, wm8988_mono_mux);
static const struct snd_kcontrol_new wm8988_monomux_controls =
SOC_DAPM_ENUM("Route", monomux);
static const struct snd_soc_dapm_widget wm8988_dapm_widgets[] = {
SND_SOC_DAPM_SUPPLY("Mic Bias", WM8988_PWR1, 1, 0, NULL, 0),
SND_SOC_DAPM_MUX("Differential Mux", SND_SOC_NOPM, 0, 0,
&wm8988_diffmux_controls),
SND_SOC_DAPM_MUX("Left ADC Mux", SND_SOC_NOPM, 0, 0,
&wm8988_monomux_controls),
SND_SOC_DAPM_MUX("Right ADC Mux", SND_SOC_NOPM, 0, 0,
&wm8988_monomux_controls),
SND_SOC_DAPM_MUX("Left PGA Mux", WM8988_PWR1, 5, 0,
&wm8988_left_pga_controls),
SND_SOC_DAPM_MUX("Right PGA Mux", WM8988_PWR1, 4, 0,
&wm8988_right_pga_controls),
SND_SOC_DAPM_MUX("Left Line Mux", SND_SOC_NOPM, 0, 0,
&wm8988_left_line_controls),
SND_SOC_DAPM_MUX("Right Line Mux", SND_SOC_NOPM, 0, 0,
&wm8988_right_line_controls),
SND_SOC_DAPM_ADC("Right ADC", "Right Capture", WM8988_PWR1, 2, 0),
SND_SOC_DAPM_ADC("Left ADC", "Left Capture", WM8988_PWR1, 3, 0),
SND_SOC_DAPM_DAC("Right DAC", "Right Playback", WM8988_PWR2, 7, 0),
SND_SOC_DAPM_DAC("Left DAC", "Left Playback", WM8988_PWR2, 8, 0),
SND_SOC_DAPM_MIXER("Left Mixer", SND_SOC_NOPM, 0, 0,
&wm8988_left_mixer_controls[0],
ARRAY_SIZE(wm8988_left_mixer_controls)),
SND_SOC_DAPM_MIXER("Right Mixer", SND_SOC_NOPM, 0, 0,
&wm8988_right_mixer_controls[0],
ARRAY_SIZE(wm8988_right_mixer_controls)),
SND_SOC_DAPM_PGA("Right Out 2", WM8988_PWR2, 3, 0, NULL, 0),
SND_SOC_DAPM_PGA("Left Out 2", WM8988_PWR2, 4, 0, NULL, 0),
SND_SOC_DAPM_PGA("Right Out 1", WM8988_PWR2, 5, 0, NULL, 0),
SND_SOC_DAPM_PGA("Left Out 1", WM8988_PWR2, 6, 0, NULL, 0),
SND_SOC_DAPM_POST("LRC control", wm8988_lrc_control),
SND_SOC_DAPM_OUTPUT("LOUT1"),
SND_SOC_DAPM_OUTPUT("ROUT1"),
SND_SOC_DAPM_OUTPUT("LOUT2"),
SND_SOC_DAPM_OUTPUT("ROUT2"),
SND_SOC_DAPM_OUTPUT("VREF"),
SND_SOC_DAPM_INPUT("LINPUT1"),
SND_SOC_DAPM_INPUT("LINPUT2"),
SND_SOC_DAPM_INPUT("RINPUT1"),
SND_SOC_DAPM_INPUT("RINPUT2"),
};
static const struct snd_soc_dapm_route wm8988_dapm_routes[] = {
{ "Left Line Mux", "Line 1", "LINPUT1" },
{ "Left Line Mux", "Line 2", "LINPUT2" },
{ "Left Line Mux", "PGA", "Left PGA Mux" },
{ "Left Line Mux", "Differential", "Differential Mux" },
{ "Right Line Mux", "Line 1", "RINPUT1" },
{ "Right Line Mux", "Line 2", "RINPUT2" },
{ "Right Line Mux", "PGA", "Right PGA Mux" },
{ "Right Line Mux", "Differential", "Differential Mux" },
{ "Left PGA Mux", "Line 1", "LINPUT1" },
{ "Left PGA Mux", "Line 2", "LINPUT2" },
{ "Left PGA Mux", "Differential", "Differential Mux" },
{ "Right PGA Mux", "Line 1", "RINPUT1" },
{ "Right PGA Mux", "Line 2", "RINPUT2" },
{ "Right PGA Mux", "Differential", "Differential Mux" },
{ "Differential Mux", "Line 1", "LINPUT1" },
{ "Differential Mux", "Line 1", "RINPUT1" },
{ "Differential Mux", "Line 2", "LINPUT2" },
{ "Differential Mux", "Line 2", "RINPUT2" },
{ "Left ADC Mux", "Stereo", "Left PGA Mux" },
{ "Left ADC Mux", "Mono (Left)", "Left PGA Mux" },
{ "Left ADC Mux", "Digital Mono", "Left PGA Mux" },
{ "Right ADC Mux", "Stereo", "Right PGA Mux" },
{ "Right ADC Mux", "Mono (Right)", "Right PGA Mux" },
{ "Right ADC Mux", "Digital Mono", "Right PGA Mux" },
{ "Left ADC", NULL, "Left ADC Mux" },
{ "Right ADC", NULL, "Right ADC Mux" },
{ "Left Line Mux", "Line 1", "LINPUT1" },
{ "Left Line Mux", "Line 2", "LINPUT2" },
{ "Left Line Mux", "PGA", "Left PGA Mux" },
{ "Left Line Mux", "Differential", "Differential Mux" },
{ "Right Line Mux", "Line 1", "RINPUT1" },
{ "Right Line Mux", "Line 2", "RINPUT2" },
{ "Right Line Mux", "PGA", "Right PGA Mux" },
{ "Right Line Mux", "Differential", "Differential Mux" },
{ "Left Mixer", "Playback Switch", "Left DAC" },
{ "Left Mixer", "Left Bypass Switch", "Left Line Mux" },
{ "Left Mixer", "Right Playback Switch", "Right DAC" },
{ "Left Mixer", "Right Bypass Switch", "Right Line Mux" },
{ "Right Mixer", "Left Playback Switch", "Left DAC" },
{ "Right Mixer", "Left Bypass Switch", "Left Line Mux" },
{ "Right Mixer", "Playback Switch", "Right DAC" },
{ "Right Mixer", "Right Bypass Switch", "Right Line Mux" },
{ "Left Out 1", NULL, "Left Mixer" },
{ "LOUT1", NULL, "Left Out 1" },
{ "Right Out 1", NULL, "Right Mixer" },
{ "ROUT1", NULL, "Right Out 1" },
{ "Left Out 2", NULL, "Left Mixer" },
{ "LOUT2", NULL, "Left Out 2" },
{ "Right Out 2", NULL, "Right Mixer" },
{ "ROUT2", NULL, "Right Out 2" },
};
struct _coeff_div {
u32 mclk;
u32 rate;
u16 fs;
u8 sr:5;
u8 usb:1;
};
/* codec hifi mclk clock divider coefficients */
static const struct _coeff_div coeff_div[] = {
/* 8k */
{12288000, 8000, 1536, 0x6, 0x0},
{11289600, 8000, 1408, 0x16, 0x0},
{18432000, 8000, 2304, 0x7, 0x0},
{16934400, 8000, 2112, 0x17, 0x0},
{12000000, 8000, 1500, 0x6, 0x1},
/* 11.025k */
{11289600, 11025, 1024, 0x18, 0x0},
{16934400, 11025, 1536, 0x19, 0x0},
{12000000, 11025, 1088, 0x19, 0x1},
/* 16k */
{12288000, 16000, 768, 0xa, 0x0},
{18432000, 16000, 1152, 0xb, 0x0},
{12000000, 16000, 750, 0xa, 0x1},
/* 22.05k */
{11289600, 22050, 512, 0x1a, 0x0},
{16934400, 22050, 768, 0x1b, 0x0},
{12000000, 22050, 544, 0x1b, 0x1},
/* 32k */
{12288000, 32000, 384, 0xc, 0x0},
{18432000, 32000, 576, 0xd, 0x0},
{12000000, 32000, 375, 0xa, 0x1},
/* 44.1k */
{11289600, 44100, 256, 0x10, 0x0},
{16934400, 44100, 384, 0x11, 0x0},
{12000000, 44100, 272, 0x11, 0x1},
/* 48k */
{12288000, 48000, 256, 0x0, 0x0},
{18432000, 48000, 384, 0x1, 0x0},
{12000000, 48000, 250, 0x0, 0x1},
/* 88.2k */
{11289600, 88200, 128, 0x1e, 0x0},
{16934400, 88200, 192, 0x1f, 0x0},
{12000000, 88200, 136, 0x1f, 0x1},
/* 96k */
{12288000, 96000, 128, 0xe, 0x0},
{18432000, 96000, 192, 0xf, 0x0},
{12000000, 96000, 125, 0xe, 0x1},
};
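/* Find the coeff_div[] entry matching the given MCLK and sample rate;
 * returns its index, or -EINVAL if no divider fits.
 */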
static inline int get_coeff(int mclk, int rate)
{
int i;
for (i = 0; i < ARRAY_SIZE(coeff_div); i++) {
if (coeff_div[i].rate == rate && coeff_div[i].mclk == mclk)
return i;
}
return -EINVAL;
}
/* The set of rates we can generate from the above for each SYSCLK */
static unsigned int rates_12288[] = {
8000, 12000, 16000, 24000, 24000, 32000, 48000, 96000,
};
static struct snd_pcm_hw_constraint_list constraints_12288 = {
.count = ARRAY_SIZE(rates_12288),
.list = rates_12288,
};
static unsigned int rates_112896[] = {
8000, 11025, 22050, 44100,
};
static struct snd_pcm_hw_constraint_list constraints_112896 = {
.count = ARRAY_SIZE(rates_112896),
.list = rates_112896,
};
static unsigned int rates_12[] = {
8000, 11025, 12000, 16000, 22050, 24000, 32000, 41100, 48000,
48000, 88235, 96000,
};
static struct snd_pcm_hw_constraint_list constraints_12 = {
.count = ARRAY_SIZE(rates_12),
.list = rates_12,
};
/*
* Note that this should be called from init rather than from hw_params.
*/
static int wm8988_set_dai_sysclk(struct snd_soc_dai *codec_dai,
int clk_id, unsigned int freq, int dir)
{
struct snd_soc_codec *codec = codec_dai->codec;
struct wm8988_priv *wm8988 = snd_soc_codec_get_drvdata(codec);
switch (freq) {
case 11289600:
case 18432000:
case 22579200:
case 36864000:
wm8988->sysclk_constraints = &constraints_112896;
wm8988->sysclk = freq;
return 0;
case 12288000:
case 16934400:
case 24576000:
case 33868800:
wm8988->sysclk_constraints = &constraints_12288;
wm8988->sysclk = freq;
return 0;
case 12000000:
case 24000000:
wm8988->sysclk_constraints = &constraints_12;
wm8988->sysclk = freq;
return 0;
}
return -EINVAL;
}
static int wm8988_set_dai_fmt(struct snd_soc_dai *codec_dai,
unsigned int fmt)
{
struct snd_soc_codec *codec = codec_dai->codec;
u16 iface = 0;
/* set master/slave audio interface */
switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
case SND_SOC_DAIFMT_CBM_CFM:
iface = 0x0040;
break;
case SND_SOC_DAIFMT_CBS_CFS:
break;
default:
return -EINVAL;
}
/* interface format */
switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
case SND_SOC_DAIFMT_I2S:
iface |= 0x0002;
break;
case SND_SOC_DAIFMT_RIGHT_J:
break;
case SND_SOC_DAIFMT_LEFT_J:
iface |= 0x0001;
break;
case SND_SOC_DAIFMT_DSP_A:
iface |= 0x0003;
break;
case SND_SOC_DAIFMT_DSP_B:
iface |= 0x0013;
break;
default:
return -EINVAL;
}
/* clock inversion */
switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
case SND_SOC_DAIFMT_NB_NF:
break;
case SND_SOC_DAIFMT_IB_IF:
iface |= 0x0090;
break;
case SND_SOC_DAIFMT_IB_NF:
iface |= 0x0080;
break;
case SND_SOC_DAIFMT_NB_IF:
iface |= 0x0010;
break;
default:
return -EINVAL;
}
snd_soc_write(codec, WM8988_IFACE, iface);
return 0;
}
static int wm8988_pcm_startup(struct snd_pcm_substream *substream,
struct snd_soc_dai *dai)
{
struct snd_soc_codec *codec = dai->codec;
struct wm8988_priv *wm8988 = snd_soc_codec_get_drvdata(codec);
/* The set of sample rates that can be supported depends on the
* MCLK supplied to the CODEC - enforce this.
*/
if (!wm8988->sysclk) {
dev_err(codec->dev,
"No MCLK configured, call set_sysclk() on init\n");
return -EINVAL;
}
snd_pcm_hw_constraint_list(substream->runtime, 0,
SNDRV_PCM_HW_PARAM_RATE,
wm8988->sysclk_constraints);
return 0;
}
static int wm8988_pcm_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params,
struct snd_soc_dai *dai)
{
struct snd_soc_codec *codec = dai->codec;
struct wm8988_priv *wm8988 = snd_soc_codec_get_drvdata(codec);
u16 iface = snd_soc_read(codec, WM8988_IFACE) & 0x1f3;
u16 srate = snd_soc_read(codec, WM8988_SRATE) & 0x180;
int coeff;
coeff = get_coeff(wm8988->sysclk, params_rate(params));
if (coeff < 0) {
coeff = get_coeff(wm8988->sysclk / 2, params_rate(params));
srate |= 0x40;
}
if (coeff < 0) {
dev_err(codec->dev,
"Unable to configure sample rate %dHz with %dHz MCLK\n",
params_rate(params), wm8988->sysclk);
return coeff;
}
/* bit size */
switch (params_format(params)) {
case SNDRV_PCM_FORMAT_S16_LE:
break;
case SNDRV_PCM_FORMAT_S20_3LE:
iface |= 0x0004;
break;
case SNDRV_PCM_FORMAT_S24_LE:
iface |= 0x0008;
break;
case SNDRV_PCM_FORMAT_S32_LE:
iface |= 0x000c;
break;
}
/* set iface & srate */
snd_soc_write(codec, WM8988_IFACE, iface);
if (coeff >= 0)
snd_soc_write(codec, WM8988_SRATE, srate |
(coeff_div[coeff].sr << 1) | coeff_div[coeff].usb);
return 0;
}
static int wm8988_mute(struct snd_soc_dai *dai, int mute)
{
struct snd_soc_codec *codec = dai->codec;
u16 mute_reg = snd_soc_read(codec, WM8988_ADCDAC) & 0xfff7;
if (mute)
snd_soc_write(codec, WM8988_ADCDAC, mute_reg | 0x8);
else
snd_soc_write(codec, WM8988_ADCDAC, mute_reg);
return 0;
}
static int wm8988_set_bias_level(struct snd_soc_codec *codec,
enum snd_soc_bias_level level)
{
struct wm8988_priv *wm8988 = snd_soc_codec_get_drvdata(codec);
u16 pwr_reg = snd_soc_read(codec, WM8988_PWR1) & ~0x1c1;
switch (level) {
case SND_SOC_BIAS_ON:
break;
case SND_SOC_BIAS_PREPARE:
/* VREF, VMID=2x50k, digital enabled */
snd_soc_write(codec, WM8988_PWR1, pwr_reg | 0x00c0);
break;
case SND_SOC_BIAS_STANDBY:
if (codec->dapm.bias_level == SND_SOC_BIAS_OFF) {
regcache_sync(wm8988->regmap);
/* VREF, VMID=2x5k */
snd_soc_write(codec, WM8988_PWR1, pwr_reg | 0x1c1);
/* Charge caps */
msleep(100);
}
/* VREF, VMID=2*500k, digital stopped */
snd_soc_write(codec, WM8988_PWR1, pwr_reg | 0x0141);
break;
case SND_SOC_BIAS_OFF:
snd_soc_write(codec, WM8988_PWR1, 0x0000);
break;
}
codec->dapm.bias_level = level;
return 0;
}
#define WM8988_RATES SNDRV_PCM_RATE_8000_96000
#define WM8988_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S20_3LE |\
SNDRV_PCM_FMTBIT_S24_LE)
static const struct snd_soc_dai_ops wm8988_ops = {
.startup = wm8988_pcm_startup,
.hw_params = wm8988_pcm_hw_params,
.set_fmt = wm8988_set_dai_fmt,
.set_sysclk = wm8988_set_dai_sysclk,
.digital_mute = wm8988_mute,
};
static struct snd_soc_dai_driver wm8988_dai = {
.name = "wm8988-hifi",
.playback = {
.stream_name = "Playback",
.channels_min = 1,
.channels_max = 2,
.rates = WM8988_RATES,
.formats = WM8988_FORMATS,
},
.capture = {
.stream_name = "Capture",
.channels_min = 1,
.channels_max = 2,
.rates = WM8988_RATES,
.formats = WM8988_FORMATS,
},
.ops = &wm8988_ops,
.symmetric_rates = 1,
};
static int wm8988_suspend(struct snd_soc_codec *codec)
{
struct wm8988_priv *wm8988 = snd_soc_codec_get_drvdata(codec);
wm8988_set_bias_level(codec, SND_SOC_BIAS_OFF);
regcache_mark_dirty(wm8988->regmap);
return 0;
}
static int wm8988_resume(struct snd_soc_codec *codec)
{
wm8988_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
return 0;
}
static int wm8988_probe(struct snd_soc_codec *codec)
{
struct wm8988_priv *wm8988 = snd_soc_codec_get_drvdata(codec);
int ret = 0;
codec->control_data = wm8988->regmap;
ret = snd_soc_codec_set_cache_io(codec, 7, 9, SND_SOC_REGMAP);
if (ret < 0) {
dev_err(codec->dev, "Failed to set cache I/O: %d\n", ret);
return ret;
}
ret = wm8988_reset(codec);
if (ret < 0) {
dev_err(codec->dev, "Failed to issue reset\n");
return ret;
}
/* set the update bits (we always update left then right) */
snd_soc_update_bits(codec, WM8988_RADC, 0x0100, 0x0100);
snd_soc_update_bits(codec, WM8988_RDAC, 0x0100, 0x0100);
snd_soc_update_bits(codec, WM8988_ROUT1V, 0x0100, 0x0100);
snd_soc_update_bits(codec, WM8988_ROUT2V, 0x0100, 0x0100);
snd_soc_update_bits(codec, WM8988_RINVOL, 0x0100, 0x0100);
wm8988_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
return 0;
}
static int wm8988_remove(struct snd_soc_codec *codec)
{
wm8988_set_bias_level(codec, SND_SOC_BIAS_OFF);
return 0;
}
static struct snd_soc_codec_driver soc_codec_dev_wm8988 = {
.probe = wm8988_probe,
.remove = wm8988_remove,
.suspend = wm8988_suspend,
.resume = wm8988_resume,
.set_bias_level = wm8988_set_bias_level,
.controls = wm8988_snd_controls,
.num_controls = ARRAY_SIZE(wm8988_snd_controls),
.dapm_widgets = wm8988_dapm_widgets,
.num_dapm_widgets = ARRAY_SIZE(wm8988_dapm_widgets),
.dapm_routes = wm8988_dapm_routes,
.num_dapm_routes = ARRAY_SIZE(wm8988_dapm_routes),
};
static struct regmap_config wm8988_regmap = {
.reg_bits = 7,
.val_bits = 9,
.max_register = WM8988_LPPB,
.writeable_reg = wm8988_writeable,
.cache_type = REGCACHE_RBTREE,
.reg_defaults = wm8988_reg_defaults,
.num_reg_defaults = ARRAY_SIZE(wm8988_reg_defaults),
};
#if defined(CONFIG_SPI_MASTER)
static int wm8988_spi_probe(struct spi_device *spi)
{
struct wm8988_priv *wm8988;
int ret;
wm8988 = devm_kzalloc(&spi->dev, sizeof(struct wm8988_priv),
GFP_KERNEL);
if (wm8988 == NULL)
return -ENOMEM;
wm8988->regmap = devm_regmap_init_spi(spi, &wm8988_regmap);
if (IS_ERR(wm8988->regmap)) {
ret = PTR_ERR(wm8988->regmap);
dev_err(&spi->dev, "Failed to init regmap: %d\n", ret);
return ret;
}
spi_set_drvdata(spi, wm8988);
ret = snd_soc_register_codec(&spi->dev,
&soc_codec_dev_wm8988, &wm8988_dai, 1);
return ret;
}
static int wm8988_spi_remove(struct spi_device *spi)
{
snd_soc_unregister_codec(&spi->dev);
return 0;
}
static struct spi_driver wm8988_spi_driver = {
.driver = {
.name = "wm8988",
.owner = THIS_MODULE,
},
.probe = wm8988_spi_probe,
.remove = wm8988_spi_remove,
};
#endif /* CONFIG_SPI_MASTER */
#if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE)
static int wm8988_i2c_probe(struct i2c_client *i2c,
const struct i2c_device_id *id)
{
struct wm8988_priv *wm8988;
int ret;
wm8988 = devm_kzalloc(&i2c->dev, sizeof(struct wm8988_priv),
GFP_KERNEL);
if (wm8988 == NULL)
return -ENOMEM;
i2c_set_clientdata(i2c, wm8988);
wm8988->regmap = devm_regmap_init_i2c(i2c, &wm8988_regmap);
if (IS_ERR(wm8988->regmap)) {
ret = PTR_ERR(wm8988->regmap);
dev_err(&i2c->dev, "Failed to init regmap: %d\n", ret);
return ret;
}
ret = snd_soc_register_codec(&i2c->dev,
&soc_codec_dev_wm8988, &wm8988_dai, 1);
return ret;
}
static int wm8988_i2c_remove(struct i2c_client *client)
{
snd_soc_unregister_codec(&client->dev);
return 0;
}
static const struct i2c_device_id wm8988_i2c_id[] = {
{ "wm8988", 0 },
{ }
};
MODULE_DEVICE_TABLE(i2c, wm8988_i2c_id);
static struct i2c_driver wm8988_i2c_driver = {
.driver = {
.name = "wm8988",
.owner = THIS_MODULE,
},
.probe = wm8988_i2c_probe,
.remove = wm8988_i2c_remove,
.id_table = wm8988_i2c_id,
};
#endif
static int __init wm8988_modinit(void)
{
int ret = 0;
#if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE)
ret = i2c_add_driver(&wm8988_i2c_driver);
if (ret != 0) {
printk(KERN_ERR "Failed to register WM8988 I2C driver: %d\n",
ret);
}
#endif
#if defined(CONFIG_SPI_MASTER)
ret = spi_register_driver(&wm8988_spi_driver);
if (ret != 0) {
printk(KERN_ERR "Failed to register WM8988 SPI driver: %d\n",
ret);
}
#endif
return ret;
}
module_init(wm8988_modinit);
static void __exit wm8988_exit(void)
{
#if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE)
i2c_del_driver(&wm8988_i2c_driver);
#endif
#if defined(CONFIG_SPI_MASTER)
spi_unregister_driver(&wm8988_spi_driver);
#endif
}
module_exit(wm8988_exit);
MODULE_DESCRIPTION("ASoC WM8988 driver");
MODULE_AUTHOR("Mark Brown <broonie@opensource.wolfsonmicro.com>");
MODULE_LICENSE("GPL");
| gpl-2.0 |
VM12/kernel_moto_shamu | arch/mips/loongson/common/serial.c | 2478 | 1929 | /*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2007 Ralf Baechle (ralf@linux-mips.org)
*
* Copyright (C) 2009 Lemote, Inc.
* Author: Yan hua (yanhua@lemote.com)
* Author: Wu Zhangjin (wuzhangjin@gmail.com)
*/
#include <linux/io.h>
#include <linux/init.h>
#include <linux/serial_8250.h>
#include <asm/bootinfo.h>
#include <loongson.h>
#include <machine.h>
#define PORT(int) \
{ \
.irq = int, \
.uartclk = 1843200, \
.iotype = UPIO_PORT, \
.flags = UPF_BOOT_AUTOCONF | UPF_SKIP_TEST, \
.regshift = 0, \
}
#define PORT_M(int) \
{ \
.irq = MIPS_CPU_IRQ_BASE + (int), \
.uartclk = 3686400, \
.iotype = UPIO_MEM, \
.membase = (void __iomem *)NULL, \
.flags = UPF_BOOT_AUTOCONF | UPF_SKIP_TEST, \
.regshift = 0, \
}
static struct plat_serial8250_port uart8250_data[][2] = {
[MACH_LOONGSON_UNKNOWN] {},
[MACH_LEMOTE_FL2E] {PORT(4), {} },
[MACH_LEMOTE_FL2F] {PORT(3), {} },
[MACH_LEMOTE_ML2F7] {PORT_M(3), {} },
[MACH_LEMOTE_YL2F89] {PORT_M(3), {} },
[MACH_DEXXON_GDIUM2F10] {PORT_M(3), {} },
[MACH_LEMOTE_NAS] {PORT_M(3), {} },
[MACH_LEMOTE_LL2F] {PORT(3), {} },
[MACH_LOONGSON_END] {},
};
static struct platform_device uart8250_device = {
.name = "serial8250",
.id = PLAT8250_DEV_PLATFORM,
};
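/*
 * Fix up the entry for the current machine type at boot: memory-mapped
 * UARTs get the ioremapped UART base, while port I/O UARTs use an offset
 * from the PCI I/O window.
 */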
static int __init serial_init(void)
{
unsigned char iotype;
iotype = uart8250_data[mips_machtype][0].iotype;
if (UPIO_MEM == iotype)
uart8250_data[mips_machtype][0].membase =
(void __iomem *)_loongson_uart_base;
else if (UPIO_PORT == iotype)
uart8250_data[mips_machtype][0].iobase =
loongson_uart_base - LOONGSON_PCIIO_BASE;
uart8250_device.dev.platform_data = uart8250_data[mips_machtype];
return platform_device_register(&uart8250_device);
}
device_initcall(serial_init);
| gpl-2.0 |
brymaster5000/BrYCS_kernel | drivers/isdn/capi/capilib.c | 4270 | 4630 |
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/isdn/capilli.h>
#define DBG(format, arg...) do { \
printk(KERN_DEBUG "%s: " format "\n" , __func__ , ## arg); \
} while (0)
struct capilib_msgidqueue {
struct capilib_msgidqueue *next;
u16 msgid;
};
struct capilib_ncci {
struct list_head list;
u16 applid;
u32 ncci;
u32 winsize;
int nmsg;
struct capilib_msgidqueue *msgidqueue;
struct capilib_msgidqueue *msgidlast;
struct capilib_msgidqueue *msgidfree;
struct capilib_msgidqueue msgidpool[CAPI_MAXDATAWINDOW];
};
// ---------------------------------------------------------------------------
// NCCI Handling
static inline void mq_init(struct capilib_ncci * np)
{
u_int i;
np->msgidqueue = NULL;
np->msgidlast = NULL;
np->nmsg = 0;
memset(np->msgidpool, 0, sizeof(np->msgidpool));
np->msgidfree = &np->msgidpool[0];
for (i = 1; i < np->winsize; i++) {
np->msgidpool[i].next = np->msgidfree;
np->msgidfree = &np->msgidpool[i];
}
}
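/*
 * Take a message id from the free pool and append it to the NCCI's
 * outstanding-message queue; returns 0 when the window is full.
 */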
static inline int mq_enqueue(struct capilib_ncci * np, u16 msgid)
{
struct capilib_msgidqueue *mq;
if ((mq = np->msgidfree) == NULL)
return 0;
np->msgidfree = mq->next;
mq->msgid = msgid;
mq->next = NULL;
if (np->msgidlast)
np->msgidlast->next = mq;
np->msgidlast = mq;
if (!np->msgidqueue)
np->msgidqueue = mq;
np->nmsg++;
return 1;
}
static inline int mq_dequeue(struct capilib_ncci * np, u16 msgid)
{
struct capilib_msgidqueue **pp;
for (pp = &np->msgidqueue; *pp; pp = &(*pp)->next) {
if ((*pp)->msgid == msgid) {
struct capilib_msgidqueue *mq = *pp;
*pp = mq->next;
if (mq == np->msgidlast)
np->msgidlast = NULL;
mq->next = np->msgidfree;
np->msgidfree = mq;
np->nmsg--;
return 1;
}
}
return 0;
}
void capilib_new_ncci(struct list_head *head, u16 applid, u32 ncci, u32 winsize)
{
struct capilib_ncci *np;
np = kmalloc(sizeof(*np), GFP_ATOMIC);
if (!np) {
printk(KERN_WARNING "capilib_new_ncci: no memory.\n");
return;
}
if (winsize > CAPI_MAXDATAWINDOW) {
printk(KERN_ERR "capi_new_ncci: winsize %d too big\n",
winsize);
winsize = CAPI_MAXDATAWINDOW;
}
np->applid = applid;
np->ncci = ncci;
np->winsize = winsize;
mq_init(np);
list_add_tail(&np->list, head);
DBG("kcapi: appl %d ncci 0x%x up", applid, ncci);
}
EXPORT_SYMBOL(capilib_new_ncci);
void capilib_free_ncci(struct list_head *head, u16 applid, u32 ncci)
{
struct list_head *l;
struct capilib_ncci *np;
list_for_each(l, head) {
np = list_entry(l, struct capilib_ncci, list);
if (np->applid != applid)
continue;
if (np->ncci != ncci)
continue;
printk(KERN_INFO "kcapi: appl %d ncci 0x%x down\n", applid, ncci);
list_del(&np->list);
kfree(np);
return;
}
printk(KERN_ERR "capilib_free_ncci: ncci 0x%x not found\n", ncci);
}
EXPORT_SYMBOL(capilib_free_ncci);
void capilib_release_appl(struct list_head *head, u16 applid)
{
struct list_head *l, *n;
struct capilib_ncci *np;
list_for_each_safe(l, n, head) {
np = list_entry(l, struct capilib_ncci, list);
if (np->applid != applid)
continue;
printk(KERN_INFO "kcapi: appl %d ncci 0x%x forced down\n", applid, np->ncci);
list_del(&np->list);
kfree(np);
}
}
EXPORT_SYMBOL(capilib_release_appl);
void capilib_release(struct list_head *head)
{
struct list_head *l, *n;
struct capilib_ncci *np;
list_for_each_safe(l, n, head) {
np = list_entry(l, struct capilib_ncci, list);
printk(KERN_INFO "kcapi: appl %d ncci 0x%x forced down\n", np->applid, np->ncci);
list_del(&np->list);
kfree(np);
}
}
EXPORT_SYMBOL(capilib_release);
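/*
 * Account for an outgoing DATA_B3_REQ in the NCCI's flow-control window;
 * CAPI_SENDQUEUEFULL is returned when no window slot is free.
 */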
u16 capilib_data_b3_req(struct list_head *head, u16 applid, u32 ncci, u16 msgid)
{
struct list_head *l;
struct capilib_ncci *np;
list_for_each(l, head) {
np = list_entry(l, struct capilib_ncci, list);
if (np->applid != applid)
continue;
if (np->ncci != ncci)
continue;
if (mq_enqueue(np, msgid) == 0)
return CAPI_SENDQUEUEFULL;
return CAPI_NOERROR;
}
printk(KERN_ERR "capilib_data_b3_req: ncci 0x%x not found\n", ncci);
return CAPI_NOERROR;
}
EXPORT_SYMBOL(capilib_data_b3_req);
void capilib_data_b3_conf(struct list_head *head, u16 applid, u32 ncci, u16 msgid)
{
struct list_head *l;
struct capilib_ncci *np;
list_for_each(l, head) {
np = list_entry(l, struct capilib_ncci, list);
if (np->applid != applid)
continue;
if (np->ncci != ncci)
continue;
if (mq_dequeue(np, msgid) == 0) {
printk(KERN_ERR "kcapi: msgid %hu ncci 0x%x not on queue\n",
msgid, ncci);
}
return;
}
printk(KERN_ERR "capilib_data_b3_conf: ncci 0x%x not found\n", ncci);
}
EXPORT_SYMBOL(capilib_data_b3_conf);
| gpl-2.0 |
AnonymousMediatekTeam/android_kernel_aio_otfp | arch/sh/kernel/cpu/sh5/fpu.c | 4782 | 3595 | /*
* arch/sh/kernel/cpu/sh5/fpu.c
*
* Copyright (C) 2001 Manuela Cirronis, Paolo Alberelli
* Copyright (C) 2002 STMicroelectronics Limited
* Author : Stuart Menefy
*
* Started from SH4 version:
* Copyright (C) 1999, 2000 Kaz Kojima & Niibe Yutaka
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*/
#include <linux/sched.h>
#include <linux/signal.h>
#include <asm/processor.h>
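/*
 * Store all 64 FP registers (as 32 register pairs) plus FPSCR into the
 * task's xstate save area.
 */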
void save_fpu(struct task_struct *tsk)
{
asm volatile("fst.p %0, (0*8), fp0\n\t"
"fst.p %0, (1*8), fp2\n\t"
"fst.p %0, (2*8), fp4\n\t"
"fst.p %0, (3*8), fp6\n\t"
"fst.p %0, (4*8), fp8\n\t"
"fst.p %0, (5*8), fp10\n\t"
"fst.p %0, (6*8), fp12\n\t"
"fst.p %0, (7*8), fp14\n\t"
"fst.p %0, (8*8), fp16\n\t"
"fst.p %0, (9*8), fp18\n\t"
"fst.p %0, (10*8), fp20\n\t"
"fst.p %0, (11*8), fp22\n\t"
"fst.p %0, (12*8), fp24\n\t"
"fst.p %0, (13*8), fp26\n\t"
"fst.p %0, (14*8), fp28\n\t"
"fst.p %0, (15*8), fp30\n\t"
"fst.p %0, (16*8), fp32\n\t"
"fst.p %0, (17*8), fp34\n\t"
"fst.p %0, (18*8), fp36\n\t"
"fst.p %0, (19*8), fp38\n\t"
"fst.p %0, (20*8), fp40\n\t"
"fst.p %0, (21*8), fp42\n\t"
"fst.p %0, (22*8), fp44\n\t"
"fst.p %0, (23*8), fp46\n\t"
"fst.p %0, (24*8), fp48\n\t"
"fst.p %0, (25*8), fp50\n\t"
"fst.p %0, (26*8), fp52\n\t"
"fst.p %0, (27*8), fp54\n\t"
"fst.p %0, (28*8), fp56\n\t"
"fst.p %0, (29*8), fp58\n\t"
"fst.p %0, (30*8), fp60\n\t"
"fst.p %0, (31*8), fp62\n\t"
"fgetscr fr63\n\t"
"fst.s %0, (32*8), fr63\n\t"
: /* no output */
: "r" (&tsk->thread.xstate->hardfpu)
: "memory");
}
void restore_fpu(struct task_struct *tsk)
{
asm volatile("fld.p %0, (0*8), fp0\n\t"
"fld.p %0, (1*8), fp2\n\t"
"fld.p %0, (2*8), fp4\n\t"
"fld.p %0, (3*8), fp6\n\t"
"fld.p %0, (4*8), fp8\n\t"
"fld.p %0, (5*8), fp10\n\t"
"fld.p %0, (6*8), fp12\n\t"
"fld.p %0, (7*8), fp14\n\t"
"fld.p %0, (8*8), fp16\n\t"
"fld.p %0, (9*8), fp18\n\t"
"fld.p %0, (10*8), fp20\n\t"
"fld.p %0, (11*8), fp22\n\t"
"fld.p %0, (12*8), fp24\n\t"
"fld.p %0, (13*8), fp26\n\t"
"fld.p %0, (14*8), fp28\n\t"
"fld.p %0, (15*8), fp30\n\t"
"fld.p %0, (16*8), fp32\n\t"
"fld.p %0, (17*8), fp34\n\t"
"fld.p %0, (18*8), fp36\n\t"
"fld.p %0, (19*8), fp38\n\t"
"fld.p %0, (20*8), fp40\n\t"
"fld.p %0, (21*8), fp42\n\t"
"fld.p %0, (22*8), fp44\n\t"
"fld.p %0, (23*8), fp46\n\t"
"fld.p %0, (24*8), fp48\n\t"
"fld.p %0, (25*8), fp50\n\t"
"fld.p %0, (26*8), fp52\n\t"
"fld.p %0, (27*8), fp54\n\t"
"fld.p %0, (28*8), fp56\n\t"
"fld.p %0, (29*8), fp58\n\t"
"fld.p %0, (30*8), fp60\n\t"
"fld.s %0, (32*8), fr63\n\t"
"fputscr fr63\n\t"
"fld.p %0, (31*8), fp62\n\t"
: /* no output */
: "r" (&tsk->thread.xstate->hardfpu)
: "memory");
}
asmlinkage void do_fpu_error(unsigned long ex, struct pt_regs *regs)
{
struct task_struct *tsk = current;
regs->pc += 4;
force_sig(SIGFPE, tsk);
}
| gpl-2.0 |
NoelMacwan/kernel_sony_msm8x27 | arch/arm/mach-shmobile/intc-r8a7740.c | 4782 | 19397 | /*
* R8A7740 processor support
*
* Copyright (C) 2011 Renesas Solutions Corp.
* Copyright (C) 2011 Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 of the License.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/io.h>
#include <linux/sh_intc.h>
#include <mach/intc.h>
#include <mach/irqs.h>
#include <asm/mach-types.h>
#include <asm/mach/arch.h>
/*
* INTCA
*/
enum {
UNUSED_INTCA = 0,
/* interrupt sources INTCA */
DIRC,
ATAPI,
IIC1_ALI, IIC1_TACKI, IIC1_WAITI, IIC1_DTEI,
AP_ARM_COMMTX, AP_ARM_COMMRX,
MFI, MFIS,
BBIF1, BBIF2,
USBHSDMAC,
USBF_OUL_SOF, USBF_IXL_INT,
SGX540,
CMT1_0, CMT1_1, CMT1_2, CMT1_3,
CMT2,
CMT3,
KEYSC,
SCIFA0, SCIFA1, SCIFA2, SCIFA3,
MSIOF2, MSIOF1,
SCIFA4, SCIFA5, SCIFB,
FLCTL_FLSTEI, FLCTL_FLTENDI, FLCTL_FLTREQ0I, FLCTL_FLTREQ1I,
SDHI0_0, SDHI0_1, SDHI0_2, SDHI0_3,
SDHI1_0, SDHI1_1, SDHI1_2, SDHI1_3,
AP_ARM_L2CINT,
IRDA,
TPU0,
SCIFA6, SCIFA7,
GbEther,
ICBS0,
DDM,
SDHI2_0, SDHI2_1, SDHI2_2, SDHI2_3,
RWDT0,
DMAC1_1_DEI0, DMAC1_1_DEI1, DMAC1_1_DEI2, DMAC1_1_DEI3,
DMAC1_2_DEI4, DMAC1_2_DEI5, DMAC1_2_DADERR,
DMAC2_1_DEI0, DMAC2_1_DEI1, DMAC2_1_DEI2, DMAC2_1_DEI3,
DMAC2_2_DEI4, DMAC2_2_DEI5, DMAC2_2_DADERR,
DMAC3_1_DEI0, DMAC3_1_DEI1, DMAC3_1_DEI2, DMAC3_1_DEI3,
DMAC3_2_DEI4, DMAC3_2_DEI5, DMAC3_2_DADERR,
SHWYSTAT_RT, SHWYSTAT_HS, SHWYSTAT_COM,
USBH_INT, USBH_OHCI, USBH_EHCI, USBH_PME, USBH_BIND,
RSPI_OVRF, RSPI_SPTEF, RSPI_SPRF,
SPU2_0, SPU2_1,
FSI, FMSI,
IPMMU,
AP_ARM_CTIIRQ, AP_ARM_PMURQ,
MFIS2,
CPORTR2S,
CMT14, CMT15,
MMCIF_0, MMCIF_1, MMCIF_2,
SIM_ERI, SIM_RXI, SIM_TXI, SIM_TEI,
STPRO_0, STPRO_1, STPRO_2, STPRO_3, STPRO_4,
/* interrupt groups INTCA */
DMAC1_1, DMAC1_2,
DMAC2_1, DMAC2_2,
DMAC3_1, DMAC3_2,
AP_ARM1, AP_ARM2,
SDHI0, SDHI1, SDHI2,
SHWYSTAT,
USBF, USBH1, USBH2,
RSPI, SPU2, FLCTL, IIC1,
};
static struct intc_vect intca_vectors[] __initdata = {
INTC_VECT(DIRC, 0x0560),
INTC_VECT(ATAPI, 0x05E0),
INTC_VECT(IIC1_ALI, 0x0780),
INTC_VECT(IIC1_TACKI, 0x07A0),
INTC_VECT(IIC1_WAITI, 0x07C0),
INTC_VECT(IIC1_DTEI, 0x07E0),
INTC_VECT(AP_ARM_COMMTX, 0x0840),
INTC_VECT(AP_ARM_COMMRX, 0x0860),
INTC_VECT(MFI, 0x0900),
INTC_VECT(MFIS, 0x0920),
INTC_VECT(BBIF1, 0x0940),
INTC_VECT(BBIF2, 0x0960),
INTC_VECT(USBHSDMAC, 0x0A00),
INTC_VECT(USBF_OUL_SOF, 0x0A20),
INTC_VECT(USBF_IXL_INT, 0x0A40),
INTC_VECT(SGX540, 0x0A60),
INTC_VECT(CMT1_0, 0x0B00),
INTC_VECT(CMT1_1, 0x0B20),
INTC_VECT(CMT1_2, 0x0B40),
INTC_VECT(CMT1_3, 0x0B60),
INTC_VECT(CMT2, 0x0B80),
INTC_VECT(CMT3, 0x0BA0),
INTC_VECT(KEYSC, 0x0BE0),
INTC_VECT(SCIFA0, 0x0C00),
INTC_VECT(SCIFA1, 0x0C20),
INTC_VECT(SCIFA2, 0x0C40),
INTC_VECT(SCIFA3, 0x0C60),
INTC_VECT(MSIOF2, 0x0C80),
INTC_VECT(MSIOF1, 0x0D00),
INTC_VECT(SCIFA4, 0x0D20),
INTC_VECT(SCIFA5, 0x0D40),
INTC_VECT(SCIFB, 0x0D60),
INTC_VECT(FLCTL_FLSTEI, 0x0D80),
INTC_VECT(FLCTL_FLTENDI, 0x0DA0),
INTC_VECT(FLCTL_FLTREQ0I, 0x0DC0),
INTC_VECT(FLCTL_FLTREQ1I, 0x0DE0),
INTC_VECT(SDHI0_0, 0x0E00),
INTC_VECT(SDHI0_1, 0x0E20),
INTC_VECT(SDHI0_2, 0x0E40),
INTC_VECT(SDHI0_3, 0x0E60),
INTC_VECT(SDHI1_0, 0x0E80),
INTC_VECT(SDHI1_1, 0x0EA0),
INTC_VECT(SDHI1_2, 0x0EC0),
INTC_VECT(SDHI1_3, 0x0EE0),
INTC_VECT(AP_ARM_L2CINT, 0x0FA0),
INTC_VECT(IRDA, 0x0480),
INTC_VECT(TPU0, 0x04A0),
INTC_VECT(SCIFA6, 0x04C0),
INTC_VECT(SCIFA7, 0x04E0),
INTC_VECT(GbEther, 0x0500),
INTC_VECT(ICBS0, 0x0540),
INTC_VECT(DDM, 0x1140),
INTC_VECT(SDHI2_0, 0x1200),
INTC_VECT(SDHI2_1, 0x1220),
INTC_VECT(SDHI2_2, 0x1240),
INTC_VECT(SDHI2_3, 0x1260),
INTC_VECT(RWDT0, 0x1280),
INTC_VECT(DMAC1_1_DEI0, 0x2000),
INTC_VECT(DMAC1_1_DEI1, 0x2020),
INTC_VECT(DMAC1_1_DEI2, 0x2040),
INTC_VECT(DMAC1_1_DEI3, 0x2060),
INTC_VECT(DMAC1_2_DEI4, 0x2080),
INTC_VECT(DMAC1_2_DEI5, 0x20A0),
INTC_VECT(DMAC1_2_DADERR, 0x20C0),
INTC_VECT(DMAC2_1_DEI0, 0x2100),
INTC_VECT(DMAC2_1_DEI1, 0x2120),
INTC_VECT(DMAC2_1_DEI2, 0x2140),
INTC_VECT(DMAC2_1_DEI3, 0x2160),
INTC_VECT(DMAC2_2_DEI4, 0x2180),
INTC_VECT(DMAC2_2_DEI5, 0x21A0),
INTC_VECT(DMAC2_2_DADERR, 0x21C0),
INTC_VECT(DMAC3_1_DEI0, 0x2200),
INTC_VECT(DMAC3_1_DEI1, 0x2220),
INTC_VECT(DMAC3_1_DEI2, 0x2240),
INTC_VECT(DMAC3_1_DEI3, 0x2260),
INTC_VECT(DMAC3_2_DEI4, 0x2280),
INTC_VECT(DMAC3_2_DEI5, 0x22A0),
INTC_VECT(DMAC3_2_DADERR, 0x22C0),
INTC_VECT(SHWYSTAT_RT, 0x1300),
INTC_VECT(SHWYSTAT_HS, 0x1320),
INTC_VECT(SHWYSTAT_COM, 0x1340),
INTC_VECT(USBH_INT, 0x1540),
INTC_VECT(USBH_OHCI, 0x1560),
INTC_VECT(USBH_EHCI, 0x1580),
INTC_VECT(USBH_PME, 0x15A0),
INTC_VECT(USBH_BIND, 0x15C0),
INTC_VECT(RSPI_OVRF, 0x1780),
INTC_VECT(RSPI_SPTEF, 0x17A0),
INTC_VECT(RSPI_SPRF, 0x17C0),
INTC_VECT(SPU2_0, 0x1800),
INTC_VECT(SPU2_1, 0x1820),
INTC_VECT(FSI, 0x1840),
INTC_VECT(FMSI, 0x1860),
INTC_VECT(IPMMU, 0x1920),
INTC_VECT(AP_ARM_CTIIRQ, 0x1980),
INTC_VECT(AP_ARM_PMURQ, 0x19A0),
INTC_VECT(MFIS2, 0x1A00),
INTC_VECT(CPORTR2S, 0x1A20),
INTC_VECT(CMT14, 0x1A40),
INTC_VECT(CMT15, 0x1A60),
INTC_VECT(MMCIF_0, 0x1AA0),
INTC_VECT(MMCIF_1, 0x1AC0),
INTC_VECT(MMCIF_2, 0x1AE0),
INTC_VECT(SIM_ERI, 0x1C00),
INTC_VECT(SIM_RXI, 0x1C20),
INTC_VECT(SIM_TXI, 0x1C40),
INTC_VECT(SIM_TEI, 0x1C60),
INTC_VECT(STPRO_0, 0x1C80),
INTC_VECT(STPRO_1, 0x1CA0),
INTC_VECT(STPRO_2, 0x1CC0),
INTC_VECT(STPRO_3, 0x1CE0),
INTC_VECT(STPRO_4, 0x1D00),
};
static struct intc_group intca_groups[] __initdata = {
INTC_GROUP(DMAC1_1,
DMAC1_1_DEI0, DMAC1_1_DEI1, DMAC1_1_DEI2, DMAC1_1_DEI3),
INTC_GROUP(DMAC1_2,
DMAC1_2_DEI4, DMAC1_2_DEI5, DMAC1_2_DADERR),
INTC_GROUP(DMAC2_1,
DMAC2_1_DEI0, DMAC2_1_DEI1, DMAC2_1_DEI2, DMAC2_1_DEI3),
INTC_GROUP(DMAC2_2,
DMAC2_2_DEI4, DMAC2_2_DEI5, DMAC2_2_DADERR),
INTC_GROUP(DMAC3_1,
DMAC3_1_DEI0, DMAC3_1_DEI1, DMAC3_1_DEI2, DMAC3_1_DEI3),
INTC_GROUP(DMAC3_2,
DMAC3_2_DEI4, DMAC3_2_DEI5, DMAC3_2_DADERR),
INTC_GROUP(AP_ARM1,
AP_ARM_COMMTX, AP_ARM_COMMRX),
INTC_GROUP(AP_ARM2,
AP_ARM_CTIIRQ, AP_ARM_PMURQ),
INTC_GROUP(USBF,
USBF_OUL_SOF, USBF_IXL_INT),
INTC_GROUP(SDHI0,
SDHI0_0, SDHI0_1, SDHI0_2, SDHI0_3),
INTC_GROUP(SDHI1,
SDHI1_0, SDHI1_1, SDHI1_2, SDHI1_3),
INTC_GROUP(SDHI2,
SDHI2_0, SDHI2_1, SDHI2_2, SDHI2_3),
INTC_GROUP(SHWYSTAT,
SHWYSTAT_RT, SHWYSTAT_HS, SHWYSTAT_COM),
INTC_GROUP(USBH1, /* FIXME */
USBH_INT, USBH_OHCI),
INTC_GROUP(USBH2, /* FIXME */
USBH_EHCI,
USBH_PME, USBH_BIND),
INTC_GROUP(RSPI,
RSPI_OVRF, RSPI_SPTEF, RSPI_SPRF),
INTC_GROUP(SPU2,
SPU2_0, SPU2_1),
INTC_GROUP(FLCTL,
FLCTL_FLSTEI, FLCTL_FLTENDI, FLCTL_FLTREQ0I, FLCTL_FLTREQ1I),
INTC_GROUP(IIC1,
IIC1_ALI, IIC1_TACKI, IIC1_WAITI, IIC1_DTEI),
};
static struct intc_mask_reg intca_mask_registers[] __initdata = {
{ /* IMR0A / IMCR0A */ 0xe6940080, 0xe69400c0, 8,
{ DMAC2_1_DEI3, DMAC2_1_DEI2, DMAC2_1_DEI1, DMAC2_1_DEI0,
0, 0, AP_ARM_COMMTX, AP_ARM_COMMRX } },
{ /* IMR1A / IMCR1A */ 0xe6940084, 0xe69400c4, 8,
{ ATAPI, 0, DIRC, 0,
DMAC1_1_DEI3, DMAC1_1_DEI2, DMAC1_1_DEI1, DMAC1_1_DEI0 } },
{ /* IMR2A / IMCR2A */ 0xe6940088, 0xe69400c8, 8,
{ 0, 0, 0, 0,
BBIF1, BBIF2, MFIS, MFI } },
{ /* IMR3A / IMCR3A */ 0xe694008c, 0xe69400cc, 8,
{ DMAC3_1_DEI3, DMAC3_1_DEI2, DMAC3_1_DEI1, DMAC3_1_DEI0,
DMAC3_2_DADERR, DMAC3_2_DEI5, DMAC3_2_DEI4, IRDA } },
{ /* IMR4A / IMCR4A */ 0xe6940090, 0xe69400d0, 8,
{ DDM, 0, 0, 0,
0, 0, 0, 0 } },
{ /* IMR5A / IMCR5A */ 0xe6940094, 0xe69400d4, 8,
{ KEYSC, DMAC1_2_DADERR, DMAC1_2_DEI5, DMAC1_2_DEI4,
SCIFA3, SCIFA2, SCIFA1, SCIFA0 } },
{ /* IMR6A / IMCR6A */ 0xe6940098, 0xe69400d8, 8,
{ SCIFB, SCIFA5, SCIFA4, MSIOF1,
0, 0, MSIOF2, 0 } },
{ /* IMR7A / IMCR7A */ 0xe694009c, 0xe69400dc, 8,
{ SDHI0_3, SDHI0_2, SDHI0_1, SDHI0_0,
FLCTL_FLTREQ1I, FLCTL_FLTREQ0I, FLCTL_FLTENDI, FLCTL_FLSTEI } },
{ /* IMR8A / IMCR8A */ 0xe69400a0, 0xe69400e0, 8,
{ SDHI1_3, SDHI1_2, SDHI1_1, SDHI1_0,
0, USBHSDMAC, 0, AP_ARM_L2CINT } },
{ /* IMR9A / IMCR9A */ 0xe69400a4, 0xe69400e4, 8,
{ CMT1_3, CMT1_2, CMT1_1, CMT1_0,
CMT2, USBF_IXL_INT, USBF_OUL_SOF, SGX540 } },
{ /* IMR10A / IMCR10A */ 0xe69400a8, 0xe69400e8, 8,
{ 0, DMAC2_2_DADERR, DMAC2_2_DEI5, DMAC2_2_DEI4,
0, 0, 0, 0 } },
{ /* IMR11A / IMCR11A */ 0xe69400ac, 0xe69400ec, 8,
{ IIC1_DTEI, IIC1_WAITI, IIC1_TACKI, IIC1_ALI,
ICBS0, 0, 0, 0 } },
{ /* IMR12A / IMCR12A */ 0xe69400b0, 0xe69400f0, 8,
{ 0, 0, TPU0, SCIFA6,
SCIFA7, GbEther, 0, 0 } },
{ /* IMR13A / IMCR13A */ 0xe69400b4, 0xe69400f4, 8,
{ SDHI2_3, SDHI2_2, SDHI2_1, SDHI2_0,
0, CMT3, 0, RWDT0 } },
{ /* IMR0A3 / IMCR0A3 */ 0xe6950080, 0xe69500c0, 8,
{ SHWYSTAT_RT, SHWYSTAT_HS, SHWYSTAT_COM, 0,
0, 0, 0, 0 } },
/* IMR1A3 / IMCR1A3 */
{ /* IMR2A3 / IMCR2A3 */ 0xe6950088, 0xe69500c8, 8,
{ 0, 0, USBH_INT, USBH_OHCI,
USBH_EHCI, USBH_PME, USBH_BIND, 0 } },
/* IMR3A3 / IMCR3A3 */
{ /* IMR4A3 / IMCR4A3 */ 0xe6950090, 0xe69500d0, 8,
{ 0, 0, 0, 0,
RSPI_OVRF, RSPI_SPTEF, RSPI_SPRF, 0 } },
{ /* IMR5A3 / IMCR5A3 */ 0xe6950094, 0xe69500d4, 8,
{ SPU2_0, SPU2_1, FSI, FMSI,
0, 0, 0, 0 } },
{ /* IMR6A3 / IMCR6A3 */ 0xe6950098, 0xe69500d8, 8,
{ 0, IPMMU, 0, 0,
AP_ARM_CTIIRQ, AP_ARM_PMURQ, 0, 0 } },
{ /* IMR7A3 / IMCR7A3 */ 0xe695009c, 0xe69500dc, 8,
{ MFIS2, CPORTR2S, CMT14, CMT15,
0, MMCIF_0, MMCIF_1, MMCIF_2 } },
/* IMR8A3 / IMCR8A3 */
{ /* IMR9A3 / IMCR9A3 */ 0xe69500a4, 0xe69500e4, 8,
{ SIM_ERI, SIM_RXI, SIM_TXI, SIM_TEI,
STPRO_0, STPRO_1, STPRO_2, STPRO_3 } },
{ /* IMR10A3 / IMCR10A3 */ 0xe69500a8, 0xe69500e8, 8,
{ STPRO_4, 0, 0, 0,
0, 0, 0, 0 } },
};
static struct intc_prio_reg intca_prio_registers[] __initdata = {
{ 0xe6940000, 0, 16, 4, /* IPRAA */ { DMAC3_1, DMAC3_2, CMT2, ICBS0 } },
{ 0xe6940004, 0, 16, 4, /* IPRBA */ { IRDA, 0, BBIF1, BBIF2 } },
{ 0xe6940008, 0, 16, 4, /* IPRCA */ { ATAPI, 0, CMT1_1, AP_ARM1 } },
{ 0xe694000c, 0, 16, 4, /* IPRDA */ { 0, 0, CMT1_2, 0 } },
{ 0xe6940010, 0, 16, 4, /* IPREA */ { DMAC1_1, MFIS, MFI, USBF } },
{ 0xe6940014, 0, 16, 4, /* IPRFA */ { KEYSC, DMAC1_2,
SGX540, CMT1_0 } },
{ 0xe6940018, 0, 16, 4, /* IPRGA */ { SCIFA0, SCIFA1,
SCIFA2, SCIFA3 } },
{ 0xe694001c, 0, 16, 4, /* IPRGH */ { MSIOF2, USBHSDMAC,
FLCTL, SDHI0 } },
{ 0xe6940020, 0, 16, 4, /* IPRIA */ { MSIOF1, SCIFA4, 0, IIC1 } },
{ 0xe6940024, 0, 16, 4, /* IPRJA */ { DMAC2_1, DMAC2_2,
AP_ARM_L2CINT, 0 } },
{ 0xe6940028, 0, 16, 4, /* IPRKA */ { 0, CMT1_3, 0, SDHI1 } },
{ 0xe694002c, 0, 16, 4, /* IPRLA */ { TPU0, SCIFA6,
SCIFA7, GbEther } },
{ 0xe6940030, 0, 16, 4, /* IPRMA */ { 0, CMT3, 0, RWDT0 } },
{ 0xe6940034, 0, 16, 4, /* IPRNA */ { SCIFB, SCIFA5, 0, DDM } },
{ 0xe6940038, 0, 16, 4, /* IPROA */ { 0, 0, DIRC, SDHI2 } },
{ 0xe6950000, 0, 16, 4, /* IPRAA3 */ { SHWYSTAT, 0, 0, 0 } },
/* IPRBA3 */
/* IPRCA3 */
/* IPRDA3 */
{ 0xe6950010, 0, 16, 4, /* IPREA3 */ { USBH1, 0, 0, 0 } },
{ 0xe6950014, 0, 16, 4, /* IPRFA3 */ { USBH2, 0, 0, 0 } },
/* IPRGA3 */
/* IPRHA3 */
/* IPRIA3 */
{ 0xe6950024, 0, 16, 4, /* IPRJA3 */ { RSPI, 0, 0, 0 } },
{ 0xe6950028, 0, 16, 4, /* IPRKA3 */ { SPU2, 0, FSI, FMSI } },
/* IPRLA3 */
{ 0xe6950030, 0, 16, 4, /* IPRMA3 */ { IPMMU, 0, 0, 0 } },
{ 0xe6950034, 0, 16, 4, /* IPRNA3 */ { AP_ARM2, 0, 0, 0 } },
{ 0xe6950038, 0, 16, 4, /* IPROA3 */ { MFIS2, CPORTR2S,
CMT14, CMT15 } },
{ 0xe695003c, 0, 16, 4, /* IPRPA3 */ { 0, MMCIF_0, MMCIF_1, MMCIF_2 } },
/* IPRQA3 */
/* IPRRA3 */
{ 0xe6950048, 0, 16, 4, /* IPRSA3 */ { SIM_ERI, SIM_RXI,
SIM_TXI, SIM_TEI } },
{ 0xe695004c, 0, 16, 4, /* IPRTA3 */ { STPRO_0, STPRO_1,
STPRO_2, STPRO_3 } },
{ 0xe6950050, 0, 16, 4, /* IPRUA3 */ { STPRO_4, 0, 0, 0 } },
};
static DECLARE_INTC_DESC(intca_desc, "r8a7740-intca",
intca_vectors, intca_groups,
intca_mask_registers, intca_prio_registers,
NULL);
INTC_IRQ_PINS_32(intca_irq_pins, 0xe6900000,
INTC_VECT, "r8a7740-intca-irq-pins");
/*
* INTCS
*/
enum {
UNUSED_INTCS = 0,
INTCS,
/* interrupt sources INTCS */
/* HUDI */
/* STPRO */
/* RTDMAC(1) */
VPU5HA2,
_2DG_TRAP, _2DG_GPM_INT, _2DG_CER_INT,
/* MFI */
/* BBIF2 */
VPU5F,
_2DG_BRK_INT,
/* SGX540 */
/* 2DDMAC */
/* IPMMU */
/* RTDMAC 2 */
/* KEYSC */
/* MSIOF */
IIC0_ALI, IIC0_TACKI, IIC0_WAITI, IIC0_DTEI,
TMU0_0, TMU0_1, TMU0_2,
CMT0,
/* CMT2 */
LMB,
CTI,
VOU,
/* RWDT0 */
ICB,
VIO6C,
CEU20, CEU21,
JPU,
LCDC0,
LCRC,
/* RTDMAC2(1) */
/* RTDMAC2(2) */
LCDC1,
/* SPU2 */
/* FSI */
/* FMSI */
TMU1_0, TMU1_1, TMU1_2,
CMT4,
DISP,
DSRV,
/* MFIS2 */
CPORTS2R,
/* interrupt groups INTCS */
_2DG1,
IIC0, TMU1,
};
static struct intc_vect intcs_vectors[] = {
/* HUDI */
/* STPRO */
/* RTDMAC(1) */
INTCS_VECT(VPU5HA2, 0x0880),
INTCS_VECT(_2DG_TRAP, 0x08A0),
INTCS_VECT(_2DG_GPM_INT, 0x08C0),
INTCS_VECT(_2DG_CER_INT, 0x08E0),
/* MFI */
/* BBIF2 */
INTCS_VECT(VPU5F, 0x0980),
INTCS_VECT(_2DG_BRK_INT, 0x09A0),
/* SGX540 */
/* 2DDMAC */
/* IPMMU */
/* RTDMAC(2) */
/* KEYSC */
/* MSIOF */
INTCS_VECT(IIC0_ALI, 0x0E00),
INTCS_VECT(IIC0_TACKI, 0x0E20),
INTCS_VECT(IIC0_WAITI, 0x0E40),
INTCS_VECT(IIC0_DTEI, 0x0E60),
INTCS_VECT(TMU0_0, 0x0E80),
INTCS_VECT(TMU0_1, 0x0EA0),
INTCS_VECT(TMU0_2, 0x0EC0),
INTCS_VECT(CMT0, 0x0F00),
/* CMT2 */
INTCS_VECT(LMB, 0x0F60),
INTCS_VECT(CTI, 0x0400),
INTCS_VECT(VOU, 0x0420),
/* RWDT0 */
INTCS_VECT(ICB, 0x0480),
INTCS_VECT(VIO6C, 0x04E0),
INTCS_VECT(CEU20, 0x0500),
INTCS_VECT(CEU21, 0x0520),
INTCS_VECT(JPU, 0x0560),
INTCS_VECT(LCDC0, 0x0580),
INTCS_VECT(LCRC, 0x05A0),
/* RTDMAC2(1) */
/* RTDMAC2(2) */
INTCS_VECT(LCDC1, 0x1780),
/* SPU2 */
/* FSI */
/* FMSI */
INTCS_VECT(TMU1_0, 0x1900),
INTCS_VECT(TMU1_1, 0x1920),
INTCS_VECT(TMU1_2, 0x1940),
INTCS_VECT(CMT4, 0x1980),
INTCS_VECT(DISP, 0x19A0),
INTCS_VECT(DSRV, 0x19C0),
/* MFIS2 */
INTCS_VECT(CPORTS2R, 0x1A20),
INTC_VECT(INTCS, 0xf80),
};
static struct intc_group intcs_groups[] __initdata = {
INTC_GROUP(_2DG1, /*FIXME*/
_2DG_CER_INT, _2DG_GPM_INT, _2DG_TRAP),
INTC_GROUP(IIC0,
IIC0_DTEI, IIC0_WAITI, IIC0_TACKI, IIC0_ALI),
INTC_GROUP(TMU1,
TMU1_0, TMU1_1, TMU1_2),
};
static struct intc_mask_reg intcs_mask_registers[] = {
/* IMR0SA / IMCR0SA */ /* all 0 */
{ /* IMR1SA / IMCR1SA */ 0xffd20184, 0xffd201c4, 8,
{ _2DG_CER_INT, _2DG_GPM_INT, _2DG_TRAP, VPU5HA2,
0, 0, 0, 0 /*STPRO*/ } },
{ /* IMR2SA / IMCR2SA */ 0xffd20188, 0xffd201c8, 8,
{ 0/*STPRO*/, 0, CEU21, VPU5F,
0/*BBIF2*/, 0, 0, 0/*MFI*/ } },
{ /* IMR3SA / IMCR3SA */ 0xffd2018c, 0xffd201cc, 8,
{ 0, 0, 0, 0, /*2DDMAC*/
VIO6C, 0, 0, ICB } },
{ /* IMR4SA / IMCR4SA */ 0xffd20190, 0xffd201d0, 8,
{ 0, 0, VOU, CTI,
JPU, 0, LCRC, LCDC0 } },
/* IMR5SA / IMCR5SA */ /*KEYSC/RTDMAC2/RTDMAC1*/
/* IMR6SA / IMCR6SA */ /*MSIOF/SGX540*/
{ /* IMR7SA / IMCR7SA */ 0xffd2019c, 0xffd201dc, 8,
{ 0, TMU0_2, TMU0_1, TMU0_0,
0, 0, 0, 0 } },
{ /* IMR8SA / IMCR8SA */ 0xffd201a0, 0xffd201e0, 8,
{ 0, 0, 0, 0,
CEU20, 0, 0, 0 } },
{ /* IMR9SA / IMCR9SA */ 0xffd201a4, 0xffd201e4, 8,
{ 0, 0/*RWDT0*/, 0/*CMT2*/, CMT0,
0, 0, 0, 0 } },
/* IMR10SA / IMCR10SA */ /*IPMMU*/
{ /* IMR11SA / IMCR11SA */ 0xffd201ac, 0xffd201ec, 8,
{ IIC0_DTEI, IIC0_WAITI, IIC0_TACKI, IIC0_ALI,
0, _2DG_BRK_INT, LMB, 0 } },
/* IMR12SA / IMCR12SA */
/* IMR13SA / IMCR13SA */
/* IMR0SA3 / IMCR0SA3 */ /*RTDMAC2(1)/RTDMAC2(2)*/
/* IMR1SA3 / IMCR1SA3 */
/* IMR2SA3 / IMCR2SA3 */
/* IMR3SA3 / IMCR3SA3 */
{ /* IMR4SA3 / IMCR4SA3 */ 0xffd50190, 0xffd501d0, 8,
{ 0, 0, 0, 0,
LCDC1, 0, 0, 0 } },
/* IMR5SA3 / IMCR5SA3 */ /* SPU2/FSI/FMSI */
{ /* IMR6SA3 / IMCR6SA3 */ 0xffd50198, 0xffd501d8, 8,
{ TMU1_0, TMU1_1, TMU1_2, 0,
CMT4, DISP, DSRV, 0 } },
{ /* IMR7SA3 / IMCR7SA3 */ 0xffd5019c, 0xffd501dc, 8,
{ 0/*MFIS2*/, CPORTS2R, 0, 0,
0, 0, 0, 0 } },
{ /* INTAMASK */ 0xffd20104, 0, 16,
{ 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, INTCS } },
};
/* Priority is needed for INTCA to receive the INTCS interrupt */
static struct intc_prio_reg intcs_prio_registers[] = {
{ 0xffd20000, 0, 16, 4, /* IPRAS */ { CTI, VOU, 0/*2DDMAC*/, ICB } },
{ 0xffd20004, 0, 16, 4, /* IPRBS */ { JPU, LCDC0, 0, LCRC } },
/* IPRCS */ /*BBIF2*/
/* IPRDS */
{ 0xffd20010, 0, 16, 4, /* IPRES */ { 0/*RTDMAC(1)*/, VPU5HA2,
0/*MFI*/, VPU5F } },
{ 0xffd20014, 0, 16, 4, /* IPRFS */ { 0/*KEYSC*/, 0/*RTDMAC(2)*/,
0/*CMT2*/, CMT0 } },
{ 0xffd20018, 0, 16, 4, /* IPRGS */ { TMU0_0, TMU0_1,
TMU0_2, _2DG1 } },
{ 0xffd2001c, 0, 16, 4, /* IPRHS */ { 0, 0/*STPRO*/, 0/*STPRO*/,
_2DG_BRK_INT/*FIXME*/ } },
{ 0xffd20020, 0, 16, 4, /* IPRIS */ { 0, 0/*MSIOF*/, 0, IIC0 } },
{ 0xffd20024, 0, 16, 4, /* IPRJS */ { CEU20, 0/*SGX540*/, 0, 0 } },
{ 0xffd20028, 0, 16, 4, /* IPRKS */ { VIO6C, 0, LMB, 0 } },
{ 0xffd2002c, 0, 16, 4, /* IPRLS */ { 0/*IPMMU*/, 0, CEU21, 0 } },
/* IPRMS */ /*RWDT0*/
/* IPRAS3 */ /*RTDMAC2(1)*/
/* IPRBS3 */ /*RTDMAC2(2)*/
/* IPRCS3 */
/* IPRDS3 */
/* IPRES3 */
/* IPRFS3 */
/* IPRGS3 */
/* IPRHS3 */
/* IPRIS3 */
{ 0xffd50024, 0, 16, 4, /* IPRJS3 */ { LCDC1, 0, 0, 0 } },
/* IPRKS3 */ /*SPU2/FSI/FMSi*/
/* IPRLS3 */
{ 0xffd50030, 0, 16, 4, /* IPRMS3 */ { TMU1, 0, 0, 0 } },
{ 0xffd50034, 0, 16, 4, /* IPRNS3 */ { CMT4, DISP, DSRV, 0 } },
{ 0xffd50038, 0, 16, 4, /* IPROS3 */ { 0/*MFIS2*/, CPORTS2R, 0, 0 } },
/* IPRPS3 */
};
static struct resource intcs_resources[] __initdata = {
[0] = {
.start = 0xffd20000,
.end = 0xffd201ff,
.flags = IORESOURCE_MEM,
},
[1] = {
.start = 0xffd50000,
.end = 0xffd501ff,
.flags = IORESOURCE_MEM,
}
};
static struct intc_desc intcs_desc __initdata = {
.name = "r8a7740-intcs",
.resource = intcs_resources,
.num_resources = ARRAY_SIZE(intcs_resources),
.hw = INTC_HW_DESC(intcs_vectors, intcs_groups, intcs_mask_registers,
intcs_prio_registers, NULL, NULL),
};
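/*
 * INTCS interrupts are cascaded into INTCA through a single vector; the
 * demux handler reads INTEVTSA to find the real source and re-injects it.
 */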
static void intcs_demux(unsigned int irq, struct irq_desc *desc)
{
void __iomem *reg = (void *)irq_get_handler_data(irq);
unsigned int evtcodeas = ioread32(reg);
generic_handle_irq(intcs_evt2irq(evtcodeas));
}
void __init r8a7740_init_irq(void)
{
void __iomem *intevtsa = ioremap_nocache(0xffd20100, PAGE_SIZE);
register_intc_controller(&intca_desc);
register_intc_controller(&intca_irq_pins_desc);
register_intc_controller(&intcs_desc);
/* demux using INTEVTSA */
irq_set_handler_data(evt2irq(0xf80), (void *)intevtsa);
irq_set_chained_handler(evt2irq(0xf80), intcs_demux);
}
| gpl-2.0 |
oliliango/linux-cedarview_gfx | arch/sh/boards/mach-hp6xx/setup.c | 4782 | 3936 | /*
* linux/arch/sh/boards/hp6xx/setup.c
*
* Copyright (C) 2002 Andriy Skulysh
* Copyright (C) 2007 Kristoffer Ericson <Kristoffer_e1@hotmail.com>
*
* May be copied or modified under the terms of the GNU General Public
* License. See linux/COPYING for more information.
*
* Setup code for HP620/HP660/HP680/HP690 (internal peripherals only)
*/
#include <linux/types.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/irq.h>
#include <linux/sh_intc.h>
#include <sound/sh_dac_audio.h>
#include <asm/hd64461.h>
#include <asm/io.h>
#include <mach/hp6xx.h>
#include <cpu/dac.h>
#define SCPCR 0xa4000116
#define SCPDR 0xa4000136
/* CF Slot */
static struct resource cf_ide_resources[] = {
[0] = {
.start = 0x15000000 + 0x1f0,
.end = 0x15000000 + 0x1f0 + 0x08 - 0x01,
.flags = IORESOURCE_MEM,
},
[1] = {
.start = 0x15000000 + 0x1fe,
.end = 0x15000000 + 0x1fe + 0x01,
.flags = IORESOURCE_MEM,
},
[2] = {
.start = evt2irq(0xba0),
.flags = IORESOURCE_IRQ,
},
};
static struct platform_device cf_ide_device = {
.name = "pata_platform",
.id = -1,
.num_resources = ARRAY_SIZE(cf_ide_resources),
.resource = cf_ide_resources,
};
static struct platform_device jornadakbd_device = {
.name = "jornada680_kbd",
.id = -1,
};
static void dac_audio_start(struct dac_audio_pdata *pdata)
{
u16 v;
u8 v8;
/* HP Jornada 680/690 speaker on */
v = inw(HD64461_GPADR);
v &= ~HD64461_GPADR_SPEAKER;
outw(v, HD64461_GPADR);
/* HP Palmtop 620lx/660lx speaker on */
v8 = inb(PKDR);
v8 &= ~PKDR_SPEAKER;
outb(v8, PKDR);
sh_dac_enable(pdata->channel);
}
static void dac_audio_stop(struct dac_audio_pdata *pdata)
{
u16 v;
u8 v8;
/* HP Jornada 680/690 speaker off */
v = inw(HD64461_GPADR);
v |= HD64461_GPADR_SPEAKER;
outw(v, HD64461_GPADR);
/* HP Palmtop 620lx/660lx speaker off */
v8 = inb(PKDR);
v8 |= PKDR_SPEAKER;
outb(v8, PKDR);
sh_dac_output(0, pdata->channel);
sh_dac_disable(pdata->channel);
}
static struct dac_audio_pdata dac_audio_platform_data = {
.buffer_size = 64000,
.channel = 1,
.start = dac_audio_start,
.stop = dac_audio_stop,
};
static struct platform_device dac_audio_device = {
.name = "dac_audio",
.id = -1,
.dev = {
.platform_data = &dac_audio_platform_data,
}
};
static struct platform_device *hp6xx_devices[] __initdata = {
&cf_ide_device,
&jornadakbd_device,
&dac_audio_device,
};
static void __init hp6xx_init_irq(void)
{
/* Gets touchscreen and powerbutton IRQ working */
plat_irq_setup_pins(IRQ_MODE_IRQ);
}
static int __init hp6xx_devices_setup(void)
{
return platform_add_devices(hp6xx_devices, ARRAY_SIZE(hp6xx_devices));
}
static void __init hp6xx_setup(char **cmdline_p)
{
u8 v8;
u16 v;
v = inw(HD64461_STBCR);
v |= HD64461_STBCR_SURTST | HD64461_STBCR_SIRST |
HD64461_STBCR_STM1ST | HD64461_STBCR_STM0ST |
HD64461_STBCR_SAFEST | HD64461_STBCR_SPC0ST |
HD64461_STBCR_SMIAST | HD64461_STBCR_SAFECKE_OST|
HD64461_STBCR_SAFECKE_IST;
#ifndef CONFIG_HD64461_ENABLER
v |= HD64461_STBCR_SPC1ST;
#endif
outw(v, HD64461_STBCR);
v = inw(HD64461_GPADR);
v |= HD64461_GPADR_SPEAKER | HD64461_GPADR_PCMCIA0;
outw(v, HD64461_GPADR);
outw(HD64461_PCCGCR_VCC0 | HD64461_PCCSCR_VCC1, HD64461_PCC0GCR);
#ifndef CONFIG_HD64461_ENABLER
outw(HD64461_PCCGCR_VCC0 | HD64461_PCCSCR_VCC1, HD64461_PCC1GCR);
#endif
sh_dac_output(0, DAC_SPEAKER_VOLUME);
sh_dac_disable(DAC_SPEAKER_VOLUME);
v8 = __raw_readb(DACR);
v8 &= ~DACR_DAE;
__raw_writeb(v8,DACR);
v8 = __raw_readb(SCPDR);
v8 |= SCPDR_TS_SCAN_X | SCPDR_TS_SCAN_Y;
v8 &= ~SCPDR_TS_SCAN_ENABLE;
__raw_writeb(v8, SCPDR);
v = __raw_readw(SCPCR);
v &= ~SCPCR_TS_MASK;
v |= SCPCR_TS_ENABLE;
__raw_writew(v, SCPCR);
}
device_initcall(hp6xx_devices_setup);
static struct sh_machine_vector mv_hp6xx __initmv = {
.mv_name = "hp6xx",
.mv_setup = hp6xx_setup,
/* Enable IRQ0 -> IRQ3 in IRQ_MODE */
.mv_init_irq = hp6xx_init_irq,
};
| gpl-2.0 |
TeamRegular/android_kernel_samsung_kminilte | arch/arm/mach-shmobile/clock-r8a7779.c | 4782 | 5381 | /*
* r8a7779 clock framework support
*
* Copyright (C) 2011 Renesas Solutions Corp.
* Copyright (C) 2011 Magnus Damm
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/sh_clk.h>
#include <linux/clkdev.h>
#include <mach/common.h>
#define FRQMR 0xffc80014
#define MSTPCR0 0xffc80030
#define MSTPCR1 0xffc80034
#define MSTPCR3 0xffc8003c
#define MSTPSR1 0xffc80044
#define MSTPSR4 0xffc80048
#define MSTPSR6 0xffc8004c
#define MSTPCR4 0xffc80050
#define MSTPCR5 0xffc80054
#define MSTPCR6 0xffc80058
#define MSTPCR7 0xffc80040
/* ioremap() through the clock mapping is mandatory to avoid
* collision with ARM coherent DMA virtual memory range.
*/
static struct clk_mapping cpg_mapping = {
.phys = 0xffc80000,
.len = 0x80,
};
/*
* Default rate for the root input clock, reset this with clk_set_rate()
* from the platform code.
*/
static struct clk plla_clk = {
.rate = 1500000000,
.mapping = &cpg_mapping,
};
static struct clk *main_clks[] = {
&plla_clk,
};
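/* A zero entry marks a divider setting that is not usable here */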
static int divisors[] = { 0, 0, 0, 6, 8, 12, 16, 0, 24, 32, 36, 0, 0, 0, 0, 0 };
static struct clk_div_mult_table div4_div_mult_table = {
.divisors = divisors,
.nr_divisors = ARRAY_SIZE(divisors),
};
static struct clk_div4_table div4_table = {
.div_mult_table = &div4_div_mult_table,
};
enum { DIV4_S, DIV4_OUT, DIV4_S4, DIV4_S3, DIV4_S1, DIV4_P, DIV4_NR };
static struct clk div4_clks[DIV4_NR] = {
[DIV4_S] = SH_CLK_DIV4(&plla_clk, FRQMR, 20,
0x0018, CLK_ENABLE_ON_INIT),
[DIV4_OUT] = SH_CLK_DIV4(&plla_clk, FRQMR, 16,
0x0700, CLK_ENABLE_ON_INIT),
[DIV4_S4] = SH_CLK_DIV4(&plla_clk, FRQMR, 12,
0x0040, CLK_ENABLE_ON_INIT),
[DIV4_S3] = SH_CLK_DIV4(&plla_clk, FRQMR, 8,
0x0010, CLK_ENABLE_ON_INIT),
[DIV4_S1] = SH_CLK_DIV4(&plla_clk, FRQMR, 4,
0x0060, CLK_ENABLE_ON_INIT),
[DIV4_P] = SH_CLK_DIV4(&plla_clk, FRQMR, 0,
0x0300, CLK_ENABLE_ON_INIT),
};
enum { MSTP026, MSTP025, MSTP024, MSTP023, MSTP022, MSTP021,
MSTP016, MSTP015, MSTP014,
MSTP_NR };
static struct clk mstp_clks[MSTP_NR] = {
[MSTP026] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 26, 0), /* SCIF0 */
[MSTP025] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 25, 0), /* SCIF1 */
[MSTP024] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 24, 0), /* SCIF2 */
[MSTP023] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 23, 0), /* SCIF3 */
[MSTP022] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 22, 0), /* SCIF4 */
[MSTP021] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 21, 0), /* SCIF5 */
[MSTP016] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 16, 0), /* TMU0 */
[MSTP015] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 15, 0), /* TMU1 */
[MSTP014] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 14, 0), /* TMU2 */
};
static unsigned long mul4_recalc(struct clk *clk)
{
return clk->parent->rate * 4;
}
static struct sh_clk_ops mul4_clk_ops = {
.recalc = mul4_recalc,
};
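/* clkz runs at four times the shyway (S) clock */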
struct clk clkz_clk = {
.ops = &mul4_clk_ops,
.parent = &div4_clks[DIV4_S],
};
struct clk clkzs_clk = {
/* clks x 4 / 4 = clks */
.parent = &div4_clks[DIV4_S],
};
static struct clk *late_main_clks[] = {
&clkz_clk,
&clkzs_clk,
};
static struct clk_lookup lookups[] = {
/* main clocks */
CLKDEV_CON_ID("plla_clk", &plla_clk),
CLKDEV_CON_ID("clkz_clk", &clkz_clk),
CLKDEV_CON_ID("clkzs_clk", &clkzs_clk),
/* DIV4 clocks */
CLKDEV_CON_ID("shyway_clk", &div4_clks[DIV4_S]),
CLKDEV_CON_ID("bus_clk", &div4_clks[DIV4_OUT]),
CLKDEV_CON_ID("shyway4_clk", &div4_clks[DIV4_S4]),
CLKDEV_CON_ID("shyway3_clk", &div4_clks[DIV4_S3]),
CLKDEV_CON_ID("shyway1_clk", &div4_clks[DIV4_S1]),
CLKDEV_CON_ID("peripheral_clk", &div4_clks[DIV4_P]),
/* MSTP32 clocks */
CLKDEV_DEV_ID("sh_tmu.0", &mstp_clks[MSTP016]), /* TMU00 */
CLKDEV_DEV_ID("sh_tmu.1", &mstp_clks[MSTP016]), /* TMU01 */
CLKDEV_DEV_ID("sh-sci.0", &mstp_clks[MSTP026]), /* SCIF0 */
CLKDEV_DEV_ID("sh-sci.1", &mstp_clks[MSTP025]), /* SCIF1 */
CLKDEV_DEV_ID("sh-sci.2", &mstp_clks[MSTP024]), /* SCIF2 */
CLKDEV_DEV_ID("sh-sci.3", &mstp_clks[MSTP023]), /* SCIF3 */
CLKDEV_DEV_ID("sh-sci.4", &mstp_clks[MSTP022]), /* SCIF4 */
CLKDEV_DEV_ID("sh-sci.5", &mstp_clks[MSTP021]), /* SCIF6 */
};
void __init r8a7779_clock_init(void)
{
int k, ret = 0;
for (k = 0; !ret && (k < ARRAY_SIZE(main_clks)); k++)
ret = clk_register(main_clks[k]);
if (!ret)
ret = sh_clk_div4_register(div4_clks, DIV4_NR, &div4_table);
if (!ret)
ret = sh_clk_mstp32_register(mstp_clks, MSTP_NR);
for (k = 0; !ret && (k < ARRAY_SIZE(late_main_clks)); k++)
ret = clk_register(late_main_clks[k]);
clkdev_add_table(lookups, ARRAY_SIZE(lookups));
if (!ret)
shmobile_clk_init();
else
panic("failed to setup r8a7779 clocks\n");
}
| gpl-2.0 |
Nick73/King_Kernel | drivers/infiniband/ulp/ipoib/ipoib_ethtool.c | 8110 | 2985 | /*
* Copyright (c) 2007 Mellanox Technologies. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include <linux/kernel.h>
#include <linux/ethtool.h>
#include <linux/netdevice.h>
#include "ipoib.h"
static void ipoib_get_drvinfo(struct net_device *netdev,
struct ethtool_drvinfo *drvinfo)
{
strncpy(drvinfo->driver, "ipoib", sizeof(drvinfo->driver) - 1);
}
static int ipoib_get_coalesce(struct net_device *dev,
struct ethtool_coalesce *coal)
{
struct ipoib_dev_priv *priv = netdev_priv(dev);
coal->rx_coalesce_usecs = priv->ethtool.coalesce_usecs;
coal->rx_max_coalesced_frames = priv->ethtool.max_coalesced_frames;
return 0;
}
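/*
 * Both moderation values must fit in 16 bits; -ENOSYS from ib_modify_cq()
 * just means the HCA does not support CQ moderation and is tolerated.
 */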
static int ipoib_set_coalesce(struct net_device *dev,
struct ethtool_coalesce *coal)
{
struct ipoib_dev_priv *priv = netdev_priv(dev);
int ret;
/*
* These values are saved in the private data and returned
* when ipoib_get_coalesce() is called
*/
if (coal->rx_coalesce_usecs > 0xffff ||
coal->rx_max_coalesced_frames > 0xffff)
return -EINVAL;
ret = ib_modify_cq(priv->recv_cq, coal->rx_max_coalesced_frames,
coal->rx_coalesce_usecs);
if (ret && ret != -ENOSYS) {
ipoib_warn(priv, "failed modifying CQ (%d)\n", ret);
return ret;
}
priv->ethtool.coalesce_usecs = coal->rx_coalesce_usecs;
priv->ethtool.max_coalesced_frames = coal->rx_max_coalesced_frames;
return 0;
}
static const struct ethtool_ops ipoib_ethtool_ops = {
.get_drvinfo = ipoib_get_drvinfo,
.get_coalesce = ipoib_get_coalesce,
.set_coalesce = ipoib_set_coalesce,
};
void ipoib_set_ethtool_ops(struct net_device *dev)
{
SET_ETHTOOL_OPS(dev, &ipoib_ethtool_ops);
}
| gpl-2.0 |
TaichiN/android_kernel_google_msm | arch/ia64/xen/xcom_hcall.c | 11694 | 11064 | /*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*
* Tristan Gingold <tristan.gingold@bull.net>
*
* Copyright (c) 2007
* Isaku Yamahata <yamahata at valinux co jp>
* VA Linux Systems Japan K.K.
* consolidate mini and inline version.
*/
#include <linux/module.h>
#include <xen/interface/xen.h>
#include <xen/interface/memory.h>
#include <xen/interface/grant_table.h>
#include <xen/interface/callback.h>
#include <xen/interface/vcpu.h>
#include <asm/xen/hypervisor.h>
#include <asm/xen/xencomm.h>
/* Xencomm notes:
* This file defines hypercalls to be used by xencomm. The hypercalls simply
* create inlines or mini descriptors for pointers and then call the raw arch
* hypercall xencomm_arch_hypercall_XXX
*
* If the arch wants to directly use these hypercalls, simply define macros
* in asm/xen/hypercall.h, eg:
* #define HYPERVISOR_sched_op xencomm_hypercall_sched_op
*
* The arch may also define HYPERVISOR_xxx as a function and do more operations
* before/after doing the hypercall.
*
* Note: because only inline or mini descriptors are created, these functions
* must only be called with in-kernel memory parameters.
*/
int
xencomm_hypercall_console_io(int cmd, int count, char *str)
{
/* xen early printk uses console io hypercall before
* xencomm initialization. In that case, we just ignore it.
*/
if (!xencomm_is_initialized())
return 0;
return xencomm_arch_hypercall_console_io
(cmd, count, xencomm_map_no_alloc(str, count));
}
EXPORT_SYMBOL_GPL(xencomm_hypercall_console_io);
int
xencomm_hypercall_event_channel_op(int cmd, void *op)
{
struct xencomm_handle *desc;
desc = xencomm_map_no_alloc(op, sizeof(struct evtchn_op));
if (desc == NULL)
return -EINVAL;
return xencomm_arch_hypercall_event_channel_op(cmd, desc);
}
EXPORT_SYMBOL_GPL(xencomm_hypercall_event_channel_op);
int
xencomm_hypercall_xen_version(int cmd, void *arg)
{
struct xencomm_handle *desc;
unsigned int argsize;
switch (cmd) {
case XENVER_version:
/* do not actually pass an argument */
return xencomm_arch_hypercall_xen_version(cmd, 0);
case XENVER_extraversion:
argsize = sizeof(struct xen_extraversion);
break;
case XENVER_compile_info:
argsize = sizeof(struct xen_compile_info);
break;
case XENVER_capabilities:
argsize = sizeof(struct xen_capabilities_info);
break;
case XENVER_changeset:
argsize = sizeof(struct xen_changeset_info);
break;
case XENVER_platform_parameters:
argsize = sizeof(struct xen_platform_parameters);
break;
case XENVER_get_features:
argsize = (arg == NULL) ? 0 : sizeof(struct xen_feature_info);
break;
default:
printk(KERN_DEBUG
"%s: unknown version op %d\n", __func__, cmd);
return -ENOSYS;
}
desc = xencomm_map_no_alloc(arg, argsize);
if (desc == NULL)
return -EINVAL;
return xencomm_arch_hypercall_xen_version(cmd, desc);
}
EXPORT_SYMBOL_GPL(xencomm_hypercall_xen_version);
int
xencomm_hypercall_physdev_op(int cmd, void *op)
{
unsigned int argsize;
switch (cmd) {
case PHYSDEVOP_apic_read:
case PHYSDEVOP_apic_write:
argsize = sizeof(struct physdev_apic);
break;
case PHYSDEVOP_alloc_irq_vector:
case PHYSDEVOP_free_irq_vector:
argsize = sizeof(struct physdev_irq);
break;
case PHYSDEVOP_irq_status_query:
argsize = sizeof(struct physdev_irq_status_query);
break;
default:
printk(KERN_DEBUG
"%s: unknown physdev op %d\n", __func__, cmd);
return -ENOSYS;
}
return xencomm_arch_hypercall_physdev_op
(cmd, xencomm_map_no_alloc(op, argsize));
}
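/*
 * GNTTABOP_setup_table is special: its frame_list guest handle needs its
 * own mini descriptor before the operation itself is wrapped.
 */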
static int
xencommize_grant_table_op(struct xencomm_mini **xc_area,
unsigned int cmd, void *op, unsigned int count,
struct xencomm_handle **desc)
{
struct xencomm_handle *desc1;
unsigned int argsize;
switch (cmd) {
case GNTTABOP_map_grant_ref:
argsize = sizeof(struct gnttab_map_grant_ref);
break;
case GNTTABOP_unmap_grant_ref:
argsize = sizeof(struct gnttab_unmap_grant_ref);
break;
case GNTTABOP_setup_table:
{
struct gnttab_setup_table *setup = op;
argsize = sizeof(*setup);
if (count != 1)
return -EINVAL;
desc1 = __xencomm_map_no_alloc
(xen_guest_handle(setup->frame_list),
setup->nr_frames *
sizeof(*xen_guest_handle(setup->frame_list)),
*xc_area);
if (desc1 == NULL)
return -EINVAL;
(*xc_area)++;
set_xen_guest_handle(setup->frame_list, (void *)desc1);
break;
}
case GNTTABOP_dump_table:
argsize = sizeof(struct gnttab_dump_table);
break;
case GNTTABOP_transfer:
argsize = sizeof(struct gnttab_transfer);
break;
case GNTTABOP_copy:
argsize = sizeof(struct gnttab_copy);
break;
case GNTTABOP_query_size:
argsize = sizeof(struct gnttab_query_size);
break;
default:
printk(KERN_DEBUG "%s: unknown hypercall grant table op %d\n",
__func__, cmd);
BUG();
}
*desc = __xencomm_map_no_alloc(op, count * argsize, *xc_area);
if (*desc == NULL)
return -EINVAL;
(*xc_area)++;
return 0;
}
int
xencomm_hypercall_grant_table_op(unsigned int cmd, void *op,
unsigned int count)
{
int rc;
struct xencomm_handle *desc;
XENCOMM_MINI_ALIGNED(xc_area, 2);
rc = xencommize_grant_table_op(&xc_area, cmd, op, count, &desc);
if (rc)
return rc;
return xencomm_arch_hypercall_grant_table_op(cmd, desc, count);
}
EXPORT_SYMBOL_GPL(xencomm_hypercall_grant_table_op);
int
xencomm_hypercall_sched_op(int cmd, void *arg)
{
struct xencomm_handle *desc;
unsigned int argsize;
switch (cmd) {
case SCHEDOP_yield:
case SCHEDOP_block:
argsize = 0;
break;
case SCHEDOP_shutdown:
argsize = sizeof(struct sched_shutdown);
break;
case SCHEDOP_poll:
{
struct sched_poll *poll = arg;
struct xencomm_handle *ports;
argsize = sizeof(struct sched_poll);
ports = xencomm_map_no_alloc(xen_guest_handle(poll->ports),
sizeof(*xen_guest_handle(poll->ports)));
set_xen_guest_handle(poll->ports, (void *)ports);
break;
}
default:
printk(KERN_DEBUG "%s: unknown sched op %d\n", __func__, cmd);
return -ENOSYS;
}
desc = xencomm_map_no_alloc(arg, argsize);
if (desc == NULL)
return -EINVAL;
return xencomm_arch_hypercall_sched_op(cmd, desc);
}
EXPORT_SYMBOL_GPL(xencomm_hypercall_sched_op);
int
xencomm_hypercall_multicall(void *call_list, int nr_calls)
{
int rc;
int i;
struct multicall_entry *mce;
struct xencomm_handle *desc;
XENCOMM_MINI_ALIGNED(xc_area, nr_calls * 2);
for (i = 0; i < nr_calls; i++) {
mce = (struct multicall_entry *)call_list + i;
switch (mce->op) {
case __HYPERVISOR_update_va_mapping:
case __HYPERVISOR_mmu_update:
/* No-op on ia64. */
break;
case __HYPERVISOR_grant_table_op:
rc = xencommize_grant_table_op
(&xc_area,
mce->args[0], (void *)mce->args[1],
mce->args[2], &desc);
if (rc)
return rc;
mce->args[1] = (unsigned long)desc;
break;
case __HYPERVISOR_memory_op:
default:
printk(KERN_DEBUG
"%s: unhandled multicall op entry op %lu\n",
__func__, mce->op);
return -ENOSYS;
}
}
desc = xencomm_map_no_alloc(call_list,
nr_calls * sizeof(struct multicall_entry));
if (desc == NULL)
return -EINVAL;
return xencomm_arch_hypercall_multicall(desc, nr_calls);
}
EXPORT_SYMBOL_GPL(xencomm_hypercall_multicall);
int
xencomm_hypercall_callback_op(int cmd, void *arg)
{
unsigned int argsize;
switch (cmd) {
case CALLBACKOP_register:
argsize = sizeof(struct callback_register);
break;
case CALLBACKOP_unregister:
argsize = sizeof(struct callback_unregister);
break;
default:
printk(KERN_DEBUG
"%s: unknown callback op %d\n", __func__, cmd);
return -ENOSYS;
}
return xencomm_arch_hypercall_callback_op
(cmd, xencomm_map_no_alloc(arg, argsize));
}
static int
xencommize_memory_reservation(struct xencomm_mini *xc_area,
struct xen_memory_reservation *mop)
{
struct xencomm_handle *desc;
desc = __xencomm_map_no_alloc(xen_guest_handle(mop->extent_start),
mop->nr_extents *
sizeof(*xen_guest_handle(mop->extent_start)),
xc_area);
if (desc == NULL)
return -EINVAL;
set_xen_guest_handle(mop->extent_start, (void *)desc);
return 0;
}
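/*
 * The original extent_start handle is saved and restored around the call
 * because xencommize_memory_reservation() rewrites it with a descriptor.
 */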
int
xencomm_hypercall_memory_op(unsigned int cmd, void *arg)
{
GUEST_HANDLE(xen_pfn_t) extent_start_va[2] = { {NULL}, {NULL} };
struct xen_memory_reservation *xmr = NULL;
int rc;
struct xencomm_handle *desc;
unsigned int argsize;
XENCOMM_MINI_ALIGNED(xc_area, 2);
switch (cmd) {
case XENMEM_increase_reservation:
case XENMEM_decrease_reservation:
case XENMEM_populate_physmap:
xmr = (struct xen_memory_reservation *)arg;
set_xen_guest_handle(extent_start_va[0],
xen_guest_handle(xmr->extent_start));
argsize = sizeof(*xmr);
rc = xencommize_memory_reservation(xc_area, xmr);
if (rc)
return rc;
xc_area++;
break;
case XENMEM_maximum_ram_page:
argsize = 0;
break;
case XENMEM_add_to_physmap:
argsize = sizeof(struct xen_add_to_physmap);
break;
default:
printk(KERN_DEBUG "%s: unknown memory op %d\n", __func__, cmd);
return -ENOSYS;
}
desc = xencomm_map_no_alloc(arg, argsize);
if (desc == NULL)
return -EINVAL;
rc = xencomm_arch_hypercall_memory_op(cmd, desc);
switch (cmd) {
case XENMEM_increase_reservation:
case XENMEM_decrease_reservation:
case XENMEM_populate_physmap:
set_xen_guest_handle(xmr->extent_start,
xen_guest_handle(extent_start_va[0]));
break;
}
return rc;
}
EXPORT_SYMBOL_GPL(xencomm_hypercall_memory_op);
int
xencomm_hypercall_suspend(unsigned long srec)
{
struct sched_shutdown arg;
arg.reason = SHUTDOWN_suspend;
return xencomm_arch_hypercall_sched_op(
SCHEDOP_shutdown, xencomm_map_no_alloc(&arg, sizeof(arg)));
}
long
xencomm_hypercall_vcpu_op(int cmd, int cpu, void *arg)
{
unsigned int argsize;
switch (cmd) {
case VCPUOP_register_runstate_memory_area: {
struct vcpu_register_runstate_memory_area *area =
(struct vcpu_register_runstate_memory_area *)arg;
argsize = sizeof(*area);
set_xen_guest_handle(area->addr.h,
(void *)xencomm_map_no_alloc(area->addr.v,
sizeof(area->addr.v)));
break;
}
default:
printk(KERN_DEBUG "%s: unknown vcpu op %d\n", __func__, cmd);
return -ENOSYS;
}
return xencomm_arch_hypercall_vcpu_op(cmd, cpu,
xencomm_map_no_alloc(arg, argsize));
}
long
xencomm_hypercall_opt_feature(void *arg)
{
return xencomm_arch_hypercall_opt_feature(
xencomm_map_no_alloc(arg,
sizeof(struct xen_ia64_opt_feature)));
}
| gpl-2.0 |
curbthepain/android_kernel_htc_a11chl | fs/minix/itree_v1.c | 13486 | 1481 | #include <linux/buffer_head.h>
#include <linux/slab.h>
#include "minix.h"
enum {DEPTH = 3, DIRECT = 7}; /* Only double indirect */
typedef u16 block_t; /* 16 bit, host order */
static inline unsigned long block_to_cpu(block_t n)
{
return n;
}
static inline block_t cpu_to_block(unsigned long n)
{
return n;
}
static inline block_t *i_data(struct inode *inode)
{
return (block_t *)minix_i(inode)->u.i1_data;
}
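/*
 * Map a file block number onto the V1 on-disk layout: 7 direct pointers,
 * one indirect block of 512 entries, then one double-indirect block.
 */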
static int block_to_path(struct inode * inode, long block, int offsets[DEPTH])
{
int n = 0;
char b[BDEVNAME_SIZE];
if (block < 0) {
printk("MINIX-fs: block_to_path: block %ld < 0 on dev %s\n",
block, bdevname(inode->i_sb->s_bdev, b));
} else if (block >= (minix_sb(inode->i_sb)->s_max_size/BLOCK_SIZE)) {
if (printk_ratelimit())
printk("MINIX-fs: block_to_path: "
"block %ld too big on dev %s\n",
block, bdevname(inode->i_sb->s_bdev, b));
} else if (block < 7) {
offsets[n++] = block;
} else if ((block -= 7) < 512) {
offsets[n++] = 7;
offsets[n++] = block;
} else {
block -= 512;
offsets[n++] = 8;
offsets[n++] = block>>9;
offsets[n++] = block & 511;
}
return n;
}
#include "itree_common.c"
int V1_minix_get_block(struct inode * inode, long block,
struct buffer_head *bh_result, int create)
{
return get_block(inode, block, bh_result, create);
}
void V1_minix_truncate(struct inode * inode)
{
truncate(inode);
}
unsigned V1_minix_blocks(loff_t size, struct super_block *sb)
{
return nblocks(size, sb);
}
| gpl-2.0 |
andrewwrightt/kernel_moto_shamu | drivers/coresight/coresight-etm.c | 175 | 63416 | /* Copyright (c) 2011-2014, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/device.h>
#include <linux/platform_device.h>
#include <linux/io.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/smp.h>
#include <linux/wakelock.h>
#include <linux/sysfs.h>
#include <linux/stat.h>
#include <linux/spinlock.h>
#include <linux/clk.h>
#include <linux/cpu.h>
#include <linux/of_coresight.h>
#include <linux/coresight.h>
#include <asm/sections.h>
#include <soc/qcom/socinfo.h>
#include <soc/qcom/memory_dump.h>
#include "coresight-priv.h"
#define etm_writel_mm(drvdata, val, off) \
__raw_writel((val), drvdata->base + off)
#define etm_readl_mm(drvdata, off) \
__raw_readl(drvdata->base + off)
#define etm_writel(drvdata, val, off) \
({ \
if (cpu_is_krait_v3()) \
etm_writel_cp14(val, off); \
else \
etm_writel_mm(drvdata, val, off); \
})
#define etm_readl(drvdata, off) \
({ \
uint32_t val; \
if (cpu_is_krait_v3()) \
val = etm_readl_cp14(off); \
else \
val = etm_readl_mm(drvdata, off); \
val; \
})
#define ETM_LOCK(drvdata) \
do { \
/* recommended by spec to ensure ETM writes are committed prior
* to resuming execution
*/ \
mb(); \
isb(); \
etm_writel_mm(drvdata, 0x0, CORESIGHT_LAR); \
} while (0)
#define ETM_UNLOCK(drvdata) \
do { \
etm_writel_mm(drvdata, CORESIGHT_UNLOCK, CORESIGHT_LAR); \
/* ensure unlock and any pending writes are committed prior to
* programming ETM registers
*/ \
mb(); \
isb(); \
} while (0)
/*
* Device registers:
* 0x000 - 0x2FC: Trace registers
* 0x300 - 0x314: Management registers
* 0x318 - 0xEFC: Trace registers
*
* Coresight registers
* 0xF00 - 0xF9C: Management registers
* 0xFA0 - 0xFA4: Management registers in PFTv1.0
* Trace registers in PFTv1.1
* 0xFA8 - 0xFFC: Management registers
*/
/* Trace registers (0x000-0x2FC) */
#define ETMCR (0x000)
#define ETMCCR (0x004)
#define ETMTRIGGER (0x008)
#define ETMASSICCTLR (0x00C)
#define ETMSR (0x010)
#define ETMSCR (0x014)
#define ETMTSSCR (0x018)
#define ETMTECR2 (0x01C)
#define ETMTEEVR (0x020)
#define ETMTECR1 (0x024)
#define ETMFFLR (0x02C)
#define ETMVDEVR (0x030)
#define ETMVDCR1 (0x034)
#define ETMVDCR3 (0x03C)
#define ETMACVRn(n) (0x040 + (n * 4))
#define ETMACTRn(n) (0x080 + (n * 4))
#define ETMDCVRn(n) (0x0C0 + (n * 8))
#define ETMDCMRn(n) (0x100 + (n * 8))
#define ETMCNTRLDVRn(n) (0x140 + (n * 4))
#define ETMCNTENRn(n) (0x150 + (n * 4))
#define ETMCNTRLDEVRn(n) (0x160 + (n * 4))
#define ETMCNTVRn(n) (0x170 + (n * 4))
#define ETMSQ12EVR (0x180)
#define ETMSQ21EVR (0x184)
#define ETMSQ23EVR (0x188)
#define ETMSQ31EVR (0x18C)
#define ETMSQ32EVR (0x190)
#define ETMSQ13EVR (0x194)
#define ETMSQR (0x19C)
#define ETMEXTOUTEVRn(n) (0x1A0 + (n * 4))
#define ETMCIDCVRn(n) (0x1B0 + (n * 4))
#define ETMCIDCMR (0x1BC)
#define ETMIMPSPEC0 (0x1C0)
#define ETMIMPSPEC1 (0x1C4)
#define ETMIMPSPEC2 (0x1C8)
#define ETMIMPSPEC3 (0x1CC)
#define ETMIMPSPEC4 (0x1D0)
#define ETMIMPSPEC5 (0x1D4)
#define ETMIMPSPEC6 (0x1D8)
#define ETMIMPSPEC7 (0x1DC)
#define ETMSYNCFR (0x1E0)
#define ETMIDR (0x1E4)
#define ETMCCER (0x1E8)
#define ETMEXTINSELR (0x1EC)
#define ETMTESSEICR (0x1F0)
#define ETMEIBCR (0x1F4)
#define ETMTSEVR (0x1F8)
#define ETMAUXCR (0x1FC)
#define ETMTRACEIDR (0x200)
#define ETMIDR2 (0x208)
#define ETMVMIDCVR (0x240)
/* Management registers (0x300-0x314) */
#define ETMOSLAR (0x300)
#define ETMOSLSR (0x304)
#define ETMOSSRR (0x308)
#define ETMPDCR (0x310)
#define ETMPDSR (0x314)
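/*
 * Worked example of the banked comparator macros above: each takes a fixed
 * stride per register, so ETMACVRn(2) expands to (0x040 + (2 * 4)) == 0x048
 * and ETMDCVRn(2) expands to (0x0C0 + (2 * 8)) == 0x0D0, i.e. address
 * comparators are word-spaced while data value/mask comparators occupy a
 * pair of words each.
 */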
#define ETM_MAX_ADDR_CMP (16)
#define ETM_MAX_CNTR (4)
#define ETM_MAX_CTXID_CMP (3)
#define ETM_MODE_EXCLUDE BIT(0)
#define ETM_MODE_CYCACC BIT(1)
#define ETM_MODE_STALL BIT(2)
#define ETM_MODE_TIMESTAMP BIT(3)
#define ETM_MODE_CTXID BIT(4)
#define ETM_MODE_DATA_TRACE_VAL BIT(5)
#define ETM_MODE_DATA_TRACE_ADDR BIT(6)
#define ETM_MODE_ALL (0x7F)
#define ETM_DATACMP_ENABLE (0x2)
#define ETM_EVENT_MASK (0x1FFFF)
#define ETM_SYNC_MASK (0xFFF)
#define ETM_ALL_MASK (0xFFFFFFFF)
#define ETM_SEQ_STATE_MAX_VAL (0x2)
#define ETM_REG_DUMP_VER_OFF (4)
#define ETM_REG_DUMP_VER (1)
#define CPMR_ETMCLKEN (8)
enum etm_addr_type {
ETM_ADDR_TYPE_NONE,
ETM_ADDR_TYPE_SINGLE,
ETM_ADDR_TYPE_RANGE,
ETM_ADDR_TYPE_START,
ETM_ADDR_TYPE_STOP,
};
#ifdef CONFIG_CORESIGHT_ETM_DEFAULT_ENABLE
static int boot_enable = 1;
#else
static int boot_enable;
#endif
module_param_named(
boot_enable, boot_enable, int, S_IRUGO
);
#ifdef CONFIG_CORESIGHT_ETM_PCSAVE_DEFAULT_ENABLE
static int boot_pcsave_enable = 1;
#else
static int boot_pcsave_enable;
#endif
module_param_named(
boot_pcsave_enable, boot_pcsave_enable, int, S_IRUGO
);
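/*
 * Usage sketch (the exact parameter prefix is an assumption, it depends on
 * the object name this file is built as): the two parameters above would
 * typically be set on the kernel command line as
 * "coresight_etm.boot_enable=1" or "coresight_etm.boot_pcsave_enable=1"
 * to enable tracing or PC save at boot.
 */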
struct etm_drvdata {
void __iomem *base;
struct device *dev;
struct coresight_device *csdev;
struct clk *clk;
spinlock_t spinlock;
struct wake_lock wake_lock;
int cpu;
uint8_t arch;
bool enable;
bool sticky_enable;
bool boot_enable;
bool os_unlock;
uint8_t nr_addr_cmp;
uint8_t nr_cntr;
uint8_t nr_ext_inp;
uint8_t nr_ext_out;
uint8_t nr_ctxid_cmp;
uint8_t nr_data_cmp;
uint8_t reset;
uint32_t mode;
uint32_t ctrl;
uint32_t trigger_event;
uint32_t startstop_ctrl;
uint32_t enable_event;
uint32_t enable_ctrl1;
uint32_t enable_ctrl2;
uint32_t fifofull_level;
uint8_t addr_idx;
uint32_t addr_val[ETM_MAX_ADDR_CMP];
uint32_t addr_acctype[ETM_MAX_ADDR_CMP];
uint32_t addr_type[ETM_MAX_ADDR_CMP];
bool data_trace_support;
uint32_t data_val[ETM_MAX_ADDR_CMP];
uint32_t data_mask[ETM_MAX_ADDR_CMP];
uint32_t viewdata_event;
uint32_t viewdata_ctrl1;
uint32_t viewdata_ctrl3;
uint8_t cntr_idx;
uint32_t cntr_rld_val[ETM_MAX_CNTR];
uint32_t cntr_event[ETM_MAX_CNTR];
uint32_t cntr_rld_event[ETM_MAX_CNTR];
uint32_t cntr_val[ETM_MAX_CNTR];
uint32_t seq_12_event;
uint32_t seq_21_event;
uint32_t seq_23_event;
uint32_t seq_31_event;
uint32_t seq_32_event;
uint32_t seq_13_event;
uint32_t seq_curr_state;
uint8_t ctxid_idx;
uint32_t ctxid_val[ETM_MAX_CTXID_CMP];
uint32_t ctxid_mask;
uint32_t sync_freq;
uint32_t timestamp_event;
bool pcsave_impl;
bool pcsave_enable;
bool pcsave_sticky_enable;
bool pcsave_boot_enable;
bool round_robin;
};
static struct etm_drvdata *etmdrvdata[NR_CPUS];
static bool etm_os_lock_present(struct etm_drvdata *drvdata)
{
uint32_t etmoslsr;
etmoslsr = etm_readl(drvdata, ETMOSLSR);
if (!BVAL(etmoslsr, 0) && !BVAL(etmoslsr, 3))
return false;
return true;
}
/*
* Unlock OS lock to allow memory mapped access on Krait and in general
* so that ETMSR[1] can be polled while clearing the ETMCR[10] prog bit
* since ETMSR[1] is set when prog bit is set or OS lock is set.
*/
static void etm_os_unlock(void *info)
{
struct etm_drvdata *drvdata = (struct etm_drvdata *) info;
/*
* Memory-mapped writes to clear the OS lock are not supported on Krait
* v1 and v2; the OS lock must be unlocked before any memory-mapped
* access, otherwise memory-mapped reads/writes will be invalid.
*/
if (cpu_is_krait()) {
etm_writel_cp14(0x0, ETMOSLAR);
/* ensure os lock is unlocked before we return */
isb();
} else {
ETM_UNLOCK(drvdata);
if (etm_os_lock_present(drvdata)) {
etm_writel(drvdata, 0x0, ETMOSLAR);
/* ensure os lock is unlocked before we return */
mb();
}
ETM_LOCK(drvdata);
}
}
/*
* ETM clock is derived from the processor clock and gets enabled on a
* logical OR of below items on Krait (v2 onwards):
* 1.CPMR[ETMCLKEN] is 1
* 2.ETMCR[PD] is 0
* 3.ETMPDCR[PU] is 1
* 4.Reset is asserted (core or debug)
* 5.APB memory mapped requests (eg. EDAP access)
*
* 1., 2. and 3. above are permanent enables whereas 4. and 5. are temporary
* enables
*
* We rely on 5. to be able to access ETMCR/ETMPDCR and then use 2./3. above
* for ETM clock vote in the driver and the save-restore code uses 1. above
* for its vote
*/
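/*
 * How the votes above map onto the helpers below: etm_set_pwrup() and
 * etm_clr_pwrup() toggle ETMPDCR[PU] (item 3), or CPMR[ETMCLKEN] (item 1)
 * on Krait, while etm_clr_pwrdwn() and etm_set_pwrdwn() toggle ETMCR[PD]
 * (item 2). The enable paths assert the power-up vote first so the clock is
 * on for the subsequent cp14 accesses, clear the power-down bit, and then
 * release the power-up vote, leaving ETMCR[PD] == 0 as the standing vote.
 */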
static void etm_set_pwrdwn(struct etm_drvdata *drvdata)
{
uint32_t etmcr;
/* ensure pending cp14 accesses complete before setting pwrdwn */
mb();
isb();
etmcr = etm_readl(drvdata, ETMCR);
etmcr |= BIT(0);
etm_writel(drvdata, etmcr, ETMCR);
}
static void etm_clr_pwrdwn(struct etm_drvdata *drvdata)
{
uint32_t etmcr;
etmcr = etm_readl(drvdata, ETMCR);
etmcr &= ~BIT(0);
etm_writel(drvdata, etmcr, ETMCR);
/* ensure pwrup completes before subsequent cp14 accesses */
mb();
isb();
}
static void etm_set_pwrup(struct etm_drvdata *drvdata)
{
uint32_t cpmr;
uint32_t etmpdcr;
/* For Krait, use cp15 CPMR_ETMCLKEN instead of ETMPDCR since ETMPDCR
* is not supported for this purpose on Krait v4.
*/
if (cpu_is_krait()) {
asm volatile("mrc p15, 7, %0, c15, c0, 5" : "=r" (cpmr));
cpmr |= CPMR_ETMCLKEN;
asm volatile("mcr p15, 7, %0, c15, c0, 5" : : "r" (cpmr));
} else {
etmpdcr = etm_readl_mm(drvdata, ETMPDCR);
etmpdcr |= BIT(3);
etm_writel_mm(drvdata, etmpdcr, ETMPDCR);
}
/* ensure pwrup completes before subsequent cp14 accesses */
mb();
isb();
}
static void etm_clr_pwrup(struct etm_drvdata *drvdata)
{
uint32_t cpmr;
uint32_t etmpdcr;
/* ensure pending cp14 accesses complete before clearing pwrup */
mb();
isb();
/* For Krait, use cp15 CPMR_ETMCLKEN instead of ETMPDCR since ETMPDCR
* is not supported for this purpose on Krait v4.
*/
if (cpu_is_krait()) {
asm volatile("mrc p15, 7, %0, c15, c0, 5" : "=r" (cpmr));
cpmr &= ~CPMR_ETMCLKEN;
asm volatile("mcr p15, 7, %0, c15, c0, 5" : : "r" (cpmr));
} else {
etmpdcr = etm_readl_mm(drvdata, ETMPDCR);
etmpdcr &= ~BIT(3);
etm_writel_mm(drvdata, etmpdcr, ETMPDCR);
}
}
static void etm_set_prog(struct etm_drvdata *drvdata)
{
uint32_t etmcr;
int count;
etmcr = etm_readl(drvdata, ETMCR);
etmcr |= BIT(10);
etm_writel(drvdata, etmcr, ETMCR);
/* recommended by spec for cp14 accesses to ensure etmcr write is
* complete before polling etmsr
*/
isb();
for (count = TIMEOUT_US; BVAL(etm_readl(drvdata, ETMSR), 1) != 1
&& count > 0; count--)
udelay(1);
WARN(count == 0, "timeout while setting prog bit, ETMSR: %#x\n",
etm_readl(drvdata, ETMSR));
}
static void etm_clr_prog(struct etm_drvdata *drvdata)
{
uint32_t etmcr;
int count;
etmcr = etm_readl(drvdata, ETMCR);
etmcr &= ~BIT(10);
etm_writel(drvdata, etmcr, ETMCR);
/* recommended by spec for cp14 accesses to ensure etmcr write is
* complete before polling etmsr
*/
isb();
for (count = TIMEOUT_US; BVAL(etm_readl(drvdata, ETMSR), 1) != 0
&& count > 0; count--)
udelay(1);
WARN(count == 0, "timeout while clearing prog bit, ETMSR: %#x\n",
etm_readl(drvdata, ETMSR));
}
static void etm_enable_pcsave(void *info)
{
struct etm_drvdata *drvdata = info;
ETM_UNLOCK(drvdata);
/*
* ETMPDCR is only accessible via memory mapped interface and so use
* it first to enable power/clock to allow subsequent cp14 accesses.
*/
etm_set_pwrup(drvdata);
etm_clr_pwrdwn(drvdata);
etm_clr_pwrup(drvdata);
ETM_LOCK(drvdata);
}
static void etm_disable_pcsave(void *info)
{
struct etm_drvdata *drvdata = info;
ETM_UNLOCK(drvdata);
if (!drvdata->enable)
etm_set_pwrdwn(drvdata);
ETM_LOCK(drvdata);
}
static bool etm_version_gte(uint8_t arch, uint8_t base_arch)
{
if (arch >= base_arch && ((arch & PFT_ARCH_MAJOR) != PFT_ARCH_MAJOR))
return true;
else
return false;
}
static void __etm_enable(void *info)
{
int i;
uint32_t etmcr;
struct etm_drvdata *drvdata = info;
ETM_UNLOCK(drvdata);
/*
* Vote for ETM power/clock enable. ETMPDCR is only accessible via
* memory mapped interface and so use it first to enable power/clock
* to allow subsequent cp14 accesses.
*/
etm_set_pwrup(drvdata);
/*
* Clear power down bit since when this bit is set writes to
* certain registers might be ignored. This is also a pre-requisite
* for trace enable.
*/
etm_clr_pwrdwn(drvdata);
etm_clr_pwrup(drvdata);
etm_set_prog(drvdata);
etmcr = etm_readl(drvdata, ETMCR);
etmcr &= (BIT(10) | BIT(0));
etm_writel(drvdata, drvdata->ctrl | etmcr, ETMCR);
etm_writel(drvdata, drvdata->trigger_event, ETMTRIGGER);
etm_writel(drvdata, drvdata->startstop_ctrl, ETMTSSCR);
if (etm_version_gte(drvdata->arch, ETM_ARCH_V1_2))
etm_writel(drvdata, drvdata->enable_ctrl2, ETMTECR2);
etm_writel(drvdata, drvdata->enable_event, ETMTEEVR);
etm_writel(drvdata, drvdata->enable_ctrl1, ETMTECR1);
etm_writel(drvdata, drvdata->fifofull_level, ETMFFLR);
if (drvdata->data_trace_support) {
etm_writel(drvdata, drvdata->viewdata_event, ETMVDEVR);
etm_writel(drvdata, drvdata->viewdata_ctrl1, ETMVDCR1);
etm_writel(drvdata, drvdata->viewdata_ctrl3, ETMVDCR3);
}
for (i = 0; i < drvdata->nr_addr_cmp; i++) {
etm_writel(drvdata, drvdata->addr_val[i], ETMACVRn(i));
etm_writel(drvdata, drvdata->addr_acctype[i], ETMACTRn(i));
}
for (i = 0; i < drvdata->nr_data_cmp; i++) {
etm_writel(drvdata, drvdata->data_val[i], ETMDCVRn(i));
etm_writel(drvdata, drvdata->data_mask[i], ETMDCMRn(i));
}
for (i = 0; i < drvdata->nr_cntr; i++) {
etm_writel(drvdata, drvdata->cntr_rld_val[i], ETMCNTRLDVRn(i));
etm_writel(drvdata, drvdata->cntr_event[i], ETMCNTENRn(i));
etm_writel(drvdata, drvdata->cntr_rld_event[i],
ETMCNTRLDEVRn(i));
etm_writel(drvdata, drvdata->cntr_val[i], ETMCNTVRn(i));
}
etm_writel(drvdata, drvdata->seq_12_event, ETMSQ12EVR);
etm_writel(drvdata, drvdata->seq_21_event, ETMSQ21EVR);
etm_writel(drvdata, drvdata->seq_23_event, ETMSQ23EVR);
etm_writel(drvdata, drvdata->seq_31_event, ETMSQ31EVR);
etm_writel(drvdata, drvdata->seq_32_event, ETMSQ32EVR);
etm_writel(drvdata, drvdata->seq_13_event, ETMSQ13EVR);
etm_writel(drvdata, drvdata->seq_curr_state, ETMSQR);
for (i = 0; i < drvdata->nr_ext_out; i++)
etm_writel(drvdata, 0x0000406F, ETMEXTOUTEVRn(i));
for (i = 0; i < drvdata->nr_ctxid_cmp; i++)
etm_writel(drvdata, drvdata->ctxid_val[i], ETMCIDCVRn(i));
etm_writel(drvdata, drvdata->ctxid_mask, ETMCIDCMR);
etm_writel(drvdata, drvdata->sync_freq, ETMSYNCFR);
etm_writel(drvdata, 0x00000000, ETMEXTINSELR);
etm_writel(drvdata, drvdata->timestamp_event, ETMTSEVR);
etm_writel(drvdata, 0x00000000, ETMAUXCR);
etm_writel(drvdata, drvdata->cpu + 1, ETMTRACEIDR);
etm_writel(drvdata, 0x00000000, ETMVMIDCVR);
etm_clr_prog(drvdata);
ETM_LOCK(drvdata);
dev_dbg(drvdata->dev, "cpu: %d enable smp call done\n", drvdata->cpu);
}
static int etm_enable(struct coresight_device *csdev)
{
struct etm_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
int ret;
wake_lock(&drvdata->wake_lock);
ret = clk_prepare_enable(drvdata->clk);
if (ret)
goto err_clk;
spin_lock(&drvdata->spinlock);
/*
* Executing __etm_enable on the cpu whose ETM is being enabled
* ensures that register writes occur when cpu is powered.
*/
ret = smp_call_function_single(drvdata->cpu, __etm_enable, drvdata, 1);
if (ret)
goto err;
drvdata->enable = true;
drvdata->sticky_enable = true;
spin_unlock(&drvdata->spinlock);
wake_unlock(&drvdata->wake_lock);
dev_info(drvdata->dev, "ETM tracing enabled\n");
return 0;
err:
spin_unlock(&drvdata->spinlock);
clk_disable_unprepare(drvdata->clk);
err_clk:
wake_unlock(&drvdata->wake_lock);
return ret;
}
static void __etm_disable(void *info)
{
struct etm_drvdata *drvdata = info;
ETM_UNLOCK(drvdata);
etm_set_prog(drvdata);
/* program trace enable to low by using always false event */
etm_writel(drvdata, 0x6F | BIT(14), ETMTEEVR);
if (!drvdata->pcsave_enable)
etm_set_pwrdwn(drvdata);
ETM_LOCK(drvdata);
dev_dbg(drvdata->dev, "cpu: %d disable smp call done\n", drvdata->cpu);
}
static void etm_disable(struct coresight_device *csdev)
{
struct etm_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
wake_lock(&drvdata->wake_lock);
/*
* Taking hotplug lock here protects from clocks getting disabled
* with tracing being left on (crash scenario) if user disable occurs
* after cpu online mask indicates the cpu is offline but before the
* DYING hotplug callback is serviced by the ETM driver.
*/
get_online_cpus();
spin_lock(&drvdata->spinlock);
/*
* Executing __etm_disable on the cpu whose ETM is being disabled
* ensures that register writes occur when cpu is powered.
*/
smp_call_function_single(drvdata->cpu, __etm_disable, drvdata, 1);
drvdata->enable = false;
spin_unlock(&drvdata->spinlock);
put_online_cpus();
clk_disable_unprepare(drvdata->clk);
wake_unlock(&drvdata->wake_lock);
dev_info(drvdata->dev, "ETM tracing disabled\n");
}
static const struct coresight_ops_source etm_source_ops = {
.enable = etm_enable,
.disable = etm_disable,
};
static const struct coresight_ops etm_cs_ops = {
.source_ops = &etm_source_ops,
};
static ssize_t etm_show_nr_addr_cmp(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
unsigned long val = drvdata->nr_addr_cmp;
return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}
static DEVICE_ATTR(nr_addr_cmp, S_IRUGO, etm_show_nr_addr_cmp, NULL);
static ssize_t etm_show_nr_cntr(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
unsigned long val = drvdata->nr_cntr;
return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}
static DEVICE_ATTR(nr_cntr, S_IRUGO, etm_show_nr_cntr, NULL);
static ssize_t etm_show_nr_ctxid_cmp(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
unsigned long val = drvdata->nr_ctxid_cmp;
return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}
static DEVICE_ATTR(nr_ctxid_cmp, S_IRUGO, etm_show_nr_ctxid_cmp, NULL);
static ssize_t etm_show_reset(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
unsigned long val = drvdata->reset;
return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}
/* Reset to trace everything i.e. exclude nothing. */
static ssize_t etm_store_reset(struct device *dev,
struct device_attribute *attr, const char *buf,
size_t size)
{
struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
int i;
unsigned long val;
if (sscanf(buf, "%lx", &val) != 1)
return -EINVAL;
spin_lock(&drvdata->spinlock);
if (val) {
drvdata->mode = ETM_MODE_EXCLUDE;
drvdata->ctrl = 0x0;
if (etm_version_gte(drvdata->arch, ETM_ARCH_V1_0))
drvdata->ctrl |= BIT(11);
if (cpu_is_krait_v1()) {
drvdata->mode |= ETM_MODE_CYCACC;
drvdata->ctrl |= BIT(12);
}
drvdata->trigger_event = 0x406F;
drvdata->startstop_ctrl = 0x0;
if (etm_version_gte(drvdata->arch, ETM_ARCH_V1_2))
drvdata->enable_ctrl2 = 0x0;
drvdata->enable_event = 0x6F;
drvdata->enable_ctrl1 = 0x1000000;
drvdata->fifofull_level = 0x28;
if (drvdata->data_trace_support) {
drvdata->mode |= (ETM_MODE_DATA_TRACE_VAL |
ETM_MODE_DATA_TRACE_ADDR);
drvdata->ctrl |= BIT(2) | BIT(3);
drvdata->viewdata_event = 0x6F;
drvdata->viewdata_ctrl1 = 0x0;
drvdata->viewdata_ctrl3 = 0x10000;
}
drvdata->addr_idx = 0x0;
for (i = 0; i < drvdata->nr_addr_cmp; i++) {
drvdata->addr_val[i] = 0x0;
drvdata->addr_acctype[i] = 0x0;
drvdata->addr_type[i] = ETM_ADDR_TYPE_NONE;
}
for (i = 0; i < drvdata->nr_data_cmp; i++) {
drvdata->data_val[i] = 0;
drvdata->data_mask[i] = ~(0);
}
drvdata->cntr_idx = 0x0;
for (i = 0; i < drvdata->nr_cntr; i++) {
drvdata->cntr_rld_val[i] = 0x0;
drvdata->cntr_event[i] = 0x406F;
drvdata->cntr_rld_event[i] = 0x406F;
drvdata->cntr_val[i] = 0x0;
}
drvdata->seq_12_event = 0x406F;
drvdata->seq_21_event = 0x406F;
drvdata->seq_23_event = 0x406F;
drvdata->seq_31_event = 0x406F;
drvdata->seq_32_event = 0x406F;
drvdata->seq_13_event = 0x406F;
drvdata->seq_curr_state = 0x0;
drvdata->ctxid_idx = 0x0;
for (i = 0; i < drvdata->nr_ctxid_cmp; i++)
drvdata->ctxid_val[i] = 0x0;
drvdata->ctxid_mask = 0x0;
/* Bits[7:0] of ETMSYNCFR are reserved on Krait pass3 onwards */
if (cpu_is_krait() && !cpu_is_krait_v1() && !cpu_is_krait_v2())
drvdata->sync_freq = 0x100;
else
drvdata->sync_freq = 0x80;
drvdata->timestamp_event = 0x406F;
}
spin_unlock(&drvdata->spinlock);
return size;
}
static DEVICE_ATTR(reset, S_IRUGO | S_IWUSR, etm_show_reset, etm_store_reset);
static ssize_t etm_show_mode(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
unsigned long val = drvdata->mode;
return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}
static ssize_t etm_store_mode(struct device *dev, struct device_attribute *attr,
const char *buf, size_t size)
{
struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
unsigned long val;
if (sscanf(buf, "%lx", &val) != 1)
return -EINVAL;
spin_lock(&drvdata->spinlock);
drvdata->mode = val & ETM_MODE_ALL;
if (drvdata->mode & ETM_MODE_EXCLUDE)
drvdata->enable_ctrl1 |= BIT(24);
else
drvdata->enable_ctrl1 &= ~BIT(24);
if (drvdata->mode & ETM_MODE_CYCACC)
drvdata->ctrl |= BIT(12);
else
drvdata->ctrl &= ~BIT(12);
if (drvdata->mode & ETM_MODE_STALL)
drvdata->ctrl |= BIT(7);
else
drvdata->ctrl &= ~BIT(7);
if (drvdata->mode & ETM_MODE_TIMESTAMP)
drvdata->ctrl |= BIT(28);
else
drvdata->ctrl &= ~BIT(28);
if (drvdata->mode & ETM_MODE_CTXID)
drvdata->ctrl |= (BIT(14) | BIT(15));
else
drvdata->ctrl &= ~(BIT(14) | BIT(15));
if (etm_version_gte(drvdata->arch, ETM_ARCH_V1_0)) {
if (drvdata->mode & ETM_MODE_DATA_TRACE_VAL)
drvdata->ctrl |= BIT(2);
else
drvdata->ctrl &= ~(BIT(2));
if (drvdata->mode & ETM_MODE_DATA_TRACE_ADDR)
drvdata->ctrl |= (BIT(3));
else
drvdata->ctrl &= ~(BIT(3));
}
spin_unlock(&drvdata->spinlock);
return size;
}
static DEVICE_ATTR(mode, S_IRUGO | S_IWUSR, etm_show_mode, etm_store_mode);
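/*
 * Worked example for the "mode" attribute above: writing 0x3 sets
 * ETM_MODE_EXCLUDE | ETM_MODE_CYCACC, which the store handler translates
 * into enable_ctrl1 BIT(24) (exclude filtering) and ctrl BIT(12)
 * (cycle-accurate tracing). The new values only reach the hardware the next
 * time the ETM is programmed via __etm_enable().
 */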
static ssize_t etm_show_trigger_event(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
unsigned long val = drvdata->trigger_event;
return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}
static ssize_t etm_store_trigger_event(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t size)
{
struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
unsigned long val;
if (sscanf(buf, "%lx", &val) != 1)
return -EINVAL;
drvdata->trigger_event = val & ETM_EVENT_MASK;
return size;
}
static DEVICE_ATTR(trigger_event, S_IRUGO | S_IWUSR, etm_show_trigger_event,
etm_store_trigger_event);
static ssize_t etm_show_enable_event(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
unsigned long val = drvdata->enable_event;
return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}
static ssize_t etm_store_enable_event(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t size)
{
struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
unsigned long val;
if (sscanf(buf, "%lx", &val) != 1)
return -EINVAL;
drvdata->enable_event = val & ETM_EVENT_MASK;
return size;
}
static DEVICE_ATTR(enable_event, S_IRUGO | S_IWUSR, etm_show_enable_event,
etm_store_enable_event);
static ssize_t etm_show_fifofull_level(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
unsigned long val = drvdata->fifofull_level;
return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}
static ssize_t etm_store_fifofull_level(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t size)
{
struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
unsigned long val;
if (sscanf(buf, "%lx", &val) != 1)
return -EINVAL;
drvdata->fifofull_level = val;
return size;
}
static DEVICE_ATTR(fifofull_level, S_IRUGO | S_IWUSR, etm_show_fifofull_level,
etm_store_fifofull_level);
static ssize_t etm_show_addr_idx(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
unsigned long val = drvdata->addr_idx;
return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}
static ssize_t etm_store_addr_idx(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t size)
{
struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
unsigned long val;
if (sscanf(buf, "%lx", &val) != 1)
return -EINVAL;
if (val >= drvdata->nr_addr_cmp)
return -EINVAL;
/*
* Use spinlock to ensure index doesn't change while it gets
* dereferenced multiple times within a spinlock block elsewhere.
*/
spin_lock(&drvdata->spinlock);
drvdata->addr_idx = val;
spin_unlock(&drvdata->spinlock);
return size;
}
static DEVICE_ATTR(addr_idx, S_IRUGO | S_IWUSR, etm_show_addr_idx,
etm_store_addr_idx);
static ssize_t etm_show_addr_single(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
unsigned long val;
uint8_t idx;
spin_lock(&drvdata->spinlock);
idx = drvdata->addr_idx;
if (!(drvdata->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
drvdata->addr_type[idx] == ETM_ADDR_TYPE_SINGLE)) {
spin_unlock(&drvdata->spinlock);
return -EPERM;
}
val = drvdata->addr_val[idx];
spin_unlock(&drvdata->spinlock);
return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}
static ssize_t etm_store_addr_single(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t size)
{
struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
unsigned long val;
uint8_t idx;
if (sscanf(buf, "%lx", &val) != 1)
return -EINVAL;
spin_lock(&drvdata->spinlock);
idx = drvdata->addr_idx;
if (!(drvdata->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
drvdata->addr_type[idx] == ETM_ADDR_TYPE_SINGLE)) {
spin_unlock(&drvdata->spinlock);
return -EPERM;
}
drvdata->addr_val[idx] = val;
drvdata->addr_type[idx] = ETM_ADDR_TYPE_SINGLE;
if (etm_version_gte(drvdata->arch, ETM_ARCH_V1_2))
drvdata->enable_ctrl2 |= (1 << idx);
spin_unlock(&drvdata->spinlock);
return size;
}
static DEVICE_ATTR(addr_single, S_IRUGO | S_IWUSR, etm_show_addr_single,
etm_store_addr_single);
static ssize_t etm_show_addr_range(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
unsigned long val1, val2;
uint8_t idx;
spin_lock(&drvdata->spinlock);
idx = drvdata->addr_idx;
if (idx % 2 != 0) {
spin_unlock(&drvdata->spinlock);
return -EPERM;
}
if (!((drvdata->addr_type[idx] == ETM_ADDR_TYPE_NONE &&
drvdata->addr_type[idx + 1] == ETM_ADDR_TYPE_NONE) ||
(drvdata->addr_type[idx] == ETM_ADDR_TYPE_RANGE &&
drvdata->addr_type[idx + 1] == ETM_ADDR_TYPE_RANGE))) {
spin_unlock(&drvdata->spinlock);
return -EPERM;
}
val1 = drvdata->addr_val[idx];
val2 = drvdata->addr_val[idx + 1];
spin_unlock(&drvdata->spinlock);
return scnprintf(buf, PAGE_SIZE, "%#lx %#lx\n", val1, val2);
}
static ssize_t etm_store_addr_range(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t size)
{
struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
unsigned long val1, val2;
uint8_t idx;
if (sscanf(buf, "%lx %lx", &val1, &val2) != 2)
return -EINVAL;
/* lower address comparator cannot have a higher address value */
if (val1 > val2)
return -EINVAL;
spin_lock(&drvdata->spinlock);
idx = drvdata->addr_idx;
if (idx % 2 != 0) {
spin_unlock(&drvdata->spinlock);
return -EPERM;
}
if (!((drvdata->addr_type[idx] == ETM_ADDR_TYPE_NONE &&
drvdata->addr_type[idx + 1] == ETM_ADDR_TYPE_NONE) ||
(drvdata->addr_type[idx] == ETM_ADDR_TYPE_RANGE &&
drvdata->addr_type[idx + 1] == ETM_ADDR_TYPE_RANGE))) {
spin_unlock(&drvdata->spinlock);
return -EPERM;
}
drvdata->addr_val[idx] = val1;
drvdata->addr_type[idx] = ETM_ADDR_TYPE_RANGE;
drvdata->addr_val[idx + 1] = val2;
drvdata->addr_type[idx + 1] = ETM_ADDR_TYPE_RANGE;
drvdata->enable_ctrl1 |= (1 << (idx/2));
spin_unlock(&drvdata->spinlock);
return size;
}
static DEVICE_ATTR(addr_range, S_IRUGO | S_IWUSR, etm_show_addr_range,
etm_store_addr_range);
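/*
 * Usage sketch, assuming the standard coresight sysfs layout (the exact
 * device directory name depends on what coresight_register() creates): an
 * address-range filter is set up by selecting an even comparator pair and
 * then writing two hex values, e.g.
 *   echo 0 > /sys/bus/coresight/devices/<etm-dev>/addr_idx
 *   echo 0xc0008000 0xc0100000 > /sys/bus/coresight/devices/<etm-dev>/addr_range
 * The store handler above rejects odd indices and ranges whose first
 * address is higher than the second.
 */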
static ssize_t etm_show_addr_start(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
unsigned long val;
uint8_t idx;
spin_lock(&drvdata->spinlock);
idx = drvdata->addr_idx;
if (!(drvdata->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
drvdata->addr_type[idx] == ETM_ADDR_TYPE_START)) {
spin_unlock(&drvdata->spinlock);
return -EPERM;
}
val = drvdata->addr_val[idx];
spin_unlock(&drvdata->spinlock);
return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}
static ssize_t etm_store_addr_start(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t size)
{
struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
unsigned long val;
uint8_t idx;
if (sscanf(buf, "%lx", &val) != 1)
return -EINVAL;
spin_lock(&drvdata->spinlock);
idx = drvdata->addr_idx;
if (!(drvdata->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
drvdata->addr_type[idx] == ETM_ADDR_TYPE_START)) {
spin_unlock(&drvdata->spinlock);
return -EPERM;
}
drvdata->addr_val[idx] = val;
drvdata->addr_type[idx] = ETM_ADDR_TYPE_START;
drvdata->startstop_ctrl |= (1 << idx);
drvdata->enable_ctrl1 |= BIT(25);
spin_unlock(&drvdata->spinlock);
return size;
}
static DEVICE_ATTR(addr_start, S_IRUGO | S_IWUSR, etm_show_addr_start,
etm_store_addr_start);
static ssize_t etm_show_addr_stop(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
unsigned long val;
uint8_t idx;
spin_lock(&drvdata->spinlock);
idx = drvdata->addr_idx;
if (!(drvdata->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
drvdata->addr_type[idx] == ETM_ADDR_TYPE_STOP)) {
spin_unlock(&drvdata->spinlock);
return -EPERM;
}
val = drvdata->addr_val[idx];
spin_unlock(&drvdata->spinlock);
return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}
static ssize_t etm_store_addr_stop(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t size)
{
struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
unsigned long val;
uint8_t idx;
if (sscanf(buf, "%lx", &val) != 1)
return -EINVAL;
spin_lock(&drvdata->spinlock);
idx = drvdata->addr_idx;
if (!(drvdata->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
drvdata->addr_type[idx] == ETM_ADDR_TYPE_STOP)) {
spin_unlock(&drvdata->spinlock);
return -EPERM;
}
drvdata->addr_val[idx] = val;
drvdata->addr_type[idx] = ETM_ADDR_TYPE_STOP;
drvdata->startstop_ctrl |= (1 << (idx + 16));
drvdata->enable_ctrl1 |= BIT(25);
spin_unlock(&drvdata->spinlock);
return size;
}
static DEVICE_ATTR(addr_stop, S_IRUGO | S_IWUSR, etm_show_addr_stop,
etm_store_addr_stop);
static ssize_t etm_show_addr_acctype(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
unsigned long val;
spin_lock(&drvdata->spinlock);
val = drvdata->addr_acctype[drvdata->addr_idx];
spin_unlock(&drvdata->spinlock);
return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}
static ssize_t etm_store_addr_acctype(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t size)
{
struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
unsigned long val;
if (sscanf(buf, "%lx", &val) != 1)
return -EINVAL;
spin_lock(&drvdata->spinlock);
drvdata->addr_acctype[drvdata->addr_idx] = val;
spin_unlock(&drvdata->spinlock);
return size;
}
static DEVICE_ATTR(addr_acctype, S_IRUGO | S_IWUSR, etm_show_addr_acctype,
etm_store_addr_acctype);
static ssize_t etm_show_data_val(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
unsigned long val;
uint8_t idx;
spin_lock(&drvdata->spinlock);
idx = drvdata->addr_idx;
if (idx % 2 != 0) {
spin_unlock(&drvdata->spinlock);
return -EPERM;
}
idx = idx >> 1;
if (idx >= drvdata->nr_data_cmp) {
spin_unlock(&drvdata->spinlock);
return -EPERM;
}
val = drvdata->data_val[idx];
spin_unlock(&drvdata->spinlock);
return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}
static ssize_t etm_store_data_val(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t size)
{
struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
unsigned long val;
uint8_t idx, data_idx;
if (sscanf(buf, "%lx", &val) != 1)
return -EINVAL;
spin_lock(&drvdata->spinlock);
idx = drvdata->addr_idx;
/* Adjust index to use the correct data comparator */
data_idx = idx >> 1;
/* Only idx = 0, 2, 4, 6... are valid */
if (idx % 2 != 0) {
spin_unlock(&drvdata->spinlock);
return -EPERM;
}
if (data_idx >= drvdata->nr_data_cmp) {
spin_unlock(&drvdata->spinlock);
return -EPERM;
}
if (!BVAL(drvdata->addr_acctype[idx], ETM_DATACMP_ENABLE)) {
spin_unlock(&drvdata->spinlock);
return -EPERM;
}
if (drvdata->addr_type[idx] == ETM_ADDR_TYPE_RANGE) {
if (!BVAL(drvdata->addr_acctype[idx + 1], ETM_DATACMP_ENABLE)) {
spin_unlock(&drvdata->spinlock);
return -EPERM;
}
}
drvdata->data_val[data_idx] = val;
spin_unlock(&drvdata->spinlock);
return size;
}
static DEVICE_ATTR(data_val, S_IRUGO | S_IWUSR, etm_show_data_val,
etm_store_data_val);
static ssize_t etm_show_data_mask(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
unsigned long mask;
uint8_t idx;
spin_lock(&drvdata->spinlock);
idx = drvdata->addr_idx;
if (idx % 2 != 0) {
spin_unlock(&drvdata->spinlock);
return -EPERM;
}
idx = idx >> 1;
if (idx >= drvdata->nr_data_cmp) {
spin_unlock(&drvdata->spinlock);
return -EPERM;
}
mask = drvdata->data_mask[idx];
spin_unlock(&drvdata->spinlock);
return scnprintf(buf, PAGE_SIZE, "%#lx\n", mask);
}
static ssize_t etm_store_data_mask(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t size)
{
struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
unsigned long mask;
uint8_t idx, data_idx;
if (sscanf(buf, "%lx", &mask) != 1)
return -EINVAL;
spin_lock(&drvdata->spinlock);
idx = drvdata->addr_idx;
/* Adjust index to use the correct data comparator */
data_idx = idx >> 1;
/* Only idx = 0, 2, 4, 6... are valid */
if (idx % 2 != 0) {
spin_unlock(&drvdata->spinlock);
return -EPERM;
}
if (data_idx >= drvdata->nr_data_cmp) {
spin_unlock(&drvdata->spinlock);
return -EPERM;
}
if (!BVAL(drvdata->addr_acctype[idx], ETM_DATACMP_ENABLE)) {
spin_unlock(&drvdata->spinlock);
return -EPERM;
}
if (drvdata->addr_type[idx] == ETM_ADDR_TYPE_RANGE) {
if (!BVAL(drvdata->addr_acctype[idx + 1], ETM_DATACMP_ENABLE)) {
spin_unlock(&drvdata->spinlock);
return -EPERM;
}
}
drvdata->data_mask[data_idx] = mask;
spin_unlock(&drvdata->spinlock);
return size;
}
static DEVICE_ATTR(data_mask, S_IRUGO | S_IWUSR, etm_show_data_mask,
etm_store_data_mask);
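/*
 * Indexing note with an example, derived from the handlers above: data
 * value/mask comparators are shared with the even-numbered address
 * comparators, so with addr_idx == 4 the handlers operate on data
 * comparator 4 >> 1 == 2. The store handlers additionally require the
 * data-compare enable bit (ETM_DATACMP_ENABLE) to be set in the
 * corresponding addr_acctype first.
 */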
static ssize_t etm_show_cntr_idx(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
unsigned long val = drvdata->cntr_idx;
return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}
static ssize_t etm_store_cntr_idx(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t size)
{
struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
unsigned long val;
if (sscanf(buf, "%lx", &val) != 1)
return -EINVAL;
if (val >= drvdata->nr_cntr)
return -EINVAL;
/*
* Use spinlock to ensure index doesn't change while it gets
* dereferenced multiple times within a spinlock block elsewhere.
*/
spin_lock(&drvdata->spinlock);
drvdata->cntr_idx = val;
spin_unlock(&drvdata->spinlock);
return size;
}
static DEVICE_ATTR(cntr_idx, S_IRUGO | S_IWUSR, etm_show_cntr_idx,
etm_store_cntr_idx);
static ssize_t etm_show_cntr_rld_val(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
unsigned long val;
spin_lock(&drvdata->spinlock);
val = drvdata->cntr_rld_val[drvdata->cntr_idx];
spin_unlock(&drvdata->spinlock);
return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}
static ssize_t etm_store_cntr_rld_val(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t size)
{
struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
unsigned long val;
if (sscanf(buf, "%lx", &val) != 1)
return -EINVAL;
spin_lock(&drvdata->spinlock);
drvdata->cntr_rld_val[drvdata->cntr_idx] = val;
spin_unlock(&drvdata->spinlock);
return size;
}
static DEVICE_ATTR(cntr_rld_val, S_IRUGO | S_IWUSR, etm_show_cntr_rld_val,
etm_store_cntr_rld_val);
static ssize_t etm_show_cntr_event(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
unsigned long val;
spin_lock(&drvdata->spinlock);
val = drvdata->cntr_event[drvdata->cntr_idx];
spin_unlock(&drvdata->spinlock);
return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}
static ssize_t etm_store_cntr_event(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t size)
{
struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
unsigned long val;
if (sscanf(buf, "%lx", &val) != 1)
return -EINVAL;
spin_lock(&drvdata->spinlock);
drvdata->cntr_event[drvdata->cntr_idx] = val & ETM_EVENT_MASK;
spin_unlock(&drvdata->spinlock);
return size;
}
static DEVICE_ATTR(cntr_event, S_IRUGO | S_IWUSR, etm_show_cntr_event,
etm_store_cntr_event);
static ssize_t etm_show_cntr_rld_event(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
unsigned long val;
spin_lock(&drvdata->spinlock);
val = drvdata->cntr_rld_event[drvdata->cntr_idx];
spin_unlock(&drvdata->spinlock);
return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}
static ssize_t etm_store_cntr_rld_event(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t size)
{
struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
unsigned long val;
if (sscanf(buf, "%lx", &val) != 1)
return -EINVAL;
spin_lock(&drvdata->spinlock);
drvdata->cntr_rld_event[drvdata->cntr_idx] = val & ETM_EVENT_MASK;
spin_unlock(&drvdata->spinlock);
return size;
}
static DEVICE_ATTR(cntr_rld_event, S_IRUGO | S_IWUSR, etm_show_cntr_rld_event,
etm_store_cntr_rld_event);
static ssize_t etm_show_cntr_val(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
unsigned long val;
spin_lock(&drvdata->spinlock);
val = drvdata->cntr_val[drvdata->cntr_idx];
spin_unlock(&drvdata->spinlock);
return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}
static ssize_t etm_store_cntr_val(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t size)
{
struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
unsigned long val;
if (sscanf(buf, "%lx", &val) != 1)
return -EINVAL;
spin_lock(&drvdata->spinlock);
drvdata->cntr_val[drvdata->cntr_idx] = val;
spin_unlock(&drvdata->spinlock);
return size;
}
static DEVICE_ATTR(cntr_val, S_IRUGO | S_IWUSR, etm_show_cntr_val,
etm_store_cntr_val);
static ssize_t etm_show_seq_12_event(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
unsigned long val = drvdata->seq_12_event;
return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}
static ssize_t etm_store_seq_12_event(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t size)
{
struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
unsigned long val;
if (sscanf(buf, "%lx", &val) != 1)
return -EINVAL;
drvdata->seq_12_event = val & ETM_EVENT_MASK;
return size;
}
static DEVICE_ATTR(seq_12_event, S_IRUGO | S_IWUSR, etm_show_seq_12_event,
etm_store_seq_12_event);
static ssize_t etm_show_seq_21_event(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
unsigned long val = drvdata->seq_21_event;
return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}
static ssize_t etm_store_seq_21_event(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t size)
{
struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
unsigned long val;
if (sscanf(buf, "%lx", &val) != 1)
return -EINVAL;
drvdata->seq_21_event = val & ETM_EVENT_MASK;
return size;
}
static DEVICE_ATTR(seq_21_event, S_IRUGO | S_IWUSR, etm_show_seq_21_event,
etm_store_seq_21_event);
static ssize_t etm_show_seq_23_event(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
unsigned long val = drvdata->seq_23_event;
return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}
static ssize_t etm_store_seq_23_event(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t size)
{
struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
unsigned long val;
if (sscanf(buf, "%lx", &val) != 1)
return -EINVAL;
drvdata->seq_23_event = val & ETM_EVENT_MASK;
return size;
}
static DEVICE_ATTR(seq_23_event, S_IRUGO | S_IWUSR, etm_show_seq_23_event,
etm_store_seq_23_event);
static ssize_t etm_show_seq_31_event(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
unsigned long val = drvdata->seq_31_event;
return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}
static ssize_t etm_store_seq_31_event(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t size)
{
struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
unsigned long val;
if (sscanf(buf, "%lx", &val) != 1)
return -EINVAL;
drvdata->seq_31_event = val & ETM_EVENT_MASK;
return size;
}
static DEVICE_ATTR(seq_31_event, S_IRUGO | S_IWUSR, etm_show_seq_31_event,
etm_store_seq_31_event);
static ssize_t etm_show_seq_32_event(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
unsigned long val = drvdata->seq_32_event;
return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}
static ssize_t etm_store_seq_32_event(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t size)
{
struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
unsigned long val;
if (sscanf(buf, "%lx", &val) != 1)
return -EINVAL;
drvdata->seq_32_event = val & ETM_EVENT_MASK;
return size;
}
static DEVICE_ATTR(seq_32_event, S_IRUGO | S_IWUSR, etm_show_seq_32_event,
etm_store_seq_32_event);
static ssize_t etm_show_seq_13_event(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
unsigned long val = drvdata->seq_13_event;
return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}
static ssize_t etm_store_seq_13_event(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t size)
{
struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
unsigned long val;
if (sscanf(buf, "%lx", &val) != 1)
return -EINVAL;
drvdata->seq_13_event = val & ETM_EVENT_MASK;
return size;
}
static DEVICE_ATTR(seq_13_event, S_IRUGO | S_IWUSR, etm_show_seq_13_event,
etm_store_seq_13_event);
static ssize_t etm_show_seq_curr_state(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
unsigned long val = drvdata->seq_curr_state;
return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}
static ssize_t etm_store_seq_curr_state(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t size)
{
struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
unsigned long val;
if (sscanf(buf, "%lx", &val) != 1)
return -EINVAL;
if (val > ETM_SEQ_STATE_MAX_VAL)
return -EINVAL;
drvdata->seq_curr_state = val;
return size;
}
static DEVICE_ATTR(seq_curr_state, S_IRUGO | S_IWUSR, etm_show_seq_curr_state,
etm_store_seq_curr_state);
static ssize_t etm_show_ctxid_idx(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
unsigned long val = drvdata->ctxid_idx;
return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}
static ssize_t etm_store_ctxid_idx(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t size)
{
struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
unsigned long val;
if (sscanf(buf, "%lx", &val) != 1)
return -EINVAL;
if (val >= drvdata->nr_ctxid_cmp)
return -EINVAL;
/*
* Use spinlock to ensure index doesn't change while it gets
* dereferenced multiple times within a spinlock block elsewhere.
*/
spin_lock(&drvdata->spinlock);
drvdata->ctxid_idx = val;
spin_unlock(&drvdata->spinlock);
return size;
}
static DEVICE_ATTR(ctxid_idx, S_IRUGO | S_IWUSR, etm_show_ctxid_idx,
etm_store_ctxid_idx);
static ssize_t etm_show_ctxid_val(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
unsigned long val;
spin_lock(&drvdata->spinlock);
val = drvdata->ctxid_val[drvdata->ctxid_idx];
spin_unlock(&drvdata->spinlock);
return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}
static ssize_t etm_store_ctxid_val(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t size)
{
struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
unsigned long val;
if (sscanf(buf, "%lx", &val) != 1)
return -EINVAL;
spin_lock(&drvdata->spinlock);
drvdata->ctxid_val[drvdata->ctxid_idx] = val;
spin_unlock(&drvdata->spinlock);
return size;
}
static DEVICE_ATTR(ctxid_val, S_IRUGO | S_IWUSR, etm_show_ctxid_val,
etm_store_ctxid_val);
static ssize_t etm_show_ctxid_mask(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
unsigned long val = drvdata->ctxid_mask;
return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}
static ssize_t etm_store_ctxid_mask(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t size)
{
struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
unsigned long val;
if (sscanf(buf, "%lx", &val) != 1)
return -EINVAL;
drvdata->ctxid_mask = val;
return size;
}
static DEVICE_ATTR(ctxid_mask, S_IRUGO | S_IWUSR, etm_show_ctxid_mask,
etm_store_ctxid_mask);
static ssize_t etm_show_sync_freq(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
unsigned long val = drvdata->sync_freq;
return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}
static ssize_t etm_store_sync_freq(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t size)
{
struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
unsigned long val;
if (sscanf(buf, "%lx", &val) != 1)
return -EINVAL;
drvdata->sync_freq = val & ETM_SYNC_MASK;
return size;
}
static DEVICE_ATTR(sync_freq, S_IRUGO | S_IWUSR, etm_show_sync_freq,
etm_store_sync_freq);
static ssize_t etm_show_timestamp_event(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
unsigned long val = drvdata->timestamp_event;
return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}
static ssize_t etm_store_timestamp_event(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t size)
{
struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
unsigned long val;
if (sscanf(buf, "%lx", &val) != 1)
return -EINVAL;
drvdata->timestamp_event = val & ETM_EVENT_MASK;
return size;
}
static DEVICE_ATTR(timestamp_event, S_IRUGO | S_IWUSR, etm_show_timestamp_event,
etm_store_timestamp_event);
static ssize_t etm_show_pcsave(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
unsigned long val;
val = drvdata->pcsave_enable;
return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}
static int __etm_store_pcsave(struct etm_drvdata *drvdata, unsigned long val)
{
int ret = 0;
ret = clk_prepare_enable(drvdata->clk);
if (ret)
return ret;
spin_lock(&drvdata->spinlock);
if (val) {
if (drvdata->pcsave_enable)
goto out;
ret = smp_call_function_single(drvdata->cpu, etm_enable_pcsave,
drvdata, 1);
if (ret)
goto out;
drvdata->pcsave_enable = true;
drvdata->pcsave_sticky_enable = true;
dev_info(drvdata->dev, "PC save enabled\n");
} else {
if (!drvdata->pcsave_enable)
goto out;
ret = smp_call_function_single(drvdata->cpu, etm_disable_pcsave,
drvdata, 1);
if (ret)
goto out;
drvdata->pcsave_enable = false;
dev_info(drvdata->dev, "PC save disabled\n");
}
out:
spin_unlock(&drvdata->spinlock);
clk_disable_unprepare(drvdata->clk);
return ret;
}
static ssize_t etm_store_pcsave(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t size)
{
struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
unsigned long val;
int ret;
if (sscanf(buf, "%lx", &val) != 1)
return -EINVAL;
ret = __etm_store_pcsave(drvdata, val);
if (ret)
return ret;
return size;
}
static DEVICE_ATTR(pcsave, S_IRUGO | S_IWUSR, etm_show_pcsave,
etm_store_pcsave);
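/*
 * Usage sketch (the sysfs path is an assumption based on the registered
 * coresight device name): when "qcom,pc-save" is present in the device
 * tree, writing 1 to the per-ETM "pcsave" attribute keeps the ETM powered
 * so the program counter can be sampled by the save-restore code, and
 * writing 0 releases that vote unless tracing is currently enabled, e.g.
 *   echo 1 > /sys/bus/coresight/devices/<etm-dev>/pcsave
 */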
static struct attribute *etm_attrs[] = {
&dev_attr_nr_addr_cmp.attr,
&dev_attr_nr_cntr.attr,
&dev_attr_nr_ctxid_cmp.attr,
&dev_attr_reset.attr,
&dev_attr_mode.attr,
&dev_attr_trigger_event.attr,
&dev_attr_enable_event.attr,
&dev_attr_fifofull_level.attr,
&dev_attr_addr_idx.attr,
&dev_attr_addr_single.attr,
&dev_attr_addr_range.attr,
&dev_attr_addr_start.attr,
&dev_attr_addr_stop.attr,
&dev_attr_addr_acctype.attr,
&dev_attr_data_val.attr,
&dev_attr_data_mask.attr,
&dev_attr_cntr_idx.attr,
&dev_attr_cntr_rld_val.attr,
&dev_attr_cntr_event.attr,
&dev_attr_cntr_rld_event.attr,
&dev_attr_cntr_val.attr,
&dev_attr_seq_12_event.attr,
&dev_attr_seq_21_event.attr,
&dev_attr_seq_23_event.attr,
&dev_attr_seq_31_event.attr,
&dev_attr_seq_32_event.attr,
&dev_attr_seq_13_event.attr,
&dev_attr_seq_curr_state.attr,
&dev_attr_ctxid_idx.attr,
&dev_attr_ctxid_val.attr,
&dev_attr_ctxid_mask.attr,
&dev_attr_sync_freq.attr,
&dev_attr_timestamp_event.attr,
NULL,
};
static struct attribute_group etm_attr_grp = {
.attrs = etm_attrs,
};
static const struct attribute_group *etm_attr_grps[] = {
&etm_attr_grp,
NULL,
};
static int etm_cpu_callback(struct notifier_block *nfb, unsigned long action,
void *hcpu)
{
unsigned int cpu = (unsigned long)hcpu;
static bool clk_disable[NR_CPUS];
int ret;
if (!etmdrvdata[cpu])
goto out;
switch (action & (~CPU_TASKS_FROZEN)) {
case CPU_UP_PREPARE:
if (!etmdrvdata[cpu]->os_unlock) {
ret = clk_prepare_enable(etmdrvdata[cpu]->clk);
if (ret) {
dev_err(etmdrvdata[cpu]->dev,
"ETM clk enable during hotplug failed"
"for cpu: %d, ret: %d\n", cpu, ret);
return notifier_from_errno(ret);
}
clk_disable[cpu] = true;
}
break;
case CPU_STARTING:
spin_lock(&etmdrvdata[cpu]->spinlock);
if (!etmdrvdata[cpu]->os_unlock) {
etm_os_unlock(etmdrvdata[cpu]);
etmdrvdata[cpu]->os_unlock = true;
}
if (etmdrvdata[cpu]->enable && etmdrvdata[cpu]->round_robin)
__etm_enable(etmdrvdata[cpu]);
spin_unlock(&etmdrvdata[cpu]->spinlock);
break;
case CPU_ONLINE:
if (clk_disable[cpu]) {
clk_disable_unprepare(etmdrvdata[cpu]->clk);
clk_disable[cpu] = false;
}
if (etmdrvdata[cpu]->boot_enable &&
!etmdrvdata[cpu]->sticky_enable)
coresight_enable(etmdrvdata[cpu]->csdev);
if (etmdrvdata[cpu]->pcsave_boot_enable &&
!etmdrvdata[cpu]->pcsave_sticky_enable)
__etm_store_pcsave(etmdrvdata[cpu], 1);
break;
case CPU_UP_CANCELED:
if (clk_disable[cpu]) {
clk_disable_unprepare(etmdrvdata[cpu]->clk);
clk_disable[cpu] = false;
}
break;
case CPU_DYING:
spin_lock(&etmdrvdata[cpu]->spinlock);
if (etmdrvdata[cpu]->enable && etmdrvdata[cpu]->round_robin)
__etm_disable(etmdrvdata[cpu]);
spin_unlock(&etmdrvdata[cpu]->spinlock);
break;
}
out:
return NOTIFY_OK;
}
static struct notifier_block etm_cpu_notifier = {
.notifier_call = etm_cpu_callback,
};
static bool etm_arch_supported(uint8_t arch)
{
switch (arch) {
case PFT_ARCH_V1_1:
break;
case ETM_ARCH_V3_5:
break;
default:
return false;
}
return true;
}
static void etm_init_arch_data(void *info)
{
uint32_t etmidr;
uint32_t etmccr;
uint32_t etmcr;
struct etm_drvdata *drvdata = info;
ETM_UNLOCK(drvdata);
/*
* Vote for ETM power/clock enable. ETMPDCR is only accessible via
* memory mapped interface and so use it first to enable power/clock
* to allow subsequent cp14 accesses.
*/
etm_set_pwrup(drvdata);
/*
* Clear power down bit since when this bit is set writes to
* certain registers might be ignored.
*/
etm_clr_pwrdwn(drvdata);
etm_clr_pwrup(drvdata);
/* Set prog bit. It will be set from reset but this is included to
* ensure it is set
*/
etm_set_prog(drvdata);
/* find all capabilities */
etmidr = etm_readl(drvdata, ETMIDR);
drvdata->arch = BMVAL(etmidr, 4, 11);
etmccr = etm_readl(drvdata, ETMCCR);
drvdata->nr_addr_cmp = BMVAL(etmccr, 0, 3) * 2;
drvdata->nr_cntr = BMVAL(etmccr, 13, 15);
drvdata->nr_ext_inp = BMVAL(etmccr, 17, 19);
drvdata->nr_ext_out = BMVAL(etmccr, 20, 22);
drvdata->nr_ctxid_cmp = BMVAL(etmccr, 24, 25);
drvdata->nr_data_cmp = BMVAL(etmccr, 4, 7);
if (etm_version_gte(drvdata->arch, ETM_ARCH_V1_0)) {
etmcr = etm_readl(drvdata, ETMCR);
etmcr |= (BIT(2) | BIT(3));
etm_writel(drvdata, etmcr, ETMCR);
etmcr = etm_readl(drvdata, ETMCR);
if (BVAL(etmcr, 2) || BVAL(etmcr, 3))
drvdata->data_trace_support = true;
else
drvdata->data_trace_support = false;
} else
drvdata->data_trace_support = false;
etm_set_pwrdwn(drvdata);
ETM_LOCK(drvdata);
}
static void etm_copy_arch_data(struct etm_drvdata *drvdata)
{
drvdata->arch = etmdrvdata[0]->arch;
drvdata->nr_addr_cmp = etmdrvdata[0]->nr_addr_cmp;
drvdata->nr_cntr = etmdrvdata[0]->nr_cntr;
drvdata->nr_ext_inp = etmdrvdata[0]->nr_ext_inp;
drvdata->nr_ext_out = etmdrvdata[0]->nr_ext_out;
drvdata->nr_ctxid_cmp = etmdrvdata[0]->nr_ctxid_cmp;
drvdata->nr_data_cmp = etmdrvdata[0]->nr_data_cmp;
drvdata->data_trace_support = etmdrvdata[0]->data_trace_support;
}
static void etm_init_default_data(struct etm_drvdata *drvdata)
{
int i;
drvdata->trigger_event = 0x406F;
drvdata->enable_event = 0x6F;
drvdata->enable_ctrl1 = 0x1;
drvdata->fifofull_level = 0x28;
if (drvdata->nr_addr_cmp >= 2) {
drvdata->addr_val[0] = (uint32_t) _stext;
drvdata->addr_val[1] = (uint32_t) _etext;
drvdata->addr_type[0] = ETM_ADDR_TYPE_RANGE;
drvdata->addr_type[1] = ETM_ADDR_TYPE_RANGE;
if (etm_version_gte(drvdata->arch, ETM_ARCH_V1_0)) {
drvdata->addr_acctype[0] = 0x19;
drvdata->addr_acctype[1] = 0x19;
}
}
for (i = 0; i < drvdata->nr_cntr; i++) {
drvdata->cntr_event[i] = 0x406F;
drvdata->cntr_rld_event[i] = 0x406F;
}
drvdata->seq_12_event = 0x406F;
drvdata->seq_21_event = 0x406F;
drvdata->seq_23_event = 0x406F;
drvdata->seq_31_event = 0x406F;
drvdata->seq_32_event = 0x406F;
drvdata->seq_13_event = 0x406F;
/* Bits[7:0] of ETMSYNCFR are reserved on Krait pass3 onwards */
if (cpu_is_krait() && !cpu_is_krait_v1() && !cpu_is_krait_v2())
drvdata->sync_freq = 0x100;
else
drvdata->sync_freq = 0x80;
drvdata->timestamp_event = 0x406F;
/* Overrides for Krait pass1 */
if (cpu_is_krait_v1()) {
/* Krait pass1 doesn't support include filtering and non-cycle
* accurate tracing
*/
drvdata->mode = (ETM_MODE_EXCLUDE | ETM_MODE_CYCACC);
drvdata->ctrl = 0x1000;
drvdata->enable_ctrl1 = 0x1000000;
for (i = 0; i < drvdata->nr_addr_cmp; i++) {
drvdata->addr_val[i] = 0x0;
drvdata->addr_acctype[i] = 0x0;
drvdata->addr_type[i] = ETM_ADDR_TYPE_NONE;
}
}
if (etm_version_gte(drvdata->arch, ETM_ARCH_V1_0))
drvdata->ctrl |= BIT(11);
if (etm_version_gte(drvdata->arch, ETM_ARCH_V1_2))
drvdata->enable_ctrl2 = 0x0;
if (drvdata->data_trace_support) {
drvdata->mode |= (ETM_MODE_DATA_TRACE_VAL |
ETM_MODE_DATA_TRACE_ADDR);
drvdata->ctrl |= BIT(2) | BIT(3);
drvdata->viewdata_ctrl1 = 0x0;
drvdata->viewdata_ctrl3 = 0x10000;
drvdata->viewdata_event = 0x6F;
}
for (i = 0; i < drvdata->nr_data_cmp; i++) {
drvdata->data_val[i] = 0;
drvdata->data_mask[i] = ~(0);
}
}
static int etm_probe(struct platform_device *pdev)
{
int ret;
struct device *dev = &pdev->dev;
struct coresight_platform_data *pdata;
struct etm_drvdata *drvdata;
struct resource *res;
uint32_t reg_size;
static int count;
void *baddr;
struct msm_client_dump dump;
struct coresight_desc *desc;
if (coresight_fuse_access_disabled() ||
coresight_fuse_apps_access_disabled())
return -EPERM;
if (pdev->dev.of_node) {
pdata = of_get_coresight_platform_data(dev, pdev->dev.of_node);
if (IS_ERR(pdata))
return PTR_ERR(pdata);
pdev->dev.platform_data = pdata;
}
drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL);
if (!drvdata)
return -ENOMEM;
drvdata->dev = &pdev->dev;
platform_set_drvdata(pdev, drvdata);
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "etm-base");
if (!res)
return -ENODEV;
reg_size = resource_size(res);
drvdata->base = devm_ioremap(dev, res->start, resource_size(res));
if (!drvdata->base)
return -ENOMEM;
spin_lock_init(&drvdata->spinlock);
wake_lock_init(&drvdata->wake_lock, WAKE_LOCK_SUSPEND, "coresight-etm");
drvdata->clk = devm_clk_get(dev, "core_clk");
if (IS_ERR(drvdata->clk)) {
ret = PTR_ERR(drvdata->clk);
goto err0;
}
ret = clk_set_rate(drvdata->clk, CORESIGHT_CLK_RATE_TRACE);
if (ret)
goto err0;
ret = clk_prepare_enable(drvdata->clk);
if (ret)
goto err0;
drvdata->cpu = count++;
etmdrvdata[drvdata->cpu] = drvdata;
/*
* This is safe wrt CPU_UP_PREPARE and CPU_STARTING hotplug callbacks
* on the secondary cores that may enable the clock and perform
* etm_os_unlock since they occur before the cpu online mask is updated
* for the cpu which is checked by this smp call.
*/
if (!smp_call_function_single(drvdata->cpu, etm_os_unlock, drvdata, 1))
drvdata->os_unlock = true;
/*
* OS unlock must have happened on cpu0 so use it to populate read-only
* configuration data for ETM0. For other ETMs copy it over from ETM0.
*/
if (drvdata->cpu == 0) {
register_hotcpu_notifier(&etm_cpu_notifier);
if (smp_call_function_single(drvdata->cpu, etm_init_arch_data,
drvdata, 1))
dev_err(dev, "ETM arch init failed\n");
} else {
etm_copy_arch_data(drvdata);
}
if (!etm_arch_supported(drvdata->arch)) {
ret = -EINVAL;
goto err1;
}
etm_init_default_data(drvdata);
clk_disable_unprepare(drvdata->clk);
if (pdev->dev.of_node)
drvdata->round_robin = of_property_read_bool(pdev->dev.of_node,
"qcom,round-robin");
baddr = devm_kzalloc(dev, PAGE_SIZE + reg_size, GFP_KERNEL);
if (baddr) {
*(uint32_t *)(baddr + ETM_REG_DUMP_VER_OFF) = ETM_REG_DUMP_VER;
dump.id = MSM_ETM0_REG + drvdata->cpu;
dump.start_addr = virt_to_phys(baddr);
dump.end_addr = dump.start_addr + PAGE_SIZE + reg_size;
ret = msm_dump_table_register(&dump);
if (ret) {
devm_kfree(dev, baddr);
dev_err(dev, "ETM REG dump setup failed/unsupported\n");
}
} else {
dev_err(dev, "ETM REG dump space allocation failed\n");
}
desc = devm_kzalloc(dev, sizeof(*desc), GFP_KERNEL);
if (!desc) {
ret = -ENOMEM;
goto err2;
}
desc->type = CORESIGHT_DEV_TYPE_SOURCE;
desc->subtype.source_subtype = CORESIGHT_DEV_SUBTYPE_SOURCE_PROC;
desc->ops = &etm_cs_ops;
desc->pdata = pdev->dev.platform_data;
desc->dev = &pdev->dev;
desc->groups = etm_attr_grps;
desc->owner = THIS_MODULE;
drvdata->csdev = coresight_register(desc);
if (IS_ERR(drvdata->csdev)) {
ret = PTR_ERR(drvdata->csdev);
goto err2;
}
if (pdev->dev.of_node)
drvdata->pcsave_impl = of_property_read_bool(pdev->dev.of_node,
"qcom,pc-save");
if (drvdata->pcsave_impl) {
ret = device_create_file(&drvdata->csdev->dev,
&dev_attr_pcsave);
if (ret)
dev_err(dev, "ETM pcsave dev node creation failed\n");
}
dev_info(dev, "ETM initialized\n");
if (boot_enable) {
coresight_enable(drvdata->csdev);
drvdata->boot_enable = true;
}
if (drvdata->pcsave_impl && boot_pcsave_enable) {
__etm_store_pcsave(drvdata, 1);
drvdata->pcsave_boot_enable = true;
}
return 0;
err2:
if (drvdata->cpu == 0)
unregister_hotcpu_notifier(&etm_cpu_notifier);
wake_lock_destroy(&drvdata->wake_lock);
return ret;
err1:
if (drvdata->cpu == 0)
unregister_hotcpu_notifier(&etm_cpu_notifier);
clk_disable_unprepare(drvdata->clk);
err0:
wake_lock_destroy(&drvdata->wake_lock);
return ret;
}
static int etm_remove(struct platform_device *pdev)
{
struct etm_drvdata *drvdata = platform_get_drvdata(pdev);
device_remove_file(&drvdata->csdev->dev, &dev_attr_pcsave);
coresight_unregister(drvdata->csdev);
if (drvdata->cpu == 0)
unregister_hotcpu_notifier(&etm_cpu_notifier);
wake_lock_destroy(&drvdata->wake_lock);
return 0;
}
static const struct of_device_id etm_match[] = {
{.compatible = "arm,coresight-etm"},
{}
};
static struct platform_driver etm_driver = {
.probe = etm_probe,
.remove = etm_remove,
.driver = {
.name = "coresight-etm",
.owner = THIS_MODULE,
.of_match_table = etm_match,
},
};
int __init etm_init(void)
{
return platform_driver_register(&etm_driver);
}
module_init(etm_init);
void __exit etm_exit(void)
{
platform_driver_unregister(&etm_driver);
}
module_exit(etm_exit);
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("CoreSight Program Flow Trace driver");
| gpl-2.0 |
aneeshv/linux-bq27xxx | drivers/pinctrl/pinmux.c | 175 | 31299 | /*
* Core driver for the pin muxing portions of the pin control subsystem
*
* Copyright (C) 2011 ST-Ericsson SA
* Written on behalf of Linaro for ST-Ericsson
* Based on bits of regulator core, gpio core and clk core
*
* Author: Linus Walleij <linus.walleij@linaro.org>
*
* License terms: GNU General Public License (GPL) version 2
*/
#define pr_fmt(fmt) "pinmux core: " fmt
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/radix-tree.h>
#include <linux/err.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/sysfs.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/pinctrl/machine.h>
#include <linux/pinctrl/pinmux.h>
#include "core.h"
/* List of pinmuxes */
static DEFINE_MUTEX(pinmux_list_mutex);
static LIST_HEAD(pinmux_list);
/* List of pinmux hogs */
static DEFINE_MUTEX(pinmux_hoglist_mutex);
static LIST_HEAD(pinmux_hoglist);
/* Global pinmux maps, we allow one set only */
static struct pinmux_map const *pinmux_maps;
static unsigned pinmux_maps_num;
/**
* struct pinmux_group - group list item for pinmux groups
* @node: pinmux group list node
* @group_selector: the group selector for this group
*/
struct pinmux_group {
struct list_head node;
unsigned group_selector;
};
/**
* struct pinmux - per-device pinmux state holder
* @node: global list node
* @dev: the device using this pinmux
* @usecount: the number of active users of this mux setting, used to keep
* track of nested use cases
* @pins: an array of discrete physical pins used in this mapping, taken
* from the global pin enumeration space (copied from pinmux map)
* @num_pins: the number of pins in this mapping array, i.e. the number of
* elements in .pins so we can iterate over that array (copied from
* pinmux map)
* @pctldev: pin control device handling this pinmux
* @func_selector: the function selector for the pinmux device handling
* this pinmux
* @groups: the group selectors for the pinmux device and
* selector combination handling this pinmux, this is a list that
* will be traversed on all pinmux operations such as
* get/put/enable/disable
* @mutex: a lock for the pinmux state holder
*/
struct pinmux {
struct list_head node;
struct device *dev;
unsigned usecount;
struct pinctrl_dev *pctldev;
unsigned func_selector;
struct list_head groups;
struct mutex mutex;
};
/**
* struct pinmux_hog - a list item to stash mux hogs
* @node: pinmux hog list node
* @map: map entry responsible for this hogging
* @pmx: the pinmux hogged by this item
*/
struct pinmux_hog {
struct list_head node;
struct pinmux_map const *map;
struct pinmux *pmx;
};
/**
 * pin_request() - request a single pin to be muxed in, typically for GPIO
 * @pctldev: pin controller device handling this pin
 * @pin: the pin number in the global pin space
* @function: a functional name to give to this pin, passed to the driver
* so it knows what function to mux in, e.g. the string "gpioNN"
* means that you want to mux in the pin for use as GPIO number NN
* @gpio: if this request concerns a single GPIO pin
* @gpio_range: the range matching the GPIO pin if this is a request for a
* single GPIO pin
*/
static int pin_request(struct pinctrl_dev *pctldev,
int pin, const char *function, bool gpio,
struct pinctrl_gpio_range *gpio_range)
{
struct pin_desc *desc;
const struct pinmux_ops *ops = pctldev->desc->pmxops;
int status = -EINVAL;
dev_dbg(&pctldev->dev, "request pin %d for %s\n", pin, function);
if (!pin_is_valid(pctldev, pin)) {
dev_err(&pctldev->dev, "pin is invalid\n");
return -EINVAL;
}
if (!function) {
dev_err(&pctldev->dev, "no function name given\n");
return -EINVAL;
}
desc = pin_desc_get(pctldev, pin);
if (desc == NULL) {
dev_err(&pctldev->dev,
"pin is not registered so it cannot be requested\n");
goto out;
}
spin_lock(&desc->lock);
if (desc->mux_function) {
spin_unlock(&desc->lock);
dev_err(&pctldev->dev,
"pin already requested\n");
goto out;
}
desc->mux_function = function;
spin_unlock(&desc->lock);
/* Let each pin increase references to this module */
if (!try_module_get(pctldev->owner)) {
dev_err(&pctldev->dev,
"could not increase module refcount for pin %d\n",
pin);
status = -EINVAL;
goto out_free_pin;
}
/*
* If there is no kind of request function for the pin we just assume
* we got it by default and proceed.
*/
if (gpio && ops->gpio_request_enable)
/* This requests and enables a single GPIO pin */
status = ops->gpio_request_enable(pctldev, gpio_range, pin);
else if (ops->request)
status = ops->request(pctldev, pin);
else
status = 0;
if (status)
dev_err(&pctldev->dev, "->request on device %s failed "
"for pin %d\n",
pctldev->desc->name, pin);
out_free_pin:
if (status) {
spin_lock(&desc->lock);
desc->mux_function = NULL;
spin_unlock(&desc->lock);
}
out:
if (status)
dev_err(&pctldev->dev, "pin-%d (%s) status %d\n",
pin, function ? : "?", status);
return status;
}
/**
* pin_free() - release a single muxed in pin so something else can be muxed
* @pctldev: pin controller device handling this pin
* @pin: the pin to free
* @free_func: whether to free the pin's assigned function name string
*/
static void pin_free(struct pinctrl_dev *pctldev, int pin, int free_func)
{
const struct pinmux_ops *ops = pctldev->desc->pmxops;
struct pin_desc *desc;
desc = pin_desc_get(pctldev, pin);
if (desc == NULL) {
dev_err(&pctldev->dev,
"pin is not registered so it cannot be freed\n");
return;
}
if (ops->free)
ops->free(pctldev, pin);
spin_lock(&desc->lock);
if (free_func)
kfree(desc->mux_function);
desc->mux_function = NULL;
spin_unlock(&desc->lock);
module_put(pctldev->owner);
}
/**
* pinmux_request_gpio() - request a single pin to be muxed in as GPIO
* @gpio: the GPIO pin number from the GPIO subsystem number space
*/
int pinmux_request_gpio(unsigned gpio)
{
char gpiostr[16];
const char *function;
struct pinctrl_dev *pctldev;
struct pinctrl_gpio_range *range;
int ret;
int pin;
ret = pinctrl_get_device_gpio_range(gpio, &pctldev, &range);
if (ret)
return -EINVAL;
/* Convert to the pin controllers number space */
pin = gpio - range->base;
/* Conjure some name stating what chip and pin this is taken by */
snprintf(gpiostr, 15, "%s:%d", range->name, gpio);
function = kstrdup(gpiostr, GFP_KERNEL);
if (!function)
return -EINVAL;
ret = pin_request(pctldev, pin, function, true, range);
if (ret < 0)
kfree(function);
return ret;
}
EXPORT_SYMBOL_GPL(pinmux_request_gpio);
/**
* pinmux_free_gpio() - free a single pin, currently used as GPIO
* @gpio: the GPIO pin number from the GPIO subsystem number space
*/
void pinmux_free_gpio(unsigned gpio)
{
struct pinctrl_dev *pctldev;
struct pinctrl_gpio_range *range;
int ret;
int pin;
ret = pinctrl_get_device_gpio_range(gpio, &pctldev, &range);
if (ret)
return;
/* Convert to the pin controllers number space */
pin = gpio - range->base;
pin_free(pctldev, pin, true);
}
EXPORT_SYMBOL_GPL(pinmux_free_gpio);
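#if 0
/*
 * Illustrative sketch (disabled, not part of this file): a GPIO chip
 * driver would typically back its .request/.free callbacks with the two
 * helpers above.  The gpio_chip wiring and the <linux/gpio.h> include it
 * needs are assumptions made purely for the example.
 */
static int example_gpio_request(struct gpio_chip *chip, unsigned offset)
{
	/* Translate the chip-relative offset to the global GPIO number */
	return pinmux_request_gpio(chip->base + offset);
}

static void example_gpio_free(struct gpio_chip *chip, unsigned offset)
{
	pinmux_free_gpio(chip->base + offset);
}
#endif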
/**
* pinmux_register_mappings() - register a set of pinmux mappings
* @maps: the pinmux mappings table to register
* @num_maps: the number of maps in the mapping table
*
* Only call this once during initialization of your machine, the function is
* tagged as __init and won't be callable after init has completed. The map
* passed into this function will be owned by the pinmux core and cannot be
 * freed.
*/
int __init pinmux_register_mappings(struct pinmux_map const *maps,
unsigned num_maps)
{
int i;
if (pinmux_maps != NULL) {
pr_err("pinmux mappings already registered, you can only "
"register one set of maps\n");
return -EINVAL;
}
pr_debug("add %d pinmux maps\n", num_maps);
for (i = 0; i < num_maps; i++) {
/* Sanity check the mapping */
if (!maps[i].name) {
pr_err("failed to register map %d: "
"no map name given\n", i);
return -EINVAL;
}
if (!maps[i].ctrl_dev && !maps[i].ctrl_dev_name) {
pr_err("failed to register map %s (%d): "
"no pin control device given\n",
maps[i].name, i);
return -EINVAL;
}
if (!maps[i].function) {
pr_err("failed to register map %s (%d): "
"no function ID given\n", maps[i].name, i);
return -EINVAL;
}
if (!maps[i].dev && !maps[i].dev_name)
pr_debug("add system map %s function %s with no device\n",
maps[i].name,
maps[i].function);
else
pr_debug("register map %s, function %s\n",
maps[i].name,
maps[i].function);
}
pinmux_maps = maps;
pinmux_maps_num = num_maps;
return 0;
}
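#if 0
/*
 * Illustrative sketch (disabled, not part of this file): a board file
 * would register its mapping table once at init time.  All names below
 * are hypothetical; the struct pinmux_map fields used are the ones this
 * file consumes (.name, .ctrl_dev_name, .dev_name, .function,
 * .hog_on_boot).  The table is not marked __initdata because the core
 * keeps a pointer to it.
 */
static struct pinmux_map example_pinmux_map[] = {
	{
		/* Map claimed by a device driver via pinmux_get() */
		.name = "i2c0",
		.ctrl_dev_name = "pinctrl.0",
		.dev_name = "i2c.0",
		.function = "i2c0",
	},
	{
		/* System map with no device, hogged by the core at boot */
		.name = "power",
		.ctrl_dev_name = "pinctrl.0",
		.function = "power",
		.hog_on_boot = true,
	},
};

static int __init example_board_pinmux_init(void)
{
	return pinmux_register_mappings(example_pinmux_map,
					ARRAY_SIZE(example_pinmux_map));
}
#endif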
/**
 * acquire_pins() - acquire all the pins for a certain function on a pinmux
* @pctldev: the device to take the pins on
* @func_selector: the function selector to acquire the pins for
* @group_selector: the group selector containing the pins to acquire
*/
static int acquire_pins(struct pinctrl_dev *pctldev,
unsigned func_selector,
unsigned group_selector)
{
const struct pinctrl_ops *pctlops = pctldev->desc->pctlops;
const struct pinmux_ops *pmxops = pctldev->desc->pmxops;
const char *func = pmxops->get_function_name(pctldev,
func_selector);
const unsigned *pins;
unsigned num_pins;
int ret;
int i;
ret = pctlops->get_group_pins(pctldev, group_selector,
&pins, &num_pins);
if (ret)
return ret;
dev_dbg(&pctldev->dev, "requesting the %u pins from group %u\n",
num_pins, group_selector);
/* Try to allocate all pins in this group, one by one */
for (i = 0; i < num_pins; i++) {
ret = pin_request(pctldev, pins[i], func, false, NULL);
if (ret) {
dev_err(&pctldev->dev,
"could not get pin %d for function %s "
"on device %s - conflicting mux mappings?\n",
pins[i], func ? : "(undefined)",
pinctrl_dev_get_name(pctldev));
/* On error release all taken pins */
i--; /* this pin just failed */
for (; i >= 0; i--)
pin_free(pctldev, pins[i], false);
return -ENODEV;
}
}
return 0;
}
/**
* release_pins() - release pins taken by earlier acquirement
 * @pctldev: the device to free the pins on
* @group_selector: the group selector containing the pins to free
*/
static void release_pins(struct pinctrl_dev *pctldev,
unsigned group_selector)
{
const struct pinctrl_ops *pctlops = pctldev->desc->pctlops;
const unsigned *pins;
unsigned num_pins;
int ret;
int i;
ret = pctlops->get_group_pins(pctldev, group_selector,
&pins, &num_pins);
if (ret) {
dev_err(&pctldev->dev, "could not get pins to release for "
"group selector %d\n",
group_selector);
return;
}
for (i = 0; i < num_pins; i++)
pin_free(pctldev, pins[i], false);
}
/**
* pinmux_get_group_selector() - returns the group selector for a group
* @pctldev: the pin controller handling the group
* @pin_group: the pin group to look up
*/
static int pinmux_get_group_selector(struct pinctrl_dev *pctldev,
const char *pin_group)
{
const struct pinctrl_ops *pctlops = pctldev->desc->pctlops;
unsigned group_selector = 0;
while (pctlops->list_groups(pctldev, group_selector) >= 0) {
const char *gname = pctlops->get_group_name(pctldev,
group_selector);
if (!strcmp(gname, pin_group)) {
dev_dbg(&pctldev->dev,
"found group selector %u for %s\n",
group_selector,
pin_group);
return group_selector;
}
group_selector++;
}
dev_err(&pctldev->dev, "does not have pin group %s\n",
pin_group);
return -EINVAL;
}
/**
* pinmux_check_pin_group() - check function and pin group combo
* @pctldev: device to check the pin group vs function for
* @func_selector: the function selector to check the pin group for, we have
* already looked this up in the calling function
* @pin_group: the pin group to match to the function
*
* This function will check that the pinmux driver can supply the
* selected pin group for a certain function, returns the group selector if
* the group and function selector will work fine together, else returns
* negative
*/
static int pinmux_check_pin_group(struct pinctrl_dev *pctldev,
unsigned func_selector,
const char *pin_group)
{
const struct pinmux_ops *pmxops = pctldev->desc->pmxops;
const struct pinctrl_ops *pctlops = pctldev->desc->pctlops;
int ret;
/*
* If the driver does not support different pin groups for the
* functions, we only support group 0, and assume this exists.
*/
if (!pctlops || !pctlops->list_groups)
return 0;
/*
* Passing NULL (no specific group) will select the first and
* hopefully only group of pins available for this function.
*/
if (!pin_group) {
char const * const *groups;
unsigned num_groups;
ret = pmxops->get_function_groups(pctldev, func_selector,
&groups, &num_groups);
if (ret)
return ret;
if (num_groups < 1)
return -EINVAL;
ret = pinmux_get_group_selector(pctldev, groups[0]);
if (ret < 0) {
dev_err(&pctldev->dev,
"function %s wants group %s but the pin "
"controller does not seem to have that group\n",
pmxops->get_function_name(pctldev, func_selector),
groups[0]);
return ret;
}
if (num_groups > 1)
dev_dbg(&pctldev->dev,
"function %s support more than one group, "
"default-selecting first group %s (%d)\n",
pmxops->get_function_name(pctldev, func_selector),
groups[0],
ret);
return ret;
}
dev_dbg(&pctldev->dev,
"check if we have pin group %s on controller %s\n",
pin_group, pinctrl_dev_get_name(pctldev));
ret = pinmux_get_group_selector(pctldev, pin_group);
if (ret < 0) {
dev_dbg(&pctldev->dev,
"%s does not support pin group %s with function %s\n",
pinctrl_dev_get_name(pctldev),
pin_group,
pmxops->get_function_name(pctldev, func_selector));
}
return ret;
}
/**
* pinmux_search_function() - check pin control driver for a certain function
* @pctldev: device to check for function and position
* @map: function map containing the function and position to look for
* @func_selector: returns the applicable function selector if found
* @group_selector: returns the applicable group selector if found
*
* This will search the pinmux driver for an applicable
* function with a specific pin group, returns 0 if these can be mapped
* negative otherwise
*/
static int pinmux_search_function(struct pinctrl_dev *pctldev,
struct pinmux_map const *map,
unsigned *func_selector,
unsigned *group_selector)
{
const struct pinmux_ops *ops = pctldev->desc->pmxops;
unsigned selector = 0;
/* See if this pctldev has this function */
while (ops->list_functions(pctldev, selector) >= 0) {
const char *fname = ops->get_function_name(pctldev,
selector);
int ret;
if (!strcmp(map->function, fname)) {
/* Found the function, check pin group */
ret = pinmux_check_pin_group(pctldev, selector,
map->group);
if (ret < 0)
return ret;
/* This function and group selector can be used */
*func_selector = selector;
*group_selector = ret;
return 0;
}
selector++;
}
pr_err("%s does not support function %s\n",
pinctrl_dev_get_name(pctldev), map->function);
return -EINVAL;
}
/**
* pinmux_enable_muxmap() - enable a map entry for a certain pinmux
*/
static int pinmux_enable_muxmap(struct pinctrl_dev *pctldev,
struct pinmux *pmx,
struct device *dev,
const char *devname,
struct pinmux_map const *map)
{
unsigned func_selector;
unsigned group_selector;
struct pinmux_group *grp;
int ret;
/*
* Note that we're not locking the pinmux mutex here, because
* this is only called at pinmux initialization time when it
* has not been added to any list and thus is not reachable
* by anyone else.
*/
if (pmx->pctldev && pmx->pctldev != pctldev) {
dev_err(&pctldev->dev,
"different pin control devices given for device %s, "
"function %s\n",
devname,
map->function);
return -EINVAL;
}
pmx->dev = dev;
pmx->pctldev = pctldev;
/* Now go into the driver and try to match a function and group */
ret = pinmux_search_function(pctldev, map, &func_selector,
&group_selector);
if (ret < 0)
return ret;
/*
* If the function selector is already set, it needs to be identical,
* we support several groups with one function but not several
* functions with one or several groups in the same pinmux.
*/
if (pmx->func_selector != UINT_MAX &&
pmx->func_selector != func_selector) {
dev_err(&pctldev->dev,
"dual function defines in the map for device %s\n",
devname);
return -EINVAL;
}
pmx->func_selector = func_selector;
/* Now add this group selector, we may have many of them */
grp = kmalloc(sizeof(struct pinmux_group), GFP_KERNEL);
if (!grp)
return -ENOMEM;
grp->group_selector = group_selector;
ret = acquire_pins(pctldev, func_selector, group_selector);
if (ret) {
kfree(grp);
return ret;
}
list_add(&grp->node, &pmx->groups);
return 0;
}
static void pinmux_free_groups(struct pinmux *pmx)
{
struct list_head *node, *tmp;
list_for_each_safe(node, tmp, &pmx->groups) {
struct pinmux_group *grp =
list_entry(node, struct pinmux_group, node);
/* Release all pins taken by this group */
release_pins(pmx->pctldev, grp->group_selector);
list_del(node);
kfree(grp);
}
}
/**
* pinmux_get() - retrieves the pinmux for a certain device
* @dev: the device to get the pinmux for
* @name: an optional specific mux mapping name or NULL, the name is only
* needed if you want to have more than one mapping per device, or if you
* need an anonymous pinmux (not tied to any specific device)
*/
struct pinmux *pinmux_get(struct device *dev, const char *name)
{
struct pinmux_map const *map = NULL;
struct pinctrl_dev *pctldev = NULL;
const char *devname = NULL;
struct pinmux *pmx;
bool found_map;
unsigned num_maps = 0;
int ret = -ENODEV;
int i;
/* We must have dev or ID or both */
if (!dev && !name)
return ERR_PTR(-EINVAL);
if (dev)
devname = dev_name(dev);
pr_debug("get mux %s for device %s\n", name,
devname ? devname : "(none)");
/*
* create the state cookie holder struct pinmux for each
* mapping, this is what consumers will get when requesting
* a pinmux handle with pinmux_get()
*/
pmx = kzalloc(sizeof(struct pinmux), GFP_KERNEL);
if (pmx == NULL)
return ERR_PTR(-ENOMEM);
mutex_init(&pmx->mutex);
pmx->func_selector = UINT_MAX;
INIT_LIST_HEAD(&pmx->groups);
/* Iterate over the pinmux maps to locate the right ones */
for (i = 0; i < pinmux_maps_num; i++) {
map = &pinmux_maps[i];
found_map = false;
/*
* First, try to find the pctldev given in the map
*/
pctldev = get_pinctrl_dev_from_dev(map->ctrl_dev,
map->ctrl_dev_name);
if (!pctldev) {
const char *devname = NULL;
if (map->ctrl_dev)
devname = dev_name(map->ctrl_dev);
else if (map->ctrl_dev_name)
devname = map->ctrl_dev_name;
pr_warning("could not find a pinctrl device for pinmux "
"function %s, fishy, they shall all have one\n",
map->function);
pr_warning("given pinctrl device name: %s",
devname ? devname : "UNDEFINED");
/* Continue to check the other mappings anyway... */
continue;
}
pr_debug("in map, found pctldev %s to handle function %s",
dev_name(&pctldev->dev), map->function);
/*
* If we're looking for a specific named map, this must match,
* else we loop and look for the next.
*/
if (name != NULL) {
if (map->name == NULL)
continue;
if (strcmp(map->name, name))
continue;
}
/*
* This is for the case where no device name is given, we
* already know that the function name matches from above
* code.
*/
if (!map->dev_name && (name != NULL))
found_map = true;
/* If the mapping has a device set up it must match */
if (map->dev_name &&
(!devname || !strcmp(map->dev_name, devname)))
/* MATCH! */
found_map = true;
/* If this map is applicable, then apply it */
if (found_map) {
ret = pinmux_enable_muxmap(pctldev, pmx, dev,
devname, map);
if (ret) {
pinmux_free_groups(pmx);
kfree(pmx);
return ERR_PTR(ret);
}
num_maps++;
}
}
	/* We should have at least one map, right? */
if (!num_maps) {
pr_err("could not find any mux maps for device %s, ID %s\n",
devname ? devname : "(anonymous)",
name ? name : "(undefined)");
kfree(pmx);
return ERR_PTR(-EINVAL);
}
pr_debug("found %u mux maps for device %s, UD %s\n",
num_maps,
devname ? devname : "(anonymous)",
name ? name : "(undefined)");
/* Add the pinmux to the global list */
mutex_lock(&pinmux_list_mutex);
list_add(&pmx->node, &pinmux_list);
mutex_unlock(&pinmux_list_mutex);
return pmx;
}
EXPORT_SYMBOL_GPL(pinmux_get);
/**
* pinmux_put() - release a previously claimed pinmux
* @pmx: a pinmux previously claimed by pinmux_get()
*/
void pinmux_put(struct pinmux *pmx)
{
if (pmx == NULL)
return;
mutex_lock(&pmx->mutex);
if (pmx->usecount)
pr_warn("releasing pinmux with active users!\n");
/* Free the groups and all acquired pins */
pinmux_free_groups(pmx);
mutex_unlock(&pmx->mutex);
/* Remove from list */
mutex_lock(&pinmux_list_mutex);
list_del(&pmx->node);
mutex_unlock(&pinmux_list_mutex);
kfree(pmx);
}
EXPORT_SYMBOL_GPL(pinmux_put);
/**
* pinmux_enable() - enable a certain pinmux setting
* @pmx: the pinmux to enable, previously claimed by pinmux_get()
*/
int pinmux_enable(struct pinmux *pmx)
{
int ret = 0;
if (pmx == NULL)
return -EINVAL;
mutex_lock(&pmx->mutex);
if (pmx->usecount++ == 0) {
struct pinctrl_dev *pctldev = pmx->pctldev;
const struct pinmux_ops *ops = pctldev->desc->pmxops;
struct pinmux_group *grp;
list_for_each_entry(grp, &pmx->groups, node) {
ret = ops->enable(pctldev, pmx->func_selector,
grp->group_selector);
if (ret) {
/*
* TODO: call disable() on all groups we called
* enable() on to this point?
*/
pmx->usecount--;
break;
}
}
}
mutex_unlock(&pmx->mutex);
return ret;
}
EXPORT_SYMBOL_GPL(pinmux_enable);
/**
* pinmux_disable() - disable a certain pinmux setting
* @pmx: the pinmux to disable, previously claimed by pinmux_get()
*/
void pinmux_disable(struct pinmux *pmx)
{
if (pmx == NULL)
return;
mutex_lock(&pmx->mutex);
if (--pmx->usecount == 0) {
struct pinctrl_dev *pctldev = pmx->pctldev;
const struct pinmux_ops *ops = pctldev->desc->pmxops;
struct pinmux_group *grp;
list_for_each_entry(grp, &pmx->groups, node) {
ops->disable(pctldev, pmx->func_selector,
grp->group_selector);
}
}
mutex_unlock(&pmx->mutex);
}
EXPORT_SYMBOL_GPL(pinmux_disable);
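#if 0
/*
 * Illustrative sketch (disabled, not part of this file): the consumer
 * call sequence this API expects is get -> enable -> ... -> disable ->
 * put.  The device pointer would come from the consumer driver's own
 * probe(); passing a NULL name picks the map registered for that device.
 */
static struct pinmux *example_pmx;

static int example_consumer_probe(struct device *dev)
{
	int ret;

	example_pmx = pinmux_get(dev, NULL);
	if (IS_ERR(example_pmx))
		return PTR_ERR(example_pmx);

	ret = pinmux_enable(example_pmx);
	if (ret)
		pinmux_put(example_pmx);
	return ret;
}

static void example_consumer_remove(struct device *dev)
{
	pinmux_disable(example_pmx);
	pinmux_put(example_pmx);
}
#endif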
int pinmux_check_ops(const struct pinmux_ops *ops)
{
/* Check that we implement required operations */
if (!ops->list_functions ||
!ops->get_function_name ||
!ops->get_function_groups ||
!ops->enable ||
!ops->disable)
return -EINVAL;
return 0;
}
/* Hog a single map entry and add to the hoglist */
static int pinmux_hog_map(struct pinctrl_dev *pctldev,
struct pinmux_map const *map)
{
struct pinmux_hog *hog;
struct pinmux *pmx;
int ret;
if (map->dev || map->dev_name) {
/*
* TODO: the day we have device tree support, we can
* traverse the device tree and hog to specific device nodes
* without any problems, so then we can hog pinmuxes for
* all devices that just want a static pin mux at this point.
*/
dev_err(&pctldev->dev, "map %s wants to hog a non-system "
"pinmux, this is not going to work\n", map->name);
return -EINVAL;
}
hog = kzalloc(sizeof(struct pinmux_hog), GFP_KERNEL);
if (!hog)
return -ENOMEM;
pmx = pinmux_get(NULL, map->name);
if (IS_ERR(pmx)) {
kfree(hog);
dev_err(&pctldev->dev,
"could not get the %s pinmux mapping for hogging\n",
map->name);
return PTR_ERR(pmx);
}
ret = pinmux_enable(pmx);
if (ret) {
pinmux_put(pmx);
kfree(hog);
dev_err(&pctldev->dev,
"could not enable the %s pinmux mapping for hogging\n",
map->name);
return ret;
}
hog->map = map;
hog->pmx = pmx;
dev_info(&pctldev->dev, "hogged map %s, function %s\n", map->name,
map->function);
mutex_lock(&pctldev->pinmux_hogs_lock);
list_add(&hog->node, &pctldev->pinmux_hogs);
mutex_unlock(&pctldev->pinmux_hogs_lock);
return 0;
}
/**
* pinmux_hog_maps() - hog specific map entries on controller device
* @pctldev: the pin control device to hog entries on
*
* When the pin controllers are registered, there may be some specific pinmux
* map entries that need to be hogged, i.e. get+enabled until the system shuts
* down.
*/
int pinmux_hog_maps(struct pinctrl_dev *pctldev)
{
struct device *dev = &pctldev->dev;
const char *devname = dev_name(dev);
int ret;
int i;
INIT_LIST_HEAD(&pctldev->pinmux_hogs);
mutex_init(&pctldev->pinmux_hogs_lock);
for (i = 0; i < pinmux_maps_num; i++) {
struct pinmux_map const *map = &pinmux_maps[i];
if (((map->ctrl_dev == dev) ||
!strcmp(map->ctrl_dev_name, devname)) &&
map->hog_on_boot) {
/* OK time to hog! */
ret = pinmux_hog_map(pctldev, map);
if (ret)
return ret;
}
}
return 0;
}
/**
 * pinmux_unhog_maps() - unhog specific map entries on controller device
* @pctldev: the pin control device to unhog entries on
*/
void pinmux_unhog_maps(struct pinctrl_dev *pctldev)
{
struct list_head *node, *tmp;
mutex_lock(&pctldev->pinmux_hogs_lock);
list_for_each_safe(node, tmp, &pctldev->pinmux_hogs) {
struct pinmux_hog *hog =
list_entry(node, struct pinmux_hog, node);
pinmux_disable(hog->pmx);
pinmux_put(hog->pmx);
list_del(node);
kfree(hog);
}
mutex_unlock(&pctldev->pinmux_hogs_lock);
}
#ifdef CONFIG_DEBUG_FS
/* Called from pincontrol core */
static int pinmux_functions_show(struct seq_file *s, void *what)
{
struct pinctrl_dev *pctldev = s->private;
const struct pinmux_ops *pmxops = pctldev->desc->pmxops;
unsigned func_selector = 0;
while (pmxops->list_functions(pctldev, func_selector) >= 0) {
const char *func = pmxops->get_function_name(pctldev,
func_selector);
const char * const *groups;
unsigned num_groups;
int ret;
int i;
ret = pmxops->get_function_groups(pctldev, func_selector,
&groups, &num_groups);
if (ret)
seq_printf(s, "function %s: COULD NOT GET GROUPS\n",
func);
seq_printf(s, "function: %s, groups = [ ", func);
for (i = 0; i < num_groups; i++)
seq_printf(s, "%s ", groups[i]);
seq_puts(s, "]\n");
func_selector++;
}
return 0;
}
static int pinmux_pins_show(struct seq_file *s, void *what)
{
struct pinctrl_dev *pctldev = s->private;
unsigned pin;
seq_puts(s, "Pinmux settings per pin\n");
seq_puts(s, "Format: pin (name): pinmuxfunction\n");
/* The highest pin number need to be included in the loop, thus <= */
for (pin = 0; pin <= pctldev->desc->maxpin; pin++) {
struct pin_desc *desc;
desc = pin_desc_get(pctldev, pin);
/* Pin space may be sparse */
if (desc == NULL)
continue;
seq_printf(s, "pin %d (%s): %s\n", pin,
desc->name ? desc->name : "unnamed",
desc->mux_function ? desc->mux_function
: "UNCLAIMED");
}
return 0;
}
static int pinmux_hogs_show(struct seq_file *s, void *what)
{
struct pinctrl_dev *pctldev = s->private;
struct pinmux_hog *hog;
seq_puts(s, "Pinmux map hogs held by device\n");
list_for_each_entry(hog, &pctldev->pinmux_hogs, node)
seq_printf(s, "%s\n", hog->map->name);
return 0;
}
static int pinmux_show(struct seq_file *s, void *what)
{
struct pinmux *pmx;
seq_puts(s, "Requested pinmuxes and their maps:\n");
list_for_each_entry(pmx, &pinmux_list, node) {
struct pinctrl_dev *pctldev = pmx->pctldev;
const struct pinmux_ops *pmxops;
const struct pinctrl_ops *pctlops;
struct pinmux_group *grp;
if (!pctldev) {
seq_puts(s, "NO PIN CONTROLLER DEVICE\n");
continue;
}
pmxops = pctldev->desc->pmxops;
pctlops = pctldev->desc->pctlops;
seq_printf(s, "device: %s function: %s (%u),",
pinctrl_dev_get_name(pmx->pctldev),
pmxops->get_function_name(pctldev, pmx->func_selector),
pmx->func_selector);
seq_printf(s, " groups: [");
list_for_each_entry(grp, &pmx->groups, node) {
seq_printf(s, " %s (%u)",
pctlops->get_group_name(pctldev, grp->group_selector),
grp->group_selector);
}
seq_printf(s, " ]");
seq_printf(s, " users: %u map-> %s\n",
pmx->usecount,
pmx->dev ? dev_name(pmx->dev) : "(system)");
}
return 0;
}
static int pinmux_maps_show(struct seq_file *s, void *what)
{
int i;
seq_puts(s, "Pinmux maps:\n");
for (i = 0; i < pinmux_maps_num; i++) {
struct pinmux_map const *map = &pinmux_maps[i];
seq_printf(s, "%s:\n", map->name);
if (map->dev || map->dev_name)
seq_printf(s, " device: %s\n",
map->dev ? dev_name(map->dev) :
map->dev_name);
else
seq_printf(s, " SYSTEM MUX\n");
seq_printf(s, " controlling device %s\n",
map->ctrl_dev ? dev_name(map->ctrl_dev) :
map->ctrl_dev_name);
seq_printf(s, " function: %s\n", map->function);
seq_printf(s, " group: %s\n", map->group ? map->group :
"(default)");
}
return 0;
}
static int pinmux_functions_open(struct inode *inode, struct file *file)
{
return single_open(file, pinmux_functions_show, inode->i_private);
}
static int pinmux_pins_open(struct inode *inode, struct file *file)
{
return single_open(file, pinmux_pins_show, inode->i_private);
}
static int pinmux_hogs_open(struct inode *inode, struct file *file)
{
return single_open(file, pinmux_hogs_show, inode->i_private);
}
static int pinmux_open(struct inode *inode, struct file *file)
{
return single_open(file, pinmux_show, NULL);
}
static int pinmux_maps_open(struct inode *inode, struct file *file)
{
return single_open(file, pinmux_maps_show, NULL);
}
static const struct file_operations pinmux_functions_ops = {
.open = pinmux_functions_open,
.read = seq_read,
.llseek = seq_lseek,
.release = single_release,
};
static const struct file_operations pinmux_pins_ops = {
.open = pinmux_pins_open,
.read = seq_read,
.llseek = seq_lseek,
.release = single_release,
};
static const struct file_operations pinmux_hogs_ops = {
.open = pinmux_hogs_open,
.read = seq_read,
.llseek = seq_lseek,
.release = single_release,
};
static const struct file_operations pinmux_ops = {
.open = pinmux_open,
.read = seq_read,
.llseek = seq_lseek,
.release = single_release,
};
static const struct file_operations pinmux_maps_ops = {
.open = pinmux_maps_open,
.read = seq_read,
.llseek = seq_lseek,
.release = single_release,
};
void pinmux_init_device_debugfs(struct dentry *devroot,
struct pinctrl_dev *pctldev)
{
debugfs_create_file("pinmux-functions", S_IFREG | S_IRUGO,
devroot, pctldev, &pinmux_functions_ops);
debugfs_create_file("pinmux-pins", S_IFREG | S_IRUGO,
devroot, pctldev, &pinmux_pins_ops);
debugfs_create_file("pinmux-hogs", S_IFREG | S_IRUGO,
devroot, pctldev, &pinmux_hogs_ops);
}
void pinmux_init_debugfs(struct dentry *subsys_root)
{
debugfs_create_file("pinmuxes", S_IFREG | S_IRUGO,
subsys_root, NULL, &pinmux_ops);
debugfs_create_file("pinmux-maps", S_IFREG | S_IRUGO,
subsys_root, NULL, &pinmux_maps_ops);
}
#endif /* CONFIG_DEBUG_FS */
| gpl-2.0 |
lucaspcamargo/litmus-rt | sound/soc/codecs/wm8978.c | 431 | 31019 | /*
* wm8978.c -- WM8978 ALSA SoC Audio Codec driver
*
* Copyright (C) 2009-2010 Guennadi Liakhovetski <g.liakhovetski@gmx.de>
* Copyright (C) 2007 Carlos Munoz <carlos@kenati.com>
* Copyright 2006-2009 Wolfson Microelectronics PLC.
* Based on wm8974 and wm8990 by Liam Girdwood <lrg@slimlogic.co.uk>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/pm.h>
#include <linux/i2c.h>
#include <linux/regmap.h>
#include <linux/slab.h>
#include <sound/core.h>
#include <sound/pcm.h>
#include <sound/pcm_params.h>
#include <sound/soc.h>
#include <sound/initval.h>
#include <sound/tlv.h>
#include <asm/div64.h>
#include "wm8978.h"
static const struct reg_default wm8978_reg_defaults[] = {
{ 1, 0x0000 },
{ 2, 0x0000 },
{ 3, 0x0000 },
{ 4, 0x0050 },
{ 5, 0x0000 },
{ 6, 0x0140 },
{ 7, 0x0000 },
{ 8, 0x0000 },
{ 9, 0x0000 },
{ 10, 0x0000 },
{ 11, 0x00ff },
{ 12, 0x00ff },
{ 13, 0x0000 },
{ 14, 0x0100 },
{ 15, 0x00ff },
{ 16, 0x00ff },
{ 17, 0x0000 },
{ 18, 0x012c },
{ 19, 0x002c },
{ 20, 0x002c },
{ 21, 0x002c },
{ 22, 0x002c },
{ 23, 0x0000 },
{ 24, 0x0032 },
{ 25, 0x0000 },
{ 26, 0x0000 },
{ 27, 0x0000 },
{ 28, 0x0000 },
{ 29, 0x0000 },
{ 30, 0x0000 },
{ 31, 0x0000 },
{ 32, 0x0038 },
{ 33, 0x000b },
{ 34, 0x0032 },
{ 35, 0x0000 },
{ 36, 0x0008 },
{ 37, 0x000c },
{ 38, 0x0093 },
{ 39, 0x00e9 },
{ 40, 0x0000 },
{ 41, 0x0000 },
{ 42, 0x0000 },
{ 43, 0x0000 },
{ 44, 0x0033 },
{ 45, 0x0010 },
{ 46, 0x0010 },
{ 47, 0x0100 },
{ 48, 0x0100 },
{ 49, 0x0002 },
{ 50, 0x0001 },
{ 51, 0x0001 },
{ 52, 0x0039 },
{ 53, 0x0039 },
{ 54, 0x0039 },
{ 55, 0x0039 },
{ 56, 0x0001 },
{ 57, 0x0001 },
};
static bool wm8978_volatile(struct device *dev, unsigned int reg)
{
return reg == WM8978_RESET;
}
/* codec private data */
struct wm8978_priv {
struct regmap *regmap;
unsigned int f_pllout;
unsigned int f_mclk;
unsigned int f_256fs;
unsigned int f_opclk;
int mclk_idx;
enum wm8978_sysclk_src sysclk;
};
static const char *wm8978_companding[] = {"Off", "NC", "u-law", "A-law"};
static const char *wm8978_eqmode[] = {"Capture", "Playback"};
static const char *wm8978_bw[] = {"Narrow", "Wide"};
static const char *wm8978_eq1[] = {"80Hz", "105Hz", "135Hz", "175Hz"};
static const char *wm8978_eq2[] = {"230Hz", "300Hz", "385Hz", "500Hz"};
static const char *wm8978_eq3[] = {"650Hz", "850Hz", "1.1kHz", "1.4kHz"};
static const char *wm8978_eq4[] = {"1.8kHz", "2.4kHz", "3.2kHz", "4.1kHz"};
static const char *wm8978_eq5[] = {"5.3kHz", "6.9kHz", "9kHz", "11.7kHz"};
static const char *wm8978_alc3[] = {"ALC", "Limiter"};
static const char *wm8978_alc1[] = {"Off", "Right", "Left", "Both"};
static SOC_ENUM_SINGLE_DECL(adc_compand, WM8978_COMPANDING_CONTROL, 1,
wm8978_companding);
static SOC_ENUM_SINGLE_DECL(dac_compand, WM8978_COMPANDING_CONTROL, 3,
wm8978_companding);
static SOC_ENUM_SINGLE_DECL(eqmode, WM8978_EQ1, 8, wm8978_eqmode);
static SOC_ENUM_SINGLE_DECL(eq1, WM8978_EQ1, 5, wm8978_eq1);
static SOC_ENUM_SINGLE_DECL(eq2bw, WM8978_EQ2, 8, wm8978_bw);
static SOC_ENUM_SINGLE_DECL(eq2, WM8978_EQ2, 5, wm8978_eq2);
static SOC_ENUM_SINGLE_DECL(eq3bw, WM8978_EQ3, 8, wm8978_bw);
static SOC_ENUM_SINGLE_DECL(eq3, WM8978_EQ3, 5, wm8978_eq3);
static SOC_ENUM_SINGLE_DECL(eq4bw, WM8978_EQ4, 8, wm8978_bw);
static SOC_ENUM_SINGLE_DECL(eq4, WM8978_EQ4, 5, wm8978_eq4);
static SOC_ENUM_SINGLE_DECL(eq5, WM8978_EQ5, 5, wm8978_eq5);
static SOC_ENUM_SINGLE_DECL(alc3, WM8978_ALC_CONTROL_3, 8, wm8978_alc3);
static SOC_ENUM_SINGLE_DECL(alc1, WM8978_ALC_CONTROL_1, 7, wm8978_alc1);
static const DECLARE_TLV_DB_SCALE(digital_tlv, -12750, 50, 1);
static const DECLARE_TLV_DB_SCALE(eq_tlv, -1200, 100, 0);
static const DECLARE_TLV_DB_SCALE(inpga_tlv, -1200, 75, 0);
static const DECLARE_TLV_DB_SCALE(spk_tlv, -5700, 100, 0);
static const DECLARE_TLV_DB_SCALE(boost_tlv, -1500, 300, 1);
static const DECLARE_TLV_DB_SCALE(limiter_tlv, 0, 100, 0);
static const struct snd_kcontrol_new wm8978_snd_controls[] = {
SOC_SINGLE("Digital Loopback Switch",
WM8978_COMPANDING_CONTROL, 0, 1, 0),
SOC_ENUM("ADC Companding", adc_compand),
SOC_ENUM("DAC Companding", dac_compand),
SOC_DOUBLE("DAC Inversion Switch", WM8978_DAC_CONTROL, 0, 1, 1, 0),
SOC_DOUBLE_R_TLV("PCM Volume",
WM8978_LEFT_DAC_DIGITAL_VOLUME, WM8978_RIGHT_DAC_DIGITAL_VOLUME,
0, 255, 0, digital_tlv),
SOC_SINGLE("High Pass Filter Switch", WM8978_ADC_CONTROL, 8, 1, 0),
SOC_SINGLE("High Pass Cut Off", WM8978_ADC_CONTROL, 4, 7, 0),
SOC_DOUBLE("ADC Inversion Switch", WM8978_ADC_CONTROL, 0, 1, 1, 0),
SOC_DOUBLE_R_TLV("ADC Volume",
WM8978_LEFT_ADC_DIGITAL_VOLUME, WM8978_RIGHT_ADC_DIGITAL_VOLUME,
0, 255, 0, digital_tlv),
SOC_ENUM("Equaliser Function", eqmode),
SOC_ENUM("EQ1 Cut Off", eq1),
SOC_SINGLE_TLV("EQ1 Volume", WM8978_EQ1, 0, 24, 1, eq_tlv),
SOC_ENUM("Equaliser EQ2 Bandwidth", eq2bw),
SOC_ENUM("EQ2 Cut Off", eq2),
SOC_SINGLE_TLV("EQ2 Volume", WM8978_EQ2, 0, 24, 1, eq_tlv),
SOC_ENUM("Equaliser EQ3 Bandwidth", eq3bw),
SOC_ENUM("EQ3 Cut Off", eq3),
SOC_SINGLE_TLV("EQ3 Volume", WM8978_EQ3, 0, 24, 1, eq_tlv),
SOC_ENUM("Equaliser EQ4 Bandwidth", eq4bw),
SOC_ENUM("EQ4 Cut Off", eq4),
SOC_SINGLE_TLV("EQ4 Volume", WM8978_EQ4, 0, 24, 1, eq_tlv),
SOC_ENUM("EQ5 Cut Off", eq5),
SOC_SINGLE_TLV("EQ5 Volume", WM8978_EQ5, 0, 24, 1, eq_tlv),
SOC_SINGLE("DAC Playback Limiter Switch",
WM8978_DAC_LIMITER_1, 8, 1, 0),
SOC_SINGLE("DAC Playback Limiter Decay",
WM8978_DAC_LIMITER_1, 4, 15, 0),
SOC_SINGLE("DAC Playback Limiter Attack",
WM8978_DAC_LIMITER_1, 0, 15, 0),
SOC_SINGLE("DAC Playback Limiter Threshold",
WM8978_DAC_LIMITER_2, 4, 7, 0),
SOC_SINGLE_TLV("DAC Playback Limiter Volume",
WM8978_DAC_LIMITER_2, 0, 12, 0, limiter_tlv),
SOC_ENUM("ALC Enable Switch", alc1),
SOC_SINGLE("ALC Capture Min Gain", WM8978_ALC_CONTROL_1, 0, 7, 0),
SOC_SINGLE("ALC Capture Max Gain", WM8978_ALC_CONTROL_1, 3, 7, 0),
SOC_SINGLE("ALC Capture Hold", WM8978_ALC_CONTROL_2, 4, 10, 0),
SOC_SINGLE("ALC Capture Target", WM8978_ALC_CONTROL_2, 0, 15, 0),
SOC_ENUM("ALC Capture Mode", alc3),
SOC_SINGLE("ALC Capture Decay", WM8978_ALC_CONTROL_3, 4, 10, 0),
SOC_SINGLE("ALC Capture Attack", WM8978_ALC_CONTROL_3, 0, 10, 0),
SOC_SINGLE("ALC Capture Noise Gate Switch", WM8978_NOISE_GATE, 3, 1, 0),
SOC_SINGLE("ALC Capture Noise Gate Threshold",
WM8978_NOISE_GATE, 0, 7, 0),
SOC_DOUBLE_R("Capture PGA ZC Switch",
WM8978_LEFT_INP_PGA_CONTROL, WM8978_RIGHT_INP_PGA_CONTROL,
7, 1, 0),
/* OUT1 - Headphones */
SOC_DOUBLE_R("Headphone Playback ZC Switch",
WM8978_LOUT1_HP_CONTROL, WM8978_ROUT1_HP_CONTROL, 7, 1, 0),
SOC_DOUBLE_R_TLV("Headphone Playback Volume",
WM8978_LOUT1_HP_CONTROL, WM8978_ROUT1_HP_CONTROL,
0, 63, 0, spk_tlv),
/* OUT2 - Speakers */
SOC_DOUBLE_R("Speaker Playback ZC Switch",
WM8978_LOUT2_SPK_CONTROL, WM8978_ROUT2_SPK_CONTROL, 7, 1, 0),
SOC_DOUBLE_R_TLV("Speaker Playback Volume",
WM8978_LOUT2_SPK_CONTROL, WM8978_ROUT2_SPK_CONTROL,
0, 63, 0, spk_tlv),
/* OUT3/4 - Line Output */
SOC_DOUBLE_R("Line Playback Switch",
WM8978_OUT3_MIXER_CONTROL, WM8978_OUT4_MIXER_CONTROL, 6, 1, 1),
/* Mixer #3: Boost (Input) mixer */
SOC_DOUBLE_R("PGA Boost (+20dB)",
WM8978_LEFT_ADC_BOOST_CONTROL, WM8978_RIGHT_ADC_BOOST_CONTROL,
8, 1, 0),
SOC_DOUBLE_R_TLV("L2/R2 Boost Volume",
WM8978_LEFT_ADC_BOOST_CONTROL, WM8978_RIGHT_ADC_BOOST_CONTROL,
4, 7, 0, boost_tlv),
SOC_DOUBLE_R_TLV("Aux Boost Volume",
WM8978_LEFT_ADC_BOOST_CONTROL, WM8978_RIGHT_ADC_BOOST_CONTROL,
0, 7, 0, boost_tlv),
/* Input PGA volume */
SOC_DOUBLE_R_TLV("Input PGA Volume",
WM8978_LEFT_INP_PGA_CONTROL, WM8978_RIGHT_INP_PGA_CONTROL,
0, 63, 0, inpga_tlv),
/* Headphone */
SOC_DOUBLE_R("Headphone Switch",
WM8978_LOUT1_HP_CONTROL, WM8978_ROUT1_HP_CONTROL, 6, 1, 1),
/* Speaker */
SOC_DOUBLE_R("Speaker Switch",
WM8978_LOUT2_SPK_CONTROL, WM8978_ROUT2_SPK_CONTROL, 6, 1, 1),
/* DAC / ADC oversampling */
SOC_SINGLE("DAC 128x Oversampling Switch", WM8978_DAC_CONTROL,
5, 1, 0),
SOC_SINGLE("ADC 128x Oversampling Switch", WM8978_ADC_CONTROL,
5, 1, 0),
};
/* Mixer #1: Output (OUT1, OUT2) Mixer: mix AUX, Input mixer output and DAC */
static const struct snd_kcontrol_new wm8978_left_out_mixer[] = {
SOC_DAPM_SINGLE("Line Bypass Switch", WM8978_LEFT_MIXER_CONTROL, 1, 1, 0),
SOC_DAPM_SINGLE("Aux Playback Switch", WM8978_LEFT_MIXER_CONTROL, 5, 1, 0),
SOC_DAPM_SINGLE("PCM Playback Switch", WM8978_LEFT_MIXER_CONTROL, 0, 1, 0),
};
static const struct snd_kcontrol_new wm8978_right_out_mixer[] = {
SOC_DAPM_SINGLE("Line Bypass Switch", WM8978_RIGHT_MIXER_CONTROL, 1, 1, 0),
SOC_DAPM_SINGLE("Aux Playback Switch", WM8978_RIGHT_MIXER_CONTROL, 5, 1, 0),
SOC_DAPM_SINGLE("PCM Playback Switch", WM8978_RIGHT_MIXER_CONTROL, 0, 1, 0),
};
/* OUT3/OUT4 Mixer not implemented */
/* Mixer #2: Input PGA Mute */
static const struct snd_kcontrol_new wm8978_left_input_mixer[] = {
SOC_DAPM_SINGLE("L2 Switch", WM8978_INPUT_CONTROL, 2, 1, 0),
SOC_DAPM_SINGLE("MicN Switch", WM8978_INPUT_CONTROL, 1, 1, 0),
SOC_DAPM_SINGLE("MicP Switch", WM8978_INPUT_CONTROL, 0, 1, 0),
};
static const struct snd_kcontrol_new wm8978_right_input_mixer[] = {
SOC_DAPM_SINGLE("R2 Switch", WM8978_INPUT_CONTROL, 6, 1, 0),
SOC_DAPM_SINGLE("MicN Switch", WM8978_INPUT_CONTROL, 5, 1, 0),
SOC_DAPM_SINGLE("MicP Switch", WM8978_INPUT_CONTROL, 4, 1, 0),
};
static const struct snd_soc_dapm_widget wm8978_dapm_widgets[] = {
SND_SOC_DAPM_DAC("Left DAC", "Left HiFi Playback",
WM8978_POWER_MANAGEMENT_3, 0, 0),
SND_SOC_DAPM_DAC("Right DAC", "Right HiFi Playback",
WM8978_POWER_MANAGEMENT_3, 1, 0),
SND_SOC_DAPM_ADC("Left ADC", "Left HiFi Capture",
WM8978_POWER_MANAGEMENT_2, 0, 0),
SND_SOC_DAPM_ADC("Right ADC", "Right HiFi Capture",
WM8978_POWER_MANAGEMENT_2, 1, 0),
/* Mixer #1: OUT1,2 */
SOC_MIXER_ARRAY("Left Output Mixer", WM8978_POWER_MANAGEMENT_3,
2, 0, wm8978_left_out_mixer),
SOC_MIXER_ARRAY("Right Output Mixer", WM8978_POWER_MANAGEMENT_3,
3, 0, wm8978_right_out_mixer),
SOC_MIXER_ARRAY("Left Input Mixer", WM8978_POWER_MANAGEMENT_2,
2, 0, wm8978_left_input_mixer),
SOC_MIXER_ARRAY("Right Input Mixer", WM8978_POWER_MANAGEMENT_2,
3, 0, wm8978_right_input_mixer),
SND_SOC_DAPM_PGA("Left Boost Mixer", WM8978_POWER_MANAGEMENT_2,
4, 0, NULL, 0),
SND_SOC_DAPM_PGA("Right Boost Mixer", WM8978_POWER_MANAGEMENT_2,
5, 0, NULL, 0),
SND_SOC_DAPM_PGA("Left Capture PGA", WM8978_LEFT_INP_PGA_CONTROL,
6, 1, NULL, 0),
SND_SOC_DAPM_PGA("Right Capture PGA", WM8978_RIGHT_INP_PGA_CONTROL,
6, 1, NULL, 0),
SND_SOC_DAPM_PGA("Left Headphone Out", WM8978_POWER_MANAGEMENT_2,
7, 0, NULL, 0),
SND_SOC_DAPM_PGA("Right Headphone Out", WM8978_POWER_MANAGEMENT_2,
8, 0, NULL, 0),
SND_SOC_DAPM_PGA("Left Speaker Out", WM8978_POWER_MANAGEMENT_3,
6, 0, NULL, 0),
SND_SOC_DAPM_PGA("Right Speaker Out", WM8978_POWER_MANAGEMENT_3,
5, 0, NULL, 0),
SND_SOC_DAPM_MIXER("OUT4 VMID", WM8978_POWER_MANAGEMENT_3,
8, 0, NULL, 0),
SND_SOC_DAPM_MICBIAS("Mic Bias", WM8978_POWER_MANAGEMENT_1, 4, 0),
SND_SOC_DAPM_INPUT("LMICN"),
SND_SOC_DAPM_INPUT("LMICP"),
SND_SOC_DAPM_INPUT("RMICN"),
SND_SOC_DAPM_INPUT("RMICP"),
SND_SOC_DAPM_INPUT("LAUX"),
SND_SOC_DAPM_INPUT("RAUX"),
SND_SOC_DAPM_INPUT("L2"),
SND_SOC_DAPM_INPUT("R2"),
SND_SOC_DAPM_OUTPUT("LHP"),
SND_SOC_DAPM_OUTPUT("RHP"),
SND_SOC_DAPM_OUTPUT("LSPK"),
SND_SOC_DAPM_OUTPUT("RSPK"),
};
static const struct snd_soc_dapm_route wm8978_dapm_routes[] = {
/* Output mixer */
{"Right Output Mixer", "PCM Playback Switch", "Right DAC"},
{"Right Output Mixer", "Aux Playback Switch", "RAUX"},
{"Right Output Mixer", "Line Bypass Switch", "Right Boost Mixer"},
{"Left Output Mixer", "PCM Playback Switch", "Left DAC"},
{"Left Output Mixer", "Aux Playback Switch", "LAUX"},
{"Left Output Mixer", "Line Bypass Switch", "Left Boost Mixer"},
/* Outputs */
{"Right Headphone Out", NULL, "Right Output Mixer"},
{"RHP", NULL, "Right Headphone Out"},
{"Left Headphone Out", NULL, "Left Output Mixer"},
{"LHP", NULL, "Left Headphone Out"},
{"Right Speaker Out", NULL, "Right Output Mixer"},
{"RSPK", NULL, "Right Speaker Out"},
{"Left Speaker Out", NULL, "Left Output Mixer"},
{"LSPK", NULL, "Left Speaker Out"},
/* Boost Mixer */
{"Right ADC", NULL, "Right Boost Mixer"},
{"Right Boost Mixer", NULL, "RAUX"},
{"Right Boost Mixer", NULL, "Right Capture PGA"},
{"Right Boost Mixer", NULL, "R2"},
{"Left ADC", NULL, "Left Boost Mixer"},
{"Left Boost Mixer", NULL, "LAUX"},
{"Left Boost Mixer", NULL, "Left Capture PGA"},
{"Left Boost Mixer", NULL, "L2"},
/* Input PGA */
{"Right Capture PGA", NULL, "Right Input Mixer"},
{"Left Capture PGA", NULL, "Left Input Mixer"},
{"Right Input Mixer", "R2 Switch", "R2"},
{"Right Input Mixer", "MicN Switch", "RMICN"},
{"Right Input Mixer", "MicP Switch", "RMICP"},
{"Left Input Mixer", "L2 Switch", "L2"},
{"Left Input Mixer", "MicN Switch", "LMICN"},
{"Left Input Mixer", "MicP Switch", "LMICP"},
};
/* PLL divisors */
struct wm8978_pll_div {
u32 k;
u8 n;
u8 div2;
};
#define FIXED_PLL_SIZE (1 << 24)
static void pll_factors(struct snd_soc_codec *codec,
struct wm8978_pll_div *pll_div, unsigned int target, unsigned int source)
{
u64 k_part;
unsigned int k, n_div, n_mod;
n_div = target / source;
if (n_div < 6) {
source >>= 1;
pll_div->div2 = 1;
n_div = target / source;
} else {
pll_div->div2 = 0;
}
if (n_div < 6 || n_div > 12)
dev_warn(codec->dev,
"WM8978 N value exceeds recommended range! N = %u\n",
n_div);
pll_div->n = n_div;
n_mod = target - source * n_div;
k_part = FIXED_PLL_SIZE * (long long)n_mod + source / 2;
do_div(k_part, source);
k = k_part & 0xFFFFFFFF;
pll_div->k = k;
}
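/*
 * Worked example (illustrative, derived from the formula above rather than
 * copied from the datasheet): with f_mclk = 12 MHz and fs = 48 kHz the codec
 * needs f_256fs = 12.288 MHz, so f_PLLOUT = 12.288 MHz and the PLL target is
 * f2 = 4 * f_PLLOUT = 49.152 MHz.  Since 49.152 / 12 < 6 the reference is
 * prescaled to 6 MHz (div2 = 1), giving N = 8 and
 * K = (49.152 - 48) / 6 * 2^24, i.e. approximately 0x3126e9.
 */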
/* MCLK dividers */
static const int mclk_numerator[] = {1, 3, 2, 3, 4, 6, 8, 12};
static const int mclk_denominator[] = {1, 2, 1, 1, 1, 1, 1, 1};
/*
 * find an MCLK-divider index, such that, for a given f_out,
* 3 * f_mclk / 4 <= f_PLLOUT < 13 * f_mclk / 4
* f_out can be f_256fs or f_opclk, currently only used for f_256fs. Can be
* generalised for f_opclk with suitable coefficient arrays, but currently
* the OPCLK divisor is calculated directly, not iteratively.
*/
static int wm8978_enum_mclk(unsigned int f_out, unsigned int f_mclk,
unsigned int *f_pllout)
{
int i;
for (i = 0; i < ARRAY_SIZE(mclk_numerator); i++) {
unsigned int f_pllout_x4 = 4 * f_out * mclk_numerator[i] /
mclk_denominator[i];
if (3 * f_mclk <= f_pllout_x4 && f_pllout_x4 < 13 * f_mclk) {
*f_pllout = f_pllout_x4 / 4;
return i;
}
}
return -EINVAL;
}
/*
* Calculate internal frequencies and dividers, according to Figure 40
* "PLL and Clock Select Circuit" in WM8978 datasheet Rev. 2.6
*/
static int wm8978_configure_pll(struct snd_soc_codec *codec)
{
struct wm8978_priv *wm8978 = snd_soc_codec_get_drvdata(codec);
struct wm8978_pll_div pll_div;
unsigned int f_opclk = wm8978->f_opclk, f_mclk = wm8978->f_mclk,
f_256fs = wm8978->f_256fs;
unsigned int f2;
if (!f_mclk)
return -EINVAL;
if (f_opclk) {
unsigned int opclk_div;
/* Cannot set up MCLK divider now, do later */
wm8978->mclk_idx = -1;
/*
* The user needs OPCLK. Choose OPCLKDIV to put
* 6 <= R = f2 / f1 < 13, 1 <= OPCLKDIV <= 4.
* f_opclk = f_mclk * prescale * R / 4 / OPCLKDIV, where
* prescale = 1, or prescale = 2. Prescale is calculated inside
* pll_factors(). We have to select f_PLLOUT, such that
* f_mclk * 3 / 4 <= f_PLLOUT < f_mclk * 13 / 4. Must be
* f_mclk * 3 / 16 <= f_opclk < f_mclk * 13 / 4.
*/
if (16 * f_opclk < 3 * f_mclk || 4 * f_opclk >= 13 * f_mclk)
return -EINVAL;
if (4 * f_opclk < 3 * f_mclk)
/* Have to use OPCLKDIV */
opclk_div = (3 * f_mclk / 4 + f_opclk - 1) / f_opclk;
else
opclk_div = 1;
dev_dbg(codec->dev, "%s: OPCLKDIV=%d\n", __func__, opclk_div);
snd_soc_update_bits(codec, WM8978_GPIO_CONTROL, 0x30,
(opclk_div - 1) << 4);
wm8978->f_pllout = f_opclk * opclk_div;
} else if (f_256fs) {
/*
* Not using OPCLK, but PLL is used for the codec, choose R:
* 6 <= R = f2 / f1 < 13, to put 1 <= MCLKDIV <= 12.
* f_256fs = f_mclk * prescale * R / 4 / MCLKDIV, where
* prescale = 1, or prescale = 2. Prescale is calculated inside
* pll_factors(). We have to select f_PLLOUT, such that
* f_mclk * 3 / 4 <= f_PLLOUT < f_mclk * 13 / 4. Must be
* f_mclk * 3 / 48 <= f_256fs < f_mclk * 13 / 4. This means MCLK
* must be 3.781MHz <= f_MCLK <= 32.768MHz
*/
int idx = wm8978_enum_mclk(f_256fs, f_mclk, &wm8978->f_pllout);
if (idx < 0)
return idx;
wm8978->mclk_idx = idx;
} else {
return -EINVAL;
}
f2 = wm8978->f_pllout * 4;
dev_dbg(codec->dev, "%s: f_MCLK=%uHz, f_PLLOUT=%uHz\n", __func__,
wm8978->f_mclk, wm8978->f_pllout);
pll_factors(codec, &pll_div, f2, wm8978->f_mclk);
dev_dbg(codec->dev, "%s: calculated PLL N=0x%x, K=0x%x, div2=%d\n",
__func__, pll_div.n, pll_div.k, pll_div.div2);
/* Turn PLL off for configuration... */
snd_soc_update_bits(codec, WM8978_POWER_MANAGEMENT_1, 0x20, 0);
snd_soc_write(codec, WM8978_PLL_N, (pll_div.div2 << 4) | pll_div.n);
snd_soc_write(codec, WM8978_PLL_K1, pll_div.k >> 18);
snd_soc_write(codec, WM8978_PLL_K2, (pll_div.k >> 9) & 0x1ff);
snd_soc_write(codec, WM8978_PLL_K3, pll_div.k & 0x1ff);
/* ...and on again */
snd_soc_update_bits(codec, WM8978_POWER_MANAGEMENT_1, 0x20, 0x20);
if (f_opclk)
/* Output PLL (OPCLK) to GPIO1 */
snd_soc_update_bits(codec, WM8978_GPIO_CONTROL, 7, 4);
return 0;
}
/*
* Configure WM8978 clock dividers.
*/
static int wm8978_set_dai_clkdiv(struct snd_soc_dai *codec_dai,
int div_id, int div)
{
struct snd_soc_codec *codec = codec_dai->codec;
struct wm8978_priv *wm8978 = snd_soc_codec_get_drvdata(codec);
int ret = 0;
switch (div_id) {
case WM8978_OPCLKRATE:
wm8978->f_opclk = div;
if (wm8978->f_mclk)
/*
* We know the MCLK frequency, the user has requested
* OPCLK, configure the PLL based on that and start it
* and OPCLK immediately. We will configure PLL to match
			 * user-requested OPCLK frequency as closely as possible.
* In fact, it is likely, that matching the sampling
* rate, when it becomes known, is more important, and
* we will not be reconfiguring PLL then, because we
* must not interrupt OPCLK. But it should be fine,
* because typically the user will request OPCLK to run
* at 256fs or 512fs, and for these cases we will also
* find an exact MCLK divider configuration - it will
* be equal to or double the OPCLK divisor.
*/
ret = wm8978_configure_pll(codec);
break;
case WM8978_BCLKDIV:
if (div & ~0x1c)
return -EINVAL;
snd_soc_update_bits(codec, WM8978_CLOCKING, 0x1c, div);
break;
default:
return -EINVAL;
}
dev_dbg(codec->dev, "%s: ID %d, value %u\n", __func__, div_id, div);
return ret;
}
/*
 * @freq: when .set_pll() is not used, freq is codec MCLK input frequency
*/
static int wm8978_set_dai_sysclk(struct snd_soc_dai *codec_dai, int clk_id,
unsigned int freq, int dir)
{
struct snd_soc_codec *codec = codec_dai->codec;
struct wm8978_priv *wm8978 = snd_soc_codec_get_drvdata(codec);
int ret = 0;
dev_dbg(codec->dev, "%s: ID %d, freq %u\n", __func__, clk_id, freq);
if (freq) {
wm8978->f_mclk = freq;
/* Even if MCLK is used for system clock, might have to drive OPCLK */
if (wm8978->f_opclk)
ret = wm8978_configure_pll(codec);
/* Our sysclk is fixed to 256 * fs, will configure in .hw_params() */
if (!ret)
wm8978->sysclk = clk_id;
}
if (wm8978->sysclk == WM8978_PLL && (!freq || clk_id == WM8978_MCLK)) {
/* Clock CODEC directly from MCLK */
snd_soc_update_bits(codec, WM8978_CLOCKING, 0x100, 0);
/* GPIO1 into default mode as input - before configuring PLL */
snd_soc_update_bits(codec, WM8978_GPIO_CONTROL, 7, 0);
/* Turn off PLL */
snd_soc_update_bits(codec, WM8978_POWER_MANAGEMENT_1, 0x20, 0);
wm8978->sysclk = WM8978_MCLK;
wm8978->f_pllout = 0;
wm8978->f_opclk = 0;
}
return ret;
}
/*
* Set ADC and Voice DAC format.
*/
static int wm8978_set_dai_fmt(struct snd_soc_dai *codec_dai, unsigned int fmt)
{
struct snd_soc_codec *codec = codec_dai->codec;
/*
* BCLK polarity mask = 0x100, LRC clock polarity mask = 0x80,
* Data Format mask = 0x18: all will be calculated anew
*/
u16 iface = snd_soc_read(codec, WM8978_AUDIO_INTERFACE) & ~0x198;
u16 clk = snd_soc_read(codec, WM8978_CLOCKING);
dev_dbg(codec->dev, "%s\n", __func__);
/* set master/slave audio interface */
switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
case SND_SOC_DAIFMT_CBM_CFM:
clk |= 1;
break;
case SND_SOC_DAIFMT_CBS_CFS:
clk &= ~1;
break;
default:
return -EINVAL;
}
/* interface format */
switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
case SND_SOC_DAIFMT_I2S:
iface |= 0x10;
break;
case SND_SOC_DAIFMT_RIGHT_J:
break;
case SND_SOC_DAIFMT_LEFT_J:
iface |= 0x8;
break;
case SND_SOC_DAIFMT_DSP_A:
iface |= 0x18;
break;
default:
return -EINVAL;
}
/* clock inversion */
switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
case SND_SOC_DAIFMT_NB_NF:
break;
case SND_SOC_DAIFMT_IB_IF:
iface |= 0x180;
break;
case SND_SOC_DAIFMT_IB_NF:
iface |= 0x100;
break;
case SND_SOC_DAIFMT_NB_IF:
iface |= 0x80;
break;
default:
return -EINVAL;
}
snd_soc_write(codec, WM8978_AUDIO_INTERFACE, iface);
snd_soc_write(codec, WM8978_CLOCKING, clk);
return 0;
}
/*
* Set PCM DAI bit size and sample rate.
*/
static int wm8978_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params,
struct snd_soc_dai *dai)
{
struct snd_soc_codec *codec = dai->codec;
struct wm8978_priv *wm8978 = snd_soc_codec_get_drvdata(codec);
/* Word length mask = 0x60 */
u16 iface_ctl = snd_soc_read(codec, WM8978_AUDIO_INTERFACE) & ~0x60;
/* Sampling rate mask = 0xe (for filters) */
u16 add_ctl = snd_soc_read(codec, WM8978_ADDITIONAL_CONTROL) & ~0xe;
u16 clking = snd_soc_read(codec, WM8978_CLOCKING);
enum wm8978_sysclk_src current_clk_id = clking & 0x100 ?
WM8978_PLL : WM8978_MCLK;
unsigned int f_sel, diff, diff_best = INT_MAX;
int i, best = 0;
if (!wm8978->f_mclk)
return -EINVAL;
/* bit size */
switch (params_width(params)) {
case 16:
break;
case 20:
iface_ctl |= 0x20;
break;
case 24:
iface_ctl |= 0x40;
break;
case 32:
iface_ctl |= 0x60;
break;
}
/* filter coefficient */
switch (params_rate(params)) {
case 8000:
add_ctl |= 0x5 << 1;
break;
case 11025:
add_ctl |= 0x4 << 1;
break;
case 16000:
add_ctl |= 0x3 << 1;
break;
case 22050:
add_ctl |= 0x2 << 1;
break;
case 32000:
add_ctl |= 0x1 << 1;
break;
case 44100:
case 48000:
break;
}
/* Sampling rate is known now, can configure the MCLK divider */
wm8978->f_256fs = params_rate(params) * 256;
if (wm8978->sysclk == WM8978_MCLK) {
wm8978->mclk_idx = -1;
f_sel = wm8978->f_mclk;
} else {
if (!wm8978->f_opclk) {
/* We only enter here, if OPCLK is not used */
int ret = wm8978_configure_pll(codec);
if (ret < 0)
return ret;
}
f_sel = wm8978->f_pllout;
}
if (wm8978->mclk_idx < 0) {
/* Either MCLK is used directly, or OPCLK is used */
if (f_sel < wm8978->f_256fs || f_sel > 12 * wm8978->f_256fs)
return -EINVAL;
for (i = 0; i < ARRAY_SIZE(mclk_numerator); i++) {
diff = abs(wm8978->f_256fs * 3 -
f_sel * 3 * mclk_denominator[i] / mclk_numerator[i]);
if (diff < diff_best) {
diff_best = diff;
best = i;
}
if (!diff)
break;
}
} else {
/* OPCLK not used, codec driven by PLL */
best = wm8978->mclk_idx;
diff = 0;
}
if (diff)
dev_warn(codec->dev, "Imprecise sampling rate: %uHz%s\n",
f_sel * mclk_denominator[best] / mclk_numerator[best] / 256,
wm8978->sysclk == WM8978_MCLK ?
", consider using PLL" : "");
dev_dbg(codec->dev, "%s: width %d, rate %u, MCLK divisor #%d\n", __func__,
params_width(params), params_rate(params), best);
/* MCLK divisor mask = 0xe0 */
snd_soc_update_bits(codec, WM8978_CLOCKING, 0xe0, best << 5);
snd_soc_write(codec, WM8978_AUDIO_INTERFACE, iface_ctl);
snd_soc_write(codec, WM8978_ADDITIONAL_CONTROL, add_ctl);
if (wm8978->sysclk != current_clk_id) {
if (wm8978->sysclk == WM8978_PLL)
/* Run CODEC from PLL instead of MCLK */
snd_soc_update_bits(codec, WM8978_CLOCKING,
0x100, 0x100);
else
/* Clock CODEC directly from MCLK */
snd_soc_update_bits(codec, WM8978_CLOCKING, 0x100, 0);
}
return 0;
}
static int wm8978_mute(struct snd_soc_dai *dai, int mute)
{
struct snd_soc_codec *codec = dai->codec;
dev_dbg(codec->dev, "%s: %d\n", __func__, mute);
if (mute)
snd_soc_update_bits(codec, WM8978_DAC_CONTROL, 0x40, 0x40);
else
snd_soc_update_bits(codec, WM8978_DAC_CONTROL, 0x40, 0);
return 0;
}
static int wm8978_set_bias_level(struct snd_soc_codec *codec,
enum snd_soc_bias_level level)
{
u16 power1 = snd_soc_read(codec, WM8978_POWER_MANAGEMENT_1) & ~3;
switch (level) {
case SND_SOC_BIAS_ON:
case SND_SOC_BIAS_PREPARE:
power1 |= 1; /* VMID 75k */
snd_soc_write(codec, WM8978_POWER_MANAGEMENT_1, power1);
break;
case SND_SOC_BIAS_STANDBY:
/* bit 3: enable bias, bit 2: enable I/O tie off buffer */
power1 |= 0xc;
if (codec->dapm.bias_level == SND_SOC_BIAS_OFF) {
/* Initial cap charge at VMID 5k */
snd_soc_write(codec, WM8978_POWER_MANAGEMENT_1,
power1 | 0x3);
mdelay(100);
}
power1 |= 0x2; /* VMID 500k */
snd_soc_write(codec, WM8978_POWER_MANAGEMENT_1, power1);
break;
case SND_SOC_BIAS_OFF:
/* Preserve PLL - OPCLK may be used by someone */
snd_soc_update_bits(codec, WM8978_POWER_MANAGEMENT_1, ~0x20, 0);
snd_soc_write(codec, WM8978_POWER_MANAGEMENT_2, 0);
snd_soc_write(codec, WM8978_POWER_MANAGEMENT_3, 0);
break;
}
dev_dbg(codec->dev, "%s: %d, %x\n", __func__, level, power1);
codec->dapm.bias_level = level;
return 0;
}
#define WM8978_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S20_3LE | \
SNDRV_PCM_FMTBIT_S24_LE | SNDRV_PCM_FMTBIT_S32_LE)
static const struct snd_soc_dai_ops wm8978_dai_ops = {
.hw_params = wm8978_hw_params,
.digital_mute = wm8978_mute,
.set_fmt = wm8978_set_dai_fmt,
.set_clkdiv = wm8978_set_dai_clkdiv,
.set_sysclk = wm8978_set_dai_sysclk,
};
/* Also supports 12kHz */
static struct snd_soc_dai_driver wm8978_dai = {
.name = "wm8978-hifi",
.playback = {
.stream_name = "Playback",
.channels_min = 1,
.channels_max = 2,
.rates = SNDRV_PCM_RATE_8000_48000,
.formats = WM8978_FORMATS,
},
.capture = {
.stream_name = "Capture",
.channels_min = 1,
.channels_max = 2,
.rates = SNDRV_PCM_RATE_8000_48000,
.formats = WM8978_FORMATS,
},
.ops = &wm8978_dai_ops,
.symmetric_rates = 1,
};
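#if 0
/*
 * Illustrative machine-driver hook (disabled, not part of this codec
 * driver): a typical hw_params callback selects the DAI format and the
 * codec system clock as below.  The 12 MHz MCLK value and the choice of
 * the PLL as sysclk source are board-specific assumptions.
 */
static int example_board_hw_params(struct snd_pcm_substream *substream,
				   struct snd_pcm_hw_params *params)
{
	struct snd_soc_pcm_runtime *rtd = substream->private_data;
	struct snd_soc_dai *codec_dai = rtd->codec_dai;
	int ret;

	ret = snd_soc_dai_set_fmt(codec_dai, SND_SOC_DAIFMT_I2S |
				  SND_SOC_DAIFMT_NB_NF |
				  SND_SOC_DAIFMT_CBM_CFM);
	if (ret < 0)
		return ret;

	/* Codec derives 256*fs from the PLL, fed by a 12 MHz MCLK input */
	return snd_soc_dai_set_sysclk(codec_dai, WM8978_PLL, 12000000,
				      SND_SOC_CLOCK_IN);
}
#endif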
static int wm8978_suspend(struct snd_soc_codec *codec)
{
struct wm8978_priv *wm8978 = snd_soc_codec_get_drvdata(codec);
wm8978_set_bias_level(codec, SND_SOC_BIAS_OFF);
/* Also switch PLL off */
snd_soc_write(codec, WM8978_POWER_MANAGEMENT_1, 0);
regcache_mark_dirty(wm8978->regmap);
return 0;
}
static int wm8978_resume(struct snd_soc_codec *codec)
{
struct wm8978_priv *wm8978 = snd_soc_codec_get_drvdata(codec);
/* Sync reg_cache with the hardware */
regcache_sync(wm8978->regmap);
wm8978_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
if (wm8978->f_pllout)
/* Switch PLL on */
snd_soc_update_bits(codec, WM8978_POWER_MANAGEMENT_1, 0x20, 0x20);
return 0;
}
/*
* These registers contain an "update" bit - bit 8. This means, for example,
* that one can write new DAC digital volume for both channels, but only when
* the update bit is set, will also the volume be updated - simultaneously for
* both channels.
*/
static const int update_reg[] = {
WM8978_LEFT_DAC_DIGITAL_VOLUME,
WM8978_RIGHT_DAC_DIGITAL_VOLUME,
WM8978_LEFT_ADC_DIGITAL_VOLUME,
WM8978_RIGHT_ADC_DIGITAL_VOLUME,
WM8978_LEFT_INP_PGA_CONTROL,
WM8978_RIGHT_INP_PGA_CONTROL,
WM8978_LOUT1_HP_CONTROL,
WM8978_ROUT1_HP_CONTROL,
WM8978_LOUT2_SPK_CONTROL,
WM8978_ROUT2_SPK_CONTROL,
};
static int wm8978_probe(struct snd_soc_codec *codec)
{
struct wm8978_priv *wm8978 = snd_soc_codec_get_drvdata(codec);
int i;
/*
	 * Set the default system clock to PLL: it is more precise and is also
	 * the hardware default setting
*/
wm8978->sysclk = WM8978_PLL;
/*
	 * Set the update bit in all registers that have one. This way all
* writes to those registers will also cause the update bit to be
* written.
*/
for (i = 0; i < ARRAY_SIZE(update_reg); i++)
snd_soc_update_bits(codec, update_reg[i], 0x100, 0x100);
return 0;
}
static struct snd_soc_codec_driver soc_codec_dev_wm8978 = {
.probe = wm8978_probe,
.suspend = wm8978_suspend,
.resume = wm8978_resume,
.set_bias_level = wm8978_set_bias_level,
.controls = wm8978_snd_controls,
.num_controls = ARRAY_SIZE(wm8978_snd_controls),
.dapm_widgets = wm8978_dapm_widgets,
.num_dapm_widgets = ARRAY_SIZE(wm8978_dapm_widgets),
.dapm_routes = wm8978_dapm_routes,
.num_dapm_routes = ARRAY_SIZE(wm8978_dapm_routes),
};
static const struct regmap_config wm8978_regmap_config = {
.reg_bits = 7,
.val_bits = 9,
.max_register = WM8978_MAX_REGISTER,
.volatile_reg = wm8978_volatile,
.cache_type = REGCACHE_RBTREE,
.reg_defaults = wm8978_reg_defaults,
.num_reg_defaults = ARRAY_SIZE(wm8978_reg_defaults),
};
static int wm8978_i2c_probe(struct i2c_client *i2c,
const struct i2c_device_id *id)
{
struct wm8978_priv *wm8978;
int ret;
wm8978 = devm_kzalloc(&i2c->dev, sizeof(struct wm8978_priv),
GFP_KERNEL);
if (wm8978 == NULL)
return -ENOMEM;
wm8978->regmap = devm_regmap_init_i2c(i2c, &wm8978_regmap_config);
if (IS_ERR(wm8978->regmap)) {
ret = PTR_ERR(wm8978->regmap);
dev_err(&i2c->dev, "Failed to allocate regmap: %d\n", ret);
return ret;
}
i2c_set_clientdata(i2c, wm8978);
/* Reset the codec */
ret = regmap_write(wm8978->regmap, WM8978_RESET, 0);
if (ret != 0) {
dev_err(&i2c->dev, "Failed to issue reset: %d\n", ret);
return ret;
}
ret = snd_soc_register_codec(&i2c->dev,
&soc_codec_dev_wm8978, &wm8978_dai, 1);
if (ret != 0) {
dev_err(&i2c->dev, "Failed to register CODEC: %d\n", ret);
return ret;
}
return 0;
}
static int wm8978_i2c_remove(struct i2c_client *client)
{
snd_soc_unregister_codec(&client->dev);
return 0;
}
static const struct i2c_device_id wm8978_i2c_id[] = {
{ "wm8978", 0 },
{ }
};
MODULE_DEVICE_TABLE(i2c, wm8978_i2c_id);
static struct i2c_driver wm8978_i2c_driver = {
.driver = {
.name = "wm8978",
.owner = THIS_MODULE,
},
.probe = wm8978_i2c_probe,
.remove = wm8978_i2c_remove,
.id_table = wm8978_i2c_id,
};
module_i2c_driver(wm8978_i2c_driver);
MODULE_DESCRIPTION("ASoC WM8978 codec driver");
MODULE_AUTHOR("Guennadi Liakhovetski <g.liakhovetski@gmx.de>");
MODULE_LICENSE("GPL");
| gpl-2.0 |
SM-G920P/G920PVPU3BOI1 | drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c | 1455 | 84032 | /*
* Copyright (C) 2003 - 2009 NetXen, Inc.
* Copyright (C) 2009 - QLogic Corporation.
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston,
* MA 02111-1307, USA.
*
* The full GNU General Public License is included in this distribution
* in the file called "COPYING".
*
*/
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include "netxen_nic_hw.h"
#include "netxen_nic.h"
#include <linux/dma-mapping.h>
#include <linux/if_vlan.h>
#include <net/ip.h>
#include <linux/ipv6.h>
#include <linux/inetdevice.h>
#include <linux/sysfs.h>
#include <linux/aer.h>
MODULE_DESCRIPTION("QLogic/NetXen (1/10) GbE Intelligent Ethernet Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(NETXEN_NIC_LINUX_VERSIONID);
MODULE_FIRMWARE(NX_UNIFIED_ROMIMAGE_NAME);
char netxen_nic_driver_name[] = "netxen_nic";
static char netxen_nic_driver_string[] = "QLogic/NetXen Network Driver v"
NETXEN_NIC_LINUX_VERSIONID;
static int port_mode = NETXEN_PORT_MODE_AUTO_NEG;
/* Default to restricted 1G auto-neg mode */
static int wol_port_mode = 5;
static int use_msi = 1;
static int use_msi_x = 1;
static int auto_fw_reset = AUTO_FW_RESET_ENABLED;
module_param(auto_fw_reset, int, 0644);
MODULE_PARM_DESC(auto_fw_reset, "Auto firmware reset (0=disabled, 1=enabled)");
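/*
* Example usage (hypothetical command lines, not part of the driver):
*   modprobe netxen_nic auto_fw_reset=0
* or, since the parameter permissions are 0644, at runtime:
*   echo 0 > /sys/module/netxen_nic/parameters/auto_fw_reset
*/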
static int netxen_nic_probe(struct pci_dev *pdev,
const struct pci_device_id *ent);
static void netxen_nic_remove(struct pci_dev *pdev);
static int netxen_nic_open(struct net_device *netdev);
static int netxen_nic_close(struct net_device *netdev);
static netdev_tx_t netxen_nic_xmit_frame(struct sk_buff *,
struct net_device *);
static void netxen_tx_timeout(struct net_device *netdev);
static void netxen_tx_timeout_task(struct work_struct *work);
static void netxen_fw_poll_work(struct work_struct *work);
static void netxen_schedule_work(struct netxen_adapter *adapter,
work_func_t func, int delay);
static void netxen_cancel_fw_work(struct netxen_adapter *adapter);
static int netxen_nic_poll(struct napi_struct *napi, int budget);
#ifdef CONFIG_NET_POLL_CONTROLLER
static void netxen_nic_poll_controller(struct net_device *netdev);
#endif
static void netxen_create_sysfs_entries(struct netxen_adapter *adapter);
static void netxen_remove_sysfs_entries(struct netxen_adapter *adapter);
static void netxen_create_diag_entries(struct netxen_adapter *adapter);
static void netxen_remove_diag_entries(struct netxen_adapter *adapter);
static int nx_dev_request_aer(struct netxen_adapter *adapter);
static int nx_decr_dev_ref_cnt(struct netxen_adapter *adapter);
static int netxen_can_start_firmware(struct netxen_adapter *adapter);
static irqreturn_t netxen_intr(int irq, void *data);
static irqreturn_t netxen_msi_intr(int irq, void *data);
static irqreturn_t netxen_msix_intr(int irq, void *data);
static void netxen_free_ip_list(struct netxen_adapter *, bool);
static void netxen_restore_indev_addr(struct net_device *dev, unsigned long);
static struct rtnl_link_stats64 *netxen_nic_get_stats(struct net_device *dev,
struct rtnl_link_stats64 *stats);
static int netxen_nic_set_mac(struct net_device *netdev, void *p);
/* PCI Device ID Table */
#define ENTRY(device) \
{PCI_DEVICE(PCI_VENDOR_ID_NETXEN, (device)), \
.class = PCI_CLASS_NETWORK_ETHERNET << 8, .class_mask = ~0}
static DEFINE_PCI_DEVICE_TABLE(netxen_pci_tbl) = {
ENTRY(PCI_DEVICE_ID_NX2031_10GXSR),
ENTRY(PCI_DEVICE_ID_NX2031_10GCX4),
ENTRY(PCI_DEVICE_ID_NX2031_4GCU),
ENTRY(PCI_DEVICE_ID_NX2031_IMEZ),
ENTRY(PCI_DEVICE_ID_NX2031_HMEZ),
ENTRY(PCI_DEVICE_ID_NX2031_XG_MGMT),
ENTRY(PCI_DEVICE_ID_NX2031_XG_MGMT2),
ENTRY(PCI_DEVICE_ID_NX3031),
{0,}
};
MODULE_DEVICE_TABLE(pci, netxen_pci_tbl);
static uint32_t crb_cmd_producer[4] = {
CRB_CMD_PRODUCER_OFFSET, CRB_CMD_PRODUCER_OFFSET_1,
CRB_CMD_PRODUCER_OFFSET_2, CRB_CMD_PRODUCER_OFFSET_3
};
void
netxen_nic_update_cmd_producer(struct netxen_adapter *adapter,
struct nx_host_tx_ring *tx_ring)
{
NXWRIO(adapter, tx_ring->crb_cmd_producer, tx_ring->producer);
}
static uint32_t crb_cmd_consumer[4] = {
CRB_CMD_CONSUMER_OFFSET, CRB_CMD_CONSUMER_OFFSET_1,
CRB_CMD_CONSUMER_OFFSET_2, CRB_CMD_CONSUMER_OFFSET_3
};
static inline void
netxen_nic_update_cmd_consumer(struct netxen_adapter *adapter,
struct nx_host_tx_ring *tx_ring)
{
NXWRIO(adapter, tx_ring->crb_cmd_consumer, tx_ring->sw_consumer);
}
static uint32_t msi_tgt_status[8] = {
ISR_INT_TARGET_STATUS, ISR_INT_TARGET_STATUS_F1,
ISR_INT_TARGET_STATUS_F2, ISR_INT_TARGET_STATUS_F3,
ISR_INT_TARGET_STATUS_F4, ISR_INT_TARGET_STATUS_F5,
ISR_INT_TARGET_STATUS_F6, ISR_INT_TARGET_STATUS_F7
};
static struct netxen_legacy_intr_set legacy_intr[] = NX_LEGACY_INTR_CONFIG;
static inline void netxen_nic_disable_int(struct nx_host_sds_ring *sds_ring)
{
struct netxen_adapter *adapter = sds_ring->adapter;
NXWRIO(adapter, sds_ring->crb_intr_mask, 0);
}
static inline void netxen_nic_enable_int(struct nx_host_sds_ring *sds_ring)
{
struct netxen_adapter *adapter = sds_ring->adapter;
NXWRIO(adapter, sds_ring->crb_intr_mask, 0x1);
if (!NETXEN_IS_MSI_FAMILY(adapter))
NXWRIO(adapter, adapter->tgt_mask_reg, 0xfbff);
}
static int
netxen_alloc_sds_rings(struct netxen_recv_context *recv_ctx, int count)
{
int size = sizeof(struct nx_host_sds_ring) * count;
recv_ctx->sds_rings = kzalloc(size, GFP_KERNEL);
return recv_ctx->sds_rings == NULL;
}
static void
netxen_free_sds_rings(struct netxen_recv_context *recv_ctx)
{
if (recv_ctx->sds_rings != NULL)
kfree(recv_ctx->sds_rings);
recv_ctx->sds_rings = NULL;
}
static int
netxen_napi_add(struct netxen_adapter *adapter, struct net_device *netdev)
{
int ring;
struct nx_host_sds_ring *sds_ring;
struct netxen_recv_context *recv_ctx = &adapter->recv_ctx;
if (netxen_alloc_sds_rings(recv_ctx, adapter->max_sds_rings))
return -ENOMEM;
for (ring = 0; ring < adapter->max_sds_rings; ring++) {
sds_ring = &recv_ctx->sds_rings[ring];
netif_napi_add(netdev, &sds_ring->napi,
netxen_nic_poll, NETXEN_NETDEV_WEIGHT);
}
return 0;
}
static void
netxen_napi_del(struct netxen_adapter *adapter)
{
int ring;
struct nx_host_sds_ring *sds_ring;
struct netxen_recv_context *recv_ctx = &adapter->recv_ctx;
for (ring = 0; ring < adapter->max_sds_rings; ring++) {
sds_ring = &recv_ctx->sds_rings[ring];
netif_napi_del(&sds_ring->napi);
}
netxen_free_sds_rings(&adapter->recv_ctx);
}
static void
netxen_napi_enable(struct netxen_adapter *adapter)
{
int ring;
struct nx_host_sds_ring *sds_ring;
struct netxen_recv_context *recv_ctx = &adapter->recv_ctx;
for (ring = 0; ring < adapter->max_sds_rings; ring++) {
sds_ring = &recv_ctx->sds_rings[ring];
napi_enable(&sds_ring->napi);
netxen_nic_enable_int(sds_ring);
}
}
static void
netxen_napi_disable(struct netxen_adapter *adapter)
{
int ring;
struct nx_host_sds_ring *sds_ring;
struct netxen_recv_context *recv_ctx = &adapter->recv_ctx;
for (ring = 0; ring < adapter->max_sds_rings; ring++) {
sds_ring = &recv_ctx->sds_rings[ring];
netxen_nic_disable_int(sds_ring);
napi_synchronize(&sds_ring->napi);
napi_disable(&sds_ring->napi);
}
}
static int nx_set_dma_mask(struct netxen_adapter *adapter)
{
struct pci_dev *pdev = adapter->pdev;
uint64_t mask, cmask;
adapter->pci_using_dac = 0;
mask = DMA_BIT_MASK(32);
cmask = DMA_BIT_MASK(32);
if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) {
#ifndef CONFIG_IA64
mask = DMA_BIT_MASK(35);
#endif
} else {
mask = DMA_BIT_MASK(39);
cmask = mask;
}
if (pci_set_dma_mask(pdev, mask) == 0 &&
pci_set_consistent_dma_mask(pdev, cmask) == 0) {
adapter->pci_using_dac = 1;
return 0;
}
return -EIO;
}
/* Update addressable range if firmware supports it */
static int
nx_update_dma_mask(struct netxen_adapter *adapter)
{
int change, shift, err;
uint64_t mask, old_mask, old_cmask;
struct pci_dev *pdev = adapter->pdev;
change = 0;
shift = NXRD32(adapter, CRB_DMA_SHIFT);
if (shift > 32)
return 0;
if (NX_IS_REVISION_P3(adapter->ahw.revision_id) && (shift > 9))
change = 1;
else if ((adapter->ahw.revision_id == NX_P2_C1) && (shift <= 4))
change = 1;
if (change) {
old_mask = pdev->dma_mask;
old_cmask = pdev->dev.coherent_dma_mask;
mask = DMA_BIT_MASK(32+shift);
err = pci_set_dma_mask(pdev, mask);
if (err)
goto err_out;
if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) {
err = pci_set_consistent_dma_mask(pdev, mask);
if (err)
goto err_out;
}
dev_info(&pdev->dev, "using %d-bit dma mask\n", 32+shift);
}
return 0;
err_out:
pci_set_dma_mask(pdev, old_mask);
pci_set_consistent_dma_mask(pdev, old_cmask);
return err;
}
static int
netxen_check_hw_init(struct netxen_adapter *adapter, int first_boot)
{
u32 val, timeout;
if (first_boot == 0x55555555) {
/* This is the first boot after power up */
NXWR32(adapter, NETXEN_CAM_RAM(0x1fc), NETXEN_BDINFO_MAGIC);
if (!NX_IS_REVISION_P2(adapter->ahw.revision_id))
return 0;
/* PCI bus master workaround */
first_boot = NXRD32(adapter, NETXEN_PCIE_REG(0x4));
if (!(first_boot & 0x4)) {
first_boot |= 0x4;
NXWR32(adapter, NETXEN_PCIE_REG(0x4), first_boot);
NXRD32(adapter, NETXEN_PCIE_REG(0x4));
}
/* This is the first boot after power up */
first_boot = NXRD32(adapter, NETXEN_ROMUSB_GLB_SW_RESET);
if (first_boot != 0x80000f) {
/* clear the register for future unloads/loads */
NXWR32(adapter, NETXEN_CAM_RAM(0x1fc), 0);
return -EIO;
}
/* Start P2 boot loader */
val = NXRD32(adapter, NETXEN_ROMUSB_GLB_PEGTUNE_DONE);
NXWR32(adapter, NETXEN_ROMUSB_GLB_PEGTUNE_DONE, val | 0x1);
timeout = 0;
do {
msleep(1);
val = NXRD32(adapter, NETXEN_CAM_RAM(0x1fc));
if (++timeout > 5000)
return -EIO;
} while (val == NETXEN_BDINFO_MAGIC);
}
return 0;
}
static void netxen_set_port_mode(struct netxen_adapter *adapter)
{
u32 val, data;
val = adapter->ahw.board_type;
if ((val == NETXEN_BRDTYPE_P3_HMEZ) ||
(val == NETXEN_BRDTYPE_P3_XG_LOM)) {
if (port_mode == NETXEN_PORT_MODE_802_3_AP) {
data = NETXEN_PORT_MODE_802_3_AP;
NXWR32(adapter, NETXEN_PORT_MODE_ADDR, data);
} else if (port_mode == NETXEN_PORT_MODE_XG) {
data = NETXEN_PORT_MODE_XG;
NXWR32(adapter, NETXEN_PORT_MODE_ADDR, data);
} else if (port_mode == NETXEN_PORT_MODE_AUTO_NEG_1G) {
data = NETXEN_PORT_MODE_AUTO_NEG_1G;
NXWR32(adapter, NETXEN_PORT_MODE_ADDR, data);
} else if (port_mode == NETXEN_PORT_MODE_AUTO_NEG_XG) {
data = NETXEN_PORT_MODE_AUTO_NEG_XG;
NXWR32(adapter, NETXEN_PORT_MODE_ADDR, data);
} else {
data = NETXEN_PORT_MODE_AUTO_NEG;
NXWR32(adapter, NETXEN_PORT_MODE_ADDR, data);
}
if ((wol_port_mode != NETXEN_PORT_MODE_802_3_AP) &&
(wol_port_mode != NETXEN_PORT_MODE_XG) &&
(wol_port_mode != NETXEN_PORT_MODE_AUTO_NEG_1G) &&
(wol_port_mode != NETXEN_PORT_MODE_AUTO_NEG_XG)) {
wol_port_mode = NETXEN_PORT_MODE_AUTO_NEG;
}
NXWR32(adapter, NETXEN_WOL_PORT_MODE, wol_port_mode);
}
}
#define PCI_CAP_ID_GEN 0x10
static void netxen_pcie_strap_init(struct netxen_adapter *adapter)
{
u32 pdevfuncsave;
u32 c8c9value = 0;
u32 chicken = 0;
u32 control = 0;
int i, pos;
struct pci_dev *pdev;
pdev = adapter->pdev;
chicken = NXRD32(adapter, NETXEN_PCIE_REG(PCIE_CHICKEN3));
/* clear chicken3.25:24 */
chicken &= 0xFCFFFFFF;
/*
* if gen2, set to F1000; if gen1 and B0, set to F1020;
* otherwise (gen1, not B0) leave it alone
*/
pos = pci_find_capability(pdev, PCI_CAP_ID_GEN);
if (pos == 0xC0) {
pci_read_config_dword(pdev, pos + 0x10, &control);
if ((control & 0x000F0000) != 0x00020000) {
/* set chicken3.24 if gen1 */
chicken |= 0x01000000;
}
dev_info(&adapter->pdev->dev, "Gen2 strapping detected\n");
c8c9value = 0xF1000;
} else {
/* set chicken3.24 if gen1 */
chicken |= 0x01000000;
dev_info(&adapter->pdev->dev, "Gen1 strapping detected\n");
if (adapter->ahw.revision_id == NX_P3_B0)
c8c9value = 0xF1020;
else
c8c9value = 0;
}
NXWR32(adapter, NETXEN_PCIE_REG(PCIE_CHICKEN3), chicken);
if (!c8c9value)
return;
pdevfuncsave = pdev->devfn;
if (pdevfuncsave & 0x07)
return;
for (i = 0; i < 8; i++) {
pci_read_config_dword(pdev, pos + 8, &control);
pci_read_config_dword(pdev, pos + 8, &control);
pci_write_config_dword(pdev, pos + 8, c8c9value);
pdev->devfn++;
}
pdev->devfn = pdevfuncsave;
}
static void netxen_set_msix_bit(struct pci_dev *pdev, int enable)
{
u32 control;
int pos;
pos = pci_find_capability(pdev, PCI_CAP_ID_MSIX);
if (pos) {
pci_read_config_dword(pdev, pos, &control);
if (enable)
control |= PCI_MSIX_FLAGS_ENABLE;
else
control = 0;
pci_write_config_dword(pdev, pos, control);
}
}
static void netxen_init_msix_entries(struct netxen_adapter *adapter, int count)
{
int i;
for (i = 0; i < count; i++)
adapter->msix_entries[i].entry = i;
}
static int
netxen_read_mac_addr(struct netxen_adapter *adapter)
{
int i;
unsigned char *p;
u64 mac_addr;
struct net_device *netdev = adapter->netdev;
struct pci_dev *pdev = adapter->pdev;
if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) {
if (netxen_p3_get_mac_addr(adapter, &mac_addr) != 0)
return -EIO;
} else {
if (netxen_get_flash_mac_addr(adapter, &mac_addr) != 0)
return -EIO;
}
p = (unsigned char *)&mac_addr;
for (i = 0; i < 6; i++)
netdev->dev_addr[i] = *(p + 5 - i);
memcpy(adapter->mac_addr, netdev->dev_addr, netdev->addr_len);
/* set station address */
if (!is_valid_ether_addr(netdev->dev_addr))
dev_warn(&pdev->dev, "Bad MAC address %pM.\n", netdev->dev_addr);
return 0;
}
static int netxen_nic_set_mac(struct net_device *netdev, void *p)
{
struct netxen_adapter *adapter = netdev_priv(netdev);
struct sockaddr *addr = p;
if (!is_valid_ether_addr(addr->sa_data))
return -EADDRNOTAVAIL;
if (netif_running(netdev)) {
netif_device_detach(netdev);
netxen_napi_disable(adapter);
}
memcpy(adapter->mac_addr, addr->sa_data, netdev->addr_len);
memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
adapter->macaddr_set(adapter, addr->sa_data);
if (netif_running(netdev)) {
netif_device_attach(netdev);
netxen_napi_enable(adapter);
}
return 0;
}
static void netxen_set_multicast_list(struct net_device *dev)
{
struct netxen_adapter *adapter = netdev_priv(dev);
adapter->set_multi(dev);
}
static netdev_features_t netxen_fix_features(struct net_device *dev,
netdev_features_t features)
{
if (!(features & NETIF_F_RXCSUM)) {
netdev_info(dev, "disabling LRO as RXCSUM is off\n");
features &= ~NETIF_F_LRO;
}
return features;
}
static int netxen_set_features(struct net_device *dev,
netdev_features_t features)
{
struct netxen_adapter *adapter = netdev_priv(dev);
int hw_lro;
if (!((dev->features ^ features) & NETIF_F_LRO))
return 0;
hw_lro = (features & NETIF_F_LRO) ? NETXEN_NIC_LRO_ENABLED
: NETXEN_NIC_LRO_DISABLED;
if (netxen_config_hw_lro(adapter, hw_lro))
return -EIO;
if (!(features & NETIF_F_LRO) && netxen_send_lro_cleanup(adapter))
return -EIO;
return 0;
}
static const struct net_device_ops netxen_netdev_ops = {
.ndo_open = netxen_nic_open,
.ndo_stop = netxen_nic_close,
.ndo_start_xmit = netxen_nic_xmit_frame,
.ndo_get_stats64 = netxen_nic_get_stats,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_rx_mode = netxen_set_multicast_list,
.ndo_set_mac_address = netxen_nic_set_mac,
.ndo_change_mtu = netxen_nic_change_mtu,
.ndo_tx_timeout = netxen_tx_timeout,
.ndo_fix_features = netxen_fix_features,
.ndo_set_features = netxen_set_features,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = netxen_nic_poll_controller,
#endif
};
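/*
* Added note: netxen_setup_intr() below tries interrupt modes in order of
* preference - MSI-X when adapter->msix_supported, then MSI when the use_msi
* module parameter allows it, and finally legacy INTx - and records the
* chosen vector in msix_entries[0].vector for the non-MSI-X cases.
*/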
static void
netxen_setup_intr(struct netxen_adapter *adapter)
{
struct netxen_legacy_intr_set *legacy_intrp;
struct pci_dev *pdev = adapter->pdev;
int err, num_msix;
if (adapter->rss_supported) {
num_msix = (num_online_cpus() >= MSIX_ENTRIES_PER_ADAPTER) ?
MSIX_ENTRIES_PER_ADAPTER : 2;
} else
num_msix = 1;
adapter->max_sds_rings = 1;
adapter->flags &= ~(NETXEN_NIC_MSI_ENABLED | NETXEN_NIC_MSIX_ENABLED);
if (adapter->ahw.revision_id >= NX_P3_B0)
legacy_intrp = &legacy_intr[adapter->ahw.pci_func];
else
legacy_intrp = &legacy_intr[0];
adapter->int_vec_bit = legacy_intrp->int_vec_bit;
adapter->tgt_status_reg = netxen_get_ioaddr(adapter,
legacy_intrp->tgt_status_reg);
adapter->tgt_mask_reg = netxen_get_ioaddr(adapter,
legacy_intrp->tgt_mask_reg);
adapter->pci_int_reg = netxen_get_ioaddr(adapter,
legacy_intrp->pci_int_reg);
adapter->isr_int_vec = netxen_get_ioaddr(adapter, ISR_INT_VECTOR);
if (adapter->ahw.revision_id >= NX_P3_B1)
adapter->crb_int_state_reg = netxen_get_ioaddr(adapter,
ISR_INT_STATE_REG);
else
adapter->crb_int_state_reg = netxen_get_ioaddr(adapter,
CRB_INT_VECTOR);
netxen_set_msix_bit(pdev, 0);
if (adapter->msix_supported) {
netxen_init_msix_entries(adapter, num_msix);
err = pci_enable_msix(pdev, adapter->msix_entries, num_msix);
if (err == 0) {
adapter->flags |= NETXEN_NIC_MSIX_ENABLED;
netxen_set_msix_bit(pdev, 1);
if (adapter->rss_supported)
adapter->max_sds_rings = num_msix;
dev_info(&pdev->dev, "using msi-x interrupts\n");
return;
}
if (err > 0)
pci_disable_msix(pdev);
/* fall through for msi */
}
if (use_msi && !pci_enable_msi(pdev)) {
adapter->flags |= NETXEN_NIC_MSI_ENABLED;
adapter->tgt_status_reg = netxen_get_ioaddr(adapter,
msi_tgt_status[adapter->ahw.pci_func]);
dev_info(&pdev->dev, "using msi interrupts\n");
adapter->msix_entries[0].vector = pdev->irq;
return;
}
dev_info(&pdev->dev, "using legacy interrupts\n");
adapter->msix_entries[0].vector = pdev->irq;
}
static void
netxen_teardown_intr(struct netxen_adapter *adapter)
{
if (adapter->flags & NETXEN_NIC_MSIX_ENABLED)
pci_disable_msix(adapter->pdev);
if (adapter->flags & NETXEN_NIC_MSI_ENABLED)
pci_disable_msi(adapter->pdev);
}
static void
netxen_cleanup_pci_map(struct netxen_adapter *adapter)
{
if (adapter->ahw.db_base != NULL)
iounmap(adapter->ahw.db_base);
if (adapter->ahw.pci_base0 != NULL)
iounmap(adapter->ahw.pci_base0);
if (adapter->ahw.pci_base1 != NULL)
iounmap(adapter->ahw.pci_base1);
if (adapter->ahw.pci_base2 != NULL)
iounmap(adapter->ahw.pci_base2);
}
static int
netxen_setup_pci_map(struct netxen_adapter *adapter)
{
void __iomem *db_ptr = NULL;
resource_size_t mem_base, db_base;
unsigned long mem_len, db_len = 0;
struct pci_dev *pdev = adapter->pdev;
int pci_func = adapter->ahw.pci_func;
struct netxen_hardware_context *ahw = &adapter->ahw;
int err = 0;
/*
* Set the CRB window to invalid. If any register in window 0 is
* accessed it should set the window to 0 and then reset it to 1.
*/
adapter->ahw.crb_win = -1;
adapter->ahw.ocm_win = -1;
/* remap phys address */
mem_base = pci_resource_start(pdev, 0); /* 0 is for BAR 0 */
mem_len = pci_resource_len(pdev, 0);
/* 128 Meg of memory */
if (mem_len == NETXEN_PCI_128MB_SIZE) {
ahw->pci_base0 = ioremap(mem_base, FIRST_PAGE_GROUP_SIZE);
ahw->pci_base1 = ioremap(mem_base + SECOND_PAGE_GROUP_START,
SECOND_PAGE_GROUP_SIZE);
ahw->pci_base2 = ioremap(mem_base + THIRD_PAGE_GROUP_START,
THIRD_PAGE_GROUP_SIZE);
if (ahw->pci_base0 == NULL || ahw->pci_base1 == NULL ||
ahw->pci_base2 == NULL) {
dev_err(&pdev->dev, "failed to map PCI bar 0\n");
err = -EIO;
goto err_out;
}
ahw->pci_len0 = FIRST_PAGE_GROUP_SIZE;
} else if (mem_len == NETXEN_PCI_32MB_SIZE) {
ahw->pci_base1 = ioremap(mem_base, SECOND_PAGE_GROUP_SIZE);
ahw->pci_base2 = ioremap(mem_base + THIRD_PAGE_GROUP_START -
SECOND_PAGE_GROUP_START, THIRD_PAGE_GROUP_SIZE);
if (ahw->pci_base1 == NULL || ahw->pci_base2 == NULL) {
dev_err(&pdev->dev, "failed to map PCI bar 0\n");
err = -EIO;
goto err_out;
}
} else if (mem_len == NETXEN_PCI_2MB_SIZE) {
ahw->pci_base0 = pci_ioremap_bar(pdev, 0);
if (ahw->pci_base0 == NULL) {
dev_err(&pdev->dev, "failed to map PCI bar 0\n");
return -EIO;
}
ahw->pci_len0 = mem_len;
} else {
return -EIO;
}
netxen_setup_hwops(adapter);
dev_info(&pdev->dev, "%dMB memory map\n", (int)(mem_len>>20));
if (NX_IS_REVISION_P3P(adapter->ahw.revision_id)) {
adapter->ahw.ocm_win_crb = netxen_get_ioaddr(adapter,
NETXEN_PCIX_PS_REG(PCIX_OCM_WINDOW_REG(pci_func)));
} else if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) {
adapter->ahw.ocm_win_crb = netxen_get_ioaddr(adapter,
NETXEN_PCIX_PS_REG(PCIE_MN_WINDOW_REG(pci_func)));
}
if (NX_IS_REVISION_P3(adapter->ahw.revision_id))
goto skip_doorbell;
db_base = pci_resource_start(pdev, 4); /* doorbell is on bar 4 */
db_len = pci_resource_len(pdev, 4);
if (db_len == 0) {
printk(KERN_ERR "%s: doorbell is disabled\n",
netxen_nic_driver_name);
err = -EIO;
goto err_out;
}
db_ptr = ioremap(db_base, NETXEN_DB_MAPSIZE_BYTES);
if (!db_ptr) {
printk(KERN_ERR "%s: Failed to allocate doorbell map.\n",
netxen_nic_driver_name);
err = -EIO;
goto err_out;
}
skip_doorbell:
adapter->ahw.db_base = db_ptr;
adapter->ahw.db_len = db_len;
return 0;
err_out:
netxen_cleanup_pci_map(adapter);
return err;
}
static void
netxen_check_options(struct netxen_adapter *adapter)
{
u32 fw_major, fw_minor, fw_build, prev_fw_version;
char brd_name[NETXEN_MAX_SHORT_NAME];
char serial_num[32];
int i, offset, val, err;
__le32 *ptr32;
struct pci_dev *pdev = adapter->pdev;
adapter->driver_mismatch = 0;
ptr32 = (__le32 *)&serial_num;
offset = NX_FW_SERIAL_NUM_OFFSET;
for (i = 0; i < 8; i++) {
if (netxen_rom_fast_read(adapter, offset, &val) == -1) {
dev_err(&pdev->dev, "error reading board info\n");
adapter->driver_mismatch = 1;
return;
}
ptr32[i] = cpu_to_le32(val);
offset += sizeof(u32);
}
fw_major = NXRD32(adapter, NETXEN_FW_VERSION_MAJOR);
fw_minor = NXRD32(adapter, NETXEN_FW_VERSION_MINOR);
fw_build = NXRD32(adapter, NETXEN_FW_VERSION_SUB);
prev_fw_version = adapter->fw_version;
adapter->fw_version = NETXEN_VERSION_CODE(fw_major, fw_minor, fw_build);
/* Get FW Mini Coredump template and store it */
if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) {
if (adapter->mdump.md_template == NULL ||
adapter->fw_version > prev_fw_version) {
kfree(adapter->mdump.md_template);
adapter->mdump.md_template = NULL;
err = netxen_setup_minidump(adapter);
if (err)
dev_err(&adapter->pdev->dev,
"Failed to setup minidump rcode = %d\n", err);
}
}
if (adapter->portnum == 0) {
get_brd_name_by_type(adapter->ahw.board_type, brd_name);
pr_info("%s: %s Board S/N %s Chip rev 0x%x\n",
module_name(THIS_MODULE),
brd_name, serial_num, adapter->ahw.revision_id);
}
if (adapter->fw_version < NETXEN_VERSION_CODE(3, 4, 216)) {
adapter->driver_mismatch = 1;
dev_warn(&pdev->dev, "firmware version %d.%d.%d unsupported\n",
fw_major, fw_minor, fw_build);
return;
}
if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) {
i = NXRD32(adapter, NETXEN_SRE_MISC);
adapter->ahw.cut_through = (i & 0x8000) ? 1 : 0;
}
dev_info(&pdev->dev, "firmware v%d.%d.%d [%s]\n",
fw_major, fw_minor, fw_build,
adapter->ahw.cut_through ? "cut-through" : "legacy");
if (adapter->fw_version >= NETXEN_VERSION_CODE(4, 0, 222))
adapter->capabilities = NXRD32(adapter, CRB_FW_CAPABILITIES_1);
if (adapter->ahw.port_type == NETXEN_NIC_XGBE) {
adapter->num_rxd = DEFAULT_RCV_DESCRIPTORS_10G;
adapter->num_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_10G;
} else if (adapter->ahw.port_type == NETXEN_NIC_GBE) {
adapter->num_rxd = DEFAULT_RCV_DESCRIPTORS_1G;
adapter->num_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_1G;
}
adapter->msix_supported = 0;
if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) {
adapter->msix_supported = !!use_msi_x;
adapter->rss_supported = !!use_msi_x;
} else {
u32 flashed_ver = 0;
netxen_rom_fast_read(adapter,
NX_FW_VERSION_OFFSET, (int *)&flashed_ver);
flashed_ver = NETXEN_DECODE_VERSION(flashed_ver);
if (flashed_ver >= NETXEN_VERSION_CODE(3, 4, 336)) {
switch (adapter->ahw.board_type) {
case NETXEN_BRDTYPE_P2_SB31_10G:
case NETXEN_BRDTYPE_P2_SB31_10G_CX4:
adapter->msix_supported = !!use_msi_x;
adapter->rss_supported = !!use_msi_x;
break;
default:
break;
}
}
}
adapter->num_txd = MAX_CMD_DESCRIPTORS;
if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) {
adapter->num_lro_rxd = MAX_LRO_RCV_DESCRIPTORS;
adapter->max_rds_rings = 3;
} else {
adapter->num_lro_rxd = 0;
adapter->max_rds_rings = 2;
}
}
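/*
* Added summary: netxen_start_firmware() is called at probe time and again
* on resume/recovery. Only the function that wins netxen_can_start_firmware()
* does the heavy lifting - ROM init, firmware load, dummy-DMA setup and the
* CRB_DRIVER_VERSION handshake - while the other PCI functions jump straight
* to the phantom-init wait and the final DMA-mask/option fixups.
*/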
static int
netxen_start_firmware(struct netxen_adapter *adapter)
{
int val, err, first_boot;
struct pci_dev *pdev = adapter->pdev;
/* required for NX2031 dummy dma */
err = nx_set_dma_mask(adapter);
if (err)
return err;
err = netxen_can_start_firmware(adapter);
if (err < 0)
return err;
if (!err)
goto wait_init;
first_boot = NXRD32(adapter, NETXEN_CAM_RAM(0x1fc));
err = netxen_check_hw_init(adapter, first_boot);
if (err) {
dev_err(&pdev->dev, "error in HW init sequence\n");
return err;
}
netxen_request_firmware(adapter);
err = netxen_need_fw_reset(adapter);
if (err < 0)
goto err_out;
if (err == 0)
goto pcie_strap_init;
if (first_boot != 0x55555555) {
NXWR32(adapter, CRB_CMDPEG_STATE, 0);
netxen_pinit_from_rom(adapter);
msleep(1);
}
NXWR32(adapter, CRB_DMA_SHIFT, 0x55555555);
NXWR32(adapter, NETXEN_PEG_HALT_STATUS1, 0);
NXWR32(adapter, NETXEN_PEG_HALT_STATUS2, 0);
if (NX_IS_REVISION_P3(adapter->ahw.revision_id))
netxen_set_port_mode(adapter);
err = netxen_load_firmware(adapter);
if (err)
goto err_out;
netxen_release_firmware(adapter);
if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) {
/* Initialize multicast addr pool owners */
val = 0x7654;
if (adapter->ahw.port_type == NETXEN_NIC_XGBE)
val |= 0x0f000000;
NXWR32(adapter, NETXEN_MAC_ADDR_CNTL_REG, val);
}
err = netxen_init_dummy_dma(adapter);
if (err)
goto err_out;
/*
* Tell the hardware our version number.
*/
val = (_NETXEN_NIC_LINUX_MAJOR << 16)
| ((_NETXEN_NIC_LINUX_MINOR << 8))
| (_NETXEN_NIC_LINUX_SUBVERSION);
NXWR32(adapter, CRB_DRIVER_VERSION, val);
pcie_strap_init:
if (NX_IS_REVISION_P3(adapter->ahw.revision_id))
netxen_pcie_strap_init(adapter);
wait_init:
/* Handshake with the card before we register the devices. */
err = netxen_phantom_init(adapter, NETXEN_NIC_PEG_TUNE);
if (err) {
netxen_free_dummy_dma(adapter);
goto err_out;
}
NXWR32(adapter, NX_CRB_DEV_STATE, NX_DEV_READY);
nx_update_dma_mask(adapter);
netxen_check_options(adapter);
adapter->need_fw_reset = 0;
/* fall through and release firmware */
err_out:
netxen_release_firmware(adapter);
return err;
}
static int
netxen_nic_request_irq(struct netxen_adapter *adapter)
{
irq_handler_t handler;
struct nx_host_sds_ring *sds_ring;
int err, ring;
unsigned long flags = 0;
struct net_device *netdev = adapter->netdev;
struct netxen_recv_context *recv_ctx = &adapter->recv_ctx;
if (adapter->flags & NETXEN_NIC_MSIX_ENABLED)
handler = netxen_msix_intr;
else if (adapter->flags & NETXEN_NIC_MSI_ENABLED)
handler = netxen_msi_intr;
else {
flags |= IRQF_SHARED;
handler = netxen_intr;
}
adapter->irq = netdev->irq;
for (ring = 0; ring < adapter->max_sds_rings; ring++) {
sds_ring = &recv_ctx->sds_rings[ring];
sprintf(sds_ring->name, "%s[%d]", netdev->name, ring);
err = request_irq(sds_ring->irq, handler,
flags, sds_ring->name, sds_ring);
if (err)
return err;
}
return 0;
}
static void
netxen_nic_free_irq(struct netxen_adapter *adapter)
{
int ring;
struct nx_host_sds_ring *sds_ring;
struct netxen_recv_context *recv_ctx = &adapter->recv_ctx;
for (ring = 0; ring < adapter->max_sds_rings; ring++) {
sds_ring = &recv_ctx->sds_rings[ring];
free_irq(sds_ring->irq, sds_ring);
}
}
static void
netxen_nic_init_coalesce_defaults(struct netxen_adapter *adapter)
{
adapter->coal.flags = NETXEN_NIC_INTR_DEFAULT;
adapter->coal.normal.data.rx_time_us =
NETXEN_DEFAULT_INTR_COALESCE_RX_TIME_US;
adapter->coal.normal.data.rx_packets =
NETXEN_DEFAULT_INTR_COALESCE_RX_PACKETS;
adapter->coal.normal.data.tx_time_us =
NETXEN_DEFAULT_INTR_COALESCE_TX_TIME_US;
adapter->coal.normal.data.tx_packets =
NETXEN_DEFAULT_INTR_COALESCE_TX_PACKETS;
}
/* with rtnl_lock */
static int
__netxen_nic_up(struct netxen_adapter *adapter, struct net_device *netdev)
{
int err;
if (adapter->is_up != NETXEN_ADAPTER_UP_MAGIC)
return -EIO;
err = adapter->init_port(adapter, adapter->physical_port);
if (err) {
printk(KERN_ERR "%s: Failed to initialize port %d\n",
netxen_nic_driver_name, adapter->portnum);
return err;
}
if (NX_IS_REVISION_P2(adapter->ahw.revision_id))
adapter->macaddr_set(adapter, adapter->mac_addr);
adapter->set_multi(netdev);
adapter->set_mtu(adapter, netdev->mtu);
adapter->ahw.linkup = 0;
if (adapter->max_sds_rings > 1)
netxen_config_rss(adapter, 1);
if (NX_IS_REVISION_P3(adapter->ahw.revision_id))
netxen_config_intr_coalesce(adapter);
if (netdev->features & NETIF_F_LRO)
netxen_config_hw_lro(adapter, NETXEN_NIC_LRO_ENABLED);
netxen_napi_enable(adapter);
if (adapter->capabilities & NX_FW_CAPABILITY_LINK_NOTIFICATION)
netxen_linkevent_request(adapter, 1);
else
netxen_nic_set_link_parameters(adapter);
set_bit(__NX_DEV_UP, &adapter->state);
return 0;
}
/* Used during resume and firmware recovery. */
static inline int
netxen_nic_up(struct netxen_adapter *adapter, struct net_device *netdev)
{
int err = 0;
rtnl_lock();
if (netif_running(netdev))
err = __netxen_nic_up(adapter, netdev);
rtnl_unlock();
return err;
}
/* with rtnl_lock */
static void
__netxen_nic_down(struct netxen_adapter *adapter, struct net_device *netdev)
{
if (adapter->is_up != NETXEN_ADAPTER_UP_MAGIC)
return;
if (!test_and_clear_bit(__NX_DEV_UP, &adapter->state))
return;
smp_mb();
spin_lock(&adapter->tx_clean_lock);
netif_carrier_off(netdev);
netif_tx_disable(netdev);
if (adapter->capabilities & NX_FW_CAPABILITY_LINK_NOTIFICATION)
netxen_linkevent_request(adapter, 0);
if (adapter->stop_port)
adapter->stop_port(adapter);
if (NX_IS_REVISION_P3(adapter->ahw.revision_id))
netxen_p3_free_mac_list(adapter);
adapter->set_promisc(adapter, NETXEN_NIU_NON_PROMISC_MODE);
netxen_napi_disable(adapter);
netxen_release_tx_buffers(adapter);
spin_unlock(&adapter->tx_clean_lock);
}
/* Used during suspend and firmware recovery. */
static inline void
netxen_nic_down(struct netxen_adapter *adapter, struct net_device *netdev)
{
rtnl_lock();
if (netif_running(netdev))
__netxen_nic_down(adapter, netdev);
rtnl_unlock();
}
static int
netxen_nic_attach(struct netxen_adapter *adapter)
{
struct net_device *netdev = adapter->netdev;
struct pci_dev *pdev = adapter->pdev;
int err, ring;
struct nx_host_rds_ring *rds_ring;
struct nx_host_tx_ring *tx_ring;
u32 capab2;
if (adapter->is_up == NETXEN_ADAPTER_UP_MAGIC)
return 0;
err = netxen_init_firmware(adapter);
if (err)
return err;
adapter->flags &= ~NETXEN_FW_MSS_CAP;
if (adapter->capabilities & NX_FW_CAPABILITY_MORE_CAPS) {
capab2 = NXRD32(adapter, CRB_FW_CAPABILITIES_2);
if (capab2 & NX_FW_CAPABILITY_2_LRO_MAX_TCP_SEG)
adapter->flags |= NETXEN_FW_MSS_CAP;
}
err = netxen_napi_add(adapter, netdev);
if (err)
return err;
err = netxen_alloc_sw_resources(adapter);
if (err) {
printk(KERN_ERR "%s: Error in setting sw resources\n",
netdev->name);
return err;
}
err = netxen_alloc_hw_resources(adapter);
if (err) {
printk(KERN_ERR "%s: Error in setting hw resources\n",
netdev->name);
goto err_out_free_sw;
}
if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) {
tx_ring = adapter->tx_ring;
tx_ring->crb_cmd_producer = netxen_get_ioaddr(adapter,
crb_cmd_producer[adapter->portnum]);
tx_ring->crb_cmd_consumer = netxen_get_ioaddr(adapter,
crb_cmd_consumer[adapter->portnum]);
tx_ring->producer = 0;
tx_ring->sw_consumer = 0;
netxen_nic_update_cmd_producer(adapter, tx_ring);
netxen_nic_update_cmd_consumer(adapter, tx_ring);
}
for (ring = 0; ring < adapter->max_rds_rings; ring++) {
rds_ring = &adapter->recv_ctx.rds_rings[ring];
netxen_post_rx_buffers(adapter, ring, rds_ring);
}
err = netxen_nic_request_irq(adapter);
if (err) {
dev_err(&pdev->dev, "%s: failed to setup interrupt\n",
netdev->name);
goto err_out_free_rxbuf;
}
if (NX_IS_REVISION_P3(adapter->ahw.revision_id))
netxen_nic_init_coalesce_defaults(adapter);
netxen_create_sysfs_entries(adapter);
adapter->is_up = NETXEN_ADAPTER_UP_MAGIC;
return 0;
err_out_free_rxbuf:
netxen_release_rx_buffers(adapter);
netxen_free_hw_resources(adapter);
err_out_free_sw:
netxen_free_sw_resources(adapter);
return err;
}
static void
netxen_nic_detach(struct netxen_adapter *adapter)
{
if (adapter->is_up != NETXEN_ADAPTER_UP_MAGIC)
return;
netxen_remove_sysfs_entries(adapter);
netxen_free_hw_resources(adapter);
netxen_release_rx_buffers(adapter);
netxen_nic_free_irq(adapter);
netxen_napi_del(adapter);
netxen_free_sw_resources(adapter);
adapter->is_up = 0;
}
int
netxen_nic_reset_context(struct netxen_adapter *adapter)
{
int err = 0;
struct net_device *netdev = adapter->netdev;
if (test_and_set_bit(__NX_RESETTING, &adapter->state))
return -EBUSY;
if (adapter->is_up == NETXEN_ADAPTER_UP_MAGIC) {
netif_device_detach(netdev);
if (netif_running(netdev))
__netxen_nic_down(adapter, netdev);
netxen_nic_detach(adapter);
if (netif_running(netdev)) {
err = netxen_nic_attach(adapter);
if (!err)
err = __netxen_nic_up(adapter, netdev);
if (err)
goto done;
}
netif_device_attach(netdev);
}
done:
clear_bit(__NX_RESETTING, &adapter->state);
return err;
}
static int
netxen_setup_netdev(struct netxen_adapter *adapter,
struct net_device *netdev)
{
int err = 0;
struct pci_dev *pdev = adapter->pdev;
adapter->mc_enabled = 0;
if (NX_IS_REVISION_P3(adapter->ahw.revision_id))
adapter->max_mc_count = 38;
else
adapter->max_mc_count = 16;
netdev->netdev_ops = &netxen_netdev_ops;
netdev->watchdog_timeo = 5*HZ;
netxen_nic_change_mtu(netdev, netdev->mtu);
SET_ETHTOOL_OPS(netdev, &netxen_nic_ethtool_ops);
netdev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO |
NETIF_F_RXCSUM;
if (NX_IS_REVISION_P3(adapter->ahw.revision_id))
netdev->hw_features |= NETIF_F_IPV6_CSUM | NETIF_F_TSO6;
netdev->vlan_features |= netdev->hw_features;
if (adapter->pci_using_dac) {
netdev->features |= NETIF_F_HIGHDMA;
netdev->vlan_features |= NETIF_F_HIGHDMA;
}
if (adapter->capabilities & NX_FW_CAPABILITY_FVLANTX)
netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX;
if (adapter->capabilities & NX_FW_CAPABILITY_HW_LRO)
netdev->hw_features |= NETIF_F_LRO;
netdev->features |= netdev->hw_features;
netdev->irq = adapter->msix_entries[0].vector;
INIT_WORK(&adapter->tx_timeout_task, netxen_tx_timeout_task);
if (netxen_read_mac_addr(adapter))
dev_warn(&pdev->dev, "failed to read mac addr\n");
netif_carrier_off(netdev);
err = register_netdev(netdev);
if (err) {
dev_err(&pdev->dev, "failed to register net device\n");
return err;
}
return 0;
}
#ifdef CONFIG_PCIEAER
static void netxen_mask_aer_correctable(struct netxen_adapter *adapter)
{
struct pci_dev *pdev = adapter->pdev;
struct pci_dev *root = pdev->bus->self;
u32 aer_pos;
/* root bus? */
if (!root)
return;
if (adapter->ahw.board_type != NETXEN_BRDTYPE_P3_4_GB_MM &&
adapter->ahw.board_type != NETXEN_BRDTYPE_P3_10G_TP)
return;
if (pci_pcie_type(root) != PCI_EXP_TYPE_ROOT_PORT)
return;
aer_pos = pci_find_ext_capability(root, PCI_EXT_CAP_ID_ERR);
if (!aer_pos)
return;
pci_write_config_dword(root, aer_pos + PCI_ERR_COR_MASK, 0xffff);
}
#endif
static int
netxen_nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
struct net_device *netdev = NULL;
struct netxen_adapter *adapter = NULL;
int i = 0, err;
int pci_func_id = PCI_FUNC(pdev->devfn);
uint8_t revision_id;
u32 val;
if (pdev->revision >= NX_P3_A0 && pdev->revision <= NX_P3_B1) {
pr_warning("%s: chip revisions between 0x%x-0x%x "
"will not be enabled.\n",
module_name(THIS_MODULE), NX_P3_A0, NX_P3_B1);
return -ENODEV;
}
if ((err = pci_enable_device(pdev)))
return err;
if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
err = -ENODEV;
goto err_out_disable_pdev;
}
if ((err = pci_request_regions(pdev, netxen_nic_driver_name)))
goto err_out_disable_pdev;
if (NX_IS_REVISION_P3(pdev->revision))
pci_enable_pcie_error_reporting(pdev);
pci_set_master(pdev);
netdev = alloc_etherdev(sizeof(struct netxen_adapter));
if(!netdev) {
err = -ENOMEM;
goto err_out_free_res;
}
SET_NETDEV_DEV(netdev, &pdev->dev);
adapter = netdev_priv(netdev);
adapter->netdev = netdev;
adapter->pdev = pdev;
adapter->ahw.pci_func = pci_func_id;
revision_id = pdev->revision;
adapter->ahw.revision_id = revision_id;
rwlock_init(&adapter->ahw.crb_lock);
spin_lock_init(&adapter->ahw.mem_lock);
spin_lock_init(&adapter->tx_clean_lock);
INIT_LIST_HEAD(&adapter->mac_list);
INIT_LIST_HEAD(&adapter->ip_list);
err = netxen_setup_pci_map(adapter);
if (err)
goto err_out_free_netdev;
/* This will be reset for mezz cards */
adapter->portnum = pci_func_id;
err = netxen_nic_get_board_info(adapter);
if (err) {
dev_err(&pdev->dev, "Error getting board config info.\n");
goto err_out_iounmap;
}
#ifdef CONFIG_PCIEAER
netxen_mask_aer_correctable(adapter);
#endif
/* Mezz cards have PCI function 0,2,3 enabled */
switch (adapter->ahw.board_type) {
case NETXEN_BRDTYPE_P2_SB31_10G_IMEZ:
case NETXEN_BRDTYPE_P2_SB31_10G_HMEZ:
if (pci_func_id >= 2)
adapter->portnum = pci_func_id - 2;
break;
default:
break;
}
err = netxen_check_flash_fw_compatibility(adapter);
if (err)
goto err_out_iounmap;
if (adapter->portnum == 0) {
val = NXRD32(adapter, NX_CRB_DEV_REF_COUNT);
if (val != 0xffffffff && val != 0) {
NXWR32(adapter, NX_CRB_DEV_REF_COUNT, 0);
adapter->need_fw_reset = 1;
}
}
err = netxen_start_firmware(adapter);
if (err)
goto err_out_decr_ref;
/*
* See if the firmware gave us a virtual-physical port mapping.
*/
adapter->physical_port = adapter->portnum;
if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) {
i = NXRD32(adapter, CRB_V2P(adapter->portnum));
if (i != 0x55555555)
adapter->physical_port = i;
}
netxen_nic_clear_stats(adapter);
netxen_setup_intr(adapter);
err = netxen_setup_netdev(adapter, netdev);
if (err)
goto err_out_disable_msi;
pci_set_drvdata(pdev, adapter);
netxen_schedule_work(adapter, netxen_fw_poll_work, FW_POLL_DELAY);
switch (adapter->ahw.port_type) {
case NETXEN_NIC_GBE:
dev_info(&adapter->pdev->dev, "%s: GbE port initialized\n",
adapter->netdev->name);
break;
case NETXEN_NIC_XGBE:
dev_info(&adapter->pdev->dev, "%s: XGbE port initialized\n",
adapter->netdev->name);
break;
}
netxen_create_diag_entries(adapter);
return 0;
err_out_disable_msi:
netxen_teardown_intr(adapter);
netxen_free_dummy_dma(adapter);
err_out_decr_ref:
nx_decr_dev_ref_cnt(adapter);
err_out_iounmap:
netxen_cleanup_pci_map(adapter);
err_out_free_netdev:
free_netdev(netdev);
err_out_free_res:
pci_release_regions(pdev);
err_out_disable_pdev:
pci_set_drvdata(pdev, NULL);
pci_disable_device(pdev);
return err;
}
static void netxen_cleanup_minidump(struct netxen_adapter *adapter)
{
kfree(adapter->mdump.md_template);
adapter->mdump.md_template = NULL;
if (adapter->mdump.md_capture_buff) {
vfree(adapter->mdump.md_capture_buff);
adapter->mdump.md_capture_buff = NULL;
}
}
static void netxen_nic_remove(struct pci_dev *pdev)
{
struct netxen_adapter *adapter;
struct net_device *netdev;
adapter = pci_get_drvdata(pdev);
if (adapter == NULL)
return;
netdev = adapter->netdev;
netxen_cancel_fw_work(adapter);
unregister_netdev(netdev);
cancel_work_sync(&adapter->tx_timeout_task);
netxen_free_ip_list(adapter, false);
netxen_nic_detach(adapter);
nx_decr_dev_ref_cnt(adapter);
if (adapter->portnum == 0)
netxen_free_dummy_dma(adapter);
clear_bit(__NX_RESETTING, &adapter->state);
netxen_teardown_intr(adapter);
netxen_remove_diag_entries(adapter);
netxen_cleanup_pci_map(adapter);
netxen_release_firmware(adapter);
if (NX_IS_REVISION_P3(pdev->revision)) {
netxen_cleanup_minidump(adapter);
pci_disable_pcie_error_reporting(pdev);
}
pci_release_regions(pdev);
pci_disable_device(pdev);
pci_set_drvdata(pdev, NULL);
free_netdev(netdev);
}
static void netxen_nic_detach_func(struct netxen_adapter *adapter)
{
struct net_device *netdev = adapter->netdev;
netif_device_detach(netdev);
netxen_cancel_fw_work(adapter);
if (netif_running(netdev))
netxen_nic_down(adapter, netdev);
cancel_work_sync(&adapter->tx_timeout_task);
netxen_nic_detach(adapter);
if (adapter->portnum == 0)
netxen_free_dummy_dma(adapter);
nx_decr_dev_ref_cnt(adapter);
clear_bit(__NX_RESETTING, &adapter->state);
}
static int netxen_nic_attach_func(struct pci_dev *pdev)
{
struct netxen_adapter *adapter = pci_get_drvdata(pdev);
struct net_device *netdev = adapter->netdev;
int err;
err = pci_enable_device(pdev);
if (err)
return err;
pci_set_power_state(pdev, PCI_D0);
pci_set_master(pdev);
pci_restore_state(pdev);
adapter->ahw.crb_win = -1;
adapter->ahw.ocm_win = -1;
err = netxen_start_firmware(adapter);
if (err) {
dev_err(&pdev->dev, "failed to start firmware\n");
return err;
}
if (netif_running(netdev)) {
err = netxen_nic_attach(adapter);
if (err)
goto err_out;
err = netxen_nic_up(adapter, netdev);
if (err)
goto err_out_detach;
netxen_restore_indev_addr(netdev, NETDEV_UP);
}
netif_device_attach(netdev);
netxen_schedule_work(adapter, netxen_fw_poll_work, FW_POLL_DELAY);
return 0;
err_out_detach:
netxen_nic_detach(adapter);
err_out:
nx_decr_dev_ref_cnt(adapter);
return err;
}
static pci_ers_result_t netxen_io_error_detected(struct pci_dev *pdev,
pci_channel_state_t state)
{
struct netxen_adapter *adapter = pci_get_drvdata(pdev);
if (state == pci_channel_io_perm_failure)
return PCI_ERS_RESULT_DISCONNECT;
if (nx_dev_request_aer(adapter))
return PCI_ERS_RESULT_RECOVERED;
netxen_nic_detach_func(adapter);
pci_disable_device(pdev);
return PCI_ERS_RESULT_NEED_RESET;
}
static pci_ers_result_t netxen_io_slot_reset(struct pci_dev *pdev)
{
int err = 0;
err = netxen_nic_attach_func(pdev);
return err ? PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_RECOVERED;
}
static void netxen_io_resume(struct pci_dev *pdev)
{
pci_cleanup_aer_uncorrect_error_status(pdev);
}
static void netxen_nic_shutdown(struct pci_dev *pdev)
{
struct netxen_adapter *adapter = pci_get_drvdata(pdev);
netxen_nic_detach_func(adapter);
if (pci_save_state(pdev))
return;
if (netxen_nic_wol_supported(adapter)) {
pci_enable_wake(pdev, PCI_D3cold, 1);
pci_enable_wake(pdev, PCI_D3hot, 1);
}
pci_disable_device(pdev);
}
#ifdef CONFIG_PM
static int
netxen_nic_suspend(struct pci_dev *pdev, pm_message_t state)
{
struct netxen_adapter *adapter = pci_get_drvdata(pdev);
int retval;
netxen_nic_detach_func(adapter);
retval = pci_save_state(pdev);
if (retval)
return retval;
if (netxen_nic_wol_supported(adapter)) {
pci_enable_wake(pdev, PCI_D3cold, 1);
pci_enable_wake(pdev, PCI_D3hot, 1);
}
pci_disable_device(pdev);
pci_set_power_state(pdev, pci_choose_state(pdev, state));
return 0;
}
static int
netxen_nic_resume(struct pci_dev *pdev)
{
return netxen_nic_attach_func(pdev);
}
#endif
static int netxen_nic_open(struct net_device *netdev)
{
struct netxen_adapter *adapter = netdev_priv(netdev);
int err = 0;
if (adapter->driver_mismatch)
return -EIO;
err = netxen_nic_attach(adapter);
if (err)
return err;
err = __netxen_nic_up(adapter, netdev);
if (err)
goto err_out;
netif_start_queue(netdev);
return 0;
err_out:
netxen_nic_detach(adapter);
return err;
}
/*
* netxen_nic_close - Disables a network interface entry point
*/
static int netxen_nic_close(struct net_device *netdev)
{
struct netxen_adapter *adapter = netdev_priv(netdev);
__netxen_nic_down(adapter, netdev);
return 0;
}
static void
netxen_tso_check(struct net_device *netdev,
struct nx_host_tx_ring *tx_ring,
struct cmd_desc_type0 *first_desc,
struct sk_buff *skb)
{
u8 opcode = TX_ETHER_PKT;
__be16 protocol = skb->protocol;
u16 flags = 0, vid = 0;
u32 producer;
int copied, offset, copy_len, hdr_len = 0, tso = 0, vlan_oob = 0;
struct cmd_desc_type0 *hwdesc;
struct vlan_ethhdr *vh;
if (protocol == cpu_to_be16(ETH_P_8021Q)) {
vh = (struct vlan_ethhdr *)skb->data;
protocol = vh->h_vlan_encapsulated_proto;
flags = FLAGS_VLAN_TAGGED;
} else if (vlan_tx_tag_present(skb)) {
flags = FLAGS_VLAN_OOB;
vid = vlan_tx_tag_get(skb);
netxen_set_tx_vlan_tci(first_desc, vid);
vlan_oob = 1;
}
if ((netdev->features & (NETIF_F_TSO | NETIF_F_TSO6)) &&
skb_shinfo(skb)->gso_size > 0) {
hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
first_desc->mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
first_desc->total_hdr_length = hdr_len;
if (vlan_oob) {
first_desc->total_hdr_length += VLAN_HLEN;
first_desc->tcp_hdr_offset = VLAN_HLEN;
first_desc->ip_hdr_offset = VLAN_HLEN;
/* Only in case of TSO on vlan device */
flags |= FLAGS_VLAN_TAGGED;
}
opcode = (protocol == cpu_to_be16(ETH_P_IPV6)) ?
TX_TCP_LSO6 : TX_TCP_LSO;
tso = 1;
} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
u8 l4proto;
if (protocol == cpu_to_be16(ETH_P_IP)) {
l4proto = ip_hdr(skb)->protocol;
if (l4proto == IPPROTO_TCP)
opcode = TX_TCP_PKT;
else if(l4proto == IPPROTO_UDP)
opcode = TX_UDP_PKT;
} else if (protocol == cpu_to_be16(ETH_P_IPV6)) {
l4proto = ipv6_hdr(skb)->nexthdr;
if (l4proto == IPPROTO_TCP)
opcode = TX_TCPV6_PKT;
else if(l4proto == IPPROTO_UDP)
opcode = TX_UDPV6_PKT;
}
}
first_desc->tcp_hdr_offset += skb_transport_offset(skb);
first_desc->ip_hdr_offset += skb_network_offset(skb);
netxen_set_tx_flags_opcode(first_desc, flags, opcode);
if (!tso)
return;
/* For LSO, we need to copy the MAC/IP/TCP headers into
* the descriptor ring
*/
producer = tx_ring->producer;
copied = 0;
offset = 2;
if (vlan_oob) {
/* Create a TSO vlan header template for firmware */
hwdesc = &tx_ring->desc_head[producer];
tx_ring->cmd_buf_arr[producer].skb = NULL;
copy_len = min((int)sizeof(struct cmd_desc_type0) - offset,
hdr_len + VLAN_HLEN);
vh = (struct vlan_ethhdr *)((char *)hwdesc + 2);
skb_copy_from_linear_data(skb, vh, 12);
vh->h_vlan_proto = htons(ETH_P_8021Q);
vh->h_vlan_TCI = htons(vid);
skb_copy_from_linear_data_offset(skb, 12,
(char *)vh + 16, copy_len - 16);
copied = copy_len - VLAN_HLEN;
offset = 0;
producer = get_next_index(producer, tx_ring->num_desc);
}
while (copied < hdr_len) {
copy_len = min((int)sizeof(struct cmd_desc_type0) - offset,
(hdr_len - copied));
hwdesc = &tx_ring->desc_head[producer];
tx_ring->cmd_buf_arr[producer].skb = NULL;
skb_copy_from_linear_data_offset(skb, copied,
(char *)hwdesc + offset, copy_len);
copied += copy_len;
offset = 0;
producer = get_next_index(producer, tx_ring->num_desc);
}
tx_ring->producer = producer;
barrier();
}
static int
netxen_map_tx_skb(struct pci_dev *pdev,
struct sk_buff *skb, struct netxen_cmd_buffer *pbuf)
{
struct netxen_skb_frag *nf;
struct skb_frag_struct *frag;
int i, nr_frags;
dma_addr_t map;
nr_frags = skb_shinfo(skb)->nr_frags;
nf = &pbuf->frag_array[0];
map = pci_map_single(pdev, skb->data,
skb_headlen(skb), PCI_DMA_TODEVICE);
if (pci_dma_mapping_error(pdev, map))
goto out_err;
nf->dma = map;
nf->length = skb_headlen(skb);
for (i = 0; i < nr_frags; i++) {
frag = &skb_shinfo(skb)->frags[i];
nf = &pbuf->frag_array[i+1];
map = skb_frag_dma_map(&pdev->dev, frag, 0, skb_frag_size(frag),
DMA_TO_DEVICE);
if (dma_mapping_error(&pdev->dev, map))
goto unwind;
nf->dma = map;
nf->length = skb_frag_size(frag);
}
return 0;
unwind:
while (--i >= 0) {
nf = &pbuf->frag_array[i+1];
pci_unmap_page(pdev, nf->dma, nf->length, PCI_DMA_TODEVICE);
nf->dma = 0ULL;
}
nf = &pbuf->frag_array[0];
pci_unmap_single(pdev, nf->dma, skb_headlen(skb), PCI_DMA_TODEVICE);
nf->dma = 0ULL;
out_err:
return -ENOMEM;
}
static inline void
netxen_clear_cmddesc(u64 *desc)
{
desc[0] = 0ULL;
desc[2] = 0ULL;
}
static netdev_tx_t
netxen_nic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
{
struct netxen_adapter *adapter = netdev_priv(netdev);
struct nx_host_tx_ring *tx_ring = adapter->tx_ring;
struct netxen_cmd_buffer *pbuf;
struct netxen_skb_frag *buffrag;
struct cmd_desc_type0 *hwdesc, *first_desc;
struct pci_dev *pdev;
int i, k;
int delta = 0;
struct skb_frag_struct *frag;
u32 producer;
int frag_count, no_of_desc;
u32 num_txd = tx_ring->num_desc;
frag_count = skb_shinfo(skb)->nr_frags + 1;
/* 14 frags supported for normal packet and
* 32 frags supported for TSO packet
*/
if (!skb_is_gso(skb) && frag_count > NETXEN_MAX_FRAGS_PER_TX) {
for (i = 0; i < (frag_count - NETXEN_MAX_FRAGS_PER_TX); i++) {
frag = &skb_shinfo(skb)->frags[i];
delta += skb_frag_size(frag);
}
if (!__pskb_pull_tail(skb, delta))
goto drop_packet;
frag_count = 1 + skb_shinfo(skb)->nr_frags;
}
/* 4 fragments per cmd desc */
no_of_desc = (frag_count + 3) >> 2;
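/*
* Worked example (added): a non-TSO skb with 6 fragments has
* frag_count = 7 (linear head + 6 frags), so no_of_desc = (7 + 3) >> 2 = 2,
* since each cmd_desc_type0 below carries up to four buffer addresses
* (addr_buffer1..addr_buffer4).
*/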
if (unlikely(netxen_tx_avail(tx_ring) <= TX_STOP_THRESH)) {
netif_stop_queue(netdev);
smp_mb();
if (netxen_tx_avail(tx_ring) > TX_STOP_THRESH)
netif_start_queue(netdev);
else
return NETDEV_TX_BUSY;
}
producer = tx_ring->producer;
pbuf = &tx_ring->cmd_buf_arr[producer];
pdev = adapter->pdev;
if (netxen_map_tx_skb(pdev, skb, pbuf))
goto drop_packet;
pbuf->skb = skb;
pbuf->frag_count = frag_count;
first_desc = hwdesc = &tx_ring->desc_head[producer];
netxen_clear_cmddesc((u64 *)hwdesc);
netxen_set_tx_frags_len(first_desc, frag_count, skb->len);
netxen_set_tx_port(first_desc, adapter->portnum);
for (i = 0; i < frag_count; i++) {
k = i % 4;
if ((k == 0) && (i > 0)) {
/* move to next desc.*/
producer = get_next_index(producer, num_txd);
hwdesc = &tx_ring->desc_head[producer];
netxen_clear_cmddesc((u64 *)hwdesc);
tx_ring->cmd_buf_arr[producer].skb = NULL;
}
buffrag = &pbuf->frag_array[i];
hwdesc->buffer_length[k] = cpu_to_le16(buffrag->length);
switch (k) {
case 0:
hwdesc->addr_buffer1 = cpu_to_le64(buffrag->dma);
break;
case 1:
hwdesc->addr_buffer2 = cpu_to_le64(buffrag->dma);
break;
case 2:
hwdesc->addr_buffer3 = cpu_to_le64(buffrag->dma);
break;
case 3:
hwdesc->addr_buffer4 = cpu_to_le64(buffrag->dma);
break;
}
}
tx_ring->producer = get_next_index(producer, num_txd);
netxen_tso_check(netdev, tx_ring, first_desc, skb);
adapter->stats.txbytes += skb->len;
adapter->stats.xmitcalled++;
netxen_nic_update_cmd_producer(adapter, tx_ring);
return NETDEV_TX_OK;
drop_packet:
adapter->stats.txdropped++;
dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
}
static int netxen_nic_check_temp(struct netxen_adapter *adapter)
{
struct net_device *netdev = adapter->netdev;
uint32_t temp, temp_state, temp_val;
int rv = 0;
temp = NXRD32(adapter, CRB_TEMP_STATE);
temp_state = nx_get_temp_state(temp);
temp_val = nx_get_temp_val(temp);
if (temp_state == NX_TEMP_PANIC) {
printk(KERN_ALERT
"%s: Device temperature %d degrees C exceeds"
" maximum allowed. Hardware has been shut down.\n",
netdev->name, temp_val);
rv = 1;
} else if (temp_state == NX_TEMP_WARN) {
if (adapter->temp == NX_TEMP_NORMAL) {
printk(KERN_ALERT
"%s: Device temperature %d degrees C "
"exceeds operating range."
" Immediate action needed.\n",
netdev->name, temp_val);
}
} else {
if (adapter->temp == NX_TEMP_WARN) {
printk(KERN_INFO
"%s: Device temperature is now %d degrees C"
" in normal range.\n", netdev->name,
temp_val);
}
}
adapter->temp = temp_state;
return rv;
}
void netxen_advert_link_change(struct netxen_adapter *adapter, int linkup)
{
struct net_device *netdev = adapter->netdev;
if (adapter->ahw.linkup && !linkup) {
printk(KERN_INFO "%s: %s NIC Link is down\n",
netxen_nic_driver_name, netdev->name);
adapter->ahw.linkup = 0;
if (netif_running(netdev)) {
netif_carrier_off(netdev);
netif_stop_queue(netdev);
}
adapter->link_changed = !adapter->has_link_events;
} else if (!adapter->ahw.linkup && linkup) {
printk(KERN_INFO "%s: %s NIC Link is up\n",
netxen_nic_driver_name, netdev->name);
adapter->ahw.linkup = 1;
if (netif_running(netdev)) {
netif_carrier_on(netdev);
netif_wake_queue(netdev);
}
adapter->link_changed = !adapter->has_link_events;
}
}
static void netxen_nic_handle_phy_intr(struct netxen_adapter *adapter)
{
u32 val, port, linkup;
port = adapter->physical_port;
if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) {
val = NXRD32(adapter, CRB_XG_STATE_P3);
val = XG_LINK_STATE_P3(adapter->ahw.pci_func, val);
linkup = (val == XG_LINK_UP_P3);
} else {
val = NXRD32(adapter, CRB_XG_STATE);
val = (val >> port*8) & 0xff;
linkup = (val == XG_LINK_UP);
}
netxen_advert_link_change(adapter, linkup);
}
static void netxen_tx_timeout(struct net_device *netdev)
{
struct netxen_adapter *adapter = netdev_priv(netdev);
if (test_bit(__NX_RESETTING, &adapter->state))
return;
dev_err(&netdev->dev, "transmit timeout, resetting.\n");
schedule_work(&adapter->tx_timeout_task);
}
static void netxen_tx_timeout_task(struct work_struct *work)
{
struct netxen_adapter *adapter =
container_of(work, struct netxen_adapter, tx_timeout_task);
if (!netif_running(adapter->netdev))
return;
if (test_and_set_bit(__NX_RESETTING, &adapter->state))
return;
if (++adapter->tx_timeo_cnt >= NX_MAX_TX_TIMEOUTS)
goto request_reset;
rtnl_lock();
if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) {
/* try to scrub interrupt */
netxen_napi_disable(adapter);
netxen_napi_enable(adapter);
netif_wake_queue(adapter->netdev);
clear_bit(__NX_RESETTING, &adapter->state);
} else {
clear_bit(__NX_RESETTING, &adapter->state);
if (netxen_nic_reset_context(adapter)) {
rtnl_unlock();
goto request_reset;
}
}
adapter->netdev->trans_start = jiffies;
rtnl_unlock();
return;
request_reset:
adapter->need_fw_reset = 1;
clear_bit(__NX_RESETTING, &adapter->state);
}
static struct rtnl_link_stats64 *netxen_nic_get_stats(struct net_device *netdev,
struct rtnl_link_stats64 *stats)
{
struct netxen_adapter *adapter = netdev_priv(netdev);
stats->rx_packets = adapter->stats.rx_pkts + adapter->stats.lro_pkts;
stats->tx_packets = adapter->stats.xmitfinished;
stats->rx_bytes = adapter->stats.rxbytes;
stats->tx_bytes = adapter->stats.txbytes;
stats->rx_dropped = adapter->stats.rxdropped;
stats->tx_dropped = adapter->stats.txdropped;
return stats;
}
static irqreturn_t netxen_intr(int irq, void *data)
{
struct nx_host_sds_ring *sds_ring = data;
struct netxen_adapter *adapter = sds_ring->adapter;
u32 status = 0;
status = readl(adapter->isr_int_vec);
if (!(status & adapter->int_vec_bit))
return IRQ_NONE;
if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) {
/* check interrupt state machine, to be sure */
status = readl(adapter->crb_int_state_reg);
if (!ISR_LEGACY_INT_TRIGGERED(status))
return IRQ_NONE;
} else {
unsigned long our_int = 0;
our_int = readl(adapter->crb_int_state_reg);
/* not our interrupt */
if (!test_and_clear_bit((7 + adapter->portnum), &our_int))
return IRQ_NONE;
/* claim interrupt */
writel((our_int & 0xffffffff), adapter->crb_int_state_reg);
/* clear interrupt */
netxen_nic_disable_int(sds_ring);
}
writel(0xffffffff, adapter->tgt_status_reg);
/* read twice to ensure write is flushed */
readl(adapter->isr_int_vec);
readl(adapter->isr_int_vec);
napi_schedule(&sds_ring->napi);
return IRQ_HANDLED;
}
static irqreturn_t netxen_msi_intr(int irq, void *data)
{
struct nx_host_sds_ring *sds_ring = data;
struct netxen_adapter *adapter = sds_ring->adapter;
/* clear interrupt */
writel(0xffffffff, adapter->tgt_status_reg);
napi_schedule(&sds_ring->napi);
return IRQ_HANDLED;
}
static irqreturn_t netxen_msix_intr(int irq, void *data)
{
struct nx_host_sds_ring *sds_ring = data;
napi_schedule(&sds_ring->napi);
return IRQ_HANDLED;
}
static int netxen_nic_poll(struct napi_struct *napi, int budget)
{
struct nx_host_sds_ring *sds_ring =
container_of(napi, struct nx_host_sds_ring, napi);
struct netxen_adapter *adapter = sds_ring->adapter;
int tx_complete;
int work_done;
tx_complete = netxen_process_cmd_ring(adapter);
work_done = netxen_process_rcv_ring(sds_ring, budget);
if ((work_done < budget) && tx_complete) {
napi_complete(&sds_ring->napi);
if (test_bit(__NX_DEV_UP, &adapter->state))
netxen_nic_enable_int(sds_ring);
}
return work_done;
}
#ifdef CONFIG_NET_POLL_CONTROLLER
static void netxen_nic_poll_controller(struct net_device *netdev)
{
int ring;
struct nx_host_sds_ring *sds_ring;
struct netxen_adapter *adapter = netdev_priv(netdev);
struct netxen_recv_context *recv_ctx = &adapter->recv_ctx;
disable_irq(adapter->irq);
for (ring = 0; ring < adapter->max_sds_rings; ring++) {
sds_ring = &recv_ctx->sds_rings[ring];
netxen_intr(adapter->irq, sds_ring);
}
enable_irq(adapter->irq);
}
#endif
static int
nx_incr_dev_ref_cnt(struct netxen_adapter *adapter)
{
int count;
if (netxen_api_lock(adapter))
return -EIO;
count = NXRD32(adapter, NX_CRB_DEV_REF_COUNT);
NXWR32(adapter, NX_CRB_DEV_REF_COUNT, ++count);
netxen_api_unlock(adapter);
return count;
}
static int
nx_decr_dev_ref_cnt(struct netxen_adapter *adapter)
{
int count, state;
if (netxen_api_lock(adapter))
return -EIO;
count = NXRD32(adapter, NX_CRB_DEV_REF_COUNT);
WARN_ON(count == 0);
NXWR32(adapter, NX_CRB_DEV_REF_COUNT, --count);
state = NXRD32(adapter, NX_CRB_DEV_STATE);
if (count == 0 && state != NX_DEV_FAILED)
NXWR32(adapter, NX_CRB_DEV_STATE, NX_DEV_COLD);
netxen_api_unlock(adapter);
return count;
}
static int
nx_dev_request_aer(struct netxen_adapter *adapter)
{
u32 state;
int ret = -EINVAL;
if (netxen_api_lock(adapter))
return ret;
state = NXRD32(adapter, NX_CRB_DEV_STATE);
if (state == NX_DEV_NEED_AER)
ret = 0;
else if (state == NX_DEV_READY) {
NXWR32(adapter, NX_CRB_DEV_STATE, NX_DEV_NEED_AER);
ret = 0;
}
netxen_api_unlock(adapter);
return ret;
}
int
nx_dev_request_reset(struct netxen_adapter *adapter)
{
u32 state;
int ret = -EINVAL;
if (netxen_api_lock(adapter))
return ret;
state = NXRD32(adapter, NX_CRB_DEV_STATE);
if (state == NX_DEV_NEED_RESET || state == NX_DEV_FAILED)
ret = 0;
else if (state != NX_DEV_INITALIZING && state != NX_DEV_NEED_AER) {
NXWR32(adapter, NX_CRB_DEV_STATE, NX_DEV_NEED_RESET);
adapter->flags |= NETXEN_FW_RESET_OWNER;
ret = 0;
}
netxen_api_unlock(adapter);
return ret;
}
static int
netxen_can_start_firmware(struct netxen_adapter *adapter)
{
int count;
int can_start = 0;
if (netxen_api_lock(adapter)) {
nx_incr_dev_ref_cnt(adapter);
return -1;
}
count = NXRD32(adapter, NX_CRB_DEV_REF_COUNT);
if ((count < 0) || (count >= NX_MAX_PCI_FUNC))
count = 0;
if (count == 0) {
can_start = 1;
NXWR32(adapter, NX_CRB_DEV_STATE, NX_DEV_INITALIZING);
}
NXWR32(adapter, NX_CRB_DEV_REF_COUNT, ++count);
netxen_api_unlock(adapter);
return can_start;
}
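/*
 * Note on the reference counting above (summary, not from the original
 * sources): only the caller that finds NX_CRB_DEV_REF_COUNT at zero (or
 * out of range) may start the firmware; it marks the device
 * NX_DEV_INITALIZING before taking its reference, while every other
 * function just increments the count.
 */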
static void
netxen_schedule_work(struct netxen_adapter *adapter,
work_func_t func, int delay)
{
INIT_DELAYED_WORK(&adapter->fw_work, func);
schedule_delayed_work(&adapter->fw_work, delay);
}
static void
netxen_cancel_fw_work(struct netxen_adapter *adapter)
{
while (test_and_set_bit(__NX_RESETTING, &adapter->state))
msleep(10);
cancel_delayed_work_sync(&adapter->fw_work);
}
static void
netxen_attach_work(struct work_struct *work)
{
struct netxen_adapter *adapter = container_of(work,
struct netxen_adapter, fw_work.work);
struct net_device *netdev = adapter->netdev;
int err = 0;
if (netif_running(netdev)) {
err = netxen_nic_attach(adapter);
if (err)
goto done;
err = netxen_nic_up(adapter, netdev);
if (err) {
netxen_nic_detach(adapter);
goto done;
}
netxen_restore_indev_addr(netdev, NETDEV_UP);
}
netif_device_attach(netdev);
done:
adapter->fw_fail_cnt = 0;
clear_bit(__NX_RESETTING, &adapter->state);
netxen_schedule_work(adapter, netxen_fw_poll_work, FW_POLL_DELAY);
}
static void
netxen_fwinit_work(struct work_struct *work)
{
struct netxen_adapter *adapter = container_of(work,
struct netxen_adapter, fw_work.work);
int dev_state;
int count;
dev_state = NXRD32(adapter, NX_CRB_DEV_STATE);
if (adapter->flags & NETXEN_FW_RESET_OWNER) {
count = NXRD32(adapter, NX_CRB_DEV_REF_COUNT);
WARN_ON(count == 0);
if (count == 1) {
if (adapter->mdump.md_enabled) {
rtnl_lock();
netxen_dump_fw(adapter);
rtnl_unlock();
}
adapter->flags &= ~NETXEN_FW_RESET_OWNER;
if (netxen_api_lock(adapter)) {
clear_bit(__NX_RESETTING, &adapter->state);
NXWR32(adapter, NX_CRB_DEV_STATE,
NX_DEV_FAILED);
return;
}
count = NXRD32(adapter, NX_CRB_DEV_REF_COUNT);
NXWR32(adapter, NX_CRB_DEV_REF_COUNT, --count);
NXWR32(adapter, NX_CRB_DEV_STATE, NX_DEV_COLD);
dev_state = NX_DEV_COLD;
netxen_api_unlock(adapter);
}
}
switch (dev_state) {
case NX_DEV_COLD:
case NX_DEV_READY:
if (!netxen_start_firmware(adapter)) {
netxen_schedule_work(adapter, netxen_attach_work, 0);
return;
}
break;
case NX_DEV_NEED_RESET:
case NX_DEV_INITALIZING:
netxen_schedule_work(adapter,
netxen_fwinit_work, 2 * FW_POLL_DELAY);
return;
case NX_DEV_FAILED:
default:
nx_incr_dev_ref_cnt(adapter);
break;
}
if (netxen_api_lock(adapter)) {
clear_bit(__NX_RESETTING, &adapter->state);
return;
}
NXWR32(adapter, NX_CRB_DEV_STATE, NX_DEV_FAILED);
netxen_api_unlock(adapter);
dev_err(&adapter->pdev->dev, "%s: Device initialization Failed\n",
adapter->netdev->name);
clear_bit(__NX_RESETTING, &adapter->state);
}
static void
netxen_detach_work(struct work_struct *work)
{
struct netxen_adapter *adapter = container_of(work,
struct netxen_adapter, fw_work.work);
struct net_device *netdev = adapter->netdev;
int ref_cnt = 0, delay;
u32 status;
netif_device_detach(netdev);
netxen_nic_down(adapter, netdev);
rtnl_lock();
netxen_nic_detach(adapter);
rtnl_unlock();
status = NXRD32(adapter, NETXEN_PEG_HALT_STATUS1);
if (status & NX_RCODE_FATAL_ERROR)
goto err_ret;
if (adapter->temp == NX_TEMP_PANIC)
goto err_ret;
if (!(adapter->flags & NETXEN_FW_RESET_OWNER))
ref_cnt = nx_decr_dev_ref_cnt(adapter);
if (ref_cnt == -EIO)
goto err_ret;
delay = (ref_cnt == 0) ? 0 : (2 * FW_POLL_DELAY);
adapter->fw_wait_cnt = 0;
netxen_schedule_work(adapter, netxen_fwinit_work, delay);
return;
err_ret:
clear_bit(__NX_RESETTING, &adapter->state);
}
static int
netxen_check_health(struct netxen_adapter *adapter)
{
u32 state, heartbit;
u32 peg_status;
struct net_device *netdev = adapter->netdev;
state = NXRD32(adapter, NX_CRB_DEV_STATE);
if (state == NX_DEV_NEED_AER)
return 0;
if (netxen_nic_check_temp(adapter))
goto detach;
if (adapter->need_fw_reset) {
if (nx_dev_request_reset(adapter))
return 0;
goto detach;
}
/* NX_DEV_NEED_RESET, this state can be marked in two cases
 * 1. Tx timeout 2. Fw hang
 * Send a request to destroy the context only in the Tx timeout case;
 * it is not required when the firmware hangs.
 */
if (state == NX_DEV_NEED_RESET || state == NX_DEV_FAILED) {
adapter->need_fw_reset = 1;
if (NX_IS_REVISION_P2(adapter->ahw.revision_id))
goto detach;
}
if (NX_IS_REVISION_P2(adapter->ahw.revision_id))
return 0;
heartbit = NXRD32(adapter, NETXEN_PEG_ALIVE_COUNTER);
if (heartbit != adapter->heartbit) {
adapter->heartbit = heartbit;
adapter->fw_fail_cnt = 0;
if (adapter->need_fw_reset)
goto detach;
return 0;
}
if (++adapter->fw_fail_cnt < FW_FAIL_THRESH)
return 0;
if (nx_dev_request_reset(adapter))
return 0;
clear_bit(__NX_FW_ATTACHED, &adapter->state);
dev_err(&netdev->dev, "firmware hang detected\n");
peg_status = NXRD32(adapter, NETXEN_PEG_HALT_STATUS1);
dev_err(&adapter->pdev->dev, "Dumping hw/fw registers\n"
"PEG_HALT_STATUS1: 0x%x, PEG_HALT_STATUS2: 0x%x,\n"
"PEG_NET_0_PC: 0x%x, PEG_NET_1_PC: 0x%x,\n"
"PEG_NET_2_PC: 0x%x, PEG_NET_3_PC: 0x%x,\n"
"PEG_NET_4_PC: 0x%x\n",
peg_status,
NXRD32(adapter, NETXEN_PEG_HALT_STATUS2),
NXRD32(adapter, NETXEN_CRB_PEG_NET_0 + 0x3c),
NXRD32(adapter, NETXEN_CRB_PEG_NET_1 + 0x3c),
NXRD32(adapter, NETXEN_CRB_PEG_NET_2 + 0x3c),
NXRD32(adapter, NETXEN_CRB_PEG_NET_3 + 0x3c),
NXRD32(adapter, NETXEN_CRB_PEG_NET_4 + 0x3c));
if (NX_FWERROR_PEGSTAT1(peg_status) == 0x67)
dev_err(&adapter->pdev->dev,
"Firmware aborted with error code 0x00006700. "
"Device is being reset.\n");
detach:
if ((auto_fw_reset == AUTO_FW_RESET_ENABLED) &&
!test_and_set_bit(__NX_RESETTING, &adapter->state))
netxen_schedule_work(adapter, netxen_detach_work, 0);
return 1;
}
static void
netxen_fw_poll_work(struct work_struct *work)
{
struct netxen_adapter *adapter = container_of(work,
struct netxen_adapter, fw_work.work);
if (test_bit(__NX_RESETTING, &adapter->state))
goto reschedule;
if (test_bit(__NX_DEV_UP, &adapter->state)) {
if (!adapter->has_link_events) {
netxen_nic_handle_phy_intr(adapter);
if (adapter->link_changed)
netxen_nic_set_link_parameters(adapter);
}
}
if (netxen_check_health(adapter))
return;
reschedule:
netxen_schedule_work(adapter, netxen_fw_poll_work, FW_POLL_DELAY);
}
static ssize_t
netxen_store_bridged_mode(struct device *dev,
struct device_attribute *attr, const char *buf, size_t len)
{
struct net_device *net = to_net_dev(dev);
struct netxen_adapter *adapter = netdev_priv(net);
unsigned long new;
int ret = -EINVAL;
if (!(adapter->capabilities & NX_FW_CAPABILITY_BDG))
goto err_out;
if (adapter->is_up != NETXEN_ADAPTER_UP_MAGIC)
goto err_out;
if (strict_strtoul(buf, 2, &new))
goto err_out;
if (!netxen_config_bridged_mode(adapter, !!new))
ret = len;
err_out:
return ret;
}
static ssize_t
netxen_show_bridged_mode(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct net_device *net = to_net_dev(dev);
struct netxen_adapter *adapter;
int bridged_mode = 0;
adapter = netdev_priv(net);
if (adapter->capabilities & NX_FW_CAPABILITY_BDG)
bridged_mode = !!(adapter->flags & NETXEN_NIC_BRIDGE_ENABLED);
return sprintf(buf, "%d\n", bridged_mode);
}
static struct device_attribute dev_attr_bridged_mode = {
.attr = {.name = "bridged_mode", .mode = (S_IRUGO | S_IWUSR)},
.show = netxen_show_bridged_mode,
.store = netxen_store_bridged_mode,
};
static ssize_t
netxen_store_diag_mode(struct device *dev,
struct device_attribute *attr, const char *buf, size_t len)
{
struct netxen_adapter *adapter = dev_get_drvdata(dev);
unsigned long new;
if (strict_strtoul(buf, 2, &new))
return -EINVAL;
if (!!new != !!(adapter->flags & NETXEN_NIC_DIAG_ENABLED))
adapter->flags ^= NETXEN_NIC_DIAG_ENABLED;
return len;
}
static ssize_t
netxen_show_diag_mode(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct netxen_adapter *adapter = dev_get_drvdata(dev);
return sprintf(buf, "%d\n",
!!(adapter->flags & NETXEN_NIC_DIAG_ENABLED));
}
static struct device_attribute dev_attr_diag_mode = {
.attr = {.name = "diag_mode", .mode = (S_IRUGO | S_IWUSR)},
.show = netxen_show_diag_mode,
.store = netxen_store_diag_mode,
};
static int
netxen_sysfs_validate_crb(struct netxen_adapter *adapter,
loff_t offset, size_t size)
{
size_t crb_size = 4;
if (!(adapter->flags & NETXEN_NIC_DIAG_ENABLED))
return -EIO;
if (offset < NETXEN_PCI_CRBSPACE) {
if (NX_IS_REVISION_P2(adapter->ahw.revision_id))
return -EINVAL;
if (ADDR_IN_RANGE(offset, NETXEN_PCI_CAMQM,
NETXEN_PCI_CAMQM_2M_END))
crb_size = 8;
else
return -EINVAL;
}
if ((size != crb_size) || (offset & (crb_size-1)))
return -EINVAL;
return 0;
}
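/*
 * Note (summary of the validator above, not from the original sources):
 * the crb read/write handlers below accept only naturally aligned
 * accesses of exactly crb_size bytes.  Offsets below NETXEN_PCI_CRBSPACE
 * are allowed only on P3 parts and only inside the CAMQM window, where
 * the access must be 8 bytes wide; everything else is a 4-byte access.
 */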
static ssize_t
netxen_sysfs_read_crb(struct file *filp, struct kobject *kobj,
struct bin_attribute *attr,
char *buf, loff_t offset, size_t size)
{
struct device *dev = container_of(kobj, struct device, kobj);
struct netxen_adapter *adapter = dev_get_drvdata(dev);
u32 data;
u64 qmdata;
int ret;
ret = netxen_sysfs_validate_crb(adapter, offset, size);
if (ret != 0)
return ret;
if (NX_IS_REVISION_P3(adapter->ahw.revision_id) &&
ADDR_IN_RANGE(offset, NETXEN_PCI_CAMQM,
NETXEN_PCI_CAMQM_2M_END)) {
netxen_pci_camqm_read_2M(adapter, offset, &qmdata);
memcpy(buf, &qmdata, size);
} else {
data = NXRD32(adapter, offset);
memcpy(buf, &data, size);
}
return size;
}
static ssize_t
netxen_sysfs_write_crb(struct file *filp, struct kobject *kobj,
struct bin_attribute *attr,
char *buf, loff_t offset, size_t size)
{
struct device *dev = container_of(kobj, struct device, kobj);
struct netxen_adapter *adapter = dev_get_drvdata(dev);
u32 data;
u64 qmdata;
int ret;
ret = netxen_sysfs_validate_crb(adapter, offset, size);
if (ret != 0)
return ret;
if (NX_IS_REVISION_P3(adapter->ahw.revision_id) &&
ADDR_IN_RANGE(offset, NETXEN_PCI_CAMQM,
NETXEN_PCI_CAMQM_2M_END)) {
memcpy(&qmdata, buf, size);
netxen_pci_camqm_write_2M(adapter, offset, qmdata);
} else {
memcpy(&data, buf, size);
NXWR32(adapter, offset, data);
}
return size;
}
static int
netxen_sysfs_validate_mem(struct netxen_adapter *adapter,
loff_t offset, size_t size)
{
if (!(adapter->flags & NETXEN_NIC_DIAG_ENABLED))
return -EIO;
if ((size != 8) || (offset & 0x7))
return -EIO;
return 0;
}
static ssize_t
netxen_sysfs_read_mem(struct file *filp, struct kobject *kobj,
struct bin_attribute *attr,
char *buf, loff_t offset, size_t size)
{
struct device *dev = container_of(kobj, struct device, kobj);
struct netxen_adapter *adapter = dev_get_drvdata(dev);
u64 data;
int ret;
ret = netxen_sysfs_validate_mem(adapter, offset, size);
if (ret != 0)
return ret;
if (adapter->pci_mem_read(adapter, offset, &data))
return -EIO;
memcpy(buf, &data, size);
return size;
}
static ssize_t netxen_sysfs_write_mem(struct file *filp, struct kobject *kobj,
struct bin_attribute *attr, char *buf,
loff_t offset, size_t size)
{
struct device *dev = container_of(kobj, struct device, kobj);
struct netxen_adapter *adapter = dev_get_drvdata(dev);
u64 data;
int ret;
ret = netxen_sysfs_validate_mem(adapter, offset, size);
if (ret != 0)
return ret;
memcpy(&data, buf, size);
if (adapter->pci_mem_write(adapter, offset, data))
return -EIO;
return size;
}
static struct bin_attribute bin_attr_crb = {
.attr = {.name = "crb", .mode = (S_IRUGO | S_IWUSR)},
.size = 0,
.read = netxen_sysfs_read_crb,
.write = netxen_sysfs_write_crb,
};
static struct bin_attribute bin_attr_mem = {
.attr = {.name = "mem", .mode = (S_IRUGO | S_IWUSR)},
.size = 0,
.read = netxen_sysfs_read_mem,
.write = netxen_sysfs_write_mem,
};
static ssize_t
netxen_sysfs_read_dimm(struct file *filp, struct kobject *kobj,
struct bin_attribute *attr,
char *buf, loff_t offset, size_t size)
{
struct device *dev = container_of(kobj, struct device, kobj);
struct netxen_adapter *adapter = dev_get_drvdata(dev);
struct net_device *netdev = adapter->netdev;
struct netxen_dimm_cfg dimm;
u8 dw, rows, cols, banks, ranks;
u32 val;
if (size != sizeof(struct netxen_dimm_cfg)) {
netdev_err(netdev, "Invalid size\n");
return -1;
}
memset(&dimm, 0, sizeof(struct netxen_dimm_cfg));
val = NXRD32(adapter, NETXEN_DIMM_CAPABILITY);
/* Checks if DIMM info is valid. */
if (val & NETXEN_DIMM_VALID_FLAG) {
netdev_err(netdev, "Invalid DIMM flag\n");
dimm.presence = 0xff;
goto out;
}
rows = NETXEN_DIMM_NUMROWS(val);
cols = NETXEN_DIMM_NUMCOLS(val);
ranks = NETXEN_DIMM_NUMRANKS(val);
banks = NETXEN_DIMM_NUMBANKS(val);
dw = NETXEN_DIMM_DATAWIDTH(val);
dimm.presence = (val & NETXEN_DIMM_PRESENT);
/* Checks if DIMM info is present. */
if (!dimm.presence) {
netdev_err(netdev, "DIMM not present\n");
goto out;
}
dimm.dimm_type = NETXEN_DIMM_TYPE(val);
switch (dimm.dimm_type) {
case NETXEN_DIMM_TYPE_RDIMM:
case NETXEN_DIMM_TYPE_UDIMM:
case NETXEN_DIMM_TYPE_SO_DIMM:
case NETXEN_DIMM_TYPE_Micro_DIMM:
case NETXEN_DIMM_TYPE_Mini_RDIMM:
case NETXEN_DIMM_TYPE_Mini_UDIMM:
break;
default:
netdev_err(netdev, "Invalid DIMM type %x\n", dimm.dimm_type);
goto out;
}
if (val & NETXEN_DIMM_MEMTYPE_DDR2_SDRAM)
dimm.mem_type = NETXEN_DIMM_MEM_DDR2_SDRAM;
else
dimm.mem_type = NETXEN_DIMM_MEMTYPE(val);
if (val & NETXEN_DIMM_SIZE) {
dimm.size = NETXEN_DIMM_STD_MEM_SIZE;
goto out;
}
if (!rows) {
netdev_err(netdev, "Invalid no of rows %x\n", rows);
goto out;
}
if (!cols) {
netdev_err(netdev, "Invalid no of columns %x\n", cols);
goto out;
}
if (!banks) {
netdev_err(netdev, "Invalid no of banks %x\n", banks);
goto out;
}
ranks += 1;
switch (dw) {
case 0x0:
dw = 32;
break;
case 0x1:
dw = 33;
break;
case 0x2:
dw = 36;
break;
case 0x3:
dw = 64;
break;
case 0x4:
dw = 72;
break;
case 0x5:
dw = 80;
break;
case 0x6:
dw = 128;
break;
case 0x7:
dw = 144;
break;
default:
netdev_err(netdev, "Invalid data-width %x\n", dw);
goto out;
}
dimm.size = ((1 << rows) * (1 << cols) * dw * banks * ranks) / 8;
/* Size returned in MB. */
dimm.size = (dimm.size) / 0x100000;
out:
memcpy(buf, &dimm, sizeof(struct netxen_dimm_cfg));
return sizeof(struct netxen_dimm_cfg);
}
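/*
 * Worked example for the size computation above (hypothetical values,
 * not taken from any DIMM datasheet): rows = 13, cols = 10, banks = 4,
 * ranks = 1 (after the +1), data width decoded to 64 bits:
 *   (1 << 13) * (1 << 10) * 64 * 4 * 1 / 8 = 268435456 bytes,
 *   268435456 / 0x100000 = 256 MB.
 */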
static struct bin_attribute bin_attr_dimm = {
.attr = { .name = "dimm", .mode = (S_IRUGO | S_IWUSR) },
.size = 0,
.read = netxen_sysfs_read_dimm,
};
static void
netxen_create_sysfs_entries(struct netxen_adapter *adapter)
{
struct device *dev = &adapter->pdev->dev;
if (adapter->capabilities & NX_FW_CAPABILITY_BDG) {
/* bridged_mode control */
if (device_create_file(dev, &dev_attr_bridged_mode)) {
dev_warn(dev,
"failed to create bridged_mode sysfs entry\n");
}
}
}
static void
netxen_remove_sysfs_entries(struct netxen_adapter *adapter)
{
struct device *dev = &adapter->pdev->dev;
if (adapter->capabilities & NX_FW_CAPABILITY_BDG)
device_remove_file(dev, &dev_attr_bridged_mode);
}
static void
netxen_create_diag_entries(struct netxen_adapter *adapter)
{
struct pci_dev *pdev = adapter->pdev;
struct device *dev;
dev = &pdev->dev;
if (device_create_file(dev, &dev_attr_diag_mode))
dev_info(dev, "failed to create diag_mode sysfs entry\n");
if (device_create_bin_file(dev, &bin_attr_crb))
dev_info(dev, "failed to create crb sysfs entry\n");
if (device_create_bin_file(dev, &bin_attr_mem))
dev_info(dev, "failed to create mem sysfs entry\n");
if (device_create_bin_file(dev, &bin_attr_dimm))
dev_info(dev, "failed to create dimm sysfs entry\n");
}
static void
netxen_remove_diag_entries(struct netxen_adapter *adapter)
{
struct pci_dev *pdev = adapter->pdev;
struct device *dev = &pdev->dev;
device_remove_file(dev, &dev_attr_diag_mode);
device_remove_bin_file(dev, &bin_attr_crb);
device_remove_bin_file(dev, &bin_attr_mem);
device_remove_bin_file(dev, &bin_attr_dimm);
}
#ifdef CONFIG_INET
#define is_netxen_netdev(dev) (dev->netdev_ops == &netxen_netdev_ops)
static int
netxen_destip_supported(struct netxen_adapter *adapter)
{
if (NX_IS_REVISION_P2(adapter->ahw.revision_id))
return 0;
if (adapter->ahw.cut_through)
return 0;
return 1;
}
static void
netxen_free_ip_list(struct netxen_adapter *adapter, bool master)
{
struct nx_ip_list *cur, *tmp_cur;
list_for_each_entry_safe(cur, tmp_cur, &adapter->ip_list, list) {
if (master) {
if (cur->master) {
netxen_config_ipaddr(adapter, cur->ip_addr,
NX_IP_DOWN);
list_del(&cur->list);
kfree(cur);
}
} else {
netxen_config_ipaddr(adapter, cur->ip_addr, NX_IP_DOWN);
list_del(&cur->list);
kfree(cur);
}
}
}
static bool
netxen_list_config_ip(struct netxen_adapter *adapter,
struct in_ifaddr *ifa, unsigned long event)
{
struct net_device *dev;
struct nx_ip_list *cur, *tmp_cur;
struct list_head *head;
bool ret = false;
dev = ifa->ifa_dev ? ifa->ifa_dev->dev : NULL;
if (dev == NULL)
goto out;
switch (event) {
case NX_IP_UP:
list_for_each(head, &adapter->ip_list) {
cur = list_entry(head, struct nx_ip_list, list);
if (cur->ip_addr == ifa->ifa_address)
goto out;
}
cur = kzalloc(sizeof(struct nx_ip_list), GFP_ATOMIC);
if (cur == NULL)
goto out;
if (dev->priv_flags & IFF_802_1Q_VLAN)
dev = vlan_dev_real_dev(dev);
cur->master = !!netif_is_bond_master(dev);
cur->ip_addr = ifa->ifa_address;
list_add_tail(&cur->list, &adapter->ip_list);
netxen_config_ipaddr(adapter, ifa->ifa_address, NX_IP_UP);
ret = true;
break;
case NX_IP_DOWN:
list_for_each_entry_safe(cur, tmp_cur,
&adapter->ip_list, list) {
if (cur->ip_addr == ifa->ifa_address) {
list_del(&cur->list);
kfree(cur);
netxen_config_ipaddr(adapter, ifa->ifa_address,
NX_IP_DOWN);
ret = true;
break;
}
}
}
out:
return ret;
}
static void
netxen_config_indev_addr(struct netxen_adapter *adapter,
struct net_device *dev, unsigned long event)
{
struct in_device *indev;
if (!netxen_destip_supported(adapter))
return;
indev = in_dev_get(dev);
if (!indev)
return;
for_ifa(indev) {
switch (event) {
case NETDEV_UP:
netxen_list_config_ip(adapter, ifa, NX_IP_UP);
break;
case NETDEV_DOWN:
netxen_list_config_ip(adapter, ifa, NX_IP_DOWN);
break;
default:
break;
}
} endfor_ifa(indev);
in_dev_put(indev);
}
static void
netxen_restore_indev_addr(struct net_device *netdev, unsigned long event)
{
struct netxen_adapter *adapter = netdev_priv(netdev);
struct nx_ip_list *pos, *tmp_pos;
unsigned long ip_event;
ip_event = (event == NETDEV_UP) ? NX_IP_UP : NX_IP_DOWN;
netxen_config_indev_addr(adapter, netdev, event);
list_for_each_entry_safe(pos, tmp_pos, &adapter->ip_list, list) {
netxen_config_ipaddr(adapter, pos->ip_addr, ip_event);
}
}
static inline bool
netxen_config_checkdev(struct net_device *dev)
{
struct netxen_adapter *adapter;
if (!is_netxen_netdev(dev))
return false;
adapter = netdev_priv(dev);
if (!adapter)
return false;
if (!netxen_destip_supported(adapter))
return false;
if (adapter->is_up != NETXEN_ADAPTER_UP_MAGIC)
return false;
return true;
}
/**
* netxen_config_master - configure addresses based on master
* @dev: netxen device
* @event: netdev event
*/
static void netxen_config_master(struct net_device *dev, unsigned long event)
{
struct net_device *master, *slave;
struct netxen_adapter *adapter = netdev_priv(dev);
rcu_read_lock();
master = netdev_master_upper_dev_get_rcu(dev);
/*
* This is the case where the netxen nic is being
* enslaved and is dev_open()ed in bond_enslave()
* Now we should program the bond's (and its vlans')
* addresses in the netxen NIC.
*/
if (master && netif_is_bond_master(master) &&
!netif_is_bond_slave(dev)) {
netxen_config_indev_addr(adapter, master, event);
for_each_netdev_rcu(&init_net, slave)
if (slave->priv_flags & IFF_802_1Q_VLAN &&
vlan_dev_real_dev(slave) == master)
netxen_config_indev_addr(adapter, slave, event);
}
rcu_read_unlock();
/*
* This is the case where the netxen nic is being
* released and is dev_close()ed in bond_release()
* just before IFF_BONDING is stripped.
*/
if (!master && dev->priv_flags & IFF_BONDING)
netxen_free_ip_list(adapter, true);
}
static int netxen_netdev_event(struct notifier_block *this,
unsigned long event, void *ptr)
{
struct netxen_adapter *adapter;
struct net_device *dev = (struct net_device *)ptr;
struct net_device *orig_dev = dev;
struct net_device *slave;
recheck:
if (dev == NULL)
goto done;
if (dev->priv_flags & IFF_802_1Q_VLAN) {
dev = vlan_dev_real_dev(dev);
goto recheck;
}
if (event == NETDEV_UP || event == NETDEV_DOWN) {
/* If this is a bonding device, look for netxen-based slaves */
if (netif_is_bond_master(dev)) {
rcu_read_lock();
for_each_netdev_in_bond_rcu(dev, slave) {
if (!netxen_config_checkdev(slave))
continue;
adapter = netdev_priv(slave);
netxen_config_indev_addr(adapter,
orig_dev, event);
}
rcu_read_unlock();
} else {
if (!netxen_config_checkdev(dev))
goto done;
adapter = netdev_priv(dev);
/* Act only if the actual netxen is the target */
if (orig_dev == dev)
netxen_config_master(dev, event);
netxen_config_indev_addr(adapter, orig_dev, event);
}
}
done:
return NOTIFY_DONE;
}
static int
netxen_inetaddr_event(struct notifier_block *this,
unsigned long event, void *ptr)
{
struct netxen_adapter *adapter;
struct net_device *dev, *slave;
struct in_ifaddr *ifa = (struct in_ifaddr *)ptr;
unsigned long ip_event;
dev = ifa->ifa_dev ? ifa->ifa_dev->dev : NULL;
ip_event = (event == NETDEV_UP) ? NX_IP_UP : NX_IP_DOWN;
recheck:
if (dev == NULL)
goto done;
if (dev->priv_flags & IFF_802_1Q_VLAN) {
dev = vlan_dev_real_dev(dev);
goto recheck;
}
if (event == NETDEV_UP || event == NETDEV_DOWN) {
/* If this is a bonding device, look for netxen-based slaves */
if (netif_is_bond_master(dev)) {
rcu_read_lock();
for_each_netdev_in_bond_rcu(dev, slave) {
if (!netxen_config_checkdev(slave))
continue;
adapter = netdev_priv(slave);
netxen_list_config_ip(adapter, ifa, ip_event);
}
rcu_read_unlock();
} else {
if (!netxen_config_checkdev(dev))
goto done;
adapter = netdev_priv(dev);
netxen_list_config_ip(adapter, ifa, ip_event);
}
}
done:
return NOTIFY_DONE;
}
static struct notifier_block netxen_netdev_cb = {
.notifier_call = netxen_netdev_event,
};
static struct notifier_block netxen_inetaddr_cb = {
.notifier_call = netxen_inetaddr_event,
};
#else
static void
netxen_restore_indev_addr(struct net_device *dev, unsigned long event)
{ }
static void
netxen_free_ip_list(struct netxen_adapter *adapter, bool master)
{ }
#endif
static const struct pci_error_handlers netxen_err_handler = {
.error_detected = netxen_io_error_detected,
.slot_reset = netxen_io_slot_reset,
.resume = netxen_io_resume,
};
static struct pci_driver netxen_driver = {
.name = netxen_nic_driver_name,
.id_table = netxen_pci_tbl,
.probe = netxen_nic_probe,
.remove = netxen_nic_remove,
#ifdef CONFIG_PM
.suspend = netxen_nic_suspend,
.resume = netxen_nic_resume,
#endif
.shutdown = netxen_nic_shutdown,
.err_handler = &netxen_err_handler
};
static int __init netxen_init_module(void)
{
printk(KERN_INFO "%s\n", netxen_nic_driver_string);
#ifdef CONFIG_INET
register_netdevice_notifier(&netxen_netdev_cb);
register_inetaddr_notifier(&netxen_inetaddr_cb);
#endif
return pci_register_driver(&netxen_driver);
}
module_init(netxen_init_module);
static void __exit netxen_exit_module(void)
{
pci_unregister_driver(&netxen_driver);
#ifdef CONFIG_INET
unregister_inetaddr_notifier(&netxen_inetaddr_cb);
unregister_netdevice_notifier(&netxen_netdev_cb);
#endif
}
module_exit(netxen_exit_module);
| gpl-2.0 |
johnso51/linux | sound/oss/mpu401.c | 1711 | 38480 | /*
* sound/oss/mpu401.c
*
* The low level driver for Roland MPU-401 compatible Midi cards.
*/
/*
* Copyright (C) by Hannu Savolainen 1993-1997
*
* OSS/Free for Linux is distributed under the GNU GENERAL PUBLIC LICENSE (GPL)
* Version 2 (June 1991). See the "COPYING" file distributed with this software
* for more info.
*
*
* Thomas Sailer ioctl code reworked (vmalloc/vfree removed)
* Alan Cox modularisation, use normal request_irq, use dev_id
* Bartlomiej Zolnierkiewicz removed some __init to allow using many drivers
* Chris Rankin Update the module-usage counter for the coprocessor
* Zwane Mwaikambo Changed attach/unload resource freeing
*/
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#define USE_SEQ_MACROS
#define USE_SIMPLE_MACROS
#include "sound_config.h"
#include "coproc.h"
#include "mpu401.h"
static int timer_mode = TMR_INTERNAL, timer_caps = TMR_INTERNAL;
struct mpu_config
{
int base; /*
* I/O base
*/
int irq;
int opened; /*
* Open mode
*/
int devno;
int synthno;
int uart_mode;
int initialized;
int mode;
#define MODE_MIDI 1
#define MODE_SYNTH 2
unsigned char version, revision;
unsigned int capabilities;
#define MPU_CAP_INTLG 0x10000000
#define MPU_CAP_SYNC 0x00000010
#define MPU_CAP_FSK 0x00000020
#define MPU_CAP_CLS 0x00000040
#define MPU_CAP_SMPTE 0x00000080
#define MPU_CAP_2PORT 0x00000001
int timer_flag;
#define MBUF_MAX 10
#define BUFTEST(dc) if (dc->m_ptr >= MBUF_MAX || dc->m_ptr < 0) \
{printk( "MPU: Invalid buffer pointer %d/%d, s=%d\n", dc->m_ptr, dc->m_left, dc->m_state);dc->m_ptr--;}
int m_busy;
unsigned char m_buf[MBUF_MAX];
int m_ptr;
int m_state;
int m_left;
unsigned char last_status;
void (*inputintr) (int dev, unsigned char data);
int shared_irq;
int *osp;
spinlock_t lock;
};
#define DATAPORT(base) (base)
#define COMDPORT(base) (base+1)
#define STATPORT(base) (base+1)
static void mpu401_close(int dev);
static inline int mpu401_status(struct mpu_config *devc)
{
return inb(STATPORT(devc->base));
}
#define input_avail(devc) (!(mpu401_status(devc)&INPUT_AVAIL))
#define output_ready(devc) (!(mpu401_status(devc)&OUTPUT_READY))
static inline void write_command(struct mpu_config *devc, unsigned char cmd)
{
outb(cmd, COMDPORT(devc->base));
}
static inline int read_data(struct mpu_config *devc)
{
return inb(DATAPORT(devc->base));
}
static inline void write_data(struct mpu_config *devc, unsigned char byte)
{
outb(byte, DATAPORT(devc->base));
}
#define OUTPUT_READY 0x40
#define INPUT_AVAIL 0x80
#define MPU_ACK 0xFE
#define MPU_RESET 0xFF
#define UART_MODE_ON 0x3F
static struct mpu_config dev_conf[MAX_MIDI_DEV];
static int n_mpu_devs;
static int reset_mpu401(struct mpu_config *devc);
static void set_uart_mode(int dev, struct mpu_config *devc, int arg);
static int mpu_timer_init(int midi_dev);
static void mpu_timer_interrupt(void);
static void timer_ext_event(struct mpu_config *devc, int event, int parm);
static struct synth_info mpu_synth_info_proto = {
"MPU-401 MIDI interface",
0,
SYNTH_TYPE_MIDI,
MIDI_TYPE_MPU401,
0, 128,
0, 128,
SYNTH_CAP_INPUT
};
static struct synth_info mpu_synth_info[MAX_MIDI_DEV];
/*
* States for the input scanner
*/
#define ST_INIT 0 /* Ready for timing byte or msg */
#define ST_TIMED 1 /* Leading timing byte rcvd */
#define ST_DATABYTE 2 /* Waiting for (nr_left) data bytes */
#define ST_SYSMSG 100 /* System message (sysx etc). */
#define ST_SYSEX 101 /* System exclusive msg */
#define ST_MTC 102 /* Midi Time Code (MTC) qframe msg */
#define ST_SONGSEL 103 /* Song select */
#define ST_SONGPOS 104 /* Song position pointer */
static unsigned char len_tab[] = /* # of data bytes following a status
*/
{
2, /* 8x */
2, /* 9x */
2, /* Ax */
2, /* Bx */
1, /* Cx */
1, /* Dx */
2, /* Ex */
0 /* Fx */
};
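/*
 * Illustration (derived from the scanner below, not from MPU-401 docs):
 * in intelligent mode a timed Note On arrives as
 *   <timing byte <= 0xef> 0x90 <note> <velocity>
 * The timing byte moves ST_INIT -> ST_TIMED, the 0x90 status selects
 * len_tab[1] == 2 data bytes, and once both have been collected in
 * ST_DATABYTE the complete 3-byte message is handed to do_midi_msg().
 */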
#define STORE(cmd) \
{ \
int len; \
unsigned char obuf[8]; \
cmd; \
seq_input_event(obuf, len); \
}
#define _seqbuf obuf
#define _seqbufptr 0
#define _SEQ_ADVBUF(x) len=x
static int mpu_input_scanner(struct mpu_config *devc, unsigned char midic)
{
switch (devc->m_state)
{
case ST_INIT:
switch (midic)
{
case 0xf8:
/* Timer overflow */
break;
case 0xfc:
printk("<all end>");
break;
case 0xfd:
if (devc->timer_flag)
mpu_timer_interrupt();
break;
case 0xfe:
return MPU_ACK;
case 0xf0:
case 0xf1:
case 0xf2:
case 0xf3:
case 0xf4:
case 0xf5:
case 0xf6:
case 0xf7:
printk("<Trk data rq #%d>", midic & 0x0f);
break;
case 0xf9:
printk("<conductor rq>");
break;
case 0xff:
devc->m_state = ST_SYSMSG;
break;
default:
if (midic <= 0xef)
{
/* printk( "mpu time: %d ", midic); */
devc->m_state = ST_TIMED;
}
else
printk("<MPU: Unknown event %02x> ", midic);
}
break;
case ST_TIMED:
{
int msg = ((int) (midic & 0xf0) >> 4);
devc->m_state = ST_DATABYTE;
if (msg < 8) /* Data byte */
{
/* printk( "midi msg (running status) "); */
msg = ((int) (devc->last_status & 0xf0) >> 4);
msg -= 8;
devc->m_left = len_tab[msg] - 1;
devc->m_ptr = 2;
devc->m_buf[0] = devc->last_status;
devc->m_buf[1] = midic;
if (devc->m_left <= 0)
{
devc->m_state = ST_INIT;
do_midi_msg(devc->synthno, devc->m_buf, devc->m_ptr);
devc->m_ptr = 0;
}
}
else if (msg == 0xf) /* MPU MARK */
{
devc->m_state = ST_INIT;
switch (midic)
{
case 0xf8:
/* printk( "NOP "); */
break;
case 0xf9:
/* printk( "meas end "); */
break;
case 0xfc:
/* printk( "data end "); */
break;
default:
printk("Unknown MPU mark %02x\n", midic);
}
}
else
{
devc->last_status = midic;
/* printk( "midi msg "); */
msg -= 8;
devc->m_left = len_tab[msg];
devc->m_ptr = 1;
devc->m_buf[0] = midic;
if (devc->m_left <= 0)
{
devc->m_state = ST_INIT;
do_midi_msg(devc->synthno, devc->m_buf, devc->m_ptr);
devc->m_ptr = 0;
}
}
}
break;
case ST_SYSMSG:
switch (midic)
{
case 0xf0:
printk("<SYX>");
devc->m_state = ST_SYSEX;
break;
case 0xf1:
devc->m_state = ST_MTC;
break;
case 0xf2:
devc->m_state = ST_SONGPOS;
devc->m_ptr = 0;
break;
case 0xf3:
devc->m_state = ST_SONGSEL;
break;
case 0xf6:
/* printk( "tune_request\n"); */
devc->m_state = ST_INIT;
break;
/*
* Real time messages
*/
case 0xf8:
/* midi clock */
devc->m_state = ST_INIT;
timer_ext_event(devc, TMR_CLOCK, 0);
break;
case 0xfA:
devc->m_state = ST_INIT;
timer_ext_event(devc, TMR_START, 0);
break;
case 0xFB:
devc->m_state = ST_INIT;
timer_ext_event(devc, TMR_CONTINUE, 0);
break;
case 0xFC:
devc->m_state = ST_INIT;
timer_ext_event(devc, TMR_STOP, 0);
break;
case 0xFE:
/* active sensing */
devc->m_state = ST_INIT;
break;
case 0xff:
/* printk( "midi hard reset"); */
devc->m_state = ST_INIT;
break;
default:
printk("unknown MIDI sysmsg %0x\n", midic);
devc->m_state = ST_INIT;
}
break;
case ST_MTC:
devc->m_state = ST_INIT;
printk("MTC frame %x02\n", midic);
break;
case ST_SYSEX:
if (midic == 0xf7)
{
printk("<EOX>");
devc->m_state = ST_INIT;
}
else
printk("%02x ", midic);
break;
case ST_SONGPOS:
BUFTEST(devc);
devc->m_buf[devc->m_ptr++] = midic;
if (devc->m_ptr == 2)
{
devc->m_state = ST_INIT;
devc->m_ptr = 0;
timer_ext_event(devc, TMR_SPP,
((devc->m_buf[1] & 0x7f) << 7) |
(devc->m_buf[0] & 0x7f));
}
break;
case ST_DATABYTE:
BUFTEST(devc);
devc->m_buf[devc->m_ptr++] = midic;
if ((--devc->m_left) <= 0)
{
devc->m_state = ST_INIT;
do_midi_msg(devc->synthno, devc->m_buf, devc->m_ptr);
devc->m_ptr = 0;
}
break;
default:
printk("Bad state %d ", devc->m_state);
devc->m_state = ST_INIT;
}
return 1;
}
static void mpu401_input_loop(struct mpu_config *devc)
{
unsigned long flags;
int busy;
int n;
spin_lock_irqsave(&devc->lock,flags);
busy = devc->m_busy;
devc->m_busy = 1;
spin_unlock_irqrestore(&devc->lock,flags);
if (busy) /* Already inside the scanner */
return;
n = 50;
while (input_avail(devc) && n-- > 0)
{
unsigned char c = read_data(devc);
if (devc->mode == MODE_SYNTH)
{
mpu_input_scanner(devc, c);
}
else if (devc->opened & OPEN_READ && devc->inputintr != NULL)
devc->inputintr(devc->devno, c);
}
devc->m_busy = 0;
}
static irqreturn_t mpuintr(int irq, void *dev_id)
{
struct mpu_config *devc;
int dev = (int)(unsigned long) dev_id;
int handled = 0;
devc = &dev_conf[dev];
if (input_avail(devc))
{
handled = 1;
if (devc->base != 0 && (devc->opened & OPEN_READ || devc->mode == MODE_SYNTH))
mpu401_input_loop(devc);
else
{
/* Dummy read (just to acknowledge the interrupt) */
read_data(devc);
}
}
return IRQ_RETVAL(handled);
}
static int mpu401_open(int dev, int mode,
void (*input) (int dev, unsigned char data),
void (*output) (int dev)
)
{
int err;
struct mpu_config *devc;
struct coproc_operations *coprocessor;
if (dev < 0 || dev >= num_midis || midi_devs[dev] == NULL)
return -ENXIO;
devc = &dev_conf[dev];
if (devc->opened)
return -EBUSY;
/*
* Verify that the device is really running.
* Some devices (such as the Ensoniq SoundScape) don't
* work before the on board processor (OBP) is initialized
* by downloading its microcode.
*/
if (!devc->initialized)
{
if (mpu401_status(devc) == 0xff) /* Bus float */
{
printk(KERN_ERR "mpu401: Device not initialized properly\n");
return -EIO;
}
reset_mpu401(devc);
}
if ( (coprocessor = midi_devs[dev]->coproc) != NULL )
{
if (!try_module_get(coprocessor->owner)) {
mpu401_close(dev);
return -ENODEV;
}
if ((err = coprocessor->open(coprocessor->devc, COPR_MIDI)) < 0)
{
printk(KERN_WARNING "MPU-401: Can't access coprocessor device\n");
mpu401_close(dev);
return err;
}
}
set_uart_mode(dev, devc, 1);
devc->mode = MODE_MIDI;
devc->synthno = 0;
mpu401_input_loop(devc);
devc->inputintr = input;
devc->opened = mode;
return 0;
}
static void mpu401_close(int dev)
{
struct mpu_config *devc;
struct coproc_operations *coprocessor;
devc = &dev_conf[dev];
if (devc->uart_mode)
reset_mpu401(devc); /*
* This disables the UART mode
*/
devc->mode = 0;
devc->inputintr = NULL;
coprocessor = midi_devs[dev]->coproc;
if (coprocessor) {
coprocessor->close(coprocessor->devc, COPR_MIDI);
module_put(coprocessor->owner);
}
devc->opened = 0;
}
static int mpu401_out(int dev, unsigned char midi_byte)
{
int timeout;
unsigned long flags;
struct mpu_config *devc;
devc = &dev_conf[dev];
/*
* Sometimes it takes about 30000 loops before the output becomes ready
* (After reset). Normally it takes just about 10 loops.
*/
for (timeout = 30000; timeout > 0 && !output_ready(devc); timeout--);
spin_lock_irqsave(&devc->lock,flags);
if (!output_ready(devc))
{
printk(KERN_WARNING "mpu401: Send data timeout\n");
spin_unlock_irqrestore(&devc->lock,flags);
return 0;
}
write_data(devc, midi_byte);
spin_unlock_irqrestore(&devc->lock,flags);
return 1;
}
static int mpu401_command(int dev, mpu_command_rec * cmd)
{
int i, timeout, ok;
unsigned long flags;
struct mpu_config *devc;
devc = &dev_conf[dev];
if (devc->uart_mode) /*
* Not possible in UART mode
*/
{
printk(KERN_WARNING "mpu401: commands not possible in the UART mode\n");
return -EINVAL;
}
/*
* Test for input since pending input seems to block the output.
*/
if (input_avail(devc))
mpu401_input_loop(devc);
/*
* Sometimes it takes about 50000 loops before the output becomes ready
* (After reset). Normally it takes just about 10 loops.
*/
timeout = 50000;
retry:
if (timeout-- <= 0)
{
printk(KERN_WARNING "mpu401: Command (0x%x) timeout\n", (int) cmd->cmd);
return -EIO;
}
spin_lock_irqsave(&devc->lock,flags);
if (!output_ready(devc))
{
spin_unlock_irqrestore(&devc->lock,flags);
goto retry;
}
write_command(devc, cmd->cmd);
ok = 0;
for (timeout = 50000; timeout > 0 && !ok; timeout--)
{
if (input_avail(devc))
{
if (devc->opened && devc->mode == MODE_SYNTH)
{
if (mpu_input_scanner(devc, read_data(devc)) == MPU_ACK)
ok = 1;
}
else
{
/* Device is not currently open. Use simpler method */
if (read_data(devc) == MPU_ACK)
ok = 1;
}
}
}
if (!ok)
{
spin_unlock_irqrestore(&devc->lock,flags);
return -EIO;
}
if (cmd->nr_args)
{
for (i = 0; i < cmd->nr_args; i++)
{
for (timeout = 3000; timeout > 0 && !output_ready(devc); timeout--);
if (!mpu401_out(dev, cmd->data[i]))
{
spin_unlock_irqrestore(&devc->lock,flags);
printk(KERN_WARNING "mpu401: Command (0x%x), parm send failed.\n", (int) cmd->cmd);
return -EIO;
}
}
}
cmd->data[0] = 0;
if (cmd->nr_returns)
{
for (i = 0; i < cmd->nr_returns; i++)
{
ok = 0;
for (timeout = 5000; timeout > 0 && !ok; timeout--)
if (input_avail(devc))
{
cmd->data[i] = read_data(devc);
ok = 1;
}
if (!ok)
{
spin_unlock_irqrestore(&devc->lock,flags);
return -EIO;
}
}
}
spin_unlock_irqrestore(&devc->lock,flags);
return 0;
}
static int mpu_cmd(int dev, int cmd, int data)
{
int ret;
static mpu_command_rec rec;
rec.cmd = cmd & 0xff;
rec.nr_args = ((cmd & 0xf0) == 0xE0);
rec.nr_returns = ((cmd & 0xf0) == 0xA0);
rec.data[0] = data & 0xff;
if ((ret = mpu401_command(dev, &rec)) < 0)
return ret;
return (unsigned char) rec.data[0];
}
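/*
 * Usage note: only the 0xEx command group carries one argument byte and
 * only the 0xAx group returns one, so e.g. mpu_cmd(dev, 0xE0, 120) sets
 * the tempo while mpu_cmd(dev, 0xAC, 0) reads back the version byte
 * (both are used elsewhere in this driver).
 */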
static int mpu401_prefix_cmd(int dev, unsigned char status)
{
struct mpu_config *devc = &dev_conf[dev];
if (devc->uart_mode)
return 1;
if (status < 0xf0)
{
if (mpu_cmd(dev, 0xD0, 0) < 0)
return 0;
return 1;
}
switch (status)
{
case 0xF0:
if (mpu_cmd(dev, 0xDF, 0) < 0)
return 0;
return 1;
default:
return 0;
}
}
static int mpu401_start_read(int dev)
{
return 0;
}
static int mpu401_end_read(int dev)
{
return 0;
}
static int mpu401_ioctl(int dev, unsigned cmd, void __user *arg)
{
struct mpu_config *devc;
mpu_command_rec rec;
int val, ret;
devc = &dev_conf[dev];
switch (cmd)
{
case SNDCTL_MIDI_MPUMODE:
if (!(devc->capabilities & MPU_CAP_INTLG)) { /* No intelligent mode */
printk(KERN_WARNING "mpu401: Intelligent mode not supported by the HW\n");
return -EINVAL;
}
if (get_user(val, (int __user *)arg))
return -EFAULT;
set_uart_mode(dev, devc, !val);
return 0;
case SNDCTL_MIDI_MPUCMD:
if (copy_from_user(&rec, arg, sizeof(rec)))
return -EFAULT;
if ((ret = mpu401_command(dev, &rec)) < 0)
return ret;
if (copy_to_user(arg, &rec, sizeof(rec)))
return -EFAULT;
return 0;
default:
return -EINVAL;
}
}
static void mpu401_kick(int dev)
{
}
static int mpu401_buffer_status(int dev)
{
return 0; /*
* No data in buffers
*/
}
static int mpu_synth_ioctl(int dev, unsigned int cmd, void __user *arg)
{
int midi_dev;
struct mpu_config *devc;
midi_dev = synth_devs[dev]->midi_dev;
if (midi_dev < 0 || midi_dev >= num_midis || midi_devs[midi_dev] == NULL)
return -ENXIO;
devc = &dev_conf[midi_dev];
switch (cmd)
{
case SNDCTL_SYNTH_INFO:
if (copy_to_user(arg, &mpu_synth_info[midi_dev],
sizeof(struct synth_info)))
return -EFAULT;
return 0;
case SNDCTL_SYNTH_MEMAVL:
return 0x7fffffff;
default:
return -EINVAL;
}
}
static int mpu_synth_open(int dev, int mode)
{
int midi_dev, err;
struct mpu_config *devc;
struct coproc_operations *coprocessor;
midi_dev = synth_devs[dev]->midi_dev;
if (midi_dev < 0 || midi_dev > num_midis || midi_devs[midi_dev] == NULL)
return -ENXIO;
devc = &dev_conf[midi_dev];
/*
* Verify that the device is really running.
* Some devices (such as the Ensoniq SoundScape) don't
* work before the on board processor (OBP) is initialized
* by downloading its microcode.
*/
if (!devc->initialized)
{
if (mpu401_status(devc) == 0xff) /* Bus float */
{
printk(KERN_ERR "mpu401: Device not initialized properly\n");
return -EIO;
}
reset_mpu401(devc);
}
if (devc->opened)
return -EBUSY;
devc->mode = MODE_SYNTH;
devc->synthno = dev;
devc->inputintr = NULL;
coprocessor = midi_devs[midi_dev]->coproc;
if (coprocessor) {
if (!try_module_get(coprocessor->owner))
return -ENODEV;
if ((err = coprocessor->open(coprocessor->devc, COPR_MIDI)) < 0)
{
printk(KERN_WARNING "mpu401: Can't access coprocessor device\n");
return err;
}
}
devc->opened = mode;
reset_mpu401(devc);
if (mode & OPEN_READ)
{
mpu_cmd(midi_dev, 0x8B, 0); /* Enable data in stop mode */
mpu_cmd(midi_dev, 0x34, 0); /* Return timing bytes in stop mode */
mpu_cmd(midi_dev, 0x87, 0); /* Enable pitch & controller */
}
return 0;
}
static void mpu_synth_close(int dev)
{
int midi_dev;
struct mpu_config *devc;
struct coproc_operations *coprocessor;
midi_dev = synth_devs[dev]->midi_dev;
devc = &dev_conf[midi_dev];
mpu_cmd(midi_dev, 0x15, 0); /* Stop recording, playback and MIDI */
mpu_cmd(midi_dev, 0x8a, 0); /* Disable data in stopped mode */
devc->inputintr = NULL;
coprocessor = midi_devs[midi_dev]->coproc;
if (coprocessor) {
coprocessor->close(coprocessor->devc, COPR_MIDI);
module_put(coprocessor->owner);
}
devc->opened = 0;
devc->mode = 0;
}
#define MIDI_SYNTH_NAME "MPU-401 UART Midi"
#define MIDI_SYNTH_CAPS SYNTH_CAP_INPUT
#include "midi_synth.h"
static struct synth_operations mpu401_synth_proto =
{
.owner = THIS_MODULE,
.id = "MPU401",
.info = NULL,
.midi_dev = 0,
.synth_type = SYNTH_TYPE_MIDI,
.synth_subtype = 0,
.open = mpu_synth_open,
.close = mpu_synth_close,
.ioctl = mpu_synth_ioctl,
.kill_note = midi_synth_kill_note,
.start_note = midi_synth_start_note,
.set_instr = midi_synth_set_instr,
.reset = midi_synth_reset,
.hw_control = midi_synth_hw_control,
.load_patch = midi_synth_load_patch,
.aftertouch = midi_synth_aftertouch,
.controller = midi_synth_controller,
.panning = midi_synth_panning,
.bender = midi_synth_bender,
.setup_voice = midi_synth_setup_voice,
.send_sysex = midi_synth_send_sysex
};
static struct synth_operations *mpu401_synth_operations[MAX_MIDI_DEV];
static struct midi_operations mpu401_midi_proto =
{
.owner = THIS_MODULE,
.info = {"MPU-401 Midi", 0, MIDI_CAP_MPU401, SNDCARD_MPU401},
.in_info = {0},
.open = mpu401_open,
.close = mpu401_close,
.ioctl = mpu401_ioctl,
.outputc = mpu401_out,
.start_read = mpu401_start_read,
.end_read = mpu401_end_read,
.kick = mpu401_kick,
.buffer_status = mpu401_buffer_status,
.prefix_cmd = mpu401_prefix_cmd
};
static struct midi_operations mpu401_midi_operations[MAX_MIDI_DEV];
static void mpu401_chk_version(int n, struct mpu_config *devc)
{
int tmp;
devc->version = devc->revision = 0;
tmp = mpu_cmd(n, 0xAC, 0);
if (tmp < 0)
return;
if ((tmp & 0xf0) > 0x20) /* Why it's larger than 2.x ??? */
return;
devc->version = tmp;
if ((tmp = mpu_cmd(n, 0xAD, 0)) < 0) {
devc->version = 0;
return;
}
devc->revision = tmp;
}
int attach_mpu401(struct address_info *hw_config, struct module *owner)
{
unsigned long flags;
char revision_char;
int m, ret;
struct mpu_config *devc;
hw_config->slots[1] = -1;
m = sound_alloc_mididev();
if (m == -1)
{
printk(KERN_WARNING "MPU-401: Too many midi devices detected\n");
ret = -ENOMEM;
goto out_err;
}
devc = &dev_conf[m];
devc->base = hw_config->io_base;
devc->osp = hw_config->osp;
devc->irq = hw_config->irq;
devc->opened = 0;
devc->uart_mode = 0;
devc->initialized = 0;
devc->version = 0;
devc->revision = 0;
devc->capabilities = 0;
devc->timer_flag = 0;
devc->m_busy = 0;
devc->m_state = ST_INIT;
devc->shared_irq = hw_config->always_detect;
spin_lock_init(&devc->lock);
if (devc->irq < 0)
{
devc->irq *= -1;
devc->shared_irq = 1;
}
if (!hw_config->always_detect)
{
/* Verify the hardware again */
if (!reset_mpu401(devc))
{
printk(KERN_WARNING "mpu401: Device didn't respond\n");
ret = -ENODEV;
goto out_mididev;
}
if (!devc->shared_irq)
{
if (request_irq(devc->irq, mpuintr, 0, "mpu401",
hw_config) < 0)
{
printk(KERN_WARNING "mpu401: Failed to allocate IRQ%d\n", devc->irq);
ret = -ENOMEM;
goto out_mididev;
}
}
spin_lock_irqsave(&devc->lock,flags);
mpu401_chk_version(m, devc);
if (devc->version == 0)
mpu401_chk_version(m, devc);
spin_unlock_irqrestore(&devc->lock, flags);
}
if (devc->version != 0)
if (mpu_cmd(m, 0xC5, 0) >= 0) /* Set timebase OK */
if (mpu_cmd(m, 0xE0, 120) >= 0) /* Set tempo OK */
devc->capabilities |= MPU_CAP_INTLG; /* Supports intelligent mode */
mpu401_synth_operations[m] = kmalloc(sizeof(struct synth_operations), GFP_KERNEL);
if (mpu401_synth_operations[m] == NULL)
{
printk(KERN_ERR "mpu401: Can't allocate memory\n");
ret = -ENOMEM;
goto out_irq;
}
if (!(devc->capabilities & MPU_CAP_INTLG)) /* No intelligent mode */
{
memcpy((char *) mpu401_synth_operations[m],
(char *) &std_midi_synth,
sizeof(struct synth_operations));
}
else
{
memcpy((char *) mpu401_synth_operations[m],
(char *) &mpu401_synth_proto,
sizeof(struct synth_operations));
}
if (owner)
mpu401_synth_operations[m]->owner = owner;
memcpy((char *) &mpu401_midi_operations[m],
(char *) &mpu401_midi_proto,
sizeof(struct midi_operations));
mpu401_midi_operations[m].converter = mpu401_synth_operations[m];
memcpy((char *) &mpu_synth_info[m],
(char *) &mpu_synth_info_proto,
sizeof(struct synth_info));
n_mpu_devs++;
if (devc->version == 0x20 && devc->revision >= 0x07) /* MusicQuest interface */
{
int ports = (devc->revision & 0x08) ? 32 : 16;
devc->capabilities |= MPU_CAP_SYNC | MPU_CAP_SMPTE |
MPU_CAP_CLS | MPU_CAP_2PORT;
revision_char = (devc->revision == 0x7f) ? 'M' : ' ';
sprintf(mpu_synth_info[m].name, "MQX-%d%c MIDI Interface #%d",
ports,
revision_char,
n_mpu_devs);
}
else
{
revision_char = devc->revision ? devc->revision + '@' : ' ';
if ((int) devc->revision > ('Z' - '@'))
revision_char = '+';
devc->capabilities |= MPU_CAP_SYNC | MPU_CAP_FSK;
if (hw_config->name)
sprintf(mpu_synth_info[m].name, "%s (MPU401)", hw_config->name);
else
sprintf(mpu_synth_info[m].name,
"MPU-401 %d.%d%c MIDI #%d",
(int) (devc->version & 0xf0) >> 4,
devc->version & 0x0f,
revision_char,
n_mpu_devs);
}
strcpy(mpu401_midi_operations[m].info.name,
mpu_synth_info[m].name);
conf_printf(mpu_synth_info[m].name, hw_config);
mpu401_synth_operations[m]->midi_dev = devc->devno = m;
mpu401_synth_operations[devc->devno]->info = &mpu_synth_info[devc->devno];
if (devc->capabilities & MPU_CAP_INTLG) /* Intelligent mode */
hw_config->slots[2] = mpu_timer_init(m);
midi_devs[m] = &mpu401_midi_operations[devc->devno];
if (owner)
midi_devs[m]->owner = owner;
hw_config->slots[1] = m;
sequencer_init();
return 0;
out_irq:
free_irq(devc->irq, hw_config);
out_mididev:
sound_unload_mididev(m);
out_err:
release_region(hw_config->io_base, 2);
return ret;
}
static int reset_mpu401(struct mpu_config *devc)
{
unsigned long flags;
int ok, timeout, n;
int timeout_limit;
/*
* Send the RESET command. Try again if no success at the first time.
* (If the device is in the UART mode, it will not ack the reset cmd).
*/
ok = 0;
timeout_limit = devc->initialized ? 30000 : 100000;
devc->initialized = 1;
for (n = 0; n < 2 && !ok; n++)
{
for (timeout = timeout_limit; timeout > 0 && !ok; timeout--)
ok = output_ready(devc);
write_command(devc, MPU_RESET); /*
* Send MPU-401 RESET Command
*/
/*
* Wait at least 25 msec. This method is not accurate so let's make the
* loop a bit longer. Cannot sleep since this is called during boot.
*/
for (timeout = timeout_limit * 2; timeout > 0 && !ok; timeout--)
{
spin_lock_irqsave(&devc->lock,flags);
if (input_avail(devc))
if (read_data(devc) == MPU_ACK)
ok = 1;
spin_unlock_irqrestore(&devc->lock,flags);
}
}
devc->m_state = ST_INIT;
devc->m_ptr = 0;
devc->m_left = 0;
devc->last_status = 0;
devc->uart_mode = 0;
return ok;
}
static void set_uart_mode(int dev, struct mpu_config *devc, int arg)
{
if (!arg && (devc->capabilities & MPU_CAP_INTLG))
return;
if ((devc->uart_mode == 0) == (arg == 0))
return; /* Already set */
reset_mpu401(devc); /* This exits the uart mode */
if (arg)
{
if (mpu_cmd(dev, UART_MODE_ON, 0) < 0)
{
printk(KERN_ERR "mpu401: Can't enter UART mode\n");
devc->uart_mode = 0;
return;
}
}
devc->uart_mode = arg;
}
int probe_mpu401(struct address_info *hw_config, struct resource *ports)
{
int ok = 0;
struct mpu_config tmp_devc;
tmp_devc.base = hw_config->io_base;
tmp_devc.irq = hw_config->irq;
tmp_devc.initialized = 0;
tmp_devc.opened = 0;
tmp_devc.osp = hw_config->osp;
if (hw_config->always_detect)
return 1;
if (inb(hw_config->io_base + 1) == 0xff)
{
DDB(printk("MPU401: Port %x looks dead.\n", hw_config->io_base));
return 0; /* Just bus float? */
}
ok = reset_mpu401(&tmp_devc);
if (!ok)
{
DDB(printk("MPU401: Reset failed on port %x\n", hw_config->io_base));
}
return ok;
}
void unload_mpu401(struct address_info *hw_config)
{
void *p;
int n=hw_config->slots[1];
if (n != -1) {
release_region(hw_config->io_base, 2);
if (hw_config->always_detect == 0 && hw_config->irq > 0)
free_irq(hw_config->irq, hw_config);
p=mpu401_synth_operations[n];
sound_unload_mididev(n);
sound_unload_timerdev(hw_config->slots[2]);
kfree(p);
}
}
/*****************************************************
* Timer stuff
****************************************************/
static volatile int timer_initialized = 0, timer_open = 0, tmr_running = 0;
static volatile int curr_tempo, curr_timebase, hw_timebase;
static int max_timebase = 8; /* 8*24=192 ppqn */
static volatile unsigned long next_event_time;
static volatile unsigned long curr_ticks, curr_clocks;
static unsigned long prev_event_time;
static int metronome_mode;
static unsigned long clocks2ticks(unsigned long clocks)
{
/*
* The MPU-401 supports just a limited set of possible timebase values.
* Since the applications require more choices, the driver has to
* program the HW to do its best and to convert between the HW and
* actual timebases.
*/
return ((clocks * curr_timebase) + (hw_timebase / 2)) / hw_timebase;
}
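/*
 * Example of the conversion above (illustrative numbers): with
 * curr_timebase = 192 and hw_timebase = 120, clocks = 120 gives
 * (120 * 192 + 60) / 120 = 192 ticks, i.e. one quarter note at
 * 120 ppqn maps to one quarter note at 192 ppqn.
 */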
static void set_timebase(int midi_dev, int val)
{
int hw_val;
if (val < 48)
val = 48;
if (val > 1000)
val = 1000;
hw_val = val;
hw_val = (hw_val + 12) / 24;
if (hw_val > max_timebase)
hw_val = max_timebase;
if (mpu_cmd(midi_dev, 0xC0 | (hw_val & 0x0f), 0) < 0)
{
printk(KERN_WARNING "mpu401: Can't set HW timebase to %d\n", hw_val * 24);
return;
}
hw_timebase = hw_val * 24;
curr_timebase = val;
}
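/*
 * Example (illustrative): set_timebase(dev, 200) clamps nothing,
 * computes hw_val = (200 + 12) / 24 = 8 and programs command 0xC8,
 * so hw_timebase becomes 192 while curr_timebase stays 200;
 * clocks2ticks() then converts between the two rates.
 */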
static void tmr_reset(struct mpu_config *devc)
{
unsigned long flags;
spin_lock_irqsave(&devc->lock,flags);
next_event_time = (unsigned long) -1;
prev_event_time = 0;
curr_ticks = curr_clocks = 0;
spin_unlock_irqrestore(&devc->lock,flags);
}
static void set_timer_mode(int midi_dev)
{
if (timer_mode & TMR_MODE_CLS)
mpu_cmd(midi_dev, 0x3c, 0); /* Use CLS sync */
else if (timer_mode & TMR_MODE_SMPTE)
mpu_cmd(midi_dev, 0x3d, 0); /* Use SMPTE sync */
if (timer_mode & TMR_INTERNAL)
{
mpu_cmd(midi_dev, 0x80, 0); /* Use MIDI sync */
}
else
{
if (timer_mode & (TMR_MODE_MIDI | TMR_MODE_CLS))
{
mpu_cmd(midi_dev, 0x82, 0); /* Use MIDI sync */
mpu_cmd(midi_dev, 0x91, 0); /* Enable ext MIDI ctrl */
}
else if (timer_mode & TMR_MODE_FSK)
mpu_cmd(midi_dev, 0x81, 0); /* Use FSK sync */
}
}
static void stop_metronome(int midi_dev)
{
mpu_cmd(midi_dev, 0x84, 0); /* Disable metronome */
}
static void setup_metronome(int midi_dev)
{
int numerator, denominator;
int clks_per_click, num_32nds_per_beat;
int beats_per_measure;
numerator = ((unsigned) metronome_mode >> 24) & 0xff;
denominator = ((unsigned) metronome_mode >> 16) & 0xff;
clks_per_click = ((unsigned) metronome_mode >> 8) & 0xff;
num_32nds_per_beat = (unsigned) metronome_mode & 0xff;
beats_per_measure = (numerator * 4) >> denominator;
if (!metronome_mode)
mpu_cmd(midi_dev, 0x84, 0); /* Disable metronome */
else
{
mpu_cmd(midi_dev, 0xE4, clks_per_click);
mpu_cmd(midi_dev, 0xE6, beats_per_measure);
mpu_cmd(midi_dev, 0x83, 0); /* Enable metronome without accents */
}
}
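/*
 * Example of the metronome_mode packing read above (assuming the
 * denominator byte holds log2 of the time-signature denominator, as the
 * shift implies): 4/4 time with 24 clocks per click and 8 32nds per beat
 * packs to 0x04021808 and yields beats_per_measure = (4 * 4) >> 2 = 4.
 */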
static int mpu_start_timer(int midi_dev)
{
struct mpu_config *devc= &dev_conf[midi_dev];
tmr_reset(devc);
set_timer_mode(midi_dev);
if (tmr_running)
return TIMER_NOT_ARMED; /* Already running */
if (timer_mode & TMR_INTERNAL)
{
mpu_cmd(midi_dev, 0x02, 0); /* Send MIDI start */
tmr_running = 1;
return TIMER_NOT_ARMED;
}
else
{
mpu_cmd(midi_dev, 0x35, 0); /* Enable mode messages to PC */
mpu_cmd(midi_dev, 0x38, 0); /* Enable sys common messages to PC */
mpu_cmd(midi_dev, 0x39, 0); /* Enable real time messages to PC */
mpu_cmd(midi_dev, 0x97, 0); /* Enable system exclusive messages to PC */
}
return TIMER_ARMED;
}
static int mpu_timer_open(int dev, int mode)
{
int midi_dev = sound_timer_devs[dev]->devlink;
struct mpu_config *devc= &dev_conf[midi_dev];
if (timer_open)
return -EBUSY;
tmr_reset(devc);
curr_tempo = 50;
mpu_cmd(midi_dev, 0xE0, 50);
curr_timebase = hw_timebase = 120;
set_timebase(midi_dev, 120);
timer_open = 1;
metronome_mode = 0;
set_timer_mode(midi_dev);
mpu_cmd(midi_dev, 0xe7, 0x04); /* Send all clocks to host */
mpu_cmd(midi_dev, 0x95, 0); /* Enable clock to host */
return 0;
}
static void mpu_timer_close(int dev)
{
int midi_dev = sound_timer_devs[dev]->devlink;
timer_open = tmr_running = 0;
mpu_cmd(midi_dev, 0x15, 0); /* Stop all */
mpu_cmd(midi_dev, 0x94, 0); /* Disable clock to host */
mpu_cmd(midi_dev, 0x8c, 0); /* Disable measure end messages to host */
stop_metronome(midi_dev);
}
static int mpu_timer_event(int dev, unsigned char *event)
{
unsigned char command = event[1];
unsigned long parm = *(unsigned int *) &event[4];
int midi_dev = sound_timer_devs[dev]->devlink;
switch (command)
{
case TMR_WAIT_REL:
parm += prev_event_time;
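/* fall through - a relative wait becomes an absolute time */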
case TMR_WAIT_ABS:
if (parm > 0)
{
long time;
if (parm <= curr_ticks) /* It's the time */
return TIMER_NOT_ARMED;
time = parm;
next_event_time = prev_event_time = time;
return TIMER_ARMED;
}
break;
case TMR_START:
if (tmr_running)
break;
return mpu_start_timer(midi_dev);
case TMR_STOP:
mpu_cmd(midi_dev, 0x01, 0); /* Send MIDI stop */
stop_metronome(midi_dev);
tmr_running = 0;
break;
case TMR_CONTINUE:
if (tmr_running)
break;
mpu_cmd(midi_dev, 0x03, 0); /* Send MIDI continue */
setup_metronome(midi_dev);
tmr_running = 1;
break;
case TMR_TEMPO:
if (parm)
{
if (parm < 8)
parm = 8;
if (parm > 250)
parm = 250;
if (mpu_cmd(midi_dev, 0xE0, parm) < 0)
printk(KERN_WARNING "mpu401: Can't set tempo to %d\n", (int) parm);
curr_tempo = parm;
}
break;
case TMR_ECHO:
seq_copy_to_input(event, 8);
break;
case TMR_TIMESIG:
if (metronome_mode) /* Metronome enabled */
{
metronome_mode = parm;
setup_metronome(midi_dev);
}
break;
default:;
}
return TIMER_NOT_ARMED;
}
static unsigned long mpu_timer_get_time(int dev)
{
if (!timer_open)
return 0;
return curr_ticks;
}
static int mpu_timer_ioctl(int dev, unsigned int command, void __user *arg)
{
int midi_dev = sound_timer_devs[dev]->devlink;
int __user *p = (int __user *)arg;
switch (command)
{
case SNDCTL_TMR_SOURCE:
{
int parm;
if (get_user(parm, p))
return -EFAULT;
parm &= timer_caps;
if (parm != 0)
{
timer_mode = parm;
if (timer_mode & TMR_MODE_CLS)
mpu_cmd(midi_dev, 0x3c, 0); /* Use CLS sync */
else if (timer_mode & TMR_MODE_SMPTE)
mpu_cmd(midi_dev, 0x3d, 0); /* Use SMPTE sync */
}
if (put_user(timer_mode, p))
return -EFAULT;
return timer_mode;
}
break;
case SNDCTL_TMR_START:
mpu_start_timer(midi_dev);
return 0;
case SNDCTL_TMR_STOP:
tmr_running = 0;
mpu_cmd(midi_dev, 0x01, 0); /* Send MIDI stop */
stop_metronome(midi_dev);
return 0;
case SNDCTL_TMR_CONTINUE:
if (tmr_running)
return 0;
tmr_running = 1;
mpu_cmd(midi_dev, 0x03, 0); /* Send MIDI continue */
return 0;
case SNDCTL_TMR_TIMEBASE:
{
int val;
if (get_user(val, p))
return -EFAULT;
if (val)
set_timebase(midi_dev, val);
if (put_user(curr_timebase, p))
return -EFAULT;
return curr_timebase;
}
break;
case SNDCTL_TMR_TEMPO:
{
int val;
int ret;
if (get_user(val, p))
return -EFAULT;
if (val)
{
if (val < 8)
val = 8;
if (val > 250)
val = 250;
if ((ret = mpu_cmd(midi_dev, 0xE0, val)) < 0)
{
printk(KERN_WARNING "mpu401: Can't set tempo to %d\n", (int) val);
return ret;
}
curr_tempo = val;
}
if (put_user(curr_tempo, p))
return -EFAULT;
return curr_tempo;
}
break;
case SNDCTL_SEQ_CTRLRATE:
{
int val;
if (get_user(val, p))
return -EFAULT;
if (val != 0) /* Can't change */
return -EINVAL;
val = ((curr_tempo * curr_timebase) + 30)/60;
if (put_user(val, p))
return -EFAULT;
return val;
}
break;
case SNDCTL_SEQ_GETTIME:
if (put_user(curr_ticks, p))
return -EFAULT;
return curr_ticks;
case SNDCTL_TMR_METRONOME:
if (get_user(metronome_mode, p))
return -EFAULT;
setup_metronome(midi_dev);
return 0;
default:;
}
return -EINVAL;
}
static void mpu_timer_arm(int dev, long time)
{
if (time < 0)
time = curr_ticks + 1;
else if (time <= curr_ticks) /* It's the time */
return;
next_event_time = prev_event_time = time;
return;
}
static struct sound_timer_operations mpu_timer =
{
.owner = THIS_MODULE,
.info = {"MPU-401 Timer", 0},
.priority = 10, /* Priority */
.devlink = 0, /* Local device link */
.open = mpu_timer_open,
.close = mpu_timer_close,
.event = mpu_timer_event,
.get_time = mpu_timer_get_time,
.ioctl = mpu_timer_ioctl,
.arm_timer = mpu_timer_arm
};
static void mpu_timer_interrupt(void)
{
if (!timer_open)
return;
if (!tmr_running)
return;
curr_clocks++;
curr_ticks = clocks2ticks(curr_clocks);
if (curr_ticks >= next_event_time)
{
next_event_time = (unsigned long) -1;
sequencer_timer(0);
}
}
static void timer_ext_event(struct mpu_config *devc, int event, int parm)
{
int midi_dev = devc->devno;
if (!devc->timer_flag)
return;
switch (event)
{
case TMR_CLOCK:
printk("<MIDI clk>");
break;
case TMR_START:
printk("Ext MIDI start\n");
if (!tmr_running)
{
if (timer_mode & TMR_EXTERNAL)
{
tmr_running = 1;
setup_metronome(midi_dev);
next_event_time = 0;
STORE(SEQ_START_TIMER());
}
}
break;
case TMR_STOP:
printk("Ext MIDI stop\n");
if (timer_mode & TMR_EXTERNAL)
{
tmr_running = 0;
stop_metronome(midi_dev);
STORE(SEQ_STOP_TIMER());
}
break;
case TMR_CONTINUE:
printk("Ext MIDI continue\n");
if (timer_mode & TMR_EXTERNAL)
{
tmr_running = 1;
setup_metronome(midi_dev);
STORE(SEQ_CONTINUE_TIMER());
}
break;
case TMR_SPP:
printk("Songpos: %d\n", parm);
if (timer_mode & TMR_EXTERNAL)
{
STORE(SEQ_SONGPOS(parm));
}
break;
}
}
static int mpu_timer_init(int midi_dev)
{
struct mpu_config *devc;
int n;
devc = &dev_conf[midi_dev];
if (timer_initialized)
return -1; /* There is already a similar timer */
timer_initialized = 1;
mpu_timer.devlink = midi_dev;
dev_conf[midi_dev].timer_flag = 1;
n = sound_alloc_timerdev();
if (n == -1)
n = 0;
sound_timer_devs[n] = &mpu_timer;
if (devc->version < 0x20) /* Original MPU-401 */
timer_caps = TMR_INTERNAL | TMR_EXTERNAL | TMR_MODE_FSK | TMR_MODE_MIDI;
else
{
/*
* The version number 2.0 is used (at least) by the
* MusicQuest cards and the Roland Super-MPU.
*
* MusicQuest has given a special meaning to the bits of the
* revision number. The Super-MPU returns 0.
*/
if (devc->revision)
timer_caps |= TMR_EXTERNAL | TMR_MODE_MIDI;
if (devc->revision & 0x02)
timer_caps |= TMR_MODE_CLS;
if (devc->revision & 0x40)
max_timebase = 10; /* Has the 216 and 240 ppqn modes */
}
timer_mode = (TMR_INTERNAL | TMR_MODE_MIDI) & timer_caps;
return n;
}
EXPORT_SYMBOL(probe_mpu401);
EXPORT_SYMBOL(attach_mpu401);
EXPORT_SYMBOL(unload_mpu401);
static struct address_info cfg;
static int io = -1;
static int irq = -1;
module_param(irq, int, 0);
module_param(io, int, 0);
static int __init init_mpu401(void)
{
int ret;
/* Can be loaded either for module use or to provide functions
to others */
if (io != -1 && irq != -1) {
struct resource *ports;
cfg.irq = irq;
cfg.io_base = io;
ports = request_region(io, 2, "mpu401");
if (!ports)
return -EBUSY;
if (probe_mpu401(&cfg, ports) == 0) {
release_region(io, 2);
return -ENODEV;
}
if ((ret = attach_mpu401(&cfg, THIS_MODULE)))
return ret;
}
return 0;
}
static void __exit cleanup_mpu401(void)
{
if (io != -1 && irq != -1) {
/* Check for use by, for example, sscape driver */
unload_mpu401(&cfg);
}
}
module_init(init_mpu401);
module_exit(cleanup_mpu401);
#ifndef MODULE
static int __init setup_mpu401(char *str)
{
/* io, irq */
int ints[3];
str = get_options(str, ARRAY_SIZE(ints), ints);
io = ints[1];
irq = ints[2];
return 1;
}
__setup("mpu401=", setup_mpu401);
#endif
MODULE_LICENSE("GPL");
| gpl-2.0 |
wimpknocker/android_kernel_samsung_viennalte | drivers/input/misc/pmic8058-vib-memless.c | 3503 | 6408 | /* Copyright (c) 2010, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/pmic8058-vibrator.h>
#include <linux/mfd/pmic8058.h>
#include <linux/pm.h>
#include <linux/input.h>
#include <linux/slab.h>
#define VIB_DRV 0x4A
#define VIB_DRV_SEL_MASK 0xf8
#define VIB_DRV_SEL_SHIFT 0x03
#define VIB_DRV_EN_MANUAL_MASK 0xfc
#define VIB_MAX_LEVEL_mV (3100)
#define VIB_MIN_LEVEL_mV (1200)
#define VIB_MAX_LEVELS (VIB_MAX_LEVEL_mV - VIB_MIN_LEVEL_mV)
#define MAX_FF_SPEED 0xff
struct pmic8058_vib {
struct input_dev *info;
spinlock_t lock;
struct work_struct work;
bool enabled;
int speed;
struct device *dev;
struct pmic8058_vibrator_pdata *pdata;
int state;
int level;
u8 reg_vib_drv;
struct pm8058_chip *pm_chip;
};
/* REVISIT: just for debugging, will be removed in final working version */
static void __dump_vib_regs(struct pmic8058_vib *vib, char *msg)
{
u8 temp;
dev_dbg(vib->dev, "%s\n", msg);
pm8058_read(vib->pm_chip, VIB_DRV, &temp, 1);
dev_dbg(vib->dev, "VIB_DRV - %X\n", temp);
}
static int pmic8058_vib_read_u8(struct pmic8058_vib *vib,
u8 *data, u16 reg)
{
int rc;
rc = pm8058_read(vib->pm_chip, reg, data, 1);
if (rc < 0)
dev_warn(vib->dev, "Error reading pmic8058: %X - ret %X\n",
reg, rc);
return rc;
}
static int pmic8058_vib_write_u8(struct pmic8058_vib *vib,
u8 data, u16 reg)
{
int rc;
rc = pm8058_write(vib->pm_chip, reg, &data, 1);
if (rc < 0)
dev_warn(vib->dev, "Error writing pmic8058: %X - ret %X\n",
reg, rc);
return rc;
}
static int pmic8058_vib_set(struct pmic8058_vib *vib, int on)
{
int rc;
u8 val;
if (on) {
val = vib->reg_vib_drv;
val |= ((vib->level << VIB_DRV_SEL_SHIFT) & VIB_DRV_SEL_MASK);
rc = pmic8058_vib_write_u8(vib, val, VIB_DRV);
if (rc < 0)
return rc;
vib->reg_vib_drv = val;
vib->enabled = 1;
} else {
val = vib->reg_vib_drv;
val &= ~VIB_DRV_SEL_MASK;
rc = pmic8058_vib_write_u8(vib, val, VIB_DRV);
if (rc < 0)
return rc;
vib->reg_vib_drv = val;
vib->enabled = 0;
}
__dump_vib_regs(vib, "vib_set_end");
return rc;
}
static void pmic8058_work_handler(struct work_struct *work)
{
u8 val;
int rc;
struct pmic8058_vib *info;
info = container_of(work, struct pmic8058_vib, work);
rc = pmic8058_vib_read_u8(info, &val, VIB_DRV);
if (rc < 0)
return;
/*
* The vibrator supports voltages from 1.2 V to 3.1 V, so
* scale the FF speed to this range.
*/
if (info->speed) {
info->state = 1;
info->level = ((VIB_MAX_LEVELS * info->speed) / MAX_FF_SPEED) +
VIB_MIN_LEVEL_mV;
info->level /= 100;
} else {
info->state = 0;
info->level = VIB_MIN_LEVEL_mV / 100;
}
pmic8058_vib_set(info, info->state);
}
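/*
 * Illustrative note (editor-added, values assumed): the scaling above maps
 * the 0..MAX_FF_SPEED rumble magnitude onto the 1200..3100 mV supply range
 * and then converts to the register's 100 mV steps. For example:
 *
 *   speed = 255 -> (1900 * 255) / 255 + 1200 = 3100 mV -> level = 31
 *   speed = 128 -> (1900 * 128) / 255 + 1200 = 2153 mV -> level = 21
 *   speed = 0   -> vibrator off, level held at 1200 mV / 100 = 12
 */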
static int pmic8058_vib_play_effect(struct input_dev *dev, void *data,
struct ff_effect *effect)
{
struct pmic8058_vib *info = input_get_drvdata(dev);
info->speed = effect->u.rumble.strong_magnitude >> 8;
if (!info->speed)
info->speed = effect->u.rumble.weak_magnitude >> 9;
schedule_work(&info->work);
return 0;
}
static int __devinit pmic8058_vib_probe(struct platform_device *pdev)
{
struct pmic8058_vibrator_pdata *pdata = pdev->dev.platform_data;
struct pmic8058_vib *vib;
u8 val;
int rc;
struct pm8058_chip *pm_chip;
pm_chip = dev_get_drvdata(pdev->dev.parent);
if (pm_chip == NULL) {
dev_err(&pdev->dev, "no parent data passed in\n");
return -EFAULT;
}
if (!pdata)
return -EINVAL;
if (pdata->level_mV < VIB_MIN_LEVEL_mV ||
pdata->level_mV > VIB_MAX_LEVEL_mV)
return -EINVAL;
vib = kzalloc(sizeof(*vib), GFP_KERNEL);
if (!vib)
return -ENOMEM;
vib->pm_chip = pm_chip;
vib->enabled = 0;
vib->pdata = pdata;
vib->level = pdata->level_mV / 100;
vib->dev = &pdev->dev;
spin_lock_init(&vib->lock);
INIT_WORK(&vib->work, pmic8058_work_handler);
vib->info = input_allocate_device();
if (vib->info == NULL) {
dev_err(&pdev->dev, "couldn't allocate input device\n");
return -ENOMEM;
}
input_set_drvdata(vib->info, vib);
vib->info->name = "pmic8058:vibrator";
vib->info->id.version = 1;
vib->info->dev.parent = pdev->dev.parent;
__set_bit(FF_RUMBLE, vib->info->ffbit);
__dump_vib_regs(vib, "boot_vib_default");
/* operate in manual mode */
rc = pmic8058_vib_read_u8(vib, &val, VIB_DRV);
if (rc < 0)
goto err_read_vib;
val &= ~VIB_DRV_EN_MANUAL_MASK;
rc = pmic8058_vib_write_u8(vib, val, VIB_DRV);
if (rc < 0)
goto err_read_vib;
vib->reg_vib_drv = val;
rc = input_ff_create_memless(vib->info, NULL, pmic8058_vib_play_effect);
if (rc < 0) {
dev_dbg(&pdev->dev, "couldn't register vibrator to FF\n");
goto create_memless_err;
}
platform_set_drvdata(pdev, vib);
rc = input_register_device(vib->info);
if (rc < 0) {
dev_dbg(&pdev->dev, "couldn't register input device\n");
goto reg_err;
}
return 0;
reg_err:
input_ff_destroy(vib->info);
create_memless_err:
input_free_device(vib->info);
err_read_vib:
kfree(vib);
return rc;
}
static int __devexit pmic8058_vib_remove(struct platform_device *pdev)
{
struct pmic8058_vib *vib = platform_get_drvdata(pdev);
cancel_work_sync(&vib->work);
if (vib->enabled)
pmic8058_vib_set(vib, 0);
input_unregister_device(vib->info);
kfree(vib);
return 0;
}
static struct platform_driver pmic8058_vib_driver = {
.probe = pmic8058_vib_probe,
.remove = __devexit_p(pmic8058_vib_remove),
.driver = {
.name = "pm8058-vib",
.owner = THIS_MODULE,
},
};
static int __init pmic8058_vib_init(void)
{
return platform_driver_register(&pmic8058_vib_driver);
}
module_init(pmic8058_vib_init);
static void __exit pmic8058_vib_exit(void)
{
platform_driver_unregister(&pmic8058_vib_driver);
}
module_exit(pmic8058_vib_exit);
MODULE_ALIAS("platform:pmic8058_vib");
MODULE_DESCRIPTION("PMIC8058 vibrator driver memless framework");
MODULE_LICENSE("GPL v2");
| gpl-2.0 |
GustavoRD78/78Kernel-MOB31E.Z1.3657 | drivers/input/misc/isa1200-ff-memless.c | 3503 | 10737 | /*
* Copyright (C) 2009 Samsung Electronics
* Kyungmin Park <kyungmin.park@samsung.com>
*
* Copyright (c) 2011, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#define pr_fmt(fmt) "%s: " fmt, __func__
#include <linux/module.h>
#include <linux/init.h>
#include <linux/i2c.h>
#include <linux/gpio.h>
#include <linux/workqueue.h>
#include <linux/delay.h>
#include <linux/pwm.h>
#include <linux/input.h>
#include <linux/slab.h>
#include <linux/pm.h>
#include <linux/i2c/isa1200.h>
#define ISA1200_HCTRL0 0x30
#define HCTRL0_MODE_CTRL_BIT (3)
#define HCTRL0_OVERDRIVE_HIGH_BIT (5)
#define HCTRL0_OVERDRIVE_EN_BIT (6)
#define HCTRL0_HAP_EN (7)
#define HCTRL0_RESET 0x01
#define HCTRL1_RESET 0x4B
#define ISA1200_HCTRL1 0x31
#define HCTRL1_SMART_ENABLE_BIT (3)
#define HCTRL1_ERM_BIT (5)
#define HCTRL1_EXT_CLK_ENABLE_BIT (7)
#define ISA1200_HCTRL5 0x35
#define HCTRL5_VIB_STRT 0xD5
#define HCTRL5_VIB_STOP 0x6B
#define DIVIDER_128 (128)
#define DIVIDER_1024 (1024)
#define DIVIDE_SHIFTER_128 (7)
#define FREQ_22400 (22400)
#define FREQ_172600 (172600)
#define POR_DELAY_USEC 250
struct isa1200_chip {
const struct isa1200_platform_data *pdata;
struct i2c_client *client;
struct input_dev *input_device;
struct pwm_device *pwm;
unsigned int period_ns;
unsigned int state;
struct work_struct work;
};
static void isa1200_vib_set(struct isa1200_chip *haptic, int enable)
{
int rc;
if (enable) {
if (haptic->pdata->mode_ctrl == PWM_INPUT_MODE) {
int period_us = haptic->period_ns / NSEC_PER_USEC;
rc = pwm_config(haptic->pwm,
(period_us * haptic->pdata->duty) / 100,
period_us);
if (rc < 0)
pr_err("pwm_config fail\n");
rc = pwm_enable(haptic->pwm);
if (rc < 0)
pr_err("pwm_enable fail\n");
} else if (haptic->pdata->mode_ctrl == PWM_GEN_MODE) {
rc = i2c_smbus_write_byte_data(haptic->client,
ISA1200_HCTRL5,
HCTRL5_VIB_STRT);
if (rc < 0)
pr_err("start vibration fail\n");
}
} else {
if (haptic->pdata->mode_ctrl == PWM_INPUT_MODE)
pwm_disable(haptic->pwm);
else if (haptic->pdata->mode_ctrl == PWM_GEN_MODE) {
rc = i2c_smbus_write_byte_data(haptic->client,
ISA1200_HCTRL5,
HCTRL5_VIB_STOP);
if (rc < 0)
pr_err("stop vibration fail\n");
}
}
}
static int isa1200_setup(struct i2c_client *client)
{
struct isa1200_chip *haptic = i2c_get_clientdata(client);
int value, temp, rc;
gpio_set_value_cansleep(haptic->pdata->hap_en_gpio, 0);
udelay(POR_DELAY_USEC);
gpio_set_value_cansleep(haptic->pdata->hap_en_gpio, 1);
value = (haptic->pdata->smart_en << HCTRL1_SMART_ENABLE_BIT) |
(haptic->pdata->is_erm << HCTRL1_ERM_BIT) |
(haptic->pdata->ext_clk_en << HCTRL1_EXT_CLK_ENABLE_BIT);
rc = i2c_smbus_write_byte_data(client, ISA1200_HCTRL1, value);
if (rc < 0) {
pr_err("i2c write failure\n");
return rc;
}
if (haptic->pdata->mode_ctrl == PWM_GEN_MODE) {
temp = haptic->pdata->pwm_fd.pwm_div;
if (temp < DIVIDER_128 || temp > DIVIDER_1024 ||
temp % DIVIDER_128) {
pr_err("Invalid divider\n");
rc = -EINVAL;
goto reset_hctrl1;
}
value = ((temp >> DIVIDE_SHIFTER_128) - 1);
} else if (haptic->pdata->mode_ctrl == PWM_INPUT_MODE) {
temp = haptic->pdata->pwm_fd.pwm_freq;
if (temp < FREQ_22400 || temp > FREQ_172600 ||
temp % FREQ_22400) {
pr_err("Invalid frequency\n");
rc = -EINVAL;
goto reset_hctrl1;
}
value = ((temp / FREQ_22400) - 1);
haptic->period_ns = NSEC_PER_SEC / temp;
}
value |= (haptic->pdata->mode_ctrl << HCTRL0_MODE_CTRL_BIT) |
(haptic->pdata->overdrive_high << HCTRL0_OVERDRIVE_HIGH_BIT) |
(haptic->pdata->overdrive_en << HCTRL0_OVERDRIVE_EN_BIT) |
(haptic->pdata->chip_en << HCTRL0_HAP_EN);
rc = i2c_smbus_write_byte_data(client, ISA1200_HCTRL0, value);
if (rc < 0) {
pr_err("i2c write failure\n");
goto reset_hctrl1;
}
return 0;
reset_hctrl1:
i2c_smbus_write_byte_data(client, ISA1200_HCTRL1,
HCTRL1_RESET);
return rc;
}
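/*
 * Illustrative note (editor-added, values assumed): in PWM_GEN_MODE a
 * divider of 256 gives value = (256 >> 7) - 1 = 1; in PWM_INPUT_MODE a
 * requested pwm_freq of 44800 Hz gives value = (44800 / 22400) - 1 = 1 and
 * period_ns = NSEC_PER_SEC / 44800 = 22321 ns, which isa1200_vib_set()
 * later converts to microseconds for pwm_config().
 */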
static void isa1200_worker(struct work_struct *work)
{
struct isa1200_chip *haptic;
haptic = container_of(work, struct isa1200_chip, work);
isa1200_vib_set(haptic, !!haptic->state);
}
static int isa1200_play_effect(struct input_dev *dev, void *data,
struct ff_effect *effect)
{
struct isa1200_chip *haptic = input_get_drvdata(dev);
/* support basic vibration */
haptic->state = effect->u.rumble.strong_magnitude >> 8;
if (!haptic->state)
haptic->state = effect->u.rumble.weak_magnitude >> 9;
schedule_work(&haptic->work);
return 0;
}
#ifdef CONFIG_PM
static int isa1200_suspend(struct device *dev)
{
struct isa1200_chip *haptic = dev_get_drvdata(dev);
int rc;
cancel_work_sync(&haptic->work);
/* turn-off current vibration */
isa1200_vib_set(haptic, 0);
if (haptic->pdata->power_on) {
rc = haptic->pdata->power_on(0);
if (rc) {
pr_err("power-down failed\n");
return rc;
}
}
return 0;
}
static int isa1200_resume(struct device *dev)
{
struct isa1200_chip *haptic = dev_get_drvdata(dev);
int rc;
if (haptic->pdata->power_on) {
rc = haptic->pdata->power_on(1);
if (rc) {
pr_err("power-up failed\n");
return rc;
}
}
isa1200_setup(haptic->client);
return 0;
}
#else
#define isa1200_suspend NULL
#define isa1200_resume NULL
#endif
static int isa1200_open(struct input_dev *dev)
{
struct isa1200_chip *haptic = input_get_drvdata(dev);
int rc;
/* device setup */
if (haptic->pdata->dev_setup) {
rc = haptic->pdata->dev_setup(true);
if (rc < 0) {
pr_err("setup failed!\n");
return rc;
}
}
/* power on */
if (haptic->pdata->power_on) {
rc = haptic->pdata->power_on(true);
if (rc < 0) {
pr_err("power failed\n");
goto err_setup;
}
}
/* request gpio */
rc = gpio_is_valid(haptic->pdata->hap_en_gpio);
if (rc) {
rc = gpio_request(haptic->pdata->hap_en_gpio, "haptic_gpio");
if (rc) {
pr_err("gpio %d request failed\n",
haptic->pdata->hap_en_gpio);
goto err_power_on;
}
} else {
pr_err("Invalid gpio %d\n",
haptic->pdata->hap_en_gpio);
goto err_power_on;
}
rc = gpio_direction_output(haptic->pdata->hap_en_gpio, 0);
if (rc) {
pr_err("gpio %d set direction failed\n",
haptic->pdata->hap_en_gpio);
goto err_gpio_free;
}
/* setup registers */
rc = isa1200_setup(haptic->client);
if (rc < 0) {
pr_err("setup fail %d\n", rc);
goto err_gpio_free;
}
if (haptic->pdata->mode_ctrl == PWM_INPUT_MODE) {
haptic->pwm = pwm_request(haptic->pdata->pwm_ch_id,
haptic->client->driver->id_table->name);
if (IS_ERR(haptic->pwm)) {
pr_err("pwm request failed\n");
rc = PTR_ERR(haptic->pwm);
goto err_reset_hctrl0;
}
}
/* init workqeueue */
INIT_WORK(&haptic->work, isa1200_worker);
return 0;
err_reset_hctrl0:
i2c_smbus_write_byte_data(haptic->client, ISA1200_HCTRL0,
HCTRL0_RESET);
err_gpio_free:
gpio_free(haptic->pdata->hap_en_gpio);
err_power_on:
if (haptic->pdata->power_on)
haptic->pdata->power_on(0);
err_setup:
if (haptic->pdata->dev_setup)
haptic->pdata->dev_setup(false);
return rc;
}
static void isa1200_close(struct input_dev *dev)
{
struct isa1200_chip *haptic = input_get_drvdata(dev);
/* turn-off current vibration */
isa1200_vib_set(haptic, 0);
if (haptic->pdata->mode_ctrl == PWM_INPUT_MODE)
pwm_free(haptic->pwm);
gpio_free(haptic->pdata->hap_en_gpio);
/* reset hardware registers */
i2c_smbus_write_byte_data(haptic->client, ISA1200_HCTRL0,
HCTRL0_RESET);
i2c_smbus_write_byte_data(haptic->client, ISA1200_HCTRL1,
HCTRL1_RESET);
if (haptic->pdata->dev_setup)
haptic->pdata->dev_setup(false);
/* power-off the chip */
if (haptic->pdata->power_on)
haptic->pdata->power_on(0);
}
static int __devinit isa1200_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
struct isa1200_chip *haptic;
int rc;
if (!i2c_check_functionality(client->adapter,
I2C_FUNC_SMBUS_BYTE_DATA)) {
pr_err("i2c is not supported\n");
return -EIO;
}
if (!client->dev.platform_data) {
pr_err("pdata is not avaiable\n");
return -EINVAL;
}
haptic = kzalloc(sizeof(struct isa1200_chip), GFP_KERNEL);
if (!haptic) {
pr_err("no memory\n");
return -ENOMEM;
}
haptic->pdata = client->dev.platform_data;
haptic->client = client;
i2c_set_clientdata(client, haptic);
haptic->input_device = input_allocate_device();
if (!haptic->input_device) {
pr_err("input device alloc failed\n");
rc = -ENOMEM;
goto err_mem_alloc;
}
input_set_drvdata(haptic->input_device, haptic);
haptic->input_device->name = haptic->pdata->name ? :
"isa1200-ff-memless";
haptic->input_device->dev.parent = &client->dev;
input_set_capability(haptic->input_device, EV_FF, FF_RUMBLE);
haptic->input_device->open = isa1200_open;
haptic->input_device->close = isa1200_close;
rc = input_ff_create_memless(haptic->input_device, NULL,
isa1200_play_effect);
if (rc < 0) {
pr_err("unable to register with ff\n");
goto err_free_dev;
}
rc = input_register_device(haptic->input_device);
if (rc < 0) {
pr_err("unable to register input device\n");
goto err_ff_destroy;
}
return 0;
err_ff_destroy:
input_ff_destroy(haptic->input_device);
err_free_dev:
input_free_device(haptic->input_device);
err_mem_alloc:
kfree(haptic);
return rc;
}
static int __devexit isa1200_remove(struct i2c_client *client)
{
struct isa1200_chip *haptic = i2c_get_clientdata(client);
input_unregister_device(haptic->input_device);
kfree(haptic);
return 0;
}
static const struct i2c_device_id isa1200_id_table[] = {
{"isa1200_1", 0},
{ },
};
MODULE_DEVICE_TABLE(i2c, isa1200_id_table);
static const struct dev_pm_ops isa1200_pm_ops = {
.suspend = isa1200_suspend,
.resume = isa1200_resume,
};
static struct i2c_driver isa1200_driver = {
.driver = {
.name = "isa1200-ff-memless",
.owner = THIS_MODULE,
.pm = &isa1200_pm_ops,
},
.probe = isa1200_probe,
.remove = __devexit_p(isa1200_remove),
.id_table = isa1200_id_table,
};
static int __init isa1200_init(void)
{
return i2c_add_driver(&isa1200_driver);
}
module_init(isa1200_init);
static void __exit isa1200_exit(void)
{
i2c_del_driver(&isa1200_driver);
}
module_exit(isa1200_exit);
MODULE_DESCRIPTION("isa1200 based vibrator chip driver");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Kyungmin Park <kyungmin.park@samsung.com>");
| gpl-2.0 |
somesnow/AK-OnePone | drivers/base/regmap/regmap-spi.c | 4527 | 2452 | /*
* Register map access API - SPI support
*
* Copyright 2011 Wolfson Microelectronics plc
*
* Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/regmap.h>
#include <linux/spi/spi.h>
#include <linux/init.h>
#include <linux/module.h>
static int regmap_spi_write(struct device *dev, const void *data, size_t count)
{
struct spi_device *spi = to_spi_device(dev);
return spi_write(spi, data, count);
}
static int regmap_spi_gather_write(struct device *dev,
const void *reg, size_t reg_len,
const void *val, size_t val_len)
{
struct spi_device *spi = to_spi_device(dev);
struct spi_message m;
struct spi_transfer t[2] = { { .tx_buf = reg, .len = reg_len, },
{ .tx_buf = val, .len = val_len, }, };
spi_message_init(&m);
spi_message_add_tail(&t[0], &m);
spi_message_add_tail(&t[1], &m);
return spi_sync(spi, &m);
}
static int regmap_spi_read(struct device *dev,
const void *reg, size_t reg_size,
void *val, size_t val_size)
{
struct spi_device *spi = to_spi_device(dev);
return spi_write_then_read(spi, reg, reg_size, val, val_size);
}
static struct regmap_bus regmap_spi = {
.write = regmap_spi_write,
.gather_write = regmap_spi_gather_write,
.read = regmap_spi_read,
.read_flag_mask = 0x80,
};
/**
* regmap_init_spi(): Initialise register map
*
* @spi: Device that will be interacted with
* @config: Configuration for register map
*
* The return value will be an ERR_PTR() on error or a valid pointer to
* a struct regmap.
*/
struct regmap *regmap_init_spi(struct spi_device *spi,
const struct regmap_config *config)
{
return regmap_init(&spi->dev, &regmap_spi, config);
}
EXPORT_SYMBOL_GPL(regmap_init_spi);
/**
* devm_regmap_init_spi(): Initialise register map
*
* @spi: Device that will be interacted with
* @config: Configuration for register map
*
* The return value will be an ERR_PTR() on error or a valid pointer
* to a struct regmap. The map will be automatically freed by the
* device management code.
*/
struct regmap *devm_regmap_init_spi(struct spi_device *spi,
const struct regmap_config *config)
{
return devm_regmap_init(&spi->dev, &regmap_spi, config);
}
EXPORT_SYMBOL_GPL(devm_regmap_init_spi);
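/*
 * Illustrative usage sketch (editor-added; the config values below are
 * assumptions, not part of this file). A typical SPI client driver would
 * do something like:
 *
 *	static const struct regmap_config example_regmap_config = {
 *		.reg_bits = 8,
 *		.val_bits = 8,
 *	};
 *
 *	map = devm_regmap_init_spi(spi, &example_regmap_config);
 *	if (IS_ERR(map))
 *		return PTR_ERR(map);
 */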
MODULE_LICENSE("GPL");
| gpl-2.0 |
vantjnh1991/F160-JB | drivers/net/ethernet/smsc/smc911x.c | 4527 | 58936 | /*
* smc911x.c
* This is a driver for SMSC's LAN911{5,6,7,8} single-chip Ethernet devices.
*
* Copyright (C) 2005 Sensoria Corp
* Derived from the unified SMC91x driver by Nicolas Pitre
* and the smsc911x.c reference driver by SMSC
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
* Arguments:
* watchdog = TX watchdog timeout
* tx_fifo_kb = Size of TX FIFO in KB
*
* History:
* 04/16/05 Dustin McIntire Initial version
*/
static const char version[] =
"smc911x.c: v1.0 04-16-2005 by Dustin McIntire <dustin@sensoria.com>\n";
/* Debugging options */
#define ENABLE_SMC_DEBUG_RX 0
#define ENABLE_SMC_DEBUG_TX 0
#define ENABLE_SMC_DEBUG_DMA 0
#define ENABLE_SMC_DEBUG_PKTS 0
#define ENABLE_SMC_DEBUG_MISC 0
#define ENABLE_SMC_DEBUG_FUNC 0
#define SMC_DEBUG_RX ((ENABLE_SMC_DEBUG_RX ? 1 : 0) << 0)
#define SMC_DEBUG_TX ((ENABLE_SMC_DEBUG_TX ? 1 : 0) << 1)
#define SMC_DEBUG_DMA ((ENABLE_SMC_DEBUG_DMA ? 1 : 0) << 2)
#define SMC_DEBUG_PKTS ((ENABLE_SMC_DEBUG_PKTS ? 1 : 0) << 3)
#define SMC_DEBUG_MISC ((ENABLE_SMC_DEBUG_MISC ? 1 : 0) << 4)
#define SMC_DEBUG_FUNC ((ENABLE_SMC_DEBUG_FUNC ? 1 : 0) << 5)
#ifndef SMC_DEBUG
#define SMC_DEBUG ( SMC_DEBUG_RX | \
SMC_DEBUG_TX | \
SMC_DEBUG_DMA | \
SMC_DEBUG_PKTS | \
SMC_DEBUG_MISC | \
SMC_DEBUG_FUNC \
)
#endif
#include <linux/init.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/crc32.h>
#include <linux/device.h>
#include <linux/platform_device.h>
#include <linux/spinlock.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/workqueue.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <asm/io.h>
#include "smc911x.h"
/*
* Transmit timeout, default 5 seconds.
*/
static int watchdog = 5000;
module_param(watchdog, int, 0400);
MODULE_PARM_DESC(watchdog, "transmit timeout in milliseconds");
static int tx_fifo_kb=8;
module_param(tx_fifo_kb, int, 0400);
MODULE_PARM_DESC(tx_fifo_kb,"transmit FIFO size in KB (1<x<15)(default=8)");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:smc911x");
/*
* The internal workings of the driver. If you are changing anything
* here with the SMC stuff, you should have the datasheet and know
* what you are doing.
*/
#define CARDNAME "smc911x"
/*
* Use power-down feature of the chip
*/
#define POWER_DOWN 1
#if SMC_DEBUG > 0
#define DBG(n, args...) \
do { \
if (SMC_DEBUG & (n)) \
printk(args); \
} while (0)
#define PRINTK(args...) printk(args)
#else
#define DBG(n, args...) do { } while (0)
#define PRINTK(args...) printk(KERN_DEBUG args)
#endif
#if SMC_DEBUG_PKTS > 0
static void PRINT_PKT(u_char *buf, int length)
{
int i;
int remainder;
int lines;
lines = length / 16;
remainder = length % 16;
for (i = 0; i < lines ; i ++) {
int cur;
for (cur = 0; cur < 8; cur++) {
u_char a, b;
a = *buf++;
b = *buf++;
printk("%02x%02x ", a, b);
}
printk("\n");
}
for (i = 0; i < remainder/2 ; i++) {
u_char a, b;
a = *buf++;
b = *buf++;
printk("%02x%02x ", a, b);
}
printk("\n");
}
#else
#define PRINT_PKT(x...) do { } while (0)
#endif
/* this enables an interrupt in the interrupt mask register */
#define SMC_ENABLE_INT(lp, x) do { \
unsigned int __mask; \
__mask = SMC_GET_INT_EN((lp)); \
__mask |= (x); \
SMC_SET_INT_EN((lp), __mask); \
} while (0)
/* this disables an interrupt from the interrupt mask register */
#define SMC_DISABLE_INT(lp, x) do { \
unsigned int __mask; \
__mask = SMC_GET_INT_EN((lp)); \
__mask &= ~(x); \
SMC_SET_INT_EN((lp), __mask); \
} while (0)
/*
* this does a soft reset on the device
*/
static void smc911x_reset(struct net_device *dev)
{
struct smc911x_local *lp = netdev_priv(dev);
unsigned int reg, timeout=0, resets=1, irq_cfg;
unsigned long flags;
DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __func__);
/* Take out of PM setting first */
if ((SMC_GET_PMT_CTRL(lp) & PMT_CTRL_READY_) == 0) {
/* Write to the bytetest will take out of powerdown */
SMC_SET_BYTE_TEST(lp, 0);
timeout=10;
do {
udelay(10);
reg = SMC_GET_PMT_CTRL(lp) & PMT_CTRL_READY_;
} while (--timeout && !reg);
if (timeout == 0) {
PRINTK("%s: smc911x_reset timeout waiting for PM restore\n", dev->name);
return;
}
}
/* Disable all interrupts */
spin_lock_irqsave(&lp->lock, flags);
SMC_SET_INT_EN(lp, 0);
spin_unlock_irqrestore(&lp->lock, flags);
while (resets--) {
SMC_SET_HW_CFG(lp, HW_CFG_SRST_);
timeout=10;
do {
udelay(10);
reg = SMC_GET_HW_CFG(lp);
/* If chip indicates reset timeout then try again */
if (reg & HW_CFG_SRST_TO_) {
PRINTK("%s: chip reset timeout, retrying...\n", dev->name);
resets++;
break;
}
} while (--timeout && (reg & HW_CFG_SRST_));
}
if (timeout == 0) {
PRINTK("%s: smc911x_reset timeout waiting for reset\n", dev->name);
return;
}
/* make sure EEPROM has finished loading before setting GPIO_CFG */
timeout=1000;
while (--timeout && (SMC_GET_E2P_CMD(lp) & E2P_CMD_EPC_BUSY_))
udelay(10);
if (timeout == 0){
PRINTK("%s: smc911x_reset timeout waiting for EEPROM busy\n", dev->name);
return;
}
/* Initialize interrupts */
SMC_SET_INT_EN(lp, 0);
SMC_ACK_INT(lp, -1);
/* Reset the FIFO level and flow control settings */
SMC_SET_HW_CFG(lp, (lp->tx_fifo_kb & 0xF) << 16);
//TODO: Figure out what appropriate pause time is
SMC_SET_FLOW(lp, FLOW_FCPT_ | FLOW_FCEN_);
SMC_SET_AFC_CFG(lp, lp->afc_cfg);
/* Set to LED outputs */
SMC_SET_GPIO_CFG(lp, 0x70070000);
/*
* Deassert IRQ for 1*10us for edge type interrupts
* and drive IRQ pin push-pull
*/
irq_cfg = (1 << 24) | INT_CFG_IRQ_EN_ | INT_CFG_IRQ_TYPE_;
#ifdef SMC_DYNAMIC_BUS_CONFIG
if (lp->cfg.irq_polarity)
irq_cfg |= INT_CFG_IRQ_POL_;
#endif
SMC_SET_IRQ_CFG(lp, irq_cfg);
/* clear anything saved */
if (lp->pending_tx_skb != NULL) {
dev_kfree_skb (lp->pending_tx_skb);
lp->pending_tx_skb = NULL;
dev->stats.tx_errors++;
dev->stats.tx_aborted_errors++;
}
}
/*
* Enable Interrupts, Receive, and Transmit
*/
static void smc911x_enable(struct net_device *dev)
{
struct smc911x_local *lp = netdev_priv(dev);
unsigned mask, cfg, cr;
unsigned long flags;
DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __func__);
spin_lock_irqsave(&lp->lock, flags);
SMC_SET_MAC_ADDR(lp, dev->dev_addr);
/* Enable TX */
cfg = SMC_GET_HW_CFG(lp);
cfg &= HW_CFG_TX_FIF_SZ_ | 0xFFF;
cfg |= HW_CFG_SF_;
SMC_SET_HW_CFG(lp, cfg);
SMC_SET_FIFO_TDA(lp, 0xFF);
/* Update TX stats on every 64 packets received or every 1 sec */
SMC_SET_FIFO_TSL(lp, 64);
SMC_SET_GPT_CFG(lp, GPT_CFG_TIMER_EN_ | 10000);
SMC_GET_MAC_CR(lp, cr);
cr |= MAC_CR_TXEN_ | MAC_CR_HBDIS_;
SMC_SET_MAC_CR(lp, cr);
SMC_SET_TX_CFG(lp, TX_CFG_TX_ON_);
/* Add 2 byte padding to start of packets */
SMC_SET_RX_CFG(lp, (2<<8) & RX_CFG_RXDOFF_);
/* Turn on receiver and enable RX */
if (cr & MAC_CR_RXEN_)
DBG(SMC_DEBUG_RX, "%s: Receiver already enabled\n", dev->name);
SMC_SET_MAC_CR(lp, cr | MAC_CR_RXEN_);
/* Interrupt on every received packet */
SMC_SET_FIFO_RSA(lp, 0x01);
SMC_SET_FIFO_RSL(lp, 0x00);
/* now, enable interrupts */
mask = INT_EN_TDFA_EN_ | INT_EN_TSFL_EN_ | INT_EN_RSFL_EN_ |
INT_EN_GPT_INT_EN_ | INT_EN_RXDFH_INT_EN_ | INT_EN_RXE_EN_ |
INT_EN_PHY_INT_EN_;
if (IS_REV_A(lp->revision))
mask|=INT_EN_RDFL_EN_;
else {
mask|=INT_EN_RDFO_EN_;
}
SMC_ENABLE_INT(lp, mask);
spin_unlock_irqrestore(&lp->lock, flags);
}
/*
* this puts the device in an inactive state
*/
static void smc911x_shutdown(struct net_device *dev)
{
struct smc911x_local *lp = netdev_priv(dev);
unsigned cr;
unsigned long flags;
DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", CARDNAME, __func__);
/* Disable IRQ's */
SMC_SET_INT_EN(lp, 0);
/* Turn off Rx and TX */
spin_lock_irqsave(&lp->lock, flags);
SMC_GET_MAC_CR(lp, cr);
cr &= ~(MAC_CR_TXEN_ | MAC_CR_RXEN_ | MAC_CR_HBDIS_);
SMC_SET_MAC_CR(lp, cr);
SMC_SET_TX_CFG(lp, TX_CFG_STOP_TX_);
spin_unlock_irqrestore(&lp->lock, flags);
}
static inline void smc911x_drop_pkt(struct net_device *dev)
{
struct smc911x_local *lp = netdev_priv(dev);
unsigned int fifo_count, timeout, reg;
DBG(SMC_DEBUG_FUNC | SMC_DEBUG_RX, "%s: --> %s\n", CARDNAME, __func__);
fifo_count = SMC_GET_RX_FIFO_INF(lp) & 0xFFFF;
if (fifo_count <= 4) {
/* Manually dump the packet data */
while (fifo_count--)
SMC_GET_RX_FIFO(lp);
} else {
/* Fast forward through the bad packet */
SMC_SET_RX_DP_CTRL(lp, RX_DP_CTRL_FFWD_BUSY_);
timeout=50;
do {
udelay(10);
reg = SMC_GET_RX_DP_CTRL(lp) & RX_DP_CTRL_FFWD_BUSY_;
} while (--timeout && reg);
if (timeout == 0) {
PRINTK("%s: timeout waiting for RX fast forward\n", dev->name);
}
}
}
/*
* This is the procedure to handle the receipt of a packet.
* It should be called after checking for packet presence in
* the RX status FIFO. It must be called with the spin lock
* already held.
*/
static inline void smc911x_rcv(struct net_device *dev)
{
struct smc911x_local *lp = netdev_priv(dev);
unsigned int pkt_len, status;
struct sk_buff *skb;
unsigned char *data;
DBG(SMC_DEBUG_FUNC | SMC_DEBUG_RX, "%s: --> %s\n",
dev->name, __func__);
status = SMC_GET_RX_STS_FIFO(lp);
DBG(SMC_DEBUG_RX, "%s: Rx pkt len %d status 0x%08x\n",
dev->name, (status & 0x3fff0000) >> 16, status & 0xc000ffff);
pkt_len = (status & RX_STS_PKT_LEN_) >> 16;
if (status & RX_STS_ES_) {
/* Deal with a bad packet */
dev->stats.rx_errors++;
if (status & RX_STS_CRC_ERR_)
dev->stats.rx_crc_errors++;
else {
if (status & RX_STS_LEN_ERR_)
dev->stats.rx_length_errors++;
if (status & RX_STS_MCAST_)
dev->stats.multicast++;
}
/* Remove the bad packet data from the RX FIFO */
smc911x_drop_pkt(dev);
} else {
/* Receive a valid packet */
/* Alloc a buffer with extra room for DMA alignment */
skb = netdev_alloc_skb(dev, pkt_len+32);
if (unlikely(skb == NULL)) {
PRINTK( "%s: Low memory, rcvd packet dropped.\n",
dev->name);
dev->stats.rx_dropped++;
smc911x_drop_pkt(dev);
return;
}
/* Align IP header to 32 bits
* Note that the device is configured to add a 2
* byte padding to the packet start, so we really
* want to write to the original data pointer */
data = skb->data;
skb_reserve(skb, 2);
skb_put(skb,pkt_len-4);
#ifdef SMC_USE_DMA
{
unsigned int fifo;
/* Lower the FIFO threshold if possible */
fifo = SMC_GET_FIFO_INT(lp);
if (fifo & 0xFF) fifo--;
DBG(SMC_DEBUG_RX, "%s: Setting RX stat FIFO threshold to %d\n",
dev->name, fifo & 0xff);
SMC_SET_FIFO_INT(lp, fifo);
/* Setup RX DMA */
SMC_SET_RX_CFG(lp, RX_CFG_RX_END_ALGN16_ | ((2<<8) & RX_CFG_RXDOFF_));
lp->rxdma_active = 1;
lp->current_rx_skb = skb;
SMC_PULL_DATA(lp, data, (pkt_len+2+15) & ~15);
/* Packet processing deferred to DMA RX interrupt */
}
#else
SMC_SET_RX_CFG(lp, RX_CFG_RX_END_ALGN4_ | ((2<<8) & RX_CFG_RXDOFF_));
SMC_PULL_DATA(lp, data, pkt_len+2+3);
DBG(SMC_DEBUG_PKTS, "%s: Received packet\n", dev->name);
PRINT_PKT(data, ((pkt_len - 4) <= 64) ? pkt_len - 4 : 64);
skb->protocol = eth_type_trans(skb, dev);
netif_rx(skb);
dev->stats.rx_packets++;
dev->stats.rx_bytes += pkt_len-4;
#endif
}
}
/*
* This is called to actually send a packet to the chip.
*/
static void smc911x_hardware_send_pkt(struct net_device *dev)
{
struct smc911x_local *lp = netdev_priv(dev);
struct sk_buff *skb;
unsigned int cmdA, cmdB, len;
unsigned char *buf;
DBG(SMC_DEBUG_FUNC | SMC_DEBUG_TX, "%s: --> %s\n", dev->name, __func__);
BUG_ON(lp->pending_tx_skb == NULL);
skb = lp->pending_tx_skb;
lp->pending_tx_skb = NULL;
/* cmdA [25:24] data alignment [20:16] start offset [10:0] buffer length */
/* cmdB [31:16] pkt tag [10:0] length */
#ifdef SMC_USE_DMA
/* 16 byte buffer alignment mode */
buf = (char*)((u32)(skb->data) & ~0xF);
len = (skb->len + 0xF + ((u32)skb->data & 0xF)) & ~0xF;
cmdA = (1<<24) | (((u32)skb->data & 0xF)<<16) |
TX_CMD_A_INT_FIRST_SEG_ | TX_CMD_A_INT_LAST_SEG_ |
skb->len;
#else
buf = (char*)((u32)skb->data & ~0x3);
len = (skb->len + 3 + ((u32)skb->data & 3)) & ~0x3;
cmdA = (((u32)skb->data & 0x3) << 16) |
TX_CMD_A_INT_FIRST_SEG_ | TX_CMD_A_INT_LAST_SEG_ |
skb->len;
#endif
/* tag is packet length so we can use this in stats update later */
cmdB = (skb->len << 16) | (skb->len & 0x7FF);
DBG(SMC_DEBUG_TX, "%s: TX PKT LENGTH 0x%04x (%d) BUF 0x%p CMDA 0x%08x CMDB 0x%08x\n",
dev->name, len, len, buf, cmdA, cmdB);
SMC_SET_TX_FIFO(lp, cmdA);
SMC_SET_TX_FIFO(lp, cmdB);
DBG(SMC_DEBUG_PKTS, "%s: Transmitted packet\n", dev->name);
PRINT_PKT(buf, len <= 64 ? len : 64);
/* Send pkt via PIO or DMA */
#ifdef SMC_USE_DMA
lp->current_tx_skb = skb;
SMC_PUSH_DATA(lp, buf, len);
/* DMA complete IRQ will free buffer and set jiffies */
#else
SMC_PUSH_DATA(lp, buf, len);
dev->trans_start = jiffies;
dev_kfree_skb_irq(skb);
#endif
if (!lp->tx_throttle) {
netif_wake_queue(dev);
}
SMC_ENABLE_INT(lp, INT_EN_TDFA_EN_ | INT_EN_TSFL_EN_);
}
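/*
 * Illustrative note (editor-added, values assumed): with SMC_USE_DMA and a
 * 60-byte skb whose data pointer ends in 0x6, the 16-byte alignment math
 * above gives
 *
 *   buf  = skb->data & ~0xF                    (rounded down by 6 bytes)
 *   len  = (60 + 0xF + 0x6) & ~0xF = 80        (rounded up to 16 bytes)
 *   cmdA = (1 << 24) | (0x6 << 16) | flags | 60
 *   cmdB = (60 << 16) | 60
 *
 * so the chip strips the 6-byte start offset and pads the end of the burst.
 */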
/*
* Since I am not sure if I will have enough room in the chip's RAM
* to store the packet, I call this routine which either sends it
* now, or sets the card to generate an interrupt when ready
* for the packet.
*/
static int smc911x_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct smc911x_local *lp = netdev_priv(dev);
unsigned int free;
unsigned long flags;
DBG(SMC_DEBUG_FUNC | SMC_DEBUG_TX, "%s: --> %s\n",
dev->name, __func__);
spin_lock_irqsave(&lp->lock, flags);
BUG_ON(lp->pending_tx_skb != NULL);
free = SMC_GET_TX_FIFO_INF(lp) & TX_FIFO_INF_TDFREE_;
DBG(SMC_DEBUG_TX, "%s: TX free space %d\n", dev->name, free);
/* Turn off the flow when running out of space in FIFO */
if (free <= SMC911X_TX_FIFO_LOW_THRESHOLD) {
DBG(SMC_DEBUG_TX, "%s: Disabling data flow due to low FIFO space (%d)\n",
dev->name, free);
/* Reenable when at least 1 packet of size MTU present */
SMC_SET_FIFO_TDA(lp, (SMC911X_TX_FIFO_LOW_THRESHOLD)/64);
lp->tx_throttle = 1;
netif_stop_queue(dev);
}
/* Drop packets when we run out of space in TX FIFO
* Account for overhead required for:
*
* Tx command words 8 bytes
* Start offset 15 bytes
* End padding 15 bytes
*/
if (unlikely(free < (skb->len + 8 + 15 + 15))) {
printk("%s: No Tx free space %d < %d\n",
dev->name, free, skb->len);
lp->pending_tx_skb = NULL;
dev->stats.tx_errors++;
dev->stats.tx_dropped++;
spin_unlock_irqrestore(&lp->lock, flags);
dev_kfree_skb(skb);
return NETDEV_TX_OK;
}
#ifdef SMC_USE_DMA
{
/* If the DMA is already running then defer this packet Tx until
* the DMA IRQ starts it
*/
if (lp->txdma_active) {
DBG(SMC_DEBUG_TX | SMC_DEBUG_DMA, "%s: Tx DMA running, deferring packet\n", dev->name);
lp->pending_tx_skb = skb;
netif_stop_queue(dev);
spin_unlock_irqrestore(&lp->lock, flags);
return NETDEV_TX_OK;
} else {
DBG(SMC_DEBUG_TX | SMC_DEBUG_DMA, "%s: Activating Tx DMA\n", dev->name);
lp->txdma_active = 1;
}
}
#endif
lp->pending_tx_skb = skb;
smc911x_hardware_send_pkt(dev);
spin_unlock_irqrestore(&lp->lock, flags);
return NETDEV_TX_OK;
}
/*
* This handles a TX status interrupt, which is only called when:
* - a TX error occurred, or
* - TX of a packet completed.
*/
static void smc911x_tx(struct net_device *dev)
{
struct smc911x_local *lp = netdev_priv(dev);
unsigned int tx_status;
DBG(SMC_DEBUG_FUNC | SMC_DEBUG_TX, "%s: --> %s\n",
dev->name, __func__);
/* Collect the TX status */
while (((SMC_GET_TX_FIFO_INF(lp) & TX_FIFO_INF_TSUSED_) >> 16) != 0) {
DBG(SMC_DEBUG_TX, "%s: Tx stat FIFO used 0x%04x\n",
dev->name,
(SMC_GET_TX_FIFO_INF(lp) & TX_FIFO_INF_TSUSED_) >> 16);
tx_status = SMC_GET_TX_STS_FIFO(lp);
dev->stats.tx_packets++;
dev->stats.tx_bytes+=tx_status>>16;
DBG(SMC_DEBUG_TX, "%s: Tx FIFO tag 0x%04x status 0x%04x\n",
dev->name, (tx_status & 0xffff0000) >> 16,
tx_status & 0x0000ffff);
/* count Tx errors, but ignore lost carrier errors when in
* full-duplex mode */
if ((tx_status & TX_STS_ES_) && !(lp->ctl_rfduplx &&
!(tx_status & 0x00000306))) {
dev->stats.tx_errors++;
}
if (tx_status & TX_STS_MANY_COLL_) {
dev->stats.collisions+=16;
dev->stats.tx_aborted_errors++;
} else {
dev->stats.collisions+=(tx_status & TX_STS_COLL_CNT_) >> 3;
}
/* carrier error only has meaning for half-duplex communication */
if ((tx_status & (TX_STS_LOC_ | TX_STS_NO_CARR_)) &&
!lp->ctl_rfduplx) {
dev->stats.tx_carrier_errors++;
}
if (tx_status & TX_STS_LATE_COLL_) {
dev->stats.collisions++;
dev->stats.tx_aborted_errors++;
}
}
}
/*---PHY CONTROL AND CONFIGURATION-----------------------------------------*/
/*
* Reads a register from the MII Management serial interface
*/
static int smc911x_phy_read(struct net_device *dev, int phyaddr, int phyreg)
{
struct smc911x_local *lp = netdev_priv(dev);
unsigned int phydata;
SMC_GET_MII(lp, phyreg, phyaddr, phydata);
DBG(SMC_DEBUG_MISC, "%s: phyaddr=0x%x, phyreg=0x%02x, phydata=0x%04x\n",
__func__, phyaddr, phyreg, phydata);
return phydata;
}
/*
* Writes a register to the MII Management serial interface
*/
static void smc911x_phy_write(struct net_device *dev, int phyaddr, int phyreg,
int phydata)
{
struct smc911x_local *lp = netdev_priv(dev);
DBG(SMC_DEBUG_MISC, "%s: phyaddr=0x%x, phyreg=0x%x, phydata=0x%x\n",
__func__, phyaddr, phyreg, phydata);
SMC_SET_MII(lp, phyreg, phyaddr, phydata);
}
/*
* Finds and reports the PHY address (115 and 117 have an external
* PHY interface; 118 has an internal PHY only)
*/
static void smc911x_phy_detect(struct net_device *dev)
{
struct smc911x_local *lp = netdev_priv(dev);
int phyaddr;
unsigned int cfg, id1, id2;
DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __func__);
lp->phy_type = 0;
/*
* Scan all 32 PHY addresses if necessary: PHY#1 through
* PHY#31 first, and then PHY#0 last.
*/
switch(lp->version) {
case CHIP_9115:
case CHIP_9117:
case CHIP_9215:
case CHIP_9217:
cfg = SMC_GET_HW_CFG(lp);
if (cfg & HW_CFG_EXT_PHY_DET_) {
cfg &= ~HW_CFG_PHY_CLK_SEL_;
cfg |= HW_CFG_PHY_CLK_SEL_CLK_DIS_;
SMC_SET_HW_CFG(lp, cfg);
udelay(10); /* Wait for clocks to stop */
cfg |= HW_CFG_EXT_PHY_EN_;
SMC_SET_HW_CFG(lp, cfg);
udelay(10); /* Wait for clocks to stop */
cfg &= ~HW_CFG_PHY_CLK_SEL_;
cfg |= HW_CFG_PHY_CLK_SEL_EXT_PHY_;
SMC_SET_HW_CFG(lp, cfg);
udelay(10); /* Wait for clocks to stop */
cfg |= HW_CFG_SMI_SEL_;
SMC_SET_HW_CFG(lp, cfg);
for (phyaddr = 1; phyaddr < 32; ++phyaddr) {
/* Read the PHY identifiers */
SMC_GET_PHY_ID1(lp, phyaddr & 31, id1);
SMC_GET_PHY_ID2(lp, phyaddr & 31, id2);
/* Make sure it is a valid identifier */
if (id1 != 0x0000 && id1 != 0xffff &&
id1 != 0x8000 && id2 != 0x0000 &&
id2 != 0xffff && id2 != 0x8000) {
/* Save the PHY's address */
lp->mii.phy_id = phyaddr & 31;
lp->phy_type = id1 << 16 | id2;
break;
}
}
if (phyaddr < 32)
/* Found an external PHY */
break;
}
default:
/* Internal media only */
SMC_GET_PHY_ID1(lp, 1, id1);
SMC_GET_PHY_ID2(lp, 1, id2);
/* Save the PHY's address */
lp->mii.phy_id = 1;
lp->phy_type = id1 << 16 | id2;
}
DBG(SMC_DEBUG_MISC, "%s: phy_id1=0x%x, phy_id2=0x%x phyaddr=0x%d\n",
dev->name, id1, id2, lp->mii.phy_id);
}
/*
* Sets the PHY to a configuration as determined by the user.
* Called with spin_lock held.
*/
static int smc911x_phy_fixed(struct net_device *dev)
{
struct smc911x_local *lp = netdev_priv(dev);
int phyaddr = lp->mii.phy_id;
int bmcr;
DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __func__);
/* Enter Link Disable state */
SMC_GET_PHY_BMCR(lp, phyaddr, bmcr);
bmcr |= BMCR_PDOWN;
SMC_SET_PHY_BMCR(lp, phyaddr, bmcr);
/*
* Set our fixed capabilities
* Disable auto-negotiation
*/
bmcr &= ~BMCR_ANENABLE;
if (lp->ctl_rfduplx)
bmcr |= BMCR_FULLDPLX;
if (lp->ctl_rspeed == 100)
bmcr |= BMCR_SPEED100;
/* Write our capabilities to the phy control register */
SMC_SET_PHY_BMCR(lp, phyaddr, bmcr);
/* Re-Configure the Receive/Phy Control register */
bmcr &= ~BMCR_PDOWN;
SMC_SET_PHY_BMCR(lp, phyaddr, bmcr);
return 1;
}
/*
* smc911x_phy_reset - reset the phy
* @dev: net device
* @phy: phy address
*
* Issue a software reset for the specified PHY and
* wait up to 100ms for the reset to complete. We should
* not access the PHY for 50ms after issuing the reset.
*
* The time to wait appears to be dependent on the PHY.
*
*/
static int smc911x_phy_reset(struct net_device *dev, int phy)
{
struct smc911x_local *lp = netdev_priv(dev);
int timeout;
unsigned long flags;
unsigned int reg;
DBG(SMC_DEBUG_FUNC, "%s: --> %s()\n", dev->name, __func__);
spin_lock_irqsave(&lp->lock, flags);
reg = SMC_GET_PMT_CTRL(lp);
reg &= ~0xfffff030;
reg |= PMT_CTRL_PHY_RST_;
SMC_SET_PMT_CTRL(lp, reg);
spin_unlock_irqrestore(&lp->lock, flags);
for (timeout = 2; timeout; timeout--) {
msleep(50);
spin_lock_irqsave(&lp->lock, flags);
reg = SMC_GET_PMT_CTRL(lp);
spin_unlock_irqrestore(&lp->lock, flags);
if (!(reg & PMT_CTRL_PHY_RST_)) {
/* extra delay required because the phy may
* not have completed its reset
* when PHY_BCR_RESET_ is cleared. 256us
* should suffice, but use 500us to be safe
*/
udelay(500);
break;
}
}
return reg & PMT_CTRL_PHY_RST_;
}
/*
* smc911x_phy_powerdown - powerdown phy
* @dev: net device
* @phy: phy address
*
* Power down the specified PHY
*/
static void smc911x_phy_powerdown(struct net_device *dev, int phy)
{
struct smc911x_local *lp = netdev_priv(dev);
unsigned int bmcr;
/* Enter Link Disable state */
SMC_GET_PHY_BMCR(lp, phy, bmcr);
bmcr |= BMCR_PDOWN;
SMC_SET_PHY_BMCR(lp, phy, bmcr);
}
/*
* smc911x_phy_check_media - check the media status and adjust BMCR
* @dev: net device
* @init: set true for initialisation
*
* Select duplex mode depending on negotiation state. This
* also updates our carrier state.
*/
static void smc911x_phy_check_media(struct net_device *dev, int init)
{
struct smc911x_local *lp = netdev_priv(dev);
int phyaddr = lp->mii.phy_id;
unsigned int bmcr, cr;
DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __func__);
if (mii_check_media(&lp->mii, netif_msg_link(lp), init)) {
/* duplex state has changed */
SMC_GET_PHY_BMCR(lp, phyaddr, bmcr);
SMC_GET_MAC_CR(lp, cr);
if (lp->mii.full_duplex) {
DBG(SMC_DEBUG_MISC, "%s: Configuring for full-duplex mode\n", dev->name);
bmcr |= BMCR_FULLDPLX;
cr |= MAC_CR_RCVOWN_;
} else {
DBG(SMC_DEBUG_MISC, "%s: Configuring for half-duplex mode\n", dev->name);
bmcr &= ~BMCR_FULLDPLX;
cr &= ~MAC_CR_RCVOWN_;
}
SMC_SET_PHY_BMCR(lp, phyaddr, bmcr);
SMC_SET_MAC_CR(lp, cr);
}
}
/*
* Configures the specified PHY through the MII management interface
* using Autonegotiation.
* Calls smc911x_phy_fixed() if the user has requested a certain config.
* If RPC ANEG bit is set, the media selection is dependent purely on
* the selection by the MII (either in the MII BMCR reg or the result
* of autonegotiation.) If the RPC ANEG bit is cleared, the selection
* is controlled by the RPC SPEED and RPC DPLX bits.
*/
static void smc911x_phy_configure(struct work_struct *work)
{
struct smc911x_local *lp = container_of(work, struct smc911x_local,
phy_configure);
struct net_device *dev = lp->netdev;
int phyaddr = lp->mii.phy_id;
int my_phy_caps; /* My PHY capabilities */
int my_ad_caps; /* My Advertised capabilities */
int status;
unsigned long flags;
DBG(SMC_DEBUG_FUNC, "%s: --> %s()\n", dev->name, __func__);
/*
* We should not be called if phy_type is zero.
*/
if (lp->phy_type == 0)
return;
if (smc911x_phy_reset(dev, phyaddr)) {
printk("%s: PHY reset timed out\n", dev->name);
return;
}
spin_lock_irqsave(&lp->lock, flags);
/*
* Enable PHY Interrupts (for register 18)
* Interrupts listed here are enabled
*/
SMC_SET_PHY_INT_MASK(lp, phyaddr, PHY_INT_MASK_ENERGY_ON_ |
PHY_INT_MASK_ANEG_COMP_ | PHY_INT_MASK_REMOTE_FAULT_ |
PHY_INT_MASK_LINK_DOWN_);
/* If the user requested no auto neg, then go set his request */
if (lp->mii.force_media) {
smc911x_phy_fixed(dev);
goto smc911x_phy_configure_exit;
}
/* Copy our capabilities from MII_BMSR to MII_ADVERTISE */
SMC_GET_PHY_BMSR(lp, phyaddr, my_phy_caps);
if (!(my_phy_caps & BMSR_ANEGCAPABLE)) {
printk(KERN_INFO "Auto negotiation NOT supported\n");
smc911x_phy_fixed(dev);
goto smc911x_phy_configure_exit;
}
/* CSMA capable w/ both pauses */
my_ad_caps = ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
if (my_phy_caps & BMSR_100BASE4)
my_ad_caps |= ADVERTISE_100BASE4;
if (my_phy_caps & BMSR_100FULL)
my_ad_caps |= ADVERTISE_100FULL;
if (my_phy_caps & BMSR_100HALF)
my_ad_caps |= ADVERTISE_100HALF;
if (my_phy_caps & BMSR_10FULL)
my_ad_caps |= ADVERTISE_10FULL;
if (my_phy_caps & BMSR_10HALF)
my_ad_caps |= ADVERTISE_10HALF;
/* Disable capabilities not selected by our user */
if (lp->ctl_rspeed != 100)
my_ad_caps &= ~(ADVERTISE_100BASE4|ADVERTISE_100FULL|ADVERTISE_100HALF);
if (!lp->ctl_rfduplx)
my_ad_caps &= ~(ADVERTISE_100FULL|ADVERTISE_10FULL);
/* Update our Auto-Neg Advertisement Register */
SMC_SET_PHY_MII_ADV(lp, phyaddr, my_ad_caps);
lp->mii.advertising = my_ad_caps;
/*
* Read the register back. Without this, it appears that when
* auto-negotiation is restarted, sometimes it isn't ready and
* the link does not come up.
*/
udelay(10);
SMC_GET_PHY_MII_ADV(lp, phyaddr, status);
DBG(SMC_DEBUG_MISC, "%s: phy caps=0x%04x\n", dev->name, my_phy_caps);
DBG(SMC_DEBUG_MISC, "%s: phy advertised caps=0x%04x\n", dev->name, my_ad_caps);
/* Restart auto-negotiation process in order to advertise my caps */
SMC_SET_PHY_BMCR(lp, phyaddr, BMCR_ANENABLE | BMCR_ANRESTART);
smc911x_phy_check_media(dev, 1);
smc911x_phy_configure_exit:
spin_unlock_irqrestore(&lp->lock, flags);
}
/*
* smc911x_phy_interrupt
*
* Purpose: Handle interrupts relating to PHY register 18. This is
* called from the "hard" interrupt handler under our private spinlock.
*/
static void smc911x_phy_interrupt(struct net_device *dev)
{
struct smc911x_local *lp = netdev_priv(dev);
int phyaddr = lp->mii.phy_id;
int status;
DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __func__);
if (lp->phy_type == 0)
return;
smc911x_phy_check_media(dev, 0);
/* read to clear status bits */
SMC_GET_PHY_INT_SRC(lp, phyaddr,status);
DBG(SMC_DEBUG_MISC, "%s: PHY interrupt status 0x%04x\n",
dev->name, status & 0xffff);
DBG(SMC_DEBUG_MISC, "%s: AFC_CFG 0x%08x\n",
dev->name, SMC_GET_AFC_CFG(lp));
}
/*--- END PHY CONTROL AND CONFIGURATION-------------------------------------*/
/*
* This is the main routine of the driver, to handle the device when
* it needs some attention.
*/
static irqreturn_t smc911x_interrupt(int irq, void *dev_id)
{
struct net_device *dev = dev_id;
struct smc911x_local *lp = netdev_priv(dev);
unsigned int status, mask, timeout;
unsigned int rx_overrun=0, cr, pkts;
unsigned long flags;
DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __func__);
spin_lock_irqsave(&lp->lock, flags);
/* Spurious interrupt check */
if ((SMC_GET_IRQ_CFG(lp) & (INT_CFG_IRQ_INT_ | INT_CFG_IRQ_EN_)) !=
(INT_CFG_IRQ_INT_ | INT_CFG_IRQ_EN_)) {
spin_unlock_irqrestore(&lp->lock, flags);
return IRQ_NONE;
}
mask = SMC_GET_INT_EN(lp);
SMC_SET_INT_EN(lp, 0);
/* set a timeout value, so I don't stay here forever */
timeout = 8;
do {
status = SMC_GET_INT(lp);
DBG(SMC_DEBUG_MISC, "%s: INT 0x%08x MASK 0x%08x OUTSIDE MASK 0x%08x\n",
dev->name, status, mask, status & ~mask);
status &= mask;
if (!status)
break;
/* Handle SW interrupt condition */
if (status & INT_STS_SW_INT_) {
SMC_ACK_INT(lp, INT_STS_SW_INT_);
mask &= ~INT_EN_SW_INT_EN_;
}
/* Handle various error conditions */
if (status & INT_STS_RXE_) {
SMC_ACK_INT(lp, INT_STS_RXE_);
dev->stats.rx_errors++;
}
if (status & INT_STS_RXDFH_INT_) {
SMC_ACK_INT(lp, INT_STS_RXDFH_INT_);
dev->stats.rx_dropped+=SMC_GET_RX_DROP(lp);
}
/* Undocumented interrupt-what is the right thing to do here? */
if (status & INT_STS_RXDF_INT_) {
SMC_ACK_INT(lp, INT_STS_RXDF_INT_);
}
/* Rx Data FIFO exceeds set level */
if (status & INT_STS_RDFL_) {
if (IS_REV_A(lp->revision)) {
rx_overrun=1;
SMC_GET_MAC_CR(lp, cr);
cr &= ~MAC_CR_RXEN_;
SMC_SET_MAC_CR(lp, cr);
DBG(SMC_DEBUG_RX, "%s: RX overrun\n", dev->name);
dev->stats.rx_errors++;
dev->stats.rx_fifo_errors++;
}
SMC_ACK_INT(lp, INT_STS_RDFL_);
}
if (status & INT_STS_RDFO_) {
if (!IS_REV_A(lp->revision)) {
SMC_GET_MAC_CR(lp, cr);
cr &= ~MAC_CR_RXEN_;
SMC_SET_MAC_CR(lp, cr);
rx_overrun=1;
DBG(SMC_DEBUG_RX, "%s: RX overrun\n", dev->name);
dev->stats.rx_errors++;
dev->stats.rx_fifo_errors++;
}
SMC_ACK_INT(lp, INT_STS_RDFO_);
}
/* Handle receive condition */
if ((status & INT_STS_RSFL_) || rx_overrun) {
unsigned int fifo;
DBG(SMC_DEBUG_RX, "%s: RX irq\n", dev->name);
fifo = SMC_GET_RX_FIFO_INF(lp);
pkts = (fifo & RX_FIFO_INF_RXSUSED_) >> 16;
DBG(SMC_DEBUG_RX, "%s: Rx FIFO pkts %d, bytes %d\n",
dev->name, pkts, fifo & 0xFFFF );
if (pkts != 0) {
#ifdef SMC_USE_DMA
unsigned int fifo;
if (lp->rxdma_active){
DBG(SMC_DEBUG_RX | SMC_DEBUG_DMA,
"%s: RX DMA active\n", dev->name);
/* The DMA is already running so up the IRQ threshold */
fifo = SMC_GET_FIFO_INT(lp) & ~0xFF;
fifo |= pkts & 0xFF;
DBG(SMC_DEBUG_RX,
"%s: Setting RX stat FIFO threshold to %d\n",
dev->name, fifo & 0xff);
SMC_SET_FIFO_INT(lp, fifo);
} else
#endif
smc911x_rcv(dev);
}
SMC_ACK_INT(lp, INT_STS_RSFL_);
}
/* Handle transmit FIFO available */
if (status & INT_STS_TDFA_) {
DBG(SMC_DEBUG_TX, "%s: TX data FIFO space available irq\n", dev->name);
SMC_SET_FIFO_TDA(lp, 0xFF);
lp->tx_throttle = 0;
#ifdef SMC_USE_DMA
if (!lp->txdma_active)
#endif
netif_wake_queue(dev);
SMC_ACK_INT(lp, INT_STS_TDFA_);
}
/* Handle transmit done condition */
#if 1
if (status & (INT_STS_TSFL_ | INT_STS_GPT_INT_)) {
DBG(SMC_DEBUG_TX | SMC_DEBUG_MISC,
"%s: Tx stat FIFO limit (%d) /GPT irq\n",
dev->name, (SMC_GET_FIFO_INT(lp) & 0x00ff0000) >> 16);
smc911x_tx(dev);
SMC_SET_GPT_CFG(lp, GPT_CFG_TIMER_EN_ | 10000);
SMC_ACK_INT(lp, INT_STS_TSFL_);
SMC_ACK_INT(lp, INT_STS_TSFL_ | INT_STS_GPT_INT_);
}
#else
if (status & INT_STS_TSFL_) {
DBG(SMC_DEBUG_TX, "%s: TX status FIFO limit (%d) irq\n", dev->name, );
smc911x_tx(dev);
SMC_ACK_INT(lp, INT_STS_TSFL_);
}
if (status & INT_STS_GPT_INT_) {
DBG(SMC_DEBUG_RX, "%s: IRQ_CFG 0x%08x FIFO_INT 0x%08x RX_CFG 0x%08x\n",
dev->name,
SMC_GET_IRQ_CFG(lp),
SMC_GET_FIFO_INT(lp),
SMC_GET_RX_CFG(lp));
DBG(SMC_DEBUG_RX, "%s: Rx Stat FIFO Used 0x%02x "
"Data FIFO Used 0x%04x Stat FIFO 0x%08x\n",
dev->name,
(SMC_GET_RX_FIFO_INF(lp) & 0x00ff0000) >> 16,
SMC_GET_RX_FIFO_INF(lp) & 0xffff,
SMC_GET_RX_STS_FIFO_PEEK(lp));
SMC_SET_GPT_CFG(lp, GPT_CFG_TIMER_EN_ | 10000);
SMC_ACK_INT(lp, INT_STS_GPT_INT_);
}
#endif
/* Handle PHY interrupt condition */
if (status & INT_STS_PHY_INT_) {
DBG(SMC_DEBUG_MISC, "%s: PHY irq\n", dev->name);
smc911x_phy_interrupt(dev);
SMC_ACK_INT(lp, INT_STS_PHY_INT_);
}
} while (--timeout);
/* restore mask state */
SMC_SET_INT_EN(lp, mask);
DBG(SMC_DEBUG_MISC, "%s: Interrupt done (%d loops)\n",
dev->name, 8-timeout);
spin_unlock_irqrestore(&lp->lock, flags);
return IRQ_HANDLED;
}
#ifdef SMC_USE_DMA
static void
smc911x_tx_dma_irq(int dma, void *data)
{
struct net_device *dev = (struct net_device *)data;
struct smc911x_local *lp = netdev_priv(dev);
struct sk_buff *skb = lp->current_tx_skb;
unsigned long flags;
DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __func__);
DBG(SMC_DEBUG_TX | SMC_DEBUG_DMA, "%s: TX DMA irq handler\n", dev->name);
/* Clear the DMA interrupt sources */
SMC_DMA_ACK_IRQ(dev, dma);
BUG_ON(skb == NULL);
dma_unmap_single(NULL, tx_dmabuf, tx_dmalen, DMA_TO_DEVICE);
dev->trans_start = jiffies;
dev_kfree_skb_irq(skb);
lp->current_tx_skb = NULL;
if (lp->pending_tx_skb != NULL)
smc911x_hardware_send_pkt(dev);
else {
DBG(SMC_DEBUG_TX | SMC_DEBUG_DMA,
"%s: No pending Tx packets. DMA disabled\n", dev->name);
spin_lock_irqsave(&lp->lock, flags);
lp->txdma_active = 0;
if (!lp->tx_throttle) {
netif_wake_queue(dev);
}
spin_unlock_irqrestore(&lp->lock, flags);
}
DBG(SMC_DEBUG_TX | SMC_DEBUG_DMA,
"%s: TX DMA irq completed\n", dev->name);
}
static void
smc911x_rx_dma_irq(int dma, void *data)
{
struct net_device *dev = (struct net_device *)data;
unsigned long ioaddr = dev->base_addr;
struct smc911x_local *lp = netdev_priv(dev);
struct sk_buff *skb = lp->current_rx_skb;
unsigned long flags;
unsigned int pkts;
DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __func__);
DBG(SMC_DEBUG_RX | SMC_DEBUG_DMA, "%s: RX DMA irq handler\n", dev->name);
/* Clear the DMA interrupt sources */
SMC_DMA_ACK_IRQ(dev, dma);
dma_unmap_single(NULL, rx_dmabuf, rx_dmalen, DMA_FROM_DEVICE);
BUG_ON(skb == NULL);
lp->current_rx_skb = NULL;
PRINT_PKT(skb->data, skb->len);
skb->protocol = eth_type_trans(skb, dev);
dev->stats.rx_packets++;
dev->stats.rx_bytes += skb->len;
netif_rx(skb);
spin_lock_irqsave(&lp->lock, flags);
pkts = (SMC_GET_RX_FIFO_INF(lp) & RX_FIFO_INF_RXSUSED_) >> 16;
if (pkts != 0) {
smc911x_rcv(dev);
}else {
lp->rxdma_active = 0;
}
spin_unlock_irqrestore(&lp->lock, flags);
DBG(SMC_DEBUG_RX | SMC_DEBUG_DMA,
"%s: RX DMA irq completed. DMA RX FIFO PKTS %d\n",
dev->name, pkts);
}
#endif /* SMC_USE_DMA */
#ifdef CONFIG_NET_POLL_CONTROLLER
/*
* Polling receive - used by netconsole and other diagnostic tools
* to allow network i/o with interrupts disabled.
*/
static void smc911x_poll_controller(struct net_device *dev)
{
disable_irq(dev->irq);
smc911x_interrupt(dev->irq, dev);
enable_irq(dev->irq);
}
#endif
/* Our watchdog timed out. Called by the networking layer */
static void smc911x_timeout(struct net_device *dev)
{
struct smc911x_local *lp = netdev_priv(dev);
int status, mask;
unsigned long flags;
DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __func__);
spin_lock_irqsave(&lp->lock, flags);
status = SMC_GET_INT(lp);
mask = SMC_GET_INT_EN(lp);
spin_unlock_irqrestore(&lp->lock, flags);
DBG(SMC_DEBUG_MISC, "%s: INT 0x%02x MASK 0x%02x\n",
dev->name, status, mask);
/* Dump the current TX FIFO contents and restart */
mask = SMC_GET_TX_CFG(lp);
SMC_SET_TX_CFG(lp, mask | TX_CFG_TXS_DUMP_ | TX_CFG_TXD_DUMP_);
/*
* Reconfiguring the PHY doesn't seem like a bad idea here, but
* smc911x_phy_configure() calls msleep() which calls schedule_timeout()
* which calls schedule(). Hence we use a work queue.
*/
if (lp->phy_type != 0)
schedule_work(&lp->phy_configure);
/* We can accept TX packets again */
dev->trans_start = jiffies; /* prevent tx timeout */
netif_wake_queue(dev);
}
/*
* This routine will, depending on the values passed to it,
* either make it accept multicast packets, go into
* promiscuous mode (for TCPDUMP and cousins) or accept
* a select set of multicast packets
*/
static void smc911x_set_multicast_list(struct net_device *dev)
{
struct smc911x_local *lp = netdev_priv(dev);
unsigned int multicast_table[2];
unsigned int mcr, update_multicast = 0;
unsigned long flags;
DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __func__);
spin_lock_irqsave(&lp->lock, flags);
SMC_GET_MAC_CR(lp, mcr);
spin_unlock_irqrestore(&lp->lock, flags);
if (dev->flags & IFF_PROMISC) {
DBG(SMC_DEBUG_MISC, "%s: RCR_PRMS\n", dev->name);
mcr |= MAC_CR_PRMS_;
}
/*
* Here, I am setting this to accept all multicast packets.
* I don't need to zero the multicast table, because the flag is
* checked before the table is used.
*/
else if (dev->flags & IFF_ALLMULTI || netdev_mc_count(dev) > 16) {
DBG(SMC_DEBUG_MISC, "%s: RCR_ALMUL\n", dev->name);
mcr |= MAC_CR_MCPAS_;
}
/*
* This sets the internal hardware table to filter out unwanted
* multicast packets before they take up memory.
*
* The SMC chip uses a hash table where the high 6 bits of the CRC of
* the address are the offset into the table. If that bit is 1, then the
* multicast packet is accepted. Otherwise, it's dropped silently.
*
* To use the 6 bits as an offset into the table, the high 1 bit is
* the number of the 32 bit register, while the low 5 bits are the bit
* within that register.
*/
else if (!netdev_mc_empty(dev)) {
struct netdev_hw_addr *ha;
/* Set the Hash Perfect filtering mode */
mcr |= MAC_CR_HPFILT_;
/* start with a table of all zeros: reject all */
memset(multicast_table, 0, sizeof(multicast_table));
netdev_for_each_mc_addr(ha, dev) {
u32 position;
/* upper 6 bits are used as hash index */
position = ether_crc(ETH_ALEN, ha->addr)>>26;
multicast_table[position>>5] |= 1 << (position&0x1f);
}
/* be sure I get rid of flags I might have set */
mcr &= ~(MAC_CR_PRMS_ | MAC_CR_MCPAS_);
/* now, the table can be loaded into the chipset */
update_multicast = 1;
} else {
DBG(SMC_DEBUG_MISC, "%s: ~(MAC_CR_PRMS_|MAC_CR_MCPAS_)\n",
dev->name);
mcr &= ~(MAC_CR_PRMS_ | MAC_CR_MCPAS_);
/*
* since I'm disabling all multicast entirely, I need to
* clear the multicast list
*/
memset(multicast_table, 0, sizeof(multicast_table));
update_multicast = 1;
}
spin_lock_irqsave(&lp->lock, flags);
SMC_SET_MAC_CR(lp, mcr);
if (update_multicast) {
DBG(SMC_DEBUG_MISC,
"%s: update mcast hash table 0x%08x 0x%08x\n",
dev->name, multicast_table[0], multicast_table[1]);
SMC_SET_HASHL(lp, multicast_table[0]);
SMC_SET_HASHH(lp, multicast_table[1]);
}
spin_unlock_irqrestore(&lp->lock, flags);
}
/*
* Open and Initialize the board
*
* Set up everything, reset the card, etc..
*/
static int
smc911x_open(struct net_device *dev)
{
struct smc911x_local *lp = netdev_priv(dev);
DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __func__);
/*
* Check that the address is valid. If it's not, refuse
* to bring the device up. The user must specify an
* address using ifconfig eth0 hw ether xx:xx:xx:xx:xx:xx
*/
if (!is_valid_ether_addr(dev->dev_addr)) {
PRINTK("%s: no valid ethernet hw addr\n", __func__);
return -EINVAL;
}
/* reset the hardware */
smc911x_reset(dev);
/* Configure the PHY, initialize the link state */
smc911x_phy_configure(&lp->phy_configure);
/* Turn on Tx + Rx */
smc911x_enable(dev);
netif_start_queue(dev);
return 0;
}
/*
* smc911x_close
*
* this makes the board clean up everything that it can
* and not talk to the outside world. Caused by
* an 'ifconfig ethX down'
*/
static int smc911x_close(struct net_device *dev)
{
struct smc911x_local *lp = netdev_priv(dev);
DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __func__);
netif_stop_queue(dev);
netif_carrier_off(dev);
/* clear everything */
smc911x_shutdown(dev);
if (lp->phy_type != 0) {
/* We need to ensure that no calls to
* smc911x_phy_configure are pending.
*/
cancel_work_sync(&lp->phy_configure);
smc911x_phy_powerdown(dev, lp->mii.phy_id);
}
if (lp->pending_tx_skb) {
dev_kfree_skb(lp->pending_tx_skb);
lp->pending_tx_skb = NULL;
}
return 0;
}
/*
* Ethtool support
*/
static int
smc911x_ethtool_getsettings(struct net_device *dev, struct ethtool_cmd *cmd)
{
struct smc911x_local *lp = netdev_priv(dev);
int ret, status;
unsigned long flags;
DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __func__);
cmd->maxtxpkt = 1;
cmd->maxrxpkt = 1;
if (lp->phy_type != 0) {
spin_lock_irqsave(&lp->lock, flags);
ret = mii_ethtool_gset(&lp->mii, cmd);
spin_unlock_irqrestore(&lp->lock, flags);
} else {
cmd->supported = SUPPORTED_10baseT_Half |
SUPPORTED_10baseT_Full |
SUPPORTED_TP | SUPPORTED_AUI;
if (lp->ctl_rspeed == 10)
ethtool_cmd_speed_set(cmd, SPEED_10);
else if (lp->ctl_rspeed == 100)
ethtool_cmd_speed_set(cmd, SPEED_100);
cmd->autoneg = AUTONEG_DISABLE;
if (lp->mii.phy_id == 1)
cmd->transceiver = XCVR_INTERNAL;
else
cmd->transceiver = XCVR_EXTERNAL;
cmd->port = 0;
SMC_GET_PHY_SPECIAL(lp, lp->mii.phy_id, status);
cmd->duplex =
(status & (PHY_SPECIAL_SPD_10FULL_ | PHY_SPECIAL_SPD_100FULL_)) ?
DUPLEX_FULL : DUPLEX_HALF;
ret = 0;
}
return ret;
}
static int
smc911x_ethtool_setsettings(struct net_device *dev, struct ethtool_cmd *cmd)
{
struct smc911x_local *lp = netdev_priv(dev);
int ret;
unsigned long flags;
if (lp->phy_type != 0) {
spin_lock_irqsave(&lp->lock, flags);
ret = mii_ethtool_sset(&lp->mii, cmd);
spin_unlock_irqrestore(&lp->lock, flags);
} else {
if (cmd->autoneg != AUTONEG_DISABLE ||
cmd->speed != SPEED_10 ||
(cmd->duplex != DUPLEX_HALF && cmd->duplex != DUPLEX_FULL) ||
(cmd->port != PORT_TP && cmd->port != PORT_AUI))
return -EINVAL;
lp->ctl_rfduplx = cmd->duplex == DUPLEX_FULL;
ret = 0;
}
return ret;
}
static void
smc911x_ethtool_getdrvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
strncpy(info->driver, CARDNAME, sizeof(info->driver));
strncpy(info->version, version, sizeof(info->version));
strncpy(info->bus_info, dev_name(dev->dev.parent), sizeof(info->bus_info));
}
static int smc911x_ethtool_nwayreset(struct net_device *dev)
{
struct smc911x_local *lp = netdev_priv(dev);
int ret = -EINVAL;
unsigned long flags;
if (lp->phy_type != 0) {
spin_lock_irqsave(&lp->lock, flags);
ret = mii_nway_restart(&lp->mii);
spin_unlock_irqrestore(&lp->lock, flags);
}
return ret;
}
static u32 smc911x_ethtool_getmsglevel(struct net_device *dev)
{
struct smc911x_local *lp = netdev_priv(dev);
return lp->msg_enable;
}
static void smc911x_ethtool_setmsglevel(struct net_device *dev, u32 level)
{
struct smc911x_local *lp = netdev_priv(dev);
lp->msg_enable = level;
}
static int smc911x_ethtool_getregslen(struct net_device *dev)
{
/* System regs + MAC regs + PHY regs */
return (((E2P_CMD - ID_REV)/4 + 1) +
(WUCSR - MAC_CR)+1 + 32) * sizeof(u32);
}
static void smc911x_ethtool_getregs(struct net_device *dev,
struct ethtool_regs* regs, void *buf)
{
struct smc911x_local *lp = netdev_priv(dev);
unsigned long flags;
u32 reg,i,j=0;
u32 *data = (u32*)buf;
regs->version = lp->version;
for(i=ID_REV;i<=E2P_CMD;i+=4) {
data[j++] = SMC_inl(lp, i);
}
for(i=MAC_CR;i<=WUCSR;i++) {
spin_lock_irqsave(&lp->lock, flags);
SMC_GET_MAC_CSR(lp, i, reg);
spin_unlock_irqrestore(&lp->lock, flags);
data[j++] = reg;
}
for(i=0;i<=31;i++) {
spin_lock_irqsave(&lp->lock, flags);
SMC_GET_MII(lp, i, lp->mii.phy_id, reg);
spin_unlock_irqrestore(&lp->lock, flags);
data[j++] = reg & 0xFFFF;
}
}
static int smc911x_ethtool_wait_eeprom_ready(struct net_device *dev)
{
struct smc911x_local *lp = netdev_priv(dev);
unsigned int timeout;
int e2p_cmd;
e2p_cmd = SMC_GET_E2P_CMD(lp);
for(timeout=10;(e2p_cmd & E2P_CMD_EPC_BUSY_) && timeout; timeout--) {
if (e2p_cmd & E2P_CMD_EPC_TIMEOUT_) {
PRINTK("%s: %s timeout waiting for EEPROM to respond\n",
dev->name, __func__);
return -EFAULT;
}
mdelay(1);
e2p_cmd = SMC_GET_E2P_CMD(lp);
}
if (timeout == 0) {
PRINTK("%s: %s timeout waiting for EEPROM CMD not busy\n",
dev->name, __func__);
return -ETIMEDOUT;
}
return 0;
}
static inline int smc911x_ethtool_write_eeprom_cmd(struct net_device *dev,
int cmd, int addr)
{
struct smc911x_local *lp = netdev_priv(dev);
int ret;
if ((ret = smc911x_ethtool_wait_eeprom_ready(dev))!=0)
return ret;
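/* Per the masks below, the command select lives in bits 30:28 of
* E2P_CMD and the byte address in bits 7:0; EPC_BUSY kicks off the
* access (the busy bit is polled above before a new one is issued). */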
SMC_SET_E2P_CMD(lp, E2P_CMD_EPC_BUSY_ |
((cmd) & (0x7<<28)) |
((addr) & 0xFF));
return 0;
}
static inline int smc911x_ethtool_read_eeprom_byte(struct net_device *dev,
u8 *data)
{
struct smc911x_local *lp = netdev_priv(dev);
int ret;
if ((ret = smc911x_ethtool_wait_eeprom_ready(dev))!=0)
return ret;
*data = SMC_GET_E2P_DATA(lp);
return 0;
}
static inline int smc911x_ethtool_write_eeprom_byte(struct net_device *dev,
u8 data)
{
struct smc911x_local *lp = netdev_priv(dev);
int ret;
if ((ret = smc911x_ethtool_wait_eeprom_ready(dev))!=0)
return ret;
SMC_SET_E2P_DATA(lp, data);
return 0;
}
static int smc911x_ethtool_geteeprom(struct net_device *dev,
struct ethtool_eeprom *eeprom, u8 *data)
{
u8 eebuf[SMC911X_EEPROM_LEN];
int i, ret;
for(i=0;i<SMC911X_EEPROM_LEN;i++) {
if ((ret=smc911x_ethtool_write_eeprom_cmd(dev, E2P_CMD_EPC_CMD_READ_, i ))!=0)
return ret;
if ((ret=smc911x_ethtool_read_eeprom_byte(dev, &eebuf[i]))!=0)
return ret;
}
memcpy(data, eebuf+eeprom->offset, eeprom->len);
return 0;
}
static int smc911x_ethtool_seteeprom(struct net_device *dev,
struct ethtool_eeprom *eeprom, u8 *data)
{
int i, ret;
/* Enable erase */
if ((ret=smc911x_ethtool_write_eeprom_cmd(dev, E2P_CMD_EPC_CMD_EWEN_, 0 ))!=0)
return ret;
for(i=eeprom->offset;i<(eeprom->offset+eeprom->len);i++) {
/* erase byte */
if ((ret=smc911x_ethtool_write_eeprom_cmd(dev, E2P_CMD_EPC_CMD_ERASE_, i ))!=0)
return ret;
/* write byte */
if ((ret=smc911x_ethtool_write_eeprom_byte(dev, *data))!=0)
return ret;
if ((ret=smc911x_ethtool_write_eeprom_cmd(dev, E2P_CMD_EPC_CMD_WRITE_, i ))!=0)
return ret;
}
return 0;
}
static int smc911x_ethtool_geteeprom_len(struct net_device *dev)
{
return SMC911X_EEPROM_LEN;
}
static const struct ethtool_ops smc911x_ethtool_ops = {
.get_settings = smc911x_ethtool_getsettings,
.set_settings = smc911x_ethtool_setsettings,
.get_drvinfo = smc911x_ethtool_getdrvinfo,
.get_msglevel = smc911x_ethtool_getmsglevel,
.set_msglevel = smc911x_ethtool_setmsglevel,
.nway_reset = smc911x_ethtool_nwayreset,
.get_link = ethtool_op_get_link,
.get_regs_len = smc911x_ethtool_getregslen,
.get_regs = smc911x_ethtool_getregs,
.get_eeprom_len = smc911x_ethtool_geteeprom_len,
.get_eeprom = smc911x_ethtool_geteeprom,
.set_eeprom = smc911x_ethtool_seteeprom,
};
/*
* smc911x_findirq
*
* This routine has a simple purpose -- make the SMC chip generate an
* interrupt, so an auto-detect routine can detect it and find the IRQ.
*/
static int __devinit smc911x_findirq(struct net_device *dev)
{
struct smc911x_local *lp = netdev_priv(dev);
int timeout = 20;
unsigned long cookie;
DBG(SMC_DEBUG_FUNC, "--> %s\n", __func__);
cookie = probe_irq_on();
/*
* Force a SW interrupt
*/
SMC_SET_INT_EN(lp, INT_EN_SW_INT_EN_);
/*
* Wait until positive that the interrupt has been generated
*/
do {
int int_status;
udelay(10);
int_status = SMC_GET_INT_EN(lp);
if (int_status & INT_EN_SW_INT_EN_)
break; /* got the interrupt */
} while (--timeout);
/*
* there is really nothing that I can do here if timeout fails,
* as autoirq_report will return a 0 anyway, which is what I
* want in this case. Plus, the clean up is needed in both
* cases.
*/
/* and disable all interrupts again */
SMC_SET_INT_EN(lp, 0);
/* and return what I found */
return probe_irq_off(cookie);
}
static const struct net_device_ops smc911x_netdev_ops = {
.ndo_open = smc911x_open,
.ndo_stop = smc911x_close,
.ndo_start_xmit = smc911x_hard_start_xmit,
.ndo_tx_timeout = smc911x_timeout,
.ndo_set_rx_mode = smc911x_set_multicast_list,
.ndo_change_mtu = eth_change_mtu,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = eth_mac_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = smc911x_poll_controller,
#endif
};
/*
* Function: smc911x_probe(unsigned long ioaddr)
*
* Purpose:
* Tests to see if a given ioaddr points to an SMC911x chip.
* Returns a 0 on success
*
* Algorithm:
* (1) see if the endian word is OK
* (2) see if I recognize the chip ID in the appropriate register
*
* Here I do typical initialization tasks.
*
* o Initialize the structure if needed
* o print out my vanity message if not done so already
* o print out what type of hardware is detected
* o print out the ethernet address
* o find the IRQ
* o set up my private data
* o configure the dev structure with my subroutines
* o actually GRAB the irq.
* o GRAB the region
*/
static int __devinit smc911x_probe(struct net_device *dev)
{
struct smc911x_local *lp = netdev_priv(dev);
int i, retval;
unsigned int val, chip_id, revision;
const char *version_string;
unsigned long irq_flags;
DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __func__);
/* First, see if the endian word is recognized */
val = SMC_GET_BYTE_TEST(lp);
DBG(SMC_DEBUG_MISC, "%s: endian probe returned 0x%04x\n", CARDNAME, val);
if (val != 0x87654321) {
printk(KERN_ERR "Invalid chip endian 0x%08x\n",val);
retval = -ENODEV;
goto err_out;
}
/*
* check if the revision register is something that I
* recognize. These might need to be added to later,
* as future revisions could be added.
*/
chip_id = SMC_GET_PN(lp);
DBG(SMC_DEBUG_MISC, "%s: id probe returned 0x%04x\n", CARDNAME, chip_id);
for(i=0;chip_ids[i].id != 0; i++) {
if (chip_ids[i].id == chip_id) break;
}
if (!chip_ids[i].id) {
printk(KERN_ERR "Unknown chip ID %04x\n", chip_id);
retval = -ENODEV;
goto err_out;
}
version_string = chip_ids[i].name;
revision = SMC_GET_REV(lp);
DBG(SMC_DEBUG_MISC, "%s: revision = 0x%04x\n", CARDNAME, revision);
/* At this point I'll assume that the chip is an SMC911x. */
DBG(SMC_DEBUG_MISC, "%s: Found a %s\n", CARDNAME, chip_ids[i].name);
/* Validate the TX FIFO size requested */
if ((tx_fifo_kb < 2) || (tx_fifo_kb > 14)) {
printk(KERN_ERR "Invalid TX FIFO size requested %d\n", tx_fifo_kb);
retval = -EINVAL;
goto err_out;
}
/* fill in some of the fields */
lp->version = chip_ids[i].id;
lp->revision = revision;
lp->tx_fifo_kb = tx_fifo_kb;
/* Reverse calculate the RX FIFO size from the TX */
lp->tx_fifo_size=(lp->tx_fifo_kb<<10) - 512;
lp->rx_fifo_size= ((0x4000 - 512 - lp->tx_fifo_size) / 16) * 15;
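/*
* Reading the arithmetic above: the chip has a 16KB (0x4000 byte)
* packet buffer, the TX side gets tx_fifo_kb kilobytes minus 512 bytes
* (presumably the TX status FIFO), and whatever remains is RX, split
* 15/16 data to 1/16 status, hence the "/ 16) * 15".
*/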
/* Set the automatic flow control values */
switch(lp->tx_fifo_kb) {
/*
* AFC_HI is about ((Rx Data Fifo Size)*2/3)/64
* AFC_LO is AFC_HI/2
* BACK_DUR is about 5uS*(AFC_LO) rounded down
*/
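/*
* Sanity check of the first entry, assuming AFC_HI sits in bits 23:16
* and AFC_LO in bits 15:8 of AFC_CFG: for tx_fifo_kb == 2 the RX data
* FIFO is 13440 bytes, (13440 * 2/3) / 64 = 140 = 0x8C and 140 / 2 =
* 70 = 0x46, matching the 0x008C46AF value below.
*/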
case 2:/* 13440 Rx Data Fifo Size */
lp->afc_cfg=0x008C46AF;break;
case 3:/* 12480 Rx Data Fifo Size */
lp->afc_cfg=0x0082419F;break;
case 4:/* 11520 Rx Data Fifo Size */
lp->afc_cfg=0x00783C9F;break;
case 5:/* 10560 Rx Data Fifo Size */
lp->afc_cfg=0x006E374F;break;
case 6:/* 9600 Rx Data Fifo Size */
lp->afc_cfg=0x0064328F;break;
case 7:/* 8640 Rx Data Fifo Size */
lp->afc_cfg=0x005A2D7F;break;
case 8:/* 7680 Rx Data Fifo Size */
lp->afc_cfg=0x0050287F;break;
case 9:/* 6720 Rx Data Fifo Size */
lp->afc_cfg=0x0046236F;break;
case 10:/* 5760 Rx Data Fifo Size */
lp->afc_cfg=0x003C1E6F;break;
case 11:/* 4800 Rx Data Fifo Size */
lp->afc_cfg=0x0032195F;break;
/*
* AFC_HI is ~1520 bytes less than RX Data Fifo Size
* AFC_LO is AFC_HI/2
* BACK_DUR is about 5uS*(AFC_LO) rounded down
*/
case 12:/* 3840 Rx Data Fifo Size */
lp->afc_cfg=0x0024124F;break;
case 13:/* 2880 Rx Data Fifo Size */
lp->afc_cfg=0x0015073F;break;
case 14:/* 1920 Rx Data Fifo Size */
lp->afc_cfg=0x0006032F;break;
default:
PRINTK("%s: ERROR -- no AFC_CFG setting found",
dev->name);
break;
}
DBG(SMC_DEBUG_MISC | SMC_DEBUG_TX | SMC_DEBUG_RX,
"%s: tx_fifo %d rx_fifo %d afc_cfg 0x%08x\n", CARDNAME,
lp->tx_fifo_size, lp->rx_fifo_size, lp->afc_cfg);
spin_lock_init(&lp->lock);
/* Get the MAC address */
SMC_GET_MAC_ADDR(lp, dev->dev_addr);
/* now, reset the chip, and put it into a known state */
smc911x_reset(dev);
/*
* If dev->irq is 0, then the device has to be banged on to see
* what the IRQ is.
*
* Specifying an IRQ is done with the assumption that the user knows
* what (s)he is doing. No checking is done!!!!
*/
if (dev->irq < 1) {
int trials;
trials = 3;
while (trials--) {
dev->irq = smc911x_findirq(dev);
if (dev->irq)
break;
/* kick the card and try again */
smc911x_reset(dev);
}
}
if (dev->irq == 0) {
printk("%s: Couldn't autodetect your IRQ. Use irq=xx.\n",
dev->name);
retval = -ENODEV;
goto err_out;
}
dev->irq = irq_canonicalize(dev->irq);
/* Fill in the fields of the device structure with ethernet values. */
ether_setup(dev);
dev->netdev_ops = &smc911x_netdev_ops;
dev->watchdog_timeo = msecs_to_jiffies(watchdog);
dev->ethtool_ops = &smc911x_ethtool_ops;
INIT_WORK(&lp->phy_configure, smc911x_phy_configure);
lp->mii.phy_id_mask = 0x1f;
lp->mii.reg_num_mask = 0x1f;
lp->mii.force_media = 0;
lp->mii.full_duplex = 0;
lp->mii.dev = dev;
lp->mii.mdio_read = smc911x_phy_read;
lp->mii.mdio_write = smc911x_phy_write;
/*
* Locate the phy, if any.
*/
smc911x_phy_detect(dev);
/* Set default parameters */
lp->msg_enable = NETIF_MSG_LINK;
lp->ctl_rfduplx = 1;
lp->ctl_rspeed = 100;
#ifdef SMC_DYNAMIC_BUS_CONFIG
irq_flags = lp->cfg.irq_flags;
#else
irq_flags = IRQF_SHARED | SMC_IRQ_SENSE;
#endif
/* Grab the IRQ */
retval = request_irq(dev->irq, smc911x_interrupt,
irq_flags, dev->name, dev);
if (retval)
goto err_out;
#ifdef SMC_USE_DMA
lp->rxdma = SMC_DMA_REQUEST(dev, smc911x_rx_dma_irq);
lp->txdma = SMC_DMA_REQUEST(dev, smc911x_tx_dma_irq);
lp->rxdma_active = 0;
lp->txdma_active = 0;
dev->dma = lp->rxdma;
#endif
retval = register_netdev(dev);
if (retval == 0) {
/* now, print out the card info, in a short format.. */
printk("%s: %s (rev %d) at %#lx IRQ %d",
dev->name, version_string, lp->revision,
dev->base_addr, dev->irq);
#ifdef SMC_USE_DMA
if (lp->rxdma != -1)
printk(" RXDMA %d ", lp->rxdma);
if (lp->txdma != -1)
printk("TXDMA %d", lp->txdma);
#endif
printk("\n");
if (!is_valid_ether_addr(dev->dev_addr)) {
printk("%s: Invalid ethernet MAC address. Please "
"set using ifconfig\n", dev->name);
} else {
/* Print the Ethernet address */
printk("%s: Ethernet addr: %pM\n",
dev->name, dev->dev_addr);
}
if (lp->phy_type == 0) {
PRINTK("%s: No PHY found\n", dev->name);
} else if ((lp->phy_type & ~0xff) == LAN911X_INTERNAL_PHY_ID) {
PRINTK("%s: LAN911x Internal PHY\n", dev->name);
} else {
PRINTK("%s: External PHY 0x%08x\n", dev->name, lp->phy_type);
}
}
err_out:
#ifdef SMC_USE_DMA
if (retval) {
if (lp->rxdma != -1) {
SMC_DMA_FREE(dev, lp->rxdma);
}
if (lp->txdma != -1) {
SMC_DMA_FREE(dev, lp->txdma);
}
}
#endif
return retval;
}
/*
* smc911x_init(void)
*
* Output:
* 0 --> there is a device
* anything else, error
*/
static int __devinit smc911x_drv_probe(struct platform_device *pdev)
{
struct net_device *ndev;
struct resource *res;
struct smc911x_local *lp;
unsigned int *addr;
int ret;
DBG(SMC_DEBUG_FUNC, "--> %s\n", __func__);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res) {
ret = -ENODEV;
goto out;
}
/*
* Request the regions.
*/
if (!request_mem_region(res->start, SMC911X_IO_EXTENT, CARDNAME)) {
ret = -EBUSY;
goto out;
}
ndev = alloc_etherdev(sizeof(struct smc911x_local));
if (!ndev) {
ret = -ENOMEM;
goto release_1;
}
SET_NETDEV_DEV(ndev, &pdev->dev);
ndev->dma = (unsigned char)-1;
ndev->irq = platform_get_irq(pdev, 0);
lp = netdev_priv(ndev);
lp->netdev = ndev;
#ifdef SMC_DYNAMIC_BUS_CONFIG
{
struct smc911x_platdata *pd = pdev->dev.platform_data;
if (!pd) {
ret = -EINVAL;
goto release_both;
}
memcpy(&lp->cfg, pd, sizeof(lp->cfg));
}
#endif
addr = ioremap(res->start, SMC911X_IO_EXTENT);
if (!addr) {
ret = -ENOMEM;
goto release_both;
}
platform_set_drvdata(pdev, ndev);
lp->base = addr;
ndev->base_addr = res->start;
ret = smc911x_probe(ndev);
if (ret != 0) {
platform_set_drvdata(pdev, NULL);
iounmap(addr);
release_both:
free_netdev(ndev);
release_1:
release_mem_region(res->start, SMC911X_IO_EXTENT);
out:
printk("%s: not found (%d).\n", CARDNAME, ret);
}
#ifdef SMC_USE_DMA
else {
lp->physaddr = res->start;
lp->dev = &pdev->dev;
}
#endif
return ret;
}
static int __devexit smc911x_drv_remove(struct platform_device *pdev)
{
struct net_device *ndev = platform_get_drvdata(pdev);
struct smc911x_local *lp = netdev_priv(ndev);
struct resource *res;
DBG(SMC_DEBUG_FUNC, "--> %s\n", __func__);
platform_set_drvdata(pdev, NULL);
unregister_netdev(ndev);
free_irq(ndev->irq, ndev);
#ifdef SMC_USE_DMA
{
if (lp->rxdma != -1) {
SMC_DMA_FREE(dev, lp->rxdma);
}
if (lp->txdma != -1) {
SMC_DMA_FREE(dev, lp->txdma);
}
}
#endif
iounmap(lp->base);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
release_mem_region(res->start, SMC911X_IO_EXTENT);
free_netdev(ndev);
return 0;
}
static int smc911x_drv_suspend(struct platform_device *dev, pm_message_t state)
{
struct net_device *ndev = platform_get_drvdata(dev);
struct smc911x_local *lp = netdev_priv(ndev);
DBG(SMC_DEBUG_FUNC, "--> %s\n", __func__);
if (ndev) {
if (netif_running(ndev)) {
netif_device_detach(ndev);
smc911x_shutdown(ndev);
#if POWER_DOWN
/* Set D2 - Energy detect only setting */
SMC_SET_PMT_CTRL(lp, 2<<12);
#endif
}
}
return 0;
}
static int smc911x_drv_resume(struct platform_device *dev)
{
struct net_device *ndev = platform_get_drvdata(dev);
DBG(SMC_DEBUG_FUNC, "--> %s\n", __func__);
if (ndev) {
struct smc911x_local *lp = netdev_priv(ndev);
if (netif_running(ndev)) {
smc911x_reset(ndev);
if (lp->phy_type != 0)
smc911x_phy_configure(&lp->phy_configure);
smc911x_enable(ndev);
netif_device_attach(ndev);
}
}
return 0;
}
static struct platform_driver smc911x_driver = {
.probe = smc911x_drv_probe,
.remove = __devexit_p(smc911x_drv_remove),
.suspend = smc911x_drv_suspend,
.resume = smc911x_drv_resume,
.driver = {
.name = CARDNAME,
.owner = THIS_MODULE,
},
};
module_platform_driver(smc911x_driver);
| gpl-2.0 |
AOKP/kernel_samsung_jf | fs/xfs/xfs_dfrag.c | 4783 | 12761 | /*
* Copyright (c) 2000-2006 Silicon Graphics, Inc.
* All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it would be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_types.h"
#include "xfs_bit.h"
#include "xfs_log.h"
#include "xfs_inum.h"
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_inode_item.h"
#include "xfs_bmap.h"
#include "xfs_itable.h"
#include "xfs_dfrag.h"
#include "xfs_error.h"
#include "xfs_vnodeops.h"
#include "xfs_trace.h"
static int xfs_swap_extents(
xfs_inode_t *ip, /* target inode */
xfs_inode_t *tip, /* tmp inode */
xfs_swapext_t *sxp);
/*
* ioctl interface for swapext
*/
int
xfs_swapext(
xfs_swapext_t *sxp)
{
xfs_inode_t *ip, *tip;
struct file *file, *tmp_file;
int error = 0;
/* Pull information for the target fd */
file = fget((int)sxp->sx_fdtarget);
if (!file) {
error = XFS_ERROR(EINVAL);
goto out;
}
if (!(file->f_mode & FMODE_WRITE) ||
!(file->f_mode & FMODE_READ) ||
(file->f_flags & O_APPEND)) {
error = XFS_ERROR(EBADF);
goto out_put_file;
}
tmp_file = fget((int)sxp->sx_fdtmp);
if (!tmp_file) {
error = XFS_ERROR(EINVAL);
goto out_put_file;
}
if (!(tmp_file->f_mode & FMODE_WRITE) ||
!(tmp_file->f_mode & FMODE_READ) ||
(tmp_file->f_flags & O_APPEND)) {
error = XFS_ERROR(EBADF);
goto out_put_tmp_file;
}
if (IS_SWAPFILE(file->f_path.dentry->d_inode) ||
IS_SWAPFILE(tmp_file->f_path.dentry->d_inode)) {
error = XFS_ERROR(EINVAL);
goto out_put_tmp_file;
}
ip = XFS_I(file->f_path.dentry->d_inode);
tip = XFS_I(tmp_file->f_path.dentry->d_inode);
if (ip->i_mount != tip->i_mount) {
error = XFS_ERROR(EINVAL);
goto out_put_tmp_file;
}
if (ip->i_ino == tip->i_ino) {
error = XFS_ERROR(EINVAL);
goto out_put_tmp_file;
}
if (XFS_FORCED_SHUTDOWN(ip->i_mount)) {
error = XFS_ERROR(EIO);
goto out_put_tmp_file;
}
error = xfs_swap_extents(ip, tip, sxp);
out_put_tmp_file:
fput(tmp_file);
out_put_file:
fput(file);
out:
return error;
}
/*
* We need to check that the format of the data fork in the temporary inode is
* valid for the target inode before doing the swap. This is not a problem with
* attr1 because of the fixed fork offset, but attr2 has a dynamically sized
* data fork depending on the space the attribute fork is taking so we can get
* invalid formats on the target inode.
*
* E.g. target has space for 7 extents in extent format, temp inode only has
* space for 6. If we defragment down to 7 extents, then the tmp format is a
* btree, but when swapped it needs to be in extent format. Hence we can't just
* blindly swap data forks on attr2 filesystems.
*
* Note that we check the swap in both directions so that we don't end up with
* a corrupt temporary inode, either.
*
* Note that fixing the way xfs_fsr sets up the attribute fork in the source
* inode will prevent this situation from occurring, so all we do here is
* reject and log the attempt. Basically, we are putting the responsibility on
* userspace to get this right.
*/
static int
xfs_swap_extents_check_format(
xfs_inode_t *ip, /* target inode */
xfs_inode_t *tip) /* tmp inode */
{
/* Should never get a local format */
if (ip->i_d.di_format == XFS_DINODE_FMT_LOCAL ||
tip->i_d.di_format == XFS_DINODE_FMT_LOCAL)
return EINVAL;
/*
* if the target inode has fewer extents than the temporary inode,
* why did userspace call us?
*/
if (ip->i_d.di_nextents < tip->i_d.di_nextents)
return EINVAL;
/*
* if the target inode is in extent form and the temp inode is in btree
* form, then we will end up with the target inode in the wrong format,
* as we already know there are fewer extents in the temp inode.
*/
if (ip->i_d.di_format == XFS_DINODE_FMT_EXTENTS &&
tip->i_d.di_format == XFS_DINODE_FMT_BTREE)
return EINVAL;
/* Check temp in extent form to max in target */
if (tip->i_d.di_format == XFS_DINODE_FMT_EXTENTS &&
XFS_IFORK_NEXTENTS(tip, XFS_DATA_FORK) >
XFS_IFORK_MAXEXT(ip, XFS_DATA_FORK))
return EINVAL;
/* Check target in extent form to max in temp */
if (ip->i_d.di_format == XFS_DINODE_FMT_EXTENTS &&
XFS_IFORK_NEXTENTS(ip, XFS_DATA_FORK) >
XFS_IFORK_MAXEXT(tip, XFS_DATA_FORK))
return EINVAL;
/*
* If we are in a btree format, check that the temp root block will fit
* in the target and that it has enough extents to be in btree format
* in the target.
*
* Note that we have to be careful to allow btree->extent conversions
* (a common defrag case) which will occur when the temp inode is in
* extent format...
*/
if (tip->i_d.di_format == XFS_DINODE_FMT_BTREE) {
if (XFS_IFORK_BOFF(ip) &&
tip->i_df.if_broot_bytes > XFS_IFORK_BOFF(ip))
return EINVAL;
if (XFS_IFORK_NEXTENTS(tip, XFS_DATA_FORK) <=
XFS_IFORK_MAXEXT(ip, XFS_DATA_FORK))
return EINVAL;
}
/* Reciprocal target->temp btree format checks */
if (ip->i_d.di_format == XFS_DINODE_FMT_BTREE) {
if (XFS_IFORK_BOFF(tip) &&
ip->i_df.if_broot_bytes > XFS_IFORK_BOFF(tip))
return EINVAL;
if (XFS_IFORK_NEXTENTS(ip, XFS_DATA_FORK) <=
XFS_IFORK_MAXEXT(tip, XFS_DATA_FORK))
return EINVAL;
}
return 0;
}
static int
xfs_swap_extents(
xfs_inode_t *ip, /* target inode */
xfs_inode_t *tip, /* tmp inode */
xfs_swapext_t *sxp)
{
xfs_mount_t *mp = ip->i_mount;
xfs_trans_t *tp;
xfs_bstat_t *sbp = &sxp->sx_stat;
xfs_ifork_t *tempifp, *ifp, *tifp;
int src_log_flags, target_log_flags;
int error = 0;
int aforkblks = 0;
int taforkblks = 0;
__uint64_t tmp;
tempifp = kmem_alloc(sizeof(xfs_ifork_t), KM_MAYFAIL);
if (!tempifp) {
error = XFS_ERROR(ENOMEM);
goto out;
}
/*
* we have to do two separate lock calls here to keep lockdep
* happy. If we try to get all the locks in one call, lockdep will
* report false positives when we drop the ILOCK and regain them
* below.
*/
xfs_lock_two_inodes(ip, tip, XFS_IOLOCK_EXCL);
xfs_lock_two_inodes(ip, tip, XFS_ILOCK_EXCL);
/* Verify that both files have the same format */
if ((ip->i_d.di_mode & S_IFMT) != (tip->i_d.di_mode & S_IFMT)) {
error = XFS_ERROR(EINVAL);
goto out_unlock;
}
/* Verify both files are either real-time or non-realtime */
if (XFS_IS_REALTIME_INODE(ip) != XFS_IS_REALTIME_INODE(tip)) {
error = XFS_ERROR(EINVAL);
goto out_unlock;
}
if (VN_CACHED(VFS_I(tip)) != 0) {
error = xfs_flushinval_pages(tip, 0, -1,
FI_REMAPF_LOCKED);
if (error)
goto out_unlock;
}
/* Verify O_DIRECT for ftmp */
if (VN_CACHED(VFS_I(tip)) != 0) {
error = XFS_ERROR(EINVAL);
goto out_unlock;
}
/* Verify all data are being swapped */
if (sxp->sx_offset != 0 ||
sxp->sx_length != ip->i_d.di_size ||
sxp->sx_length != tip->i_d.di_size) {
error = XFS_ERROR(EFAULT);
goto out_unlock;
}
trace_xfs_swap_extent_before(ip, 0);
trace_xfs_swap_extent_before(tip, 1);
/* check inode formats now that data is flushed */
error = xfs_swap_extents_check_format(ip, tip);
if (error) {
xfs_notice(mp,
"%s: inode 0x%llx format is incompatible for exchanging.",
__func__, ip->i_ino);
goto out_unlock;
}
/*
* Compare the current change & modify times with that
* passed in. If they differ, we abort this swap.
* This is the mechanism used to ensure the calling
* process that the file was not changed out from
* under it.
*/
if ((sbp->bs_ctime.tv_sec != VFS_I(ip)->i_ctime.tv_sec) ||
(sbp->bs_ctime.tv_nsec != VFS_I(ip)->i_ctime.tv_nsec) ||
(sbp->bs_mtime.tv_sec != VFS_I(ip)->i_mtime.tv_sec) ||
(sbp->bs_mtime.tv_nsec != VFS_I(ip)->i_mtime.tv_nsec)) {
error = XFS_ERROR(EBUSY);
goto out_unlock;
}
/* We need to fail if the file is memory mapped. Once we have tossed
* all existing pages, the page fault will have no option
* but to go to the filesystem for pages. By making the page fault call
* vop_read (or write in the case of autogrow) they block on the iolock
* until we have switched the extents.
*/
if (VN_MAPPED(VFS_I(ip))) {
error = XFS_ERROR(EBUSY);
goto out_unlock;
}
xfs_iunlock(ip, XFS_ILOCK_EXCL);
xfs_iunlock(tip, XFS_ILOCK_EXCL);
/*
* There is a race condition here since we gave up the
* ilock. However, the data fork will not change since
* we have the iolock (locked for truncation too) so we
* are safe. We don't really care if non-io related
* fields change.
*/
xfs_tosspages(ip, 0, -1, FI_REMAPF);
tp = xfs_trans_alloc(mp, XFS_TRANS_SWAPEXT);
if ((error = xfs_trans_reserve(tp, 0,
XFS_ICHANGE_LOG_RES(mp), 0,
0, 0))) {
xfs_iunlock(ip, XFS_IOLOCK_EXCL);
xfs_iunlock(tip, XFS_IOLOCK_EXCL);
xfs_trans_cancel(tp, 0);
goto out;
}
xfs_lock_two_inodes(ip, tip, XFS_ILOCK_EXCL);
/*
* Count the number of extended attribute blocks
*/
if ( ((XFS_IFORK_Q(ip) != 0) && (ip->i_d.di_anextents > 0)) &&
(ip->i_d.di_aformat != XFS_DINODE_FMT_LOCAL)) {
error = xfs_bmap_count_blocks(tp, ip, XFS_ATTR_FORK, &aforkblks);
if (error)
goto out_trans_cancel;
}
if ( ((XFS_IFORK_Q(tip) != 0) && (tip->i_d.di_anextents > 0)) &&
(tip->i_d.di_aformat != XFS_DINODE_FMT_LOCAL)) {
error = xfs_bmap_count_blocks(tp, tip, XFS_ATTR_FORK,
&taforkblks);
if (error)
goto out_trans_cancel;
}
/*
* Swap the data forks of the inodes
*/
ifp = &ip->i_df;
tifp = &tip->i_df;
*tempifp = *ifp; /* struct copy */
*ifp = *tifp; /* struct copy */
*tifp = *tempifp; /* struct copy */
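/* A plain three-way exchange through the scratch fork allocated at
* entry: after these copies, ip owns what used to be tip's data fork
* and vice versa. */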
/*
* Fix the on-disk inode values
*/
tmp = (__uint64_t)ip->i_d.di_nblocks;
ip->i_d.di_nblocks = tip->i_d.di_nblocks - taforkblks + aforkblks;
tip->i_d.di_nblocks = tmp + taforkblks - aforkblks;
tmp = (__uint64_t) ip->i_d.di_nextents;
ip->i_d.di_nextents = tip->i_d.di_nextents;
tip->i_d.di_nextents = tmp;
tmp = (__uint64_t) ip->i_d.di_format;
ip->i_d.di_format = tip->i_d.di_format;
tip->i_d.di_format = tmp;
/*
* The extents in the source inode could still contain speculative
* preallocation beyond EOF (e.g. the file is open but not modified
* while defrag is in progress). In that case, we need to copy over the
* number of delalloc blocks the data fork in the source inode is
* tracking beyond EOF so that when the fork is truncated away when the
* temporary inode is unlinked we don't underrun the i_delayed_blks
* counter on that inode.
*/
ASSERT(tip->i_delayed_blks == 0);
tip->i_delayed_blks = ip->i_delayed_blks;
ip->i_delayed_blks = 0;
src_log_flags = XFS_ILOG_CORE;
switch (ip->i_d.di_format) {
case XFS_DINODE_FMT_EXTENTS:
/* If the extents fit in the inode, fix the
* pointer. Otherwise it's already NULL or
* pointing to the extent.
*/
if (ip->i_d.di_nextents <= XFS_INLINE_EXTS) {
ifp->if_u1.if_extents =
ifp->if_u2.if_inline_ext;
}
src_log_flags |= XFS_ILOG_DEXT;
break;
case XFS_DINODE_FMT_BTREE:
src_log_flags |= XFS_ILOG_DBROOT;
break;
}
target_log_flags = XFS_ILOG_CORE;
switch (tip->i_d.di_format) {
case XFS_DINODE_FMT_EXTENTS:
/* If the extents fit in the inode, fix the
* pointer. Otherwise it's already NULL or
* pointing to the extent.
*/
if (tip->i_d.di_nextents <= XFS_INLINE_EXTS) {
tifp->if_u1.if_extents =
tifp->if_u2.if_inline_ext;
}
target_log_flags |= XFS_ILOG_DEXT;
break;
case XFS_DINODE_FMT_BTREE:
target_log_flags |= XFS_ILOG_DBROOT;
break;
}
xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL);
xfs_trans_ijoin(tp, tip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL);
xfs_trans_log_inode(tp, ip, src_log_flags);
xfs_trans_log_inode(tp, tip, target_log_flags);
/*
* If this is a synchronous mount, make sure that the
* transaction goes to disk before returning to the user.
*/
if (mp->m_flags & XFS_MOUNT_WSYNC)
xfs_trans_set_sync(tp);
error = xfs_trans_commit(tp, 0);
trace_xfs_swap_extent_after(ip, 0);
trace_xfs_swap_extent_after(tip, 1);
out:
kmem_free(tempifp);
return error;
out_unlock:
xfs_iunlock(ip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL);
xfs_iunlock(tip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL);
goto out;
out_trans_cancel:
xfs_trans_cancel(tp, 0);
goto out_unlock;
}
| gpl-2.0 |
jawad6233/bindu-kernel-base | drivers/rtc/rtc-ds1307.c | 4783 | 25593 | /*
* rtc-ds1307.c - RTC driver for some mostly-compatible I2C chips.
*
* Copyright (C) 2005 James Chapman (ds1337 core)
* Copyright (C) 2006 David Brownell
* Copyright (C) 2009 Matthias Fuchs (rx8025 support)
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/i2c.h>
#include <linux/string.h>
#include <linux/rtc.h>
#include <linux/bcd.h>
/*
* We can't determine type by probing, but if we expect pre-Linux code
* to have set the chip up as a clock (turning on the oscillator and
* setting the date and time), Linux can ignore the non-clock features.
* That's a natural job for a factory or repair bench.
*/
enum ds_type {
ds_1307,
ds_1337,
ds_1338,
ds_1339,
ds_1340,
ds_1388,
ds_3231,
m41t00,
mcp7941x,
rx_8025,
last_ds_type /* always last */
/* rs5c372 too? different address... */
};
/* RTC registers don't differ much, except for the century flag */
#define DS1307_REG_SECS 0x00 /* 00-59 */
# define DS1307_BIT_CH 0x80
# define DS1340_BIT_nEOSC 0x80
# define MCP7941X_BIT_ST 0x80
#define DS1307_REG_MIN 0x01 /* 00-59 */
#define DS1307_REG_HOUR 0x02 /* 00-23, or 1-12{am,pm} */
# define DS1307_BIT_12HR 0x40 /* in REG_HOUR */
# define DS1307_BIT_PM 0x20 /* in REG_HOUR */
# define DS1340_BIT_CENTURY_EN 0x80 /* in REG_HOUR */
# define DS1340_BIT_CENTURY 0x40 /* in REG_HOUR */
#define DS1307_REG_WDAY 0x03 /* 01-07 */
# define MCP7941X_BIT_VBATEN 0x08
#define DS1307_REG_MDAY 0x04 /* 01-31 */
#define DS1307_REG_MONTH 0x05 /* 01-12 */
# define DS1337_BIT_CENTURY 0x80 /* in REG_MONTH */
#define DS1307_REG_YEAR 0x06 /* 00-99 */
/*
* Other registers (control, status, alarms, trickle charge, NVRAM, etc)
* start at 7, and they differ a LOT. Only control and status matter for
* basic RTC date and time functionality; be careful using them.
*/
#define DS1307_REG_CONTROL 0x07 /* or ds1338 */
# define DS1307_BIT_OUT 0x80
# define DS1338_BIT_OSF 0x20
# define DS1307_BIT_SQWE 0x10
# define DS1307_BIT_RS1 0x02
# define DS1307_BIT_RS0 0x01
#define DS1337_REG_CONTROL 0x0e
# define DS1337_BIT_nEOSC 0x80
# define DS1339_BIT_BBSQI 0x20
# define DS3231_BIT_BBSQW 0x40 /* same as BBSQI */
# define DS1337_BIT_RS2 0x10
# define DS1337_BIT_RS1 0x08
# define DS1337_BIT_INTCN 0x04
# define DS1337_BIT_A2IE 0x02
# define DS1337_BIT_A1IE 0x01
#define DS1340_REG_CONTROL 0x07
# define DS1340_BIT_OUT 0x80
# define DS1340_BIT_FT 0x40
# define DS1340_BIT_CALIB_SIGN 0x20
# define DS1340_M_CALIBRATION 0x1f
#define DS1340_REG_FLAG 0x09
# define DS1340_BIT_OSF 0x80
#define DS1337_REG_STATUS 0x0f
# define DS1337_BIT_OSF 0x80
# define DS1337_BIT_A2I 0x02
# define DS1337_BIT_A1I 0x01
#define DS1339_REG_ALARM1_SECS 0x07
#define DS1339_REG_TRICKLE 0x10
#define RX8025_REG_CTRL1 0x0e
# define RX8025_BIT_2412 0x20
#define RX8025_REG_CTRL2 0x0f
# define RX8025_BIT_PON 0x10
# define RX8025_BIT_VDET 0x40
# define RX8025_BIT_XST 0x20
struct ds1307 {
u8 offset; /* register's offset */
u8 regs[11];
u16 nvram_offset;
struct bin_attribute *nvram;
enum ds_type type;
unsigned long flags;
#define HAS_NVRAM 0 /* bit 0 == sysfs file active */
#define HAS_ALARM 1 /* bit 1 == irq claimed */
struct i2c_client *client;
struct rtc_device *rtc;
struct work_struct work;
s32 (*read_block_data)(const struct i2c_client *client, u8 command,
u8 length, u8 *values);
s32 (*write_block_data)(const struct i2c_client *client, u8 command,
u8 length, const u8 *values);
};
struct chip_desc {
unsigned alarm:1;
u16 nvram_offset;
u16 nvram_size;
};
static const struct chip_desc chips[last_ds_type] = {
[ds_1307] = {
.nvram_offset = 8,
.nvram_size = 56,
},
[ds_1337] = {
.alarm = 1,
},
[ds_1338] = {
.nvram_offset = 8,
.nvram_size = 56,
},
[ds_1339] = {
.alarm = 1,
},
[ds_3231] = {
.alarm = 1,
},
[mcp7941x] = {
/* this is battery backed SRAM */
.nvram_offset = 0x20,
.nvram_size = 0x40,
},
};
static const struct i2c_device_id ds1307_id[] = {
{ "ds1307", ds_1307 },
{ "ds1337", ds_1337 },
{ "ds1338", ds_1338 },
{ "ds1339", ds_1339 },
{ "ds1388", ds_1388 },
{ "ds1340", ds_1340 },
{ "ds3231", ds_3231 },
{ "m41t00", m41t00 },
{ "mcp7941x", mcp7941x },
{ "pt7c4338", ds_1307 },
{ "rx8025", rx_8025 },
{ }
};
MODULE_DEVICE_TABLE(i2c, ds1307_id);
/*----------------------------------------------------------------------*/
#define BLOCK_DATA_MAX_TRIES 10
static s32 ds1307_read_block_data_once(const struct i2c_client *client,
u8 command, u8 length, u8 *values)
{
s32 i, data;
for (i = 0; i < length; i++) {
data = i2c_smbus_read_byte_data(client, command + i);
if (data < 0)
return data;
values[i] = data;
}
return i;
}
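/*
* Byte-at-a-time SMBus reads can race with the clock ticking between
* bytes, so the block is re-read (up to BLOCK_DATA_MAX_TRIES) until two
* consecutive passes return identical data.
*/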
static s32 ds1307_read_block_data(const struct i2c_client *client, u8 command,
u8 length, u8 *values)
{
u8 oldvalues[I2C_SMBUS_BLOCK_MAX];
s32 ret;
int tries = 0;
dev_dbg(&client->dev, "ds1307_read_block_data (length=%d)\n", length);
ret = ds1307_read_block_data_once(client, command, length, values);
if (ret < 0)
return ret;
do {
if (++tries > BLOCK_DATA_MAX_TRIES) {
dev_err(&client->dev,
"ds1307_read_block_data failed\n");
return -EIO;
}
memcpy(oldvalues, values, length);
ret = ds1307_read_block_data_once(client, command, length,
values);
if (ret < 0)
return ret;
} while (memcmp(oldvalues, values, length));
return length;
}
static s32 ds1307_write_block_data(const struct i2c_client *client, u8 command,
u8 length, const u8 *values)
{
u8 currvalues[I2C_SMBUS_BLOCK_MAX];
int tries = 0;
dev_dbg(&client->dev, "ds1307_write_block_data (length=%d)\n", length);
do {
s32 i, ret;
if (++tries > BLOCK_DATA_MAX_TRIES) {
dev_err(&client->dev,
"ds1307_write_block_data failed\n");
return -EIO;
}
for (i = 0; i < length; i++) {
ret = i2c_smbus_write_byte_data(client, command + i,
values[i]);
if (ret < 0)
return ret;
}
ret = ds1307_read_block_data_once(client, command, length,
currvalues);
if (ret < 0)
return ret;
} while (memcmp(currvalues, values, length));
return length;
}
/*----------------------------------------------------------------------*/
/*
* The IRQ logic includes a "real" handler running in IRQ context just
* long enough to schedule this workqueue entry. We need a task context
* to talk to the RTC, since I2C I/O calls require that; and disable the
* IRQ until we clear its status on the chip, so that this handler can
* work with any type of triggering (not just falling edge).
*
* The ds1337 and ds1339 both have two alarms, but we only use the first
* one (with a "seconds" field). For ds1337 we expect nINTA is our alarm
* signal; ds1339 chips have only one alarm signal.
*/
static void ds1307_work(struct work_struct *work)
{
struct ds1307 *ds1307;
struct i2c_client *client;
struct mutex *lock;
int stat, control;
ds1307 = container_of(work, struct ds1307, work);
client = ds1307->client;
lock = &ds1307->rtc->ops_lock;
mutex_lock(lock);
stat = i2c_smbus_read_byte_data(client, DS1337_REG_STATUS);
if (stat < 0)
goto out;
if (stat & DS1337_BIT_A1I) {
stat &= ~DS1337_BIT_A1I;
i2c_smbus_write_byte_data(client, DS1337_REG_STATUS, stat);
control = i2c_smbus_read_byte_data(client, DS1337_REG_CONTROL);
if (control < 0)
goto out;
control &= ~DS1337_BIT_A1IE;
i2c_smbus_write_byte_data(client, DS1337_REG_CONTROL, control);
rtc_update_irq(ds1307->rtc, 1, RTC_AF | RTC_IRQF);
}
out:
if (test_bit(HAS_ALARM, &ds1307->flags))
enable_irq(client->irq);
mutex_unlock(lock);
}
static irqreturn_t ds1307_irq(int irq, void *dev_id)
{
struct i2c_client *client = dev_id;
struct ds1307 *ds1307 = i2c_get_clientdata(client);
disable_irq_nosync(irq);
schedule_work(&ds1307->work);
return IRQ_HANDLED;
}
/*----------------------------------------------------------------------*/
static int ds1307_get_time(struct device *dev, struct rtc_time *t)
{
struct ds1307 *ds1307 = dev_get_drvdata(dev);
int tmp;
/* read the RTC date and time registers all at once */
tmp = ds1307->read_block_data(ds1307->client,
ds1307->offset, 7, ds1307->regs);
if (tmp != 7) {
dev_err(dev, "%s error %d\n", "read", tmp);
return -EIO;
}
dev_dbg(dev, "%s: %02x %02x %02x %02x %02x %02x %02x\n",
"read",
ds1307->regs[0], ds1307->regs[1],
ds1307->regs[2], ds1307->regs[3],
ds1307->regs[4], ds1307->regs[5],
ds1307->regs[6]);
t->tm_sec = bcd2bin(ds1307->regs[DS1307_REG_SECS] & 0x7f);
t->tm_min = bcd2bin(ds1307->regs[DS1307_REG_MIN] & 0x7f);
tmp = ds1307->regs[DS1307_REG_HOUR] & 0x3f;
t->tm_hour = bcd2bin(tmp);
t->tm_wday = bcd2bin(ds1307->regs[DS1307_REG_WDAY] & 0x07) - 1;
t->tm_mday = bcd2bin(ds1307->regs[DS1307_REG_MDAY] & 0x3f);
tmp = ds1307->regs[DS1307_REG_MONTH] & 0x1f;
t->tm_mon = bcd2bin(tmp) - 1;
/* assume 20YY not 19YY, and ignore DS1337_BIT_CENTURY */
t->tm_year = bcd2bin(ds1307->regs[DS1307_REG_YEAR]) + 100;
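/* tm_year counts years since 1900, so the two-digit BCD year 00-99
* maps onto 2000-2099 here. */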
dev_dbg(dev, "%s secs=%d, mins=%d, "
"hours=%d, mday=%d, mon=%d, year=%d, wday=%d\n",
"read", t->tm_sec, t->tm_min,
t->tm_hour, t->tm_mday,
t->tm_mon, t->tm_year, t->tm_wday);
/* initial clock setting can be undefined */
return rtc_valid_tm(t);
}
static int ds1307_set_time(struct device *dev, struct rtc_time *t)
{
struct ds1307 *ds1307 = dev_get_drvdata(dev);
int result;
int tmp;
u8 *buf = ds1307->regs;
dev_dbg(dev, "%s secs=%d, mins=%d, "
"hours=%d, mday=%d, mon=%d, year=%d, wday=%d\n",
"write", t->tm_sec, t->tm_min,
t->tm_hour, t->tm_mday,
t->tm_mon, t->tm_year, t->tm_wday);
buf[DS1307_REG_SECS] = bin2bcd(t->tm_sec);
buf[DS1307_REG_MIN] = bin2bcd(t->tm_min);
buf[DS1307_REG_HOUR] = bin2bcd(t->tm_hour);
buf[DS1307_REG_WDAY] = bin2bcd(t->tm_wday + 1);
buf[DS1307_REG_MDAY] = bin2bcd(t->tm_mday);
buf[DS1307_REG_MONTH] = bin2bcd(t->tm_mon + 1);
/* assume 20YY not 19YY */
tmp = t->tm_year - 100;
buf[DS1307_REG_YEAR] = bin2bcd(tmp);
switch (ds1307->type) {
case ds_1337:
case ds_1339:
case ds_3231:
buf[DS1307_REG_MONTH] |= DS1337_BIT_CENTURY;
break;
case ds_1340:
buf[DS1307_REG_HOUR] |= DS1340_BIT_CENTURY_EN
| DS1340_BIT_CENTURY;
break;
case mcp7941x:
/*
* these bits were cleared when preparing the date/time
* values and need to be set again before writing the
* buffer out to the device.
*/
buf[DS1307_REG_SECS] |= MCP7941X_BIT_ST;
buf[DS1307_REG_WDAY] |= MCP7941X_BIT_VBATEN;
break;
default:
break;
}
dev_dbg(dev, "%s: %02x %02x %02x %02x %02x %02x %02x\n",
"write", buf[0], buf[1], buf[2], buf[3],
buf[4], buf[5], buf[6]);
result = ds1307->write_block_data(ds1307->client,
ds1307->offset, 7, buf);
if (result < 0) {
dev_err(dev, "%s error %d\n", "write", result);
return result;
}
return 0;
}
static int ds1337_read_alarm(struct device *dev, struct rtc_wkalrm *t)
{
struct i2c_client *client = to_i2c_client(dev);
struct ds1307 *ds1307 = i2c_get_clientdata(client);
int ret;
if (!test_bit(HAS_ALARM, &ds1307->flags))
return -EINVAL;
/* read all ALARM1, ALARM2, and status registers at once */
ret = ds1307->read_block_data(client,
DS1339_REG_ALARM1_SECS, 9, ds1307->regs);
if (ret != 9) {
dev_err(dev, "%s error %d\n", "alarm read", ret);
return -EIO;
}
dev_dbg(dev, "%s: %02x %02x %02x %02x, %02x %02x %02x, %02x %02x\n",
"alarm read",
ds1307->regs[0], ds1307->regs[1],
ds1307->regs[2], ds1307->regs[3],
ds1307->regs[4], ds1307->regs[5],
ds1307->regs[6], ds1307->regs[7],
ds1307->regs[8]);
/*
* report alarm time (ALARM1); assume 24 hour and day-of-month modes,
* and that all four fields are checked for matches
*/
t->time.tm_sec = bcd2bin(ds1307->regs[0] & 0x7f);
t->time.tm_min = bcd2bin(ds1307->regs[1] & 0x7f);
t->time.tm_hour = bcd2bin(ds1307->regs[2] & 0x3f);
t->time.tm_mday = bcd2bin(ds1307->regs[3] & 0x3f);
t->time.tm_mon = -1;
t->time.tm_year = -1;
t->time.tm_wday = -1;
t->time.tm_yday = -1;
t->time.tm_isdst = -1;
/* ... and status */
t->enabled = !!(ds1307->regs[7] & DS1337_BIT_A1IE);
t->pending = !!(ds1307->regs[8] & DS1337_BIT_A1I);
dev_dbg(dev, "%s secs=%d, mins=%d, "
"hours=%d, mday=%d, enabled=%d, pending=%d\n",
"alarm read", t->time.tm_sec, t->time.tm_min,
t->time.tm_hour, t->time.tm_mday,
t->enabled, t->pending);
return 0;
}
static int ds1337_set_alarm(struct device *dev, struct rtc_wkalrm *t)
{
struct i2c_client *client = to_i2c_client(dev);
struct ds1307 *ds1307 = i2c_get_clientdata(client);
unsigned char *buf = ds1307->regs;
u8 control, status;
int ret;
if (!test_bit(HAS_ALARM, &ds1307->flags))
return -EINVAL;
dev_dbg(dev, "%s secs=%d, mins=%d, "
"hours=%d, mday=%d, enabled=%d, pending=%d\n",
"alarm set", t->time.tm_sec, t->time.tm_min,
t->time.tm_hour, t->time.tm_mday,
t->enabled, t->pending);
/* read current status of both alarms and the chip */
ret = ds1307->read_block_data(client,
DS1339_REG_ALARM1_SECS, 9, buf);
if (ret != 9) {
dev_err(dev, "%s error %d\n", "alarm write", ret);
return -EIO;
}
control = ds1307->regs[7];
status = ds1307->regs[8];
dev_dbg(dev, "%s: %02x %02x %02x %02x, %02x %02x %02x, %02x %02x\n",
"alarm set (old status)",
ds1307->regs[0], ds1307->regs[1],
ds1307->regs[2], ds1307->regs[3],
ds1307->regs[4], ds1307->regs[5],
ds1307->regs[6], control, status);
/* set ALARM1, using 24 hour and day-of-month modes */
buf[0] = bin2bcd(t->time.tm_sec);
buf[1] = bin2bcd(t->time.tm_min);
buf[2] = bin2bcd(t->time.tm_hour);
buf[3] = bin2bcd(t->time.tm_mday);
/* set ALARM2 to non-garbage */
buf[4] = 0;
buf[5] = 0;
buf[6] = 0;
/* optionally enable ALARM1 */
buf[7] = control & ~(DS1337_BIT_A1IE | DS1337_BIT_A2IE);
if (t->enabled) {
dev_dbg(dev, "alarm IRQ armed\n");
buf[7] |= DS1337_BIT_A1IE; /* only ALARM1 is used */
}
buf[8] = status & ~(DS1337_BIT_A1I | DS1337_BIT_A2I);
ret = ds1307->write_block_data(client,
DS1339_REG_ALARM1_SECS, 9, buf);
if (ret < 0) {
dev_err(dev, "can't set alarm time\n");
return ret;
}
return 0;
}
static int ds1307_alarm_irq_enable(struct device *dev, unsigned int enabled)
{
struct i2c_client *client = to_i2c_client(dev);
struct ds1307 *ds1307 = i2c_get_clientdata(client);
int ret;
if (!test_bit(HAS_ALARM, &ds1307->flags))
return -ENOTTY;
ret = i2c_smbus_read_byte_data(client, DS1337_REG_CONTROL);
if (ret < 0)
return ret;
if (enabled)
ret |= DS1337_BIT_A1IE;
else
ret &= ~DS1337_BIT_A1IE;
ret = i2c_smbus_write_byte_data(client, DS1337_REG_CONTROL, ret);
if (ret < 0)
return ret;
return 0;
}
static const struct rtc_class_ops ds13xx_rtc_ops = {
.read_time = ds1307_get_time,
.set_time = ds1307_set_time,
.read_alarm = ds1337_read_alarm,
.set_alarm = ds1337_set_alarm,
.alarm_irq_enable = ds1307_alarm_irq_enable,
};
/*----------------------------------------------------------------------*/
static ssize_t
ds1307_nvram_read(struct file *filp, struct kobject *kobj,
struct bin_attribute *attr,
char *buf, loff_t off, size_t count)
{
struct i2c_client *client;
struct ds1307 *ds1307;
int result;
client = kobj_to_i2c_client(kobj);
ds1307 = i2c_get_clientdata(client);
if (unlikely(off >= ds1307->nvram->size))
return 0;
if ((off + count) > ds1307->nvram->size)
count = ds1307->nvram->size - off;
if (unlikely(!count))
return count;
result = ds1307->read_block_data(client, ds1307->nvram_offset + off,
count, buf);
if (result < 0)
dev_err(&client->dev, "%s error %d\n", "nvram read", result);
return result;
}
static ssize_t
ds1307_nvram_write(struct file *filp, struct kobject *kobj,
struct bin_attribute *attr,
char *buf, loff_t off, size_t count)
{
struct i2c_client *client;
struct ds1307 *ds1307;
int result;
client = kobj_to_i2c_client(kobj);
ds1307 = i2c_get_clientdata(client);
if (unlikely(off >= ds1307->nvram->size))
return -EFBIG;
if ((off + count) > ds1307->nvram->size)
count = ds1307->nvram->size - off;
if (unlikely(!count))
return count;
result = ds1307->write_block_data(client, ds1307->nvram_offset + off,
count, buf);
if (result < 0) {
dev_err(&client->dev, "%s error %d\n", "nvram write", result);
return result;
}
return count;
}
/*----------------------------------------------------------------------*/
static int __devinit ds1307_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
struct ds1307 *ds1307;
int err = -ENODEV;
int tmp;
const struct chip_desc *chip = &chips[id->driver_data];
struct i2c_adapter *adapter = to_i2c_adapter(client->dev.parent);
int want_irq = false;
unsigned char *buf;
static const int bbsqi_bitpos[] = {
[ds_1337] = 0,
[ds_1339] = DS1339_BIT_BBSQI,
[ds_3231] = DS3231_BIT_BBSQW,
};
if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA)
&& !i2c_check_functionality(adapter, I2C_FUNC_SMBUS_I2C_BLOCK))
return -EIO;
ds1307 = kzalloc(sizeof(struct ds1307), GFP_KERNEL);
if (!ds1307)
return -ENOMEM;
i2c_set_clientdata(client, ds1307);
ds1307->client = client;
ds1307->type = id->driver_data;
ds1307->offset = 0;
buf = ds1307->regs;
if (i2c_check_functionality(adapter, I2C_FUNC_SMBUS_I2C_BLOCK)) {
ds1307->read_block_data = i2c_smbus_read_i2c_block_data;
ds1307->write_block_data = i2c_smbus_write_i2c_block_data;
} else {
ds1307->read_block_data = ds1307_read_block_data;
ds1307->write_block_data = ds1307_write_block_data;
}
switch (ds1307->type) {
case ds_1337:
case ds_1339:
case ds_3231:
/* get registers that the "rtc" read below won't read... */
tmp = ds1307->read_block_data(ds1307->client,
DS1337_REG_CONTROL, 2, buf);
if (tmp != 2) {
pr_debug("read error %d\n", tmp);
err = -EIO;
goto exit_free;
}
/* oscillator off? turn it on, so clock can tick. */
if (ds1307->regs[0] & DS1337_BIT_nEOSC)
ds1307->regs[0] &= ~DS1337_BIT_nEOSC;
/*
* Using IRQ? Disable the square wave and both alarms.
* For some variants, be sure alarms can trigger when we're
* running on Vbackup (BBSQI/BBSQW)
*/
if (ds1307->client->irq > 0 && chip->alarm) {
INIT_WORK(&ds1307->work, ds1307_work);
ds1307->regs[0] |= DS1337_BIT_INTCN
| bbsqi_bitpos[ds1307->type];
ds1307->regs[0] &= ~(DS1337_BIT_A2IE | DS1337_BIT_A1IE);
want_irq = true;
}
i2c_smbus_write_byte_data(client, DS1337_REG_CONTROL,
ds1307->regs[0]);
/* oscillator fault? clear flag, and warn */
if (ds1307->regs[1] & DS1337_BIT_OSF) {
i2c_smbus_write_byte_data(client, DS1337_REG_STATUS,
ds1307->regs[1] & ~DS1337_BIT_OSF);
dev_warn(&client->dev, "SET TIME!\n");
}
break;
case rx_8025:
tmp = i2c_smbus_read_i2c_block_data(ds1307->client,
RX8025_REG_CTRL1 << 4 | 0x08, 2, buf);
if (tmp != 2) {
pr_debug("read error %d\n", tmp);
err = -EIO;
goto exit_free;
}
/* oscillator off? turn it on, so clock can tick. */
if (!(ds1307->regs[1] & RX8025_BIT_XST)) {
ds1307->regs[1] |= RX8025_BIT_XST;
i2c_smbus_write_byte_data(client,
RX8025_REG_CTRL2 << 4 | 0x08,
ds1307->regs[1]);
dev_warn(&client->dev,
"oscillator stop detected - SET TIME!\n");
}
if (ds1307->regs[1] & RX8025_BIT_PON) {
ds1307->regs[1] &= ~RX8025_BIT_PON;
i2c_smbus_write_byte_data(client,
RX8025_REG_CTRL2 << 4 | 0x08,
ds1307->regs[1]);
dev_warn(&client->dev, "power-on detected\n");
}
if (ds1307->regs[1] & RX8025_BIT_VDET) {
ds1307->regs[1] &= ~RX8025_BIT_VDET;
i2c_smbus_write_byte_data(client,
RX8025_REG_CTRL2 << 4 | 0x08,
ds1307->regs[1]);
dev_warn(&client->dev, "voltage drop detected\n");
}
/* make sure we are running in 24hour mode */
if (!(ds1307->regs[0] & RX8025_BIT_2412)) {
u8 hour;
/* switch to 24 hour mode */
i2c_smbus_write_byte_data(client,
RX8025_REG_CTRL1 << 4 | 0x08,
ds1307->regs[0] |
RX8025_BIT_2412);
tmp = i2c_smbus_read_i2c_block_data(ds1307->client,
RX8025_REG_CTRL1 << 4 | 0x08, 2, buf);
if (tmp != 2) {
pr_debug("read error %d\n", tmp);
err = -EIO;
goto exit_free;
}
/* correct hour */
hour = bcd2bin(ds1307->regs[DS1307_REG_HOUR]);
if (hour == 12)
hour = 0;
if (ds1307->regs[DS1307_REG_HOUR] & DS1307_BIT_PM)
hour += 12;
i2c_smbus_write_byte_data(client,
DS1307_REG_HOUR << 4 | 0x08,
hour);
}
break;
case ds_1388:
ds1307->offset = 1; /* Seconds starts at 1 */
break;
default:
break;
}
read_rtc:
/* read RTC registers */
tmp = ds1307->read_block_data(ds1307->client, ds1307->offset, 8, buf);
if (tmp != 8) {
pr_debug("read error %d\n", tmp);
err = -EIO;
goto exit_free;
}
/*
* minimal sanity checking; some chips (like DS1340) don't
* specify the extra bits as must-be-zero, but there are
* still a few values that are clearly out-of-range.
*/
tmp = ds1307->regs[DS1307_REG_SECS];
switch (ds1307->type) {
case ds_1307:
case m41t00:
/* clock halted? turn it on, so clock can tick. */
if (tmp & DS1307_BIT_CH) {
i2c_smbus_write_byte_data(client, DS1307_REG_SECS, 0);
dev_warn(&client->dev, "SET TIME!\n");
goto read_rtc;
}
break;
case ds_1338:
/* clock halted? turn it on, so clock can tick. */
if (tmp & DS1307_BIT_CH)
i2c_smbus_write_byte_data(client, DS1307_REG_SECS, 0);
/* oscillator fault? clear flag, and warn */
if (ds1307->regs[DS1307_REG_CONTROL] & DS1338_BIT_OSF) {
i2c_smbus_write_byte_data(client, DS1307_REG_CONTROL,
ds1307->regs[DS1307_REG_CONTROL]
& ~DS1338_BIT_OSF);
dev_warn(&client->dev, "SET TIME!\n");
goto read_rtc;
}
break;
case ds_1340:
/* clock halted? turn it on, so clock can tick. */
if (tmp & DS1340_BIT_nEOSC)
i2c_smbus_write_byte_data(client, DS1307_REG_SECS, 0);
tmp = i2c_smbus_read_byte_data(client, DS1340_REG_FLAG);
if (tmp < 0) {
pr_debug("read error %d\n", tmp);
err = -EIO;
goto exit_free;
}
/* oscillator fault? clear flag, and warn */
if (tmp & DS1340_BIT_OSF) {
i2c_smbus_write_byte_data(client, DS1340_REG_FLAG, 0);
dev_warn(&client->dev, "SET TIME!\n");
}
break;
case mcp7941x:
/* make sure that the backup battery is enabled */
if (!(ds1307->regs[DS1307_REG_WDAY] & MCP7941X_BIT_VBATEN)) {
i2c_smbus_write_byte_data(client, DS1307_REG_WDAY,
ds1307->regs[DS1307_REG_WDAY]
| MCP7941X_BIT_VBATEN);
}
/* clock halted? turn it on, so clock can tick. */
if (!(tmp & MCP7941X_BIT_ST)) {
i2c_smbus_write_byte_data(client, DS1307_REG_SECS,
MCP7941X_BIT_ST);
dev_warn(&client->dev, "SET TIME!\n");
goto read_rtc;
}
break;
default:
break;
}
tmp = ds1307->regs[DS1307_REG_HOUR];
switch (ds1307->type) {
case ds_1340:
case m41t00:
/*
* NOTE: ignores century bits; fix before deploying
* systems that will run through year 2100.
*/
break;
case rx_8025:
break;
default:
if (!(tmp & DS1307_BIT_12HR))
break;
/*
* Be sure we're in 24 hour mode. Multi-master systems
* take note...
*/
tmp = bcd2bin(tmp & 0x1f);
if (tmp == 12)
tmp = 0;
if (ds1307->regs[DS1307_REG_HOUR] & DS1307_BIT_PM)
tmp += 12;
i2c_smbus_write_byte_data(client,
ds1307->offset + DS1307_REG_HOUR,
bin2bcd(tmp));
}
ds1307->rtc = rtc_device_register(client->name, &client->dev,
&ds13xx_rtc_ops, THIS_MODULE);
if (IS_ERR(ds1307->rtc)) {
err = PTR_ERR(ds1307->rtc);
dev_err(&client->dev,
"unable to register the class device\n");
goto exit_free;
}
if (want_irq) {
err = request_irq(client->irq, ds1307_irq, IRQF_SHARED,
ds1307->rtc->name, client);
if (err) {
dev_err(&client->dev,
"unable to request IRQ!\n");
goto exit_irq;
}
device_set_wakeup_capable(&client->dev, 1);
set_bit(HAS_ALARM, &ds1307->flags);
dev_dbg(&client->dev, "got IRQ %d\n", client->irq);
}
if (chip->nvram_size) {
ds1307->nvram = kzalloc(sizeof(struct bin_attribute),
GFP_KERNEL);
if (!ds1307->nvram) {
err = -ENOMEM;
goto exit_nvram;
}
ds1307->nvram->attr.name = "nvram";
ds1307->nvram->attr.mode = S_IRUGO | S_IWUSR;
sysfs_bin_attr_init(ds1307->nvram);
ds1307->nvram->read = ds1307_nvram_read;
ds1307->nvram->write = ds1307_nvram_write;
ds1307->nvram->size = chip->nvram_size;
ds1307->nvram_offset = chip->nvram_offset;
err = sysfs_create_bin_file(&client->dev.kobj, ds1307->nvram);
if (err) {
kfree(ds1307->nvram);
goto exit_nvram;
}
set_bit(HAS_NVRAM, &ds1307->flags);
dev_info(&client->dev, "%zu bytes nvram\n", ds1307->nvram->size);
}
return 0;
exit_nvram:
exit_irq:
rtc_device_unregister(ds1307->rtc);
exit_free:
kfree(ds1307);
return err;
}
static int __devexit ds1307_remove(struct i2c_client *client)
{
struct ds1307 *ds1307 = i2c_get_clientdata(client);
if (test_and_clear_bit(HAS_ALARM, &ds1307->flags)) {
free_irq(client->irq, client);
cancel_work_sync(&ds1307->work);
}
if (test_and_clear_bit(HAS_NVRAM, &ds1307->flags)) {
sysfs_remove_bin_file(&client->dev.kobj, ds1307->nvram);
kfree(ds1307->nvram);
}
rtc_device_unregister(ds1307->rtc);
kfree(ds1307);
return 0;
}
static struct i2c_driver ds1307_driver = {
.driver = {
.name = "rtc-ds1307",
.owner = THIS_MODULE,
},
.probe = ds1307_probe,
.remove = __devexit_p(ds1307_remove),
.id_table = ds1307_id,
};
module_i2c_driver(ds1307_driver);
MODULE_DESCRIPTION("RTC driver for DS1307 and similar chips");
MODULE_LICENSE("GPL");
| gpl-2.0 |
mightysween/vs980-kernel | arch/mips/bcm47xx/prom.c | 7855 | 5194 | /*
* Copyright (C) 2004 Florian Schirmer <jolt@tuxbox.org>
* Copyright (C) 2007 Aurelien Jarno <aurelien@aurel32.net>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
* THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
* NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
* USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
* ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/init.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <asm/bootinfo.h>
#include <asm/fw/cfe/cfe_api.h>
#include <asm/fw/cfe/cfe_error.h>
static int cfe_cons_handle;
const char *get_system_type(void)
{
return "Broadcom BCM47XX";
}
void prom_putchar(char c)
{
while (cfe_write(cfe_cons_handle, &c, 1) == 0)
;
}
static __init void prom_init_cfe(void)
{
uint32_t cfe_ept;
uint32_t cfe_handle;
uint32_t cfe_eptseal;
int argc = fw_arg0;
char **envp = (char **) fw_arg2;
int *prom_vec = (int *) fw_arg3;
/*
* Check if a loader was used; if NOT, the 4 arguments are
* what CFE gives us (handle, 0, EPT and EPTSEAL)
*/
if (argc < 0) {
cfe_handle = (uint32_t)argc;
cfe_ept = (uint32_t)envp;
cfe_eptseal = (uint32_t)prom_vec;
} else {
if ((int)prom_vec < 0) {
/*
* Old loader; all it gives us is the handle,
* so use the "known" entrypoint and assume
* the seal.
*/
cfe_handle = (uint32_t)prom_vec;
cfe_ept = 0xBFC00500;
cfe_eptseal = CFE_EPTSEAL;
} else {
/*
* Newer loaders bundle the handle/ept/eptseal
* Note: prom_vec is in the loader's useg
* which is still alive in the TLB.
*/
cfe_handle = prom_vec[0];
cfe_ept = prom_vec[2];
cfe_eptseal = prom_vec[3];
}
}
if (cfe_eptseal != CFE_EPTSEAL) {
/* too early for panic to do any good */
printk(KERN_ERR "CFE's entrypoint seal doesn't match.");
while (1) ;
}
cfe_init(cfe_handle, cfe_ept);
}
static __init void prom_init_console(void)
{
/* Initialize CFE console */
cfe_cons_handle = cfe_getstdhandle(CFE_STDHANDLE_CONSOLE);
}
static __init void prom_init_cmdline(void)
{
static char buf[COMMAND_LINE_SIZE] __initdata;
/* Get the kernel command line from CFE */
if (cfe_getenv("LINUX_CMDLINE", buf, COMMAND_LINE_SIZE) >= 0) {
buf[COMMAND_LINE_SIZE - 1] = 0;
strcpy(arcs_cmdline, buf);
}
/* Force a console handover by adding a console= argument if needed,
* as CFE is not available anymore later in the boot process. */
if ((strstr(arcs_cmdline, "console=")) == NULL) {
/* Try to read the default serial port used by CFE */
if ((cfe_getenv("BOOT_CONSOLE", buf, COMMAND_LINE_SIZE) < 0)
|| (strncmp("uart", buf, 4)))
/* Default to uart0 */
strcpy(buf, "uart0");
/* Compute the new command line */
snprintf(arcs_cmdline, COMMAND_LINE_SIZE, "%s console=ttyS%c,115200",
arcs_cmdline, buf[4]);
}
}
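/*
 * Worked example, not part of the boot code: with CFE reporting
 * BOOT_CONSOLE="uart1" and no console= option on the kernel command line,
 * the snprintf() above appends " console=ttyS1,115200" - the trailing
 * digit of the CFE uart name selects the ttyS index. A minimal
 * stand-alone equivalent (buffer names and sizes are hypothetical):
 *
 *	char cons[8] = "uart1";
 *	char out[32];
 *
 *	snprintf(out, sizeof(out), "console=ttyS%c,115200", cons[4]);
 *	// out now holds "console=ttyS1,115200"
 */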
static __init void prom_init_mem(void)
{
unsigned long mem;
unsigned long max;
/* Figure out memory size by finding aliases.
*
* We should theoretically use the mapping from CFE using cfe_enummem().
* However, as the BCM47XX is mostly used on low-memory systems, we
* want to reuse the memory used by CFE (around 4MB). That means the cfe_*
* functions stop working at some point during boot, so we should only
* call them at the very beginning of the boot process.
*
* BCM47XX uses 128MB for addressing the RAM. If the system contains
* less than that amount of RAM, it remaps the RAM more often into the
* available space.
* Accessing memory beyond 128MB will cause an exception.
* max contains the biggest possible address supported by the platform.
* If the loop below would have to probe above that address, we assume
* 128MB of RAM.
*/
max = ((unsigned long)(prom_init) | ((128 << 20) - 1));
for (mem = (1 << 20); mem < (128 << 20); mem += (1 << 20)) {
if (((unsigned long)(prom_init) + mem) > max) {
mem = (128 << 20);
printk(KERN_DEBUG "assume 128MB RAM\n");
break;
}
if (*(unsigned long *)((unsigned long)(prom_init) + mem) ==
*(unsigned long *)(prom_init))
break;
}
add_memory_region(0, mem, BOOT_MEM_RAM);
}
void __init prom_init(void)
{
prom_init_cfe();
prom_init_console();
prom_init_cmdline();
prom_init_mem();
}
void __init prom_free_prom_memory(void)
{
}
| gpl-2.0 |
h8rift/android_kernel_htc_msm8960_evita-h8x | arch/x86/boot/main.c | 8111 | 4206 | /* -*- linux-c -*- ------------------------------------------------------- *
*
* Copyright (C) 1991, 1992 Linus Torvalds
* Copyright 2007 rPath, Inc. - All Rights Reserved
* Copyright 2009 Intel Corporation; author H. Peter Anvin
*
* This file is part of the Linux kernel, and is made available under
* the terms of the GNU General Public License version 2.
*
* ----------------------------------------------------------------------- */
/*
* Main module for the real-mode kernel code
*/
#include "boot.h"
struct boot_params boot_params __attribute__((aligned(16)));
char *HEAP = _end;
char *heap_end = _end; /* Default end of heap = no heap */
/*
* Copy the header into the boot parameter block. Since this
* screws up the old-style command line protocol, adjust by
* filling in the new-style command line pointer instead.
*/
static void copy_boot_params(void)
{
struct old_cmdline {
u16 cl_magic;
u16 cl_offset;
};
const struct old_cmdline * const oldcmd =
(const struct old_cmdline *)OLD_CL_ADDRESS;
BUILD_BUG_ON(sizeof boot_params != 4096);
memcpy(&boot_params.hdr, &hdr, sizeof hdr);
if (!boot_params.hdr.cmd_line_ptr &&
oldcmd->cl_magic == OLD_CL_MAGIC) {
/* Old-style command line protocol. */
u16 cmdline_seg;
/* Figure out if the command line falls in the region
of memory that an old kernel would have copied up
to 0x90000... */
if (oldcmd->cl_offset < boot_params.hdr.setup_move_size)
cmdline_seg = ds();
else
cmdline_seg = 0x9000;
boot_params.hdr.cmd_line_ptr =
(cmdline_seg << 4) + oldcmd->cl_offset;
}
}
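/*
 * Illustrative sketch, not part of the boot code: the old-style command
 * line pointer computed above is the usual real-mode linear address,
 * (segment << 4) + offset. The helper name and the example values are
 * hypothetical.
 */
static inline u32 real_mode_linear(u16 seg, u16 off)
{
	return ((u32)seg << 4) + off;	/* e.g. 0x9000:0x0022 -> 0x90022 */
}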
/*
* Set the keyboard repeat rate to maximum. Unclear why this
* is done here; this might be possible to kill off as stale code.
*/
static void keyboard_set_repeat(void)
{
struct biosregs ireg;
initregs(&ireg);
ireg.ax = 0x0305;
intcall(0x16, &ireg, NULL);
}
/*
* Get Intel SpeedStep (IST) information.
*/
static void query_ist(void)
{
struct biosregs ireg, oreg;
/* Some older BIOSes apparently crash on this call, so filter
it from machines too old to have SpeedStep at all. */
if (cpu.level < 6)
return;
initregs(&ireg);
ireg.ax = 0xe980; /* IST Support */
ireg.edx = 0x47534943; /* Request value */
intcall(0x15, &ireg, &oreg);
boot_params.ist_info.signature = oreg.eax;
boot_params.ist_info.command = oreg.ebx;
boot_params.ist_info.event = oreg.ecx;
boot_params.ist_info.perf_level = oreg.edx;
}
/*
* Tell the BIOS what CPU mode we intend to run in.
*/
static void set_bios_mode(void)
{
#ifdef CONFIG_X86_64
struct biosregs ireg;
initregs(&ireg);
ireg.ax = 0xec00;
ireg.bx = 2;
intcall(0x15, &ireg, NULL);
#endif
}
static void init_heap(void)
{
char *stack_end;
if (boot_params.hdr.loadflags & CAN_USE_HEAP) {
asm("leal %P1(%%esp),%0"
: "=r" (stack_end) : "i" (-STACK_SIZE));
heap_end = (char *)
((size_t)boot_params.hdr.heap_end_ptr + 0x200);
if (heap_end > stack_end)
heap_end = stack_end;
} else {
/* Boot protocol 2.00 only, no heap available */
puts("WARNING: Ancient bootloader, some functionality "
"may be limited!\n");
}
}
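/*
 * Worked example, not part of the boot code: per the boot protocol,
 * heap_end_ptr is the offset of the end of the setup heap/stack relative
 * to the start of the real-mode code, minus 0x200 - which is why 0x200 is
 * added back above. With a hypothetical heap_end_ptr of 0xde00, heap_end
 * becomes offset 0xe000 within the real-mode segment, unless the stack
 * (esp - STACK_SIZE) ends lower, in which case the heap is clamped to the
 * stack end.
 */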
void main(void)
{
/* First, copy the boot header into the "zeropage" */
copy_boot_params();
/* Initialize the early-boot console */
console_init();
if (cmdline_find_option_bool("debug"))
puts("early console in setup code\n");
/* End of heap check */
init_heap();
/* Make sure we have all the proper CPU support */
if (validate_cpu()) {
puts("Unable to boot - please use a kernel appropriate "
"for your CPU.\n");
die();
}
/* Tell the BIOS what CPU mode we intend to run in. */
set_bios_mode();
/* Detect memory layout */
detect_memory();
/* Set keyboard repeat rate (why?) */
keyboard_set_repeat();
/* Query MCA information */
query_mca();
/* Query Intel SpeedStep (IST) information */
query_ist();
/* Query APM information */
#if defined(CONFIG_APM) || defined(CONFIG_APM_MODULE)
query_apm_bios();
#endif
/* Query EDD information */
#if defined(CONFIG_EDD) || defined(CONFIG_EDD_MODULE)
query_edd();
#endif
/* Set the video mode */
set_video();
/* Do the last things and invoke protected mode */
go_to_protected_mode();
}
| gpl-2.0 |
toastcfh/htc-8660-kernel | drivers/net/ixp2000/enp2611.c | 9647 | 6201 | /*
* IXP2400 MSF network device driver for the Radisys ENP2611
* Copyright (C) 2004, 2005 Lennert Buytenhek <buytenh@wantstofly.org>
* Dedicated to Marija Kulikova.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/init.h>
#include <linux/moduleparam.h>
#include <asm/hardware/uengine.h>
#include <asm/mach-types.h>
#include <asm/io.h>
#include "ixpdev.h"
#include "caleb.h"
#include "ixp2400-msf.h"
#include "pm3386.h"
/***********************************************************************
* The Radisys ENP2611 is a PCI form factor board with three SFP GBIC
* slots, connected via two PMC/Sierra 3386s and an SPI-3 bridge FPGA
* to the IXP2400.
*
* +-------------+
* SFP GBIC #0 ---+ | +---------+
* | PM3386 #0 +-------+ |
* SFP GBIC #1 ---+ | | "Caleb" | +---------+
* +-------------+ | | | |
* | SPI-3 +---------+ IXP2400 |
* +-------------+ | bridge | | |
* SFP GBIC #2 ---+ | | FPGA | +---------+
* | PM3386 #1 +-------+ |
* | | +---------+
* +-------------+
* ^ ^ ^
* | 1.25Gbaud | 104MHz | 104MHz
* | SERDES ea. | SPI-3 ea. | SPI-3
*
***********************************************************************/
static struct ixp2400_msf_parameters enp2611_msf_parameters =
{
.rx_mode = IXP2400_RX_MODE_UTOPIA_POS |
IXP2400_RX_MODE_1x32 |
IXP2400_RX_MODE_MPHY |
IXP2400_RX_MODE_MPHY_32 |
IXP2400_RX_MODE_MPHY_POLLED_STATUS |
IXP2400_RX_MODE_MPHY_LEVEL3 |
IXP2400_RX_MODE_RBUF_SIZE_64,
.rxclk01_multiplier = IXP2400_PLL_MULTIPLIER_16,
.rx_poll_ports = 3,
.rx_channel_mode = {
IXP2400_PORT_RX_MODE_MASTER |
IXP2400_PORT_RX_MODE_POS_PHY |
IXP2400_PORT_RX_MODE_POS_PHY_L3 |
IXP2400_PORT_RX_MODE_ODD_PARITY |
IXP2400_PORT_RX_MODE_2_CYCLE_DECODE,
IXP2400_PORT_RX_MODE_MASTER |
IXP2400_PORT_RX_MODE_POS_PHY |
IXP2400_PORT_RX_MODE_POS_PHY_L3 |
IXP2400_PORT_RX_MODE_ODD_PARITY |
IXP2400_PORT_RX_MODE_2_CYCLE_DECODE,
IXP2400_PORT_RX_MODE_MASTER |
IXP2400_PORT_RX_MODE_POS_PHY |
IXP2400_PORT_RX_MODE_POS_PHY_L3 |
IXP2400_PORT_RX_MODE_ODD_PARITY |
IXP2400_PORT_RX_MODE_2_CYCLE_DECODE,
IXP2400_PORT_RX_MODE_MASTER |
IXP2400_PORT_RX_MODE_POS_PHY |
IXP2400_PORT_RX_MODE_POS_PHY_L3 |
IXP2400_PORT_RX_MODE_ODD_PARITY |
IXP2400_PORT_RX_MODE_2_CYCLE_DECODE
},
.tx_mode = IXP2400_TX_MODE_UTOPIA_POS |
IXP2400_TX_MODE_1x32 |
IXP2400_TX_MODE_MPHY |
IXP2400_TX_MODE_MPHY_32 |
IXP2400_TX_MODE_MPHY_POLLED_STATUS |
IXP2400_TX_MODE_MPHY_LEVEL3 |
IXP2400_TX_MODE_TBUF_SIZE_64,
.txclk01_multiplier = IXP2400_PLL_MULTIPLIER_16,
.tx_poll_ports = 3,
.tx_channel_mode = {
IXP2400_PORT_TX_MODE_MASTER |
IXP2400_PORT_TX_MODE_POS_PHY |
IXP2400_PORT_TX_MODE_ODD_PARITY |
IXP2400_PORT_TX_MODE_2_CYCLE_DECODE,
IXP2400_PORT_TX_MODE_MASTER |
IXP2400_PORT_TX_MODE_POS_PHY |
IXP2400_PORT_TX_MODE_ODD_PARITY |
IXP2400_PORT_TX_MODE_2_CYCLE_DECODE,
IXP2400_PORT_TX_MODE_MASTER |
IXP2400_PORT_TX_MODE_POS_PHY |
IXP2400_PORT_TX_MODE_ODD_PARITY |
IXP2400_PORT_TX_MODE_2_CYCLE_DECODE,
IXP2400_PORT_TX_MODE_MASTER |
IXP2400_PORT_TX_MODE_POS_PHY |
IXP2400_PORT_TX_MODE_ODD_PARITY |
IXP2400_PORT_TX_MODE_2_CYCLE_DECODE
}
};
static struct net_device *nds[3];
static struct timer_list link_check_timer;
/* @@@ Poll the SFP moddef0 line too. */
/* @@@ Try to use the pm3386 DOOL interrupt as well. */
static void enp2611_check_link_status(unsigned long __dummy)
{
int i;
for (i = 0; i < 3; i++) {
struct net_device *dev;
int status;
dev = nds[i];
if (dev == NULL)
continue;
status = pm3386_is_link_up(i);
if (status && !netif_carrier_ok(dev)) {
/* @@@ Should report autonegotiation status. */
printk(KERN_INFO "%s: NIC Link is Up\n", dev->name);
pm3386_enable_tx(i);
caleb_enable_tx(i);
netif_carrier_on(dev);
} else if (!status && netif_carrier_ok(dev)) {
printk(KERN_INFO "%s: NIC Link is Down\n", dev->name);
netif_carrier_off(dev);
caleb_disable_tx(i);
pm3386_disable_tx(i);
}
}
link_check_timer.expires = jiffies + HZ / 10;
add_timer(&link_check_timer);
}
static void enp2611_set_port_admin_status(int port, int up)
{
if (up) {
caleb_enable_rx(port);
pm3386_set_carrier(port, 1);
pm3386_enable_rx(port);
} else {
caleb_disable_tx(port);
pm3386_disable_tx(port);
/* @@@ Flush out pending packets. */
pm3386_set_carrier(port, 0);
pm3386_disable_rx(port);
caleb_disable_rx(port);
}
}
static int __init enp2611_init_module(void)
{
int ports;
int i;
if (!machine_is_enp2611())
return -ENODEV;
caleb_reset();
pm3386_reset();
ports = pm3386_port_count();
for (i = 0; i < ports; i++) {
nds[i] = ixpdev_alloc(i, sizeof(struct ixpdev_priv));
if (nds[i] == NULL) {
while (--i >= 0)
free_netdev(nds[i]);
return -ENOMEM;
}
pm3386_init_port(i);
pm3386_get_mac(i, nds[i]->dev_addr);
}
ixp2400_msf_init(&enp2611_msf_parameters);
if (ixpdev_init(ports, nds, enp2611_set_port_admin_status)) {
for (i = 0; i < ports; i++)
if (nds[i])
free_netdev(nds[i]);
return -EINVAL;
}
init_timer(&link_check_timer);
link_check_timer.function = enp2611_check_link_status;
link_check_timer.expires = jiffies;
add_timer(&link_check_timer);
return 0;
}
static void __exit enp2611_cleanup_module(void)
{
int i;
del_timer_sync(&link_check_timer);
ixpdev_deinit();
for (i = 0; i < 3; i++)
if (nds[i])	/* only ports that were actually allocated */
free_netdev(nds[i]);
}
module_init(enp2611_init_module);
module_exit(enp2611_cleanup_module);
MODULE_LICENSE("GPL");
| gpl-2.0 |
CyanogenMod/android_kernel_cyanogen_msm8994 | drivers/message/fusion/mptlan.c | 12719 | 43965 | /*
* linux/drivers/message/fusion/mptlan.c
* IP Over Fibre Channel device driver.
* For use with LSI Fibre Channel PCI chip/adapters
* running LSI Fusion MPT (Message Passing Technology) firmware.
*
* Copyright (c) 2000-2008 LSI Corporation
* (mailto:DL-MPTFusionLinux@lsi.com)
*
*/
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/*
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; version 2 of the License.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
NO WARRANTY
THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
solely responsible for determining the appropriateness of using and
distributing the Program and assumes all risks associated with its
exercise of rights under this Agreement, including but not limited to
the risks and costs of program errors, damage to or loss of data,
programs or equipment, and unavailability or interruption of operations.
DISCLAIMER OF LIABILITY
NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/*
* Define statements used for debugging
*/
//#define MPT_LAN_IO_DEBUG
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
#include "mptlan.h"
#include <linux/init.h>
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/sched.h>
#include <linux/slab.h>
#define my_VERSION MPT_LINUX_VERSION_COMMON
#define MYNAM "mptlan"
MODULE_LICENSE("GPL");
MODULE_VERSION(my_VERSION);
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/*
* MPT LAN message sizes without variable part.
*/
#define MPT_LAN_RECEIVE_POST_REQUEST_SIZE \
(sizeof(LANReceivePostRequest_t) - sizeof(SGE_MPI_UNION))
#define MPT_LAN_TRANSACTION32_SIZE \
(sizeof(SGETransaction32_t) - sizeof(u32))
/*
* Fusion MPT LAN private structures
*/
struct BufferControl {
struct sk_buff *skb;
dma_addr_t dma;
unsigned int len;
};
struct mpt_lan_priv {
MPT_ADAPTER *mpt_dev;
u8 pnum; /* Port number in the IOC. This is not a Unix network port! */
atomic_t buckets_out; /* number of unused buckets on IOC */
int bucketthresh; /* Send more when this many left */
int *mpt_txfidx; /* Free Tx Context list */
int mpt_txfidx_tail;
spinlock_t txfidx_lock;
int *mpt_rxfidx; /* Free Rx Context list */
int mpt_rxfidx_tail;
spinlock_t rxfidx_lock;
struct BufferControl *RcvCtl; /* Receive BufferControl structs */
struct BufferControl *SendCtl; /* Send BufferControl structs */
int max_buckets_out; /* Max buckets to send to IOC */
int tx_max_out; /* IOC's Tx queue len */
u32 total_posted;
u32 total_received;
struct delayed_work post_buckets_task;
struct net_device *dev;
unsigned long post_buckets_active;
};
struct mpt_lan_ohdr {
u16 dtype;
u8 daddr[FC_ALEN];
u16 stype;
u8 saddr[FC_ALEN];
};
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/*
* Forward protos...
*/
static int lan_reply (MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf,
MPT_FRAME_HDR *reply);
static int mpt_lan_open(struct net_device *dev);
static int mpt_lan_reset(struct net_device *dev);
static int mpt_lan_close(struct net_device *dev);
static void mpt_lan_post_receive_buckets(struct mpt_lan_priv *priv);
static void mpt_lan_wake_post_buckets_task(struct net_device *dev,
int priority);
static int mpt_lan_receive_post_turbo(struct net_device *dev, u32 tmsg);
static int mpt_lan_receive_post_reply(struct net_device *dev,
LANReceivePostReply_t *pRecvRep);
static int mpt_lan_send_turbo(struct net_device *dev, u32 tmsg);
static int mpt_lan_send_reply(struct net_device *dev,
LANSendReply_t *pSendRep);
static int mpt_lan_ioc_reset(MPT_ADAPTER *ioc, int reset_phase);
static int mpt_lan_event_process(MPT_ADAPTER *ioc, EventNotificationReply_t *pEvReply);
static unsigned short mpt_lan_type_trans(struct sk_buff *skb,
struct net_device *dev);
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/*
* Fusion MPT LAN private data
*/
static u8 LanCtx = MPT_MAX_PROTOCOL_DRIVERS;
static u32 max_buckets_out = 127;
static u32 tx_max_out_p = 127 - 16;
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/**
* lan_reply - Handle all data sent from the hardware.
* @ioc: Pointer to MPT_ADAPTER structure
* @mf: Pointer to original MPT request frame (NULL if TurboReply)
* @reply: Pointer to MPT reply frame
*
* Returns 1 indicating original alloc'd request frame ptr
* should be freed, or 0 if it shouldn't.
*/
static int
lan_reply (MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *reply)
{
struct net_device *dev = ioc->netdev;
int FreeReqFrame = 0;
dioprintk((KERN_INFO MYNAM ": %s/%s: Got reply.\n",
IOC_AND_NETDEV_NAMES_s_s(dev)));
// dioprintk((KERN_INFO MYNAM "@lan_reply: mf = %p, reply = %p\n",
// mf, reply));
if (mf == NULL) {
u32 tmsg = CAST_PTR_TO_U32(reply);
dioprintk((KERN_INFO MYNAM ": %s/%s: @lan_reply, tmsg %08x\n",
IOC_AND_NETDEV_NAMES_s_s(dev),
tmsg));
switch (GET_LAN_FORM(tmsg)) {
// NOTE! (Optimization) First case here is now caught in
// mptbase.c::mpt_interrupt() routine and callback here
// is now skipped for this case!
#if 0
case LAN_REPLY_FORM_MESSAGE_CONTEXT:
// dioprintk((KERN_INFO MYNAM "/lan_reply: "
// "MessageContext turbo reply received\n"));
FreeReqFrame = 1;
break;
#endif
case LAN_REPLY_FORM_SEND_SINGLE:
// dioprintk((MYNAM "/lan_reply: "
// "calling mpt_lan_send_reply (turbo)\n"));
// Potential BUG here?
// FreeReqFrame = mpt_lan_send_turbo(dev, tmsg);
// If/when mpt_lan_send_turbo would return 1 here,
// calling routine (mptbase.c|mpt_interrupt)
// would Oops because mf has already been set
// to NULL. So after return from this func,
// mpt_interrupt() will attempt to put (NULL) mf ptr
// item back onto its adapter FreeQ - Oops!:-(
// It's Ok, since mpt_lan_send_turbo() *currently*
// always returns 0, but..., just in case:
(void) mpt_lan_send_turbo(dev, tmsg);
FreeReqFrame = 0;
break;
case LAN_REPLY_FORM_RECEIVE_SINGLE:
// dioprintk((KERN_INFO MYNAM "@lan_reply: "
// "rcv-Turbo = %08x\n", tmsg));
mpt_lan_receive_post_turbo(dev, tmsg);
break;
default:
printk (KERN_ERR MYNAM "/lan_reply: Got a turbo reply "
"that I don't know what to do with\n");
/* CHECKME! Hmmm... FreeReqFrame is 0 here; is that right? */
break;
}
return FreeReqFrame;
}
// msg = (u32 *) reply;
// dioprintk((KERN_INFO MYNAM "@lan_reply: msg = %08x %08x %08x %08x\n",
// le32_to_cpu(msg[0]), le32_to_cpu(msg[1]),
// le32_to_cpu(msg[2]), le32_to_cpu(msg[3])));
// dioprintk((KERN_INFO MYNAM "@lan_reply: Function = %02xh\n",
// reply->u.hdr.Function));
switch (reply->u.hdr.Function) {
case MPI_FUNCTION_LAN_SEND:
{
LANSendReply_t *pSendRep;
pSendRep = (LANSendReply_t *) reply;
FreeReqFrame = mpt_lan_send_reply(dev, pSendRep);
break;
}
case MPI_FUNCTION_LAN_RECEIVE:
{
LANReceivePostReply_t *pRecvRep;
pRecvRep = (LANReceivePostReply_t *) reply;
if (pRecvRep->NumberOfContexts) {
mpt_lan_receive_post_reply(dev, pRecvRep);
if (!(pRecvRep->MsgFlags & MPI_MSGFLAGS_CONTINUATION_REPLY))
FreeReqFrame = 1;
} else
dioprintk((KERN_INFO MYNAM "@lan_reply: zero context "
"ReceivePostReply received.\n"));
break;
}
case MPI_FUNCTION_LAN_RESET:
/* Just a default reply. Might want to check it to
* make sure that everything went ok.
*/
FreeReqFrame = 1;
break;
case MPI_FUNCTION_EVENT_NOTIFICATION:
case MPI_FUNCTION_EVENT_ACK:
/* _EVENT_NOTIFICATION should NOT come down this path any more.
* Should be routed to mpt_lan_event_process(), but just in case...
*/
FreeReqFrame = 1;
break;
default:
printk (KERN_ERR MYNAM "/lan_reply: Got a non-turbo "
"reply that I don't know what to do with\n");
/* CHECKME! Hmmm... FreeReqFrame is 0 here; is that right? */
FreeReqFrame = 1;
break;
}
return FreeReqFrame;
}
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
static int
mpt_lan_ioc_reset(MPT_ADAPTER *ioc, int reset_phase)
{
struct net_device *dev = ioc->netdev;
struct mpt_lan_priv *priv;
if (dev == NULL)
return(1);
else
priv = netdev_priv(dev);
dlprintk((KERN_INFO MYNAM ": IOC %s_reset routed to LAN driver!\n",
reset_phase==MPT_IOC_SETUP_RESET ? "setup" : (
reset_phase==MPT_IOC_PRE_RESET ? "pre" : "post")));
if (priv->mpt_rxfidx == NULL)
return (1);
if (reset_phase == MPT_IOC_SETUP_RESET) {
;
} else if (reset_phase == MPT_IOC_PRE_RESET) {
int i;
unsigned long flags;
netif_stop_queue(dev);
dlprintk ((KERN_INFO "mptlan/ioc_reset: called netif_stop_queue for %s.\n", dev->name));
atomic_set(&priv->buckets_out, 0);
/* Reset Rx Free Tail index and re-populate the queue. */
spin_lock_irqsave(&priv->rxfidx_lock, flags);
priv->mpt_rxfidx_tail = -1;
for (i = 0; i < priv->max_buckets_out; i++)
priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = i;
spin_unlock_irqrestore(&priv->rxfidx_lock, flags);
} else {
mpt_lan_post_receive_buckets(priv);
netif_wake_queue(dev);
}
return 1;
}
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
static int
mpt_lan_event_process(MPT_ADAPTER *ioc, EventNotificationReply_t *pEvReply)
{
dlprintk((KERN_INFO MYNAM ": MPT event routed to LAN driver!\n"));
switch (le32_to_cpu(pEvReply->Event)) {
case MPI_EVENT_NONE: /* 00 */
case MPI_EVENT_LOG_DATA: /* 01 */
case MPI_EVENT_STATE_CHANGE: /* 02 */
case MPI_EVENT_UNIT_ATTENTION: /* 03 */
case MPI_EVENT_IOC_BUS_RESET: /* 04 */
case MPI_EVENT_EXT_BUS_RESET: /* 05 */
case MPI_EVENT_RESCAN: /* 06 */
/* Ok, do we need to do anything here? As far as
I can tell, this is when a new device gets added
to the loop. */
case MPI_EVENT_LINK_STATUS_CHANGE: /* 07 */
case MPI_EVENT_LOOP_STATE_CHANGE: /* 08 */
case MPI_EVENT_LOGOUT: /* 09 */
case MPI_EVENT_EVENT_CHANGE: /* 0A */
default:
break;
}
/*
* NOTE: pEvent->AckRequired handling now done in mptbase.c;
* Do NOT do it here now!
*/
return 1;
}
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
static int
mpt_lan_open(struct net_device *dev)
{
struct mpt_lan_priv *priv = netdev_priv(dev);
int i;
if (mpt_lan_reset(dev) != 0) {
MPT_ADAPTER *mpt_dev = priv->mpt_dev;
printk (KERN_WARNING MYNAM "/lan_open: lan_reset failed.");
if (mpt_dev->active)
printk ("The ioc is active. Perhaps it needs to be"
" reset?\n");
else
printk ("The ioc in inactive, most likely in the "
"process of being reset. Please try again in "
"a moment.\n");
}
priv->mpt_txfidx = kmalloc(priv->tx_max_out * sizeof(int), GFP_KERNEL);
if (priv->mpt_txfidx == NULL)
goto out;
priv->mpt_txfidx_tail = -1;
priv->SendCtl = kcalloc(priv->tx_max_out, sizeof(struct BufferControl),
GFP_KERNEL);
if (priv->SendCtl == NULL)
goto out_mpt_txfidx;
for (i = 0; i < priv->tx_max_out; i++)
priv->mpt_txfidx[++priv->mpt_txfidx_tail] = i;
dlprintk((KERN_INFO MYNAM "@lo: Finished initializing SendCtl\n"));
priv->mpt_rxfidx = kmalloc(priv->max_buckets_out * sizeof(int),
GFP_KERNEL);
if (priv->mpt_rxfidx == NULL)
goto out_SendCtl;
priv->mpt_rxfidx_tail = -1;
priv->RcvCtl = kcalloc(priv->max_buckets_out,
sizeof(struct BufferControl),
GFP_KERNEL);
if (priv->RcvCtl == NULL)
goto out_mpt_rxfidx;
for (i = 0; i < priv->max_buckets_out; i++)
priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = i;
/**/ dlprintk((KERN_INFO MYNAM "/lo: txfidx contains - "));
/**/ for (i = 0; i < priv->tx_max_out; i++)
/**/ dlprintk((" %xh", priv->mpt_txfidx[i]));
/**/ dlprintk(("\n"));
dlprintk((KERN_INFO MYNAM "/lo: Finished initializing RcvCtl\n"));
mpt_lan_post_receive_buckets(priv);
printk(KERN_INFO MYNAM ": %s/%s: interface up & active\n",
IOC_AND_NETDEV_NAMES_s_s(dev));
if (mpt_event_register(LanCtx, mpt_lan_event_process) != 0) {
printk (KERN_WARNING MYNAM "/lo: Unable to register for Event"
" Notifications. This is a bad thing! We're not going "
"to go ahead, but I'd be leery of system stability at "
"this point.\n");
}
netif_start_queue(dev);
dlprintk((KERN_INFO MYNAM "/lo: Done.\n"));
return 0;
out_mpt_rxfidx:
kfree(priv->mpt_rxfidx);
priv->mpt_rxfidx = NULL;
out_SendCtl:
kfree(priv->SendCtl);
priv->SendCtl = NULL;
out_mpt_txfidx:
kfree(priv->mpt_txfidx);
priv->mpt_txfidx = NULL;
out: return -ENOMEM;
}
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/* Send a LanReset message to the FW. This should result in the FW returning
any buckets it still has. */
static int
mpt_lan_reset(struct net_device *dev)
{
MPT_FRAME_HDR *mf;
LANResetRequest_t *pResetReq;
struct mpt_lan_priv *priv = netdev_priv(dev);
mf = mpt_get_msg_frame(LanCtx, priv->mpt_dev);
if (mf == NULL) {
/* dlprintk((KERN_ERR MYNAM "/reset: Evil funkiness abounds! "
"Unable to allocate a request frame.\n"));
*/
return -1;
}
pResetReq = (LANResetRequest_t *) mf;
pResetReq->Function = MPI_FUNCTION_LAN_RESET;
pResetReq->ChainOffset = 0;
pResetReq->Reserved = 0;
pResetReq->PortNumber = priv->pnum;
pResetReq->MsgFlags = 0;
pResetReq->Reserved2 = 0;
mpt_put_msg_frame(LanCtx, priv->mpt_dev, mf);
return 0;
}
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
static int
mpt_lan_close(struct net_device *dev)
{
struct mpt_lan_priv *priv = netdev_priv(dev);
MPT_ADAPTER *mpt_dev = priv->mpt_dev;
unsigned long timeout;
int i;
dlprintk((KERN_INFO MYNAM ": mpt_lan_close called\n"));
mpt_event_deregister(LanCtx);
dlprintk((KERN_INFO MYNAM ":lan_close: Posted %d buckets "
"since driver was loaded, %d still out\n",
priv->total_posted,atomic_read(&priv->buckets_out)));
netif_stop_queue(dev);
mpt_lan_reset(dev);
timeout = jiffies + 2 * HZ;
while (atomic_read(&priv->buckets_out) && time_before(jiffies, timeout))
schedule_timeout_interruptible(1);
for (i = 0; i < priv->max_buckets_out; i++) {
if (priv->RcvCtl[i].skb != NULL) {
/**/ dlprintk((KERN_INFO MYNAM "/lan_close: bucket %05x "
/**/ "is still out\n", i));
pci_unmap_single(mpt_dev->pcidev, priv->RcvCtl[i].dma,
priv->RcvCtl[i].len,
PCI_DMA_FROMDEVICE);
dev_kfree_skb(priv->RcvCtl[i].skb);
}
}
kfree(priv->RcvCtl);
kfree(priv->mpt_rxfidx);
for (i = 0; i < priv->tx_max_out; i++) {
if (priv->SendCtl[i].skb != NULL) {
pci_unmap_single(mpt_dev->pcidev, priv->SendCtl[i].dma,
priv->SendCtl[i].len,
PCI_DMA_TODEVICE);
dev_kfree_skb(priv->SendCtl[i].skb);
}
}
kfree(priv->SendCtl);
kfree(priv->mpt_txfidx);
atomic_set(&priv->buckets_out, 0);
printk(KERN_INFO MYNAM ": %s/%s: interface down & inactive\n",
IOC_AND_NETDEV_NAMES_s_s(dev));
return 0;
}
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
static int
mpt_lan_change_mtu(struct net_device *dev, int new_mtu)
{
if ((new_mtu < MPT_LAN_MIN_MTU) || (new_mtu > MPT_LAN_MAX_MTU))
return -EINVAL;
dev->mtu = new_mtu;
return 0;
}
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/* Tx timeout handler. */
static void
mpt_lan_tx_timeout(struct net_device *dev)
{
struct mpt_lan_priv *priv = netdev_priv(dev);
MPT_ADAPTER *mpt_dev = priv->mpt_dev;
if (mpt_dev->active) {
dlprintk (("mptlan/tx_timeout: calling netif_wake_queue for %s.\n", dev->name));
netif_wake_queue(dev);
}
}
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
//static inline int
static int
mpt_lan_send_turbo(struct net_device *dev, u32 tmsg)
{
struct mpt_lan_priv *priv = netdev_priv(dev);
MPT_ADAPTER *mpt_dev = priv->mpt_dev;
struct sk_buff *sent;
unsigned long flags;
u32 ctx;
ctx = GET_LAN_BUFFER_CONTEXT(tmsg);
sent = priv->SendCtl[ctx].skb;
dev->stats.tx_packets++;
dev->stats.tx_bytes += sent->len;
dioprintk((KERN_INFO MYNAM ": %s/%s: @%s, skb %p sent.\n",
IOC_AND_NETDEV_NAMES_s_s(dev),
__func__, sent));
priv->SendCtl[ctx].skb = NULL;
pci_unmap_single(mpt_dev->pcidev, priv->SendCtl[ctx].dma,
priv->SendCtl[ctx].len, PCI_DMA_TODEVICE);
dev_kfree_skb_irq(sent);
spin_lock_irqsave(&priv->txfidx_lock, flags);
priv->mpt_txfidx[++priv->mpt_txfidx_tail] = ctx;
spin_unlock_irqrestore(&priv->txfidx_lock, flags);
netif_wake_queue(dev);
return 0;
}
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
static int
mpt_lan_send_reply(struct net_device *dev, LANSendReply_t *pSendRep)
{
struct mpt_lan_priv *priv = netdev_priv(dev);
MPT_ADAPTER *mpt_dev = priv->mpt_dev;
struct sk_buff *sent;
unsigned long flags;
int FreeReqFrame = 0;
u32 *pContext;
u32 ctx;
u8 count;
count = pSendRep->NumberOfContexts;
dioprintk((KERN_INFO MYNAM ": send_reply: IOCStatus: %04x\n",
le16_to_cpu(pSendRep->IOCStatus)));
/* Add check for Loginfo Flag in IOCStatus */
switch (le16_to_cpu(pSendRep->IOCStatus) & MPI_IOCSTATUS_MASK) {
case MPI_IOCSTATUS_SUCCESS:
dev->stats.tx_packets += count;
break;
case MPI_IOCSTATUS_LAN_CANCELED:
case MPI_IOCSTATUS_LAN_TRANSMIT_ABORTED:
break;
case MPI_IOCSTATUS_INVALID_SGL:
dev->stats.tx_errors += count;
printk (KERN_ERR MYNAM ": %s/%s: ERROR - Invalid SGL sent to IOC!\n",
IOC_AND_NETDEV_NAMES_s_s(dev));
goto out;
default:
dev->stats.tx_errors += count;
break;
}
pContext = &pSendRep->BufferContext;
spin_lock_irqsave(&priv->txfidx_lock, flags);
while (count > 0) {
ctx = GET_LAN_BUFFER_CONTEXT(le32_to_cpu(*pContext));
sent = priv->SendCtl[ctx].skb;
dev->stats.tx_bytes += sent->len;
dioprintk((KERN_INFO MYNAM ": %s/%s: @%s, skb %p sent.\n",
IOC_AND_NETDEV_NAMES_s_s(dev),
__func__, sent));
priv->SendCtl[ctx].skb = NULL;
pci_unmap_single(mpt_dev->pcidev, priv->SendCtl[ctx].dma,
priv->SendCtl[ctx].len, PCI_DMA_TODEVICE);
dev_kfree_skb_irq(sent);
priv->mpt_txfidx[++priv->mpt_txfidx_tail] = ctx;
pContext++;
count--;
}
spin_unlock_irqrestore(&priv->txfidx_lock, flags);
out:
if (!(pSendRep->MsgFlags & MPI_MSGFLAGS_CONTINUATION_REPLY))
FreeReqFrame = 1;
netif_wake_queue(dev);
return FreeReqFrame;
}
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
static int
mpt_lan_sdu_send (struct sk_buff *skb, struct net_device *dev)
{
struct mpt_lan_priv *priv = netdev_priv(dev);
MPT_ADAPTER *mpt_dev = priv->mpt_dev;
MPT_FRAME_HDR *mf;
LANSendRequest_t *pSendReq;
SGETransaction32_t *pTrans;
SGESimple64_t *pSimple;
const unsigned char *mac;
dma_addr_t dma;
unsigned long flags;
int ctx;
u16 cur_naa = 0x1000;
dioprintk((KERN_INFO MYNAM ": %s called, skb_addr = %p\n",
__func__, skb));
spin_lock_irqsave(&priv->txfidx_lock, flags);
if (priv->mpt_txfidx_tail < 0) {
netif_stop_queue(dev);
spin_unlock_irqrestore(&priv->txfidx_lock, flags);
printk (KERN_ERR "%s: no tx context available: %d\n",
__func__, priv->mpt_txfidx_tail);
return NETDEV_TX_BUSY;
}
mf = mpt_get_msg_frame(LanCtx, mpt_dev);
if (mf == NULL) {
netif_stop_queue(dev);
spin_unlock_irqrestore(&priv->txfidx_lock, flags);
printk (KERN_ERR "%s: Unable to alloc request frame\n",
__func__);
return NETDEV_TX_BUSY;
}
ctx = priv->mpt_txfidx[priv->mpt_txfidx_tail--];
spin_unlock_irqrestore(&priv->txfidx_lock, flags);
// dioprintk((KERN_INFO MYNAM ": %s/%s: Creating new msg frame (send).\n",
// IOC_AND_NETDEV_NAMES_s_s(dev)));
pSendReq = (LANSendRequest_t *) mf;
/* Set the mac.raw pointer, since this apparently isn't getting
* done before we get the skb. Pull the data pointer past the mac data.
*/
skb_reset_mac_header(skb);
skb_pull(skb, 12);
dma = pci_map_single(mpt_dev->pcidev, skb->data, skb->len,
PCI_DMA_TODEVICE);
priv->SendCtl[ctx].skb = skb;
priv->SendCtl[ctx].dma = dma;
priv->SendCtl[ctx].len = skb->len;
/* Message Header */
pSendReq->Reserved = 0;
pSendReq->Function = MPI_FUNCTION_LAN_SEND;
pSendReq->ChainOffset = 0;
pSendReq->Reserved2 = 0;
pSendReq->MsgFlags = 0;
pSendReq->PortNumber = priv->pnum;
/* Transaction Context Element */
pTrans = (SGETransaction32_t *) pSendReq->SG_List;
/* No Flags, 8 bytes of Details, 32bit Context (bloody turbo replies) */
pTrans->ContextSize = sizeof(u32);
pTrans->DetailsLength = 2 * sizeof(u32);
pTrans->Flags = 0;
pTrans->TransactionContext[0] = cpu_to_le32(ctx);
// dioprintk((KERN_INFO MYNAM ": %s/%s: BC = %08x, skb = %p, buff = %p\n",
// IOC_AND_NETDEV_NAMES_s_s(dev),
// ctx, skb, skb->data));
mac = skb_mac_header(skb);
pTrans->TransactionDetails[0] = cpu_to_le32((cur_naa << 16) |
(mac[0] << 8) |
(mac[1] << 0));
pTrans->TransactionDetails[1] = cpu_to_le32((mac[2] << 24) |
(mac[3] << 16) |
(mac[4] << 8) |
(mac[5] << 0));
pSimple = (SGESimple64_t *) &pTrans->TransactionDetails[2];
/* If we ever decide to send more than one Simple SGE per LANSend, then
we will need to make sure that LAST_ELEMENT only gets set on the
last one. Otherwise, bad voodoo and evil funkiness will commence. */
pSimple->FlagsLength = cpu_to_le32(
((MPI_SGE_FLAGS_LAST_ELEMENT |
MPI_SGE_FLAGS_END_OF_BUFFER |
MPI_SGE_FLAGS_SIMPLE_ELEMENT |
MPI_SGE_FLAGS_SYSTEM_ADDRESS |
MPI_SGE_FLAGS_HOST_TO_IOC |
MPI_SGE_FLAGS_64_BIT_ADDRESSING |
MPI_SGE_FLAGS_END_OF_LIST) << MPI_SGE_FLAGS_SHIFT) |
skb->len);
pSimple->Address.Low = cpu_to_le32((u32) dma);
if (sizeof(dma_addr_t) > sizeof(u32))
pSimple->Address.High = cpu_to_le32((u32) ((u64) dma >> 32));
else
pSimple->Address.High = 0;
mpt_put_msg_frame (LanCtx, mpt_dev, mf);
dev->trans_start = jiffies;
dioprintk((KERN_INFO MYNAM ": %s/%s: Sending packet. FlagsLength = %08x.\n",
IOC_AND_NETDEV_NAMES_s_s(dev),
le32_to_cpu(pSimple->FlagsLength)));
return NETDEV_TX_OK;
}
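/*
 * Illustrative sketch, not part of the driver: the simple 64-bit SGE built
 * above packs its flag bits and the transfer length into one little-endian
 * FlagsLength word, flags above MPI_SGE_FLAGS_SHIFT and the byte count in
 * the low bits. A stand-alone equivalent (helper name is hypothetical):
 */
static inline __le32 mpt_lan_sge_flagslength(u32 flags, u32 len)
{
	/* flags occupy the bits at and above MPI_SGE_FLAGS_SHIFT */
	return cpu_to_le32((flags << MPI_SGE_FLAGS_SHIFT) | len);
}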
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
static void
mpt_lan_wake_post_buckets_task(struct net_device *dev, int priority)
/*
* @priority: 0 = put it on the timer queue, 1 = put it on the immediate queue
*/
{
struct mpt_lan_priv *priv = netdev_priv(dev);
if (test_and_set_bit(0, &priv->post_buckets_active) == 0) {
if (priority) {
schedule_delayed_work(&priv->post_buckets_task, 0);
} else {
schedule_delayed_work(&priv->post_buckets_task, 1);
dioprintk((KERN_INFO MYNAM ": post_buckets queued on "
"timer.\n"));
}
dioprintk((KERN_INFO MYNAM ": %s/%s: Queued post_buckets task.\n",
IOC_AND_NETDEV_NAMES_s_s(dev) ));
}
}
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
static int
mpt_lan_receive_skb(struct net_device *dev, struct sk_buff *skb)
{
struct mpt_lan_priv *priv = netdev_priv(dev);
skb->protocol = mpt_lan_type_trans(skb, dev);
dioprintk((KERN_INFO MYNAM ": %s/%s: Incoming packet (%d bytes) "
"delivered to upper level.\n",
IOC_AND_NETDEV_NAMES_s_s(dev), skb->len));
dev->stats.rx_bytes += skb->len;
dev->stats.rx_packets++;
skb->dev = dev;
netif_rx(skb);
dioprintk((MYNAM "/receive_skb: %d buckets remaining\n",
atomic_read(&priv->buckets_out)));
if (atomic_read(&priv->buckets_out) < priv->bucketthresh)
mpt_lan_wake_post_buckets_task(dev, 1);
dioprintk((KERN_INFO MYNAM "/receive_post_reply: %d buckets "
"remaining, %d received back since sod\n",
atomic_read(&priv->buckets_out), priv->total_received));
return 0;
}
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
//static inline int
static int
mpt_lan_receive_post_turbo(struct net_device *dev, u32 tmsg)
{
struct mpt_lan_priv *priv = netdev_priv(dev);
MPT_ADAPTER *mpt_dev = priv->mpt_dev;
struct sk_buff *skb, *old_skb;
unsigned long flags;
u32 ctx, len;
ctx = GET_LAN_BUCKET_CONTEXT(tmsg);
skb = priv->RcvCtl[ctx].skb;
len = GET_LAN_PACKET_LENGTH(tmsg);
if (len < MPT_LAN_RX_COPYBREAK) {
old_skb = skb;
skb = (struct sk_buff *)dev_alloc_skb(len);
if (!skb) {
printk (KERN_ERR MYNAM ": %s/%s: ERROR - Can't allocate skb! (%s@%d)\n",
IOC_AND_NETDEV_NAMES_s_s(dev),
__FILE__, __LINE__);
return -ENOMEM;
}
pci_dma_sync_single_for_cpu(mpt_dev->pcidev, priv->RcvCtl[ctx].dma,
priv->RcvCtl[ctx].len, PCI_DMA_FROMDEVICE);
skb_copy_from_linear_data(old_skb, skb_put(skb, len), len);
pci_dma_sync_single_for_device(mpt_dev->pcidev, priv->RcvCtl[ctx].dma,
priv->RcvCtl[ctx].len, PCI_DMA_FROMDEVICE);
goto out;
}
skb_put(skb, len);
priv->RcvCtl[ctx].skb = NULL;
pci_unmap_single(mpt_dev->pcidev, priv->RcvCtl[ctx].dma,
priv->RcvCtl[ctx].len, PCI_DMA_FROMDEVICE);
out:
spin_lock_irqsave(&priv->rxfidx_lock, flags);
priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = ctx;
spin_unlock_irqrestore(&priv->rxfidx_lock, flags);
atomic_dec(&priv->buckets_out);
priv->total_received++;
return mpt_lan_receive_skb(dev, skb);
}
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
static int
mpt_lan_receive_post_free(struct net_device *dev,
LANReceivePostReply_t *pRecvRep)
{
struct mpt_lan_priv *priv = netdev_priv(dev);
MPT_ADAPTER *mpt_dev = priv->mpt_dev;
unsigned long flags;
struct sk_buff *skb;
u32 ctx;
int count;
int i;
count = pRecvRep->NumberOfContexts;
/**/ dlprintk((KERN_INFO MYNAM "/receive_post_reply: "
"IOC returned %d buckets, freeing them...\n", count));
spin_lock_irqsave(&priv->rxfidx_lock, flags);
for (i = 0; i < count; i++) {
ctx = le32_to_cpu(pRecvRep->BucketContext[i]);
skb = priv->RcvCtl[ctx].skb;
// dlprintk((KERN_INFO MYNAM ": %s: dev_name = %s\n",
// IOC_AND_NETDEV_NAMES_s_s(dev)));
// dlprintk((KERN_INFO MYNAM "@rpr[2], priv = %p, buckets_out addr = %p",
// priv, &(priv->buckets_out)));
// dlprintk((KERN_INFO MYNAM "@rpr[2] TC + 3\n"));
priv->RcvCtl[ctx].skb = NULL;
pci_unmap_single(mpt_dev->pcidev, priv->RcvCtl[ctx].dma,
priv->RcvCtl[ctx].len, PCI_DMA_FROMDEVICE);
dev_kfree_skb_any(skb);
priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = ctx;
}
spin_unlock_irqrestore(&priv->rxfidx_lock, flags);
atomic_sub(count, &priv->buckets_out);
// for (i = 0; i < priv->max_buckets_out; i++)
// if (priv->RcvCtl[i].skb != NULL)
// dlprintk((KERN_INFO MYNAM "@rpr: bucket %03x "
// "is still out\n", i));
/* dlprintk((KERN_INFO MYNAM "/receive_post_reply: freed %d buckets\n",
count));
*/
/**/ dlprintk((KERN_INFO MYNAM "@receive_post_reply: %d buckets "
/**/ "remaining, %d received back since sod.\n",
/**/ atomic_read(&priv->buckets_out), priv->total_received));
return 0;
}
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
static int
mpt_lan_receive_post_reply(struct net_device *dev,
LANReceivePostReply_t *pRecvRep)
{
struct mpt_lan_priv *priv = netdev_priv(dev);
MPT_ADAPTER *mpt_dev = priv->mpt_dev;
struct sk_buff *skb, *old_skb;
unsigned long flags;
u32 len, ctx, offset;
u32 remaining = le32_to_cpu(pRecvRep->BucketsRemaining);
int count;
int i, l;
dioprintk((KERN_INFO MYNAM ": mpt_lan_receive_post_reply called\n"));
dioprintk((KERN_INFO MYNAM ": receive_post_reply: IOCStatus: %04x\n",
le16_to_cpu(pRecvRep->IOCStatus)));
if ((le16_to_cpu(pRecvRep->IOCStatus) & MPI_IOCSTATUS_MASK) ==
MPI_IOCSTATUS_LAN_CANCELED)
return mpt_lan_receive_post_free(dev, pRecvRep);
len = le32_to_cpu(pRecvRep->PacketLength);
if (len == 0) {
printk (KERN_ERR MYNAM ": %s/%s: ERROR - Got a non-TURBO "
"ReceivePostReply w/ PacketLength zero!\n",
IOC_AND_NETDEV_NAMES_s_s(dev));
printk (KERN_ERR MYNAM ": MsgFlags = %02x, IOCStatus = %04x\n",
pRecvRep->MsgFlags, le16_to_cpu(pRecvRep->IOCStatus));
return -1;
}
ctx = le32_to_cpu(pRecvRep->BucketContext[0]);
count = pRecvRep->NumberOfContexts;
skb = priv->RcvCtl[ctx].skb;
offset = le32_to_cpu(pRecvRep->PacketOffset);
// if (offset != 0) {
// printk (KERN_INFO MYNAM ": %s/%s: Got a ReceivePostReply "
// "w/ PacketOffset %u\n",
// IOC_AND_NETDEV_NAMES_s_s(dev),
// offset);
// }
dioprintk((KERN_INFO MYNAM ": %s/%s: @rpr, offset = %d, len = %d\n",
IOC_AND_NETDEV_NAMES_s_s(dev),
offset, len));
if (count > 1) {
int szrem = len;
// dioprintk((KERN_INFO MYNAM ": %s/%s: Multiple buckets returned "
// "for single packet, concatenating...\n",
// IOC_AND_NETDEV_NAMES_s_s(dev)));
skb = (struct sk_buff *)dev_alloc_skb(len);
if (!skb) {
printk (KERN_ERR MYNAM ": %s/%s: ERROR - Can't allocate skb! (%s@%d)\n",
IOC_AND_NETDEV_NAMES_s_s(dev),
__FILE__, __LINE__);
return -ENOMEM;
}
spin_lock_irqsave(&priv->rxfidx_lock, flags);
for (i = 0; i < count; i++) {
ctx = le32_to_cpu(pRecvRep->BucketContext[i]);
old_skb = priv->RcvCtl[ctx].skb;
l = priv->RcvCtl[ctx].len;
if (szrem < l)
l = szrem;
// dioprintk((KERN_INFO MYNAM ": %s/%s: Buckets = %d, len = %u\n",
// IOC_AND_NETDEV_NAMES_s_s(dev),
// i, l));
pci_dma_sync_single_for_cpu(mpt_dev->pcidev,
priv->RcvCtl[ctx].dma,
priv->RcvCtl[ctx].len,
PCI_DMA_FROMDEVICE);
skb_copy_from_linear_data(old_skb, skb_put(skb, l), l);
pci_dma_sync_single_for_device(mpt_dev->pcidev,
priv->RcvCtl[ctx].dma,
priv->RcvCtl[ctx].len,
PCI_DMA_FROMDEVICE);
priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = ctx;
szrem -= l;
}
spin_unlock_irqrestore(&priv->rxfidx_lock, flags);
} else if (len < MPT_LAN_RX_COPYBREAK) {
old_skb = skb;
skb = (struct sk_buff *)dev_alloc_skb(len);
if (!skb) {
printk (KERN_ERR MYNAM ": %s/%s: ERROR - Can't allocate skb! (%s@%d)\n",
IOC_AND_NETDEV_NAMES_s_s(dev),
__FILE__, __LINE__);
return -ENOMEM;
}
pci_dma_sync_single_for_cpu(mpt_dev->pcidev,
priv->RcvCtl[ctx].dma,
priv->RcvCtl[ctx].len,
PCI_DMA_FROMDEVICE);
skb_copy_from_linear_data(old_skb, skb_put(skb, len), len);
pci_dma_sync_single_for_device(mpt_dev->pcidev,
priv->RcvCtl[ctx].dma,
priv->RcvCtl[ctx].len,
PCI_DMA_FROMDEVICE);
spin_lock_irqsave(&priv->rxfidx_lock, flags);
priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = ctx;
spin_unlock_irqrestore(&priv->rxfidx_lock, flags);
} else {
spin_lock_irqsave(&priv->rxfidx_lock, flags);
priv->RcvCtl[ctx].skb = NULL;
pci_unmap_single(mpt_dev->pcidev, priv->RcvCtl[ctx].dma,
priv->RcvCtl[ctx].len, PCI_DMA_FROMDEVICE);
priv->RcvCtl[ctx].dma = 0;
priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = ctx;
spin_unlock_irqrestore(&priv->rxfidx_lock, flags);
skb_put(skb,len);
}
atomic_sub(count, &priv->buckets_out);
priv->total_received += count;
if (priv->mpt_rxfidx_tail >= MPT_LAN_MAX_BUCKETS_OUT) {
printk (KERN_ERR MYNAM ": %s/%s: Yoohoo! mpt_rxfidx_tail = %d, "
"MPT_LAN_MAX_BUCKETS_OUT = %d\n",
IOC_AND_NETDEV_NAMES_s_s(dev),
priv->mpt_rxfidx_tail,
MPT_LAN_MAX_BUCKETS_OUT);
return -1;
}
if (remaining == 0)
printk (KERN_WARNING MYNAM ": %s/%s: WARNING - IOC out of buckets! "
"(priv->buckets_out = %d)\n",
IOC_AND_NETDEV_NAMES_s_s(dev),
atomic_read(&priv->buckets_out));
else if (remaining < 10)
printk (KERN_INFO MYNAM ": %s/%s: IOC says %d buckets left. "
"(priv->buckets_out = %d)\n",
IOC_AND_NETDEV_NAMES_s_s(dev),
remaining, atomic_read(&priv->buckets_out));
if ((remaining < priv->bucketthresh) &&
((atomic_read(&priv->buckets_out) - remaining) >
MPT_LAN_BUCKETS_REMAIN_MISMATCH_THRESH)) {
printk (KERN_WARNING MYNAM " Mismatch between driver's "
"buckets_out count and fw's BucketsRemaining "
"count has crossed the threshold, issuing a "
"LanReset to clear the fw's hashtable. You may "
"want to check your /var/log/messages for \"CRC "
"error\" event notifications.\n");
mpt_lan_reset(dev);
mpt_lan_wake_post_buckets_task(dev, 0);
}
return mpt_lan_receive_skb(dev, skb);
}
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/* Simple SGE's only at the moment */
static void
mpt_lan_post_receive_buckets(struct mpt_lan_priv *priv)
{
struct net_device *dev = priv->dev;
MPT_ADAPTER *mpt_dev = priv->mpt_dev;
MPT_FRAME_HDR *mf;
LANReceivePostRequest_t *pRecvReq;
SGETransaction32_t *pTrans;
SGESimple64_t *pSimple;
struct sk_buff *skb;
dma_addr_t dma;
u32 curr, buckets, count, max;
u32 len = (dev->mtu + dev->hard_header_len + 4);
unsigned long flags;
int i;
curr = atomic_read(&priv->buckets_out);
buckets = (priv->max_buckets_out - curr);
dioprintk((KERN_INFO MYNAM ": %s/%s: @%s, Start_buckets = %u, buckets_out = %u\n",
IOC_AND_NETDEV_NAMES_s_s(dev),
__func__, buckets, curr));
max = (mpt_dev->req_sz - MPT_LAN_RECEIVE_POST_REQUEST_SIZE) /
(MPT_LAN_TRANSACTION32_SIZE + sizeof(SGESimple64_t));
while (buckets) {
mf = mpt_get_msg_frame(LanCtx, mpt_dev);
if (mf == NULL) {
printk (KERN_ERR "%s: Unable to alloc request frame\n",
__func__);
dioprintk((KERN_ERR "%s: %u buckets remaining\n",
__func__, buckets));
goto out;
}
pRecvReq = (LANReceivePostRequest_t *) mf;
i = le16_to_cpu(mf->u.frame.hwhdr.msgctxu.fld.req_idx);
mpt_dev->RequestNB[i] = 0;
count = buckets;
if (count > max)
count = max;
pRecvReq->Function = MPI_FUNCTION_LAN_RECEIVE;
pRecvReq->ChainOffset = 0;
pRecvReq->MsgFlags = 0;
pRecvReq->PortNumber = priv->pnum;
pTrans = (SGETransaction32_t *) pRecvReq->SG_List;
pSimple = NULL;
for (i = 0; i < count; i++) {
int ctx;
spin_lock_irqsave(&priv->rxfidx_lock, flags);
if (priv->mpt_rxfidx_tail < 0) {
printk (KERN_ERR "%s: Can't alloc context\n",
__func__);
spin_unlock_irqrestore(&priv->rxfidx_lock,
flags);
break;
}
ctx = priv->mpt_rxfidx[priv->mpt_rxfidx_tail--];
skb = priv->RcvCtl[ctx].skb;
if (skb && (priv->RcvCtl[ctx].len != len)) {
pci_unmap_single(mpt_dev->pcidev,
priv->RcvCtl[ctx].dma,
priv->RcvCtl[ctx].len,
PCI_DMA_FROMDEVICE);
dev_kfree_skb(priv->RcvCtl[ctx].skb);
skb = priv->RcvCtl[ctx].skb = NULL;
}
if (skb == NULL) {
skb = dev_alloc_skb(len);
if (skb == NULL) {
printk (KERN_WARNING
MYNAM "/%s: Can't alloc skb\n",
__func__);
priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = ctx;
spin_unlock_irqrestore(&priv->rxfidx_lock, flags);
break;
}
dma = pci_map_single(mpt_dev->pcidev, skb->data,
len, PCI_DMA_FROMDEVICE);
priv->RcvCtl[ctx].skb = skb;
priv->RcvCtl[ctx].dma = dma;
priv->RcvCtl[ctx].len = len;
}
spin_unlock_irqrestore(&priv->rxfidx_lock, flags);
pTrans->ContextSize = sizeof(u32);
pTrans->DetailsLength = 0;
pTrans->Flags = 0;
pTrans->TransactionContext[0] = cpu_to_le32(ctx);
pSimple = (SGESimple64_t *) pTrans->TransactionDetails;
pSimple->FlagsLength = cpu_to_le32(
((MPI_SGE_FLAGS_END_OF_BUFFER |
MPI_SGE_FLAGS_SIMPLE_ELEMENT |
MPI_SGE_FLAGS_64_BIT_ADDRESSING) << MPI_SGE_FLAGS_SHIFT) | len);
pSimple->Address.Low = cpu_to_le32((u32) priv->RcvCtl[ctx].dma);
if (sizeof(dma_addr_t) > sizeof(u32))
pSimple->Address.High = cpu_to_le32((u32) ((u64) priv->RcvCtl[ctx].dma >> 32));
else
pSimple->Address.High = 0;
pTrans = (SGETransaction32_t *) (pSimple + 1);
}
if (pSimple == NULL) {
/**/ printk (KERN_WARNING MYNAM "/%s: No buckets posted\n",
/**/ __func__);
mpt_free_msg_frame(mpt_dev, mf);
goto out;
}
pSimple->FlagsLength |= cpu_to_le32(MPI_SGE_FLAGS_END_OF_LIST << MPI_SGE_FLAGS_SHIFT);
pRecvReq->BucketCount = cpu_to_le32(i);
/* printk(KERN_INFO MYNAM ": posting buckets\n ");
* for (i = 0; i < j + 2; i ++)
* printk (" %08x", le32_to_cpu(msg[i]));
* printk ("\n");
*/
mpt_put_msg_frame(LanCtx, mpt_dev, mf);
priv->total_posted += i;
buckets -= i;
atomic_add(i, &priv->buckets_out);
}
out:
dioprintk((KERN_INFO MYNAM "/%s: End_buckets = %u, priv->buckets_out = %u\n",
__func__, buckets, atomic_read(&priv->buckets_out)));
dioprintk((KERN_INFO MYNAM "/%s: Posted %u buckets and received %u back\n",
__func__, priv->total_posted, priv->total_received));
clear_bit(0, &priv->post_buckets_active);
}
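/*
 * Worked example, not part of the driver: "max" above is the number of
 * buckets that fit in one receive-post request frame - the frame size
 * minus the fixed request header, divided by the per-bucket cost of one
 * 32-bit-context transaction element plus one 64-bit simple SGE. With
 * hypothetical sizes of req_sz = 128, fixed header = 48 and per-bucket
 * cost = 20 bytes, at most (128 - 48) / 20 = 4 buckets go out per frame.
 */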
static void
mpt_lan_post_receive_buckets_work(struct work_struct *work)
{
mpt_lan_post_receive_buckets(container_of(work, struct mpt_lan_priv,
post_buckets_task.work));
}
static const struct net_device_ops mpt_netdev_ops = {
.ndo_open = mpt_lan_open,
.ndo_stop = mpt_lan_close,
.ndo_start_xmit = mpt_lan_sdu_send,
.ndo_change_mtu = mpt_lan_change_mtu,
.ndo_tx_timeout = mpt_lan_tx_timeout,
};
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
static struct net_device *
mpt_register_lan_device (MPT_ADAPTER *mpt_dev, int pnum)
{
struct net_device *dev;
struct mpt_lan_priv *priv;
u8 HWaddr[FC_ALEN], *a;
dev = alloc_fcdev(sizeof(struct mpt_lan_priv));
if (!dev)
return NULL;
dev->mtu = MPT_LAN_MTU;
priv = netdev_priv(dev);
priv->dev = dev;
priv->mpt_dev = mpt_dev;
priv->pnum = pnum;
INIT_DELAYED_WORK(&priv->post_buckets_task,
mpt_lan_post_receive_buckets_work);
priv->post_buckets_active = 0;
dlprintk((KERN_INFO MYNAM "@%d: bucketlen = %d\n",
__LINE__, dev->mtu + dev->hard_header_len + 4));
atomic_set(&priv->buckets_out, 0);
priv->total_posted = 0;
priv->total_received = 0;
priv->max_buckets_out = max_buckets_out;
if (mpt_dev->pfacts[0].MaxLanBuckets < max_buckets_out)
priv->max_buckets_out = mpt_dev->pfacts[0].MaxLanBuckets;
dlprintk((KERN_INFO MYNAM "@%d: MaxLanBuckets=%d, max_buckets_out/priv=%d/%d\n",
__LINE__,
mpt_dev->pfacts[0].MaxLanBuckets,
max_buckets_out,
priv->max_buckets_out));
priv->bucketthresh = priv->max_buckets_out * 2 / 3;
spin_lock_init(&priv->txfidx_lock);
spin_lock_init(&priv->rxfidx_lock);
/* Grab pre-fetched LANPage1 stuff. :-) */
a = (u8 *) &mpt_dev->lan_cnfg_page1.HardwareAddressLow;
HWaddr[0] = a[5];
HWaddr[1] = a[4];
HWaddr[2] = a[3];
HWaddr[3] = a[2];
HWaddr[4] = a[1];
HWaddr[5] = a[0];
dev->addr_len = FC_ALEN;
memcpy(dev->dev_addr, HWaddr, FC_ALEN);
memset(dev->broadcast, 0xff, FC_ALEN);
/* The Tx queue is 127 deep on the 909.
* Give ourselves some breathing room.
*/
priv->tx_max_out = (tx_max_out_p <= MPT_TX_MAX_OUT_LIM) ?
tx_max_out_p : MPT_TX_MAX_OUT_LIM;
dev->netdev_ops = &mpt_netdev_ops;
dev->watchdog_timeo = MPT_LAN_TX_TIMEOUT;
dlprintk((KERN_INFO MYNAM ": Finished registering dev "
"and setting initial values\n"));
if (register_netdev(dev) != 0) {
free_netdev(dev);
dev = NULL;
}
return dev;
}
static int
mptlan_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
MPT_ADAPTER *ioc = pci_get_drvdata(pdev);
struct net_device *dev;
int i;
for (i = 0; i < ioc->facts.NumberOfPorts; i++) {
printk(KERN_INFO MYNAM ": %s: PortNum=%x, "
"ProtocolFlags=%02Xh (%c%c%c%c)\n",
ioc->name, ioc->pfacts[i].PortNumber,
ioc->pfacts[i].ProtocolFlags,
MPT_PROTOCOL_FLAGS_c_c_c_c(
ioc->pfacts[i].ProtocolFlags));
if (!(ioc->pfacts[i].ProtocolFlags &
MPI_PORTFACTS_PROTOCOL_LAN)) {
printk(KERN_INFO MYNAM ": %s: Hmmm... LAN protocol "
"seems to be disabled on this adapter port!\n",
ioc->name);
continue;
}
dev = mpt_register_lan_device(ioc, i);
if (!dev) {
printk(KERN_ERR MYNAM ": %s: Unable to register "
"port%d as a LAN device\n", ioc->name,
ioc->pfacts[i].PortNumber);
continue;
}
printk(KERN_INFO MYNAM ": %s: Fusion MPT LAN device "
"registered as '%s'\n", ioc->name, dev->name);
printk(KERN_INFO MYNAM ": %s/%s: "
"LanAddr = %pM\n",
IOC_AND_NETDEV_NAMES_s_s(dev),
dev->dev_addr);
ioc->netdev = dev;
return 0;
}
return -ENODEV;
}
static void
mptlan_remove(struct pci_dev *pdev)
{
MPT_ADAPTER *ioc = pci_get_drvdata(pdev);
struct net_device *dev = ioc->netdev;
if(dev != NULL) {
unregister_netdev(dev);
free_netdev(dev);
}
}
static struct mpt_pci_driver mptlan_driver = {
.probe = mptlan_probe,
.remove = mptlan_remove,
};
static int __init mpt_lan_init (void)
{
show_mptmod_ver(LANAME, LANVER);
LanCtx = mpt_register(lan_reply, MPTLAN_DRIVER,
"lan_reply");
if (LanCtx <= 0) {
printk (KERN_ERR MYNAM ": Failed to register with MPT base driver\n");
return -EBUSY;
}
dlprintk((KERN_INFO MYNAM ": assigned context of %d\n", LanCtx));
if (mpt_reset_register(LanCtx, mpt_lan_ioc_reset)) {
printk(KERN_ERR MYNAM ": Eieee! unable to register a reset "
"handler with mptbase! The world is at an end! "
"Everything is fading to black! Goodbye.\n");
return -EBUSY;
}
dlprintk((KERN_INFO MYNAM ": Registered for IOC reset notifications\n"));
mpt_device_driver_register(&mptlan_driver, MPTLAN_DRIVER);
return 0;
}
static void __exit mpt_lan_exit(void)
{
mpt_device_driver_deregister(MPTLAN_DRIVER);
mpt_reset_deregister(LanCtx);
if (LanCtx) {
mpt_deregister(LanCtx);
LanCtx = MPT_MAX_PROTOCOL_DRIVERS;
}
}
module_init(mpt_lan_init);
module_exit(mpt_lan_exit);
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
static unsigned short
mpt_lan_type_trans(struct sk_buff *skb, struct net_device *dev)
{
struct mpt_lan_ohdr *fch = (struct mpt_lan_ohdr *)skb->data;
struct fcllc *fcllc;
skb_reset_mac_header(skb);
skb_pull(skb, sizeof(struct mpt_lan_ohdr));
if (fch->dtype == htons(0xffff)) {
u32 *p = (u32 *) fch;
swab32s(p + 0);
swab32s(p + 1);
swab32s(p + 2);
swab32s(p + 3);
printk (KERN_WARNING MYNAM ": %s: WARNING - Broadcast swap F/W bug detected!\n",
NETDEV_PTR_TO_IOC_NAME_s(dev));
printk (KERN_WARNING MYNAM ": Please update sender @ MAC_addr = %pM\n",
fch->saddr);
}
if (*fch->daddr & 1) {
if (!memcmp(fch->daddr, dev->broadcast, FC_ALEN)) {
skb->pkt_type = PACKET_BROADCAST;
} else {
skb->pkt_type = PACKET_MULTICAST;
}
} else {
if (memcmp(fch->daddr, dev->dev_addr, FC_ALEN)) {
skb->pkt_type = PACKET_OTHERHOST;
} else {
skb->pkt_type = PACKET_HOST;
}
}
fcllc = (struct fcllc *)skb->data;
/* Strip the SNAP header from ARP packets since we don't
* pass them through to the 802.2/SNAP layers.
*/
if (fcllc->dsap == EXTENDED_SAP &&
(fcllc->ethertype == htons(ETH_P_IP) ||
fcllc->ethertype == htons(ETH_P_ARP))) {
skb_pull(skb, sizeof(struct fcllc));
return fcllc->ethertype;
}
return htons(ETH_P_802_2);
}
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
| gpl-2.0 |
HridayHS/Lightning | lib/zlib_deflate/deftree.c | 13743 | 40510 | /* +++ trees.c */
/* trees.c -- output deflated data using Huffman coding
* Copyright (C) 1995-1996 Jean-loup Gailly
* For conditions of distribution and use, see copyright notice in zlib.h
*/
/*
* ALGORITHM
*
* The "deflation" process uses several Huffman trees. The more
* common source values are represented by shorter bit sequences.
*
* Each code tree is stored in a compressed form which is itself
* a Huffman encoding of the lengths of all the code strings (in
* ascending order by source values). The actual code strings are
* reconstructed from the lengths in the inflate process, as described
* in the deflate specification.
*
* REFERENCES
*
* Deutsch, L.P.,"'Deflate' Compressed Data Format Specification".
* Available in ftp.uu.net:/pub/archiving/zip/doc/deflate-1.1.doc
*
* Storer, James A.
* Data Compression: Methods and Theory, pp. 49-50.
* Computer Science Press, 1988. ISBN 0-7167-8156-5.
*
* Sedgewick, R.
* Algorithms, p290.
* Addison-Wesley, 1983. ISBN 0-201-06672-6.
*/
/* From: trees.c,v 1.11 1996/07/24 13:41:06 me Exp $ */
/* #include "deflate.h" */
#include <linux/zutil.h>
#include "defutil.h"
#ifdef DEBUG_ZLIB
# include <ctype.h>
#endif
/* ===========================================================================
* Constants
*/
#define MAX_BL_BITS 7
/* Bit length codes must not exceed MAX_BL_BITS bits */
#define END_BLOCK 256
/* end of block literal code */
#define REP_3_6 16
/* repeat previous bit length 3-6 times (2 bits of repeat count) */
#define REPZ_3_10 17
/* repeat a zero length 3-10 times (3 bits of repeat count) */
#define REPZ_11_138 18
/* repeat a zero length 11-138 times (7 bits of repeat count) */
static const int extra_lbits[LENGTH_CODES] /* extra bits for each length code */
= {0,0,0,0,0,0,0,0,1,1,1,1,2,2,2,2,3,3,3,3,4,4,4,4,5,5,5,5,0};
static const int extra_dbits[D_CODES] /* extra bits for each distance code */
= {0,0,0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,8,8,9,9,10,10,11,11,12,12,13,13};
static const int extra_blbits[BL_CODES]/* extra bits for each bit length code */
= {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,3,7};
static const uch bl_order[BL_CODES]
= {16,17,18,0,8,7,9,6,10,5,11,4,12,3,13,2,14,1,15};
/* The lengths of the bit length codes are sent in order of decreasing
* probability, to avoid transmitting the lengths for unused bit length codes.
*/
#define Buf_size (8 * 2*sizeof(char))
/* Number of bits used within bi_buf. (bi_buf might be implemented on
* more than 16 bits on some systems.)
*/
/* ===========================================================================
* Local data. These are initialized only once.
*/
static ct_data static_ltree[L_CODES+2];
/* The static literal tree. Since the bit lengths are imposed, there is no
* need for the L_CODES extra codes used during heap construction. However
* The codes 286 and 287 are needed to build a canonical tree (see zlib_tr_init
* below).
*/
static ct_data static_dtree[D_CODES];
/* The static distance tree. (Actually a trivial tree since all codes use
* 5 bits.)
*/
static uch dist_code[512];
/* distance codes. The first 256 values correspond to the distances
* 3 .. 258, the last 256 values correspond to the top 8 bits of
* the 15 bit distances.
*/
static uch length_code[MAX_MATCH-MIN_MATCH+1];
/* length code for each normalized match length (0 == MIN_MATCH) */
static int base_length[LENGTH_CODES];
/* First normalized length for each code (0 = MIN_MATCH) */
static int base_dist[D_CODES];
/* First normalized distance for each code (0 = distance of 1) */
struct static_tree_desc_s {
const ct_data *static_tree; /* static tree or NULL */
const int *extra_bits; /* extra bits for each code or NULL */
int extra_base; /* base index for extra_bits */
int elems; /* max number of elements in the tree */
int max_length; /* max bit length for the codes */
};
static static_tree_desc static_l_desc =
{static_ltree, extra_lbits, LITERALS+1, L_CODES, MAX_BITS};
static static_tree_desc static_d_desc =
{static_dtree, extra_dbits, 0, D_CODES, MAX_BITS};
static static_tree_desc static_bl_desc =
{(const ct_data *)0, extra_blbits, 0, BL_CODES, MAX_BL_BITS};
/* ===========================================================================
* Local (static) routines in this file.
*/
static void tr_static_init (void);
static void init_block (deflate_state *s);
static void pqdownheap (deflate_state *s, ct_data *tree, int k);
static void gen_bitlen (deflate_state *s, tree_desc *desc);
static void gen_codes (ct_data *tree, int max_code, ush *bl_count);
static void build_tree (deflate_state *s, tree_desc *desc);
static void scan_tree (deflate_state *s, ct_data *tree, int max_code);
static void send_tree (deflate_state *s, ct_data *tree, int max_code);
static int build_bl_tree (deflate_state *s);
static void send_all_trees (deflate_state *s, int lcodes, int dcodes,
int blcodes);
static void compress_block (deflate_state *s, ct_data *ltree,
ct_data *dtree);
static void set_data_type (deflate_state *s);
static unsigned bi_reverse (unsigned value, int length);
static void bi_windup (deflate_state *s);
static void bi_flush (deflate_state *s);
static void copy_block (deflate_state *s, char *buf, unsigned len,
int header);
#ifndef DEBUG_ZLIB
# define send_code(s, c, tree) send_bits(s, tree[c].Code, tree[c].Len)
/* Send a code of the given tree. c and tree must not have side effects */
#else /* DEBUG_ZLIB */
# define send_code(s, c, tree) \
{ if (z_verbose>2) fprintf(stderr,"\ncd %3d ",(c)); \
send_bits(s, tree[c].Code, tree[c].Len); }
#endif
#define d_code(dist) \
((dist) < 256 ? dist_code[dist] : dist_code[256+((dist)>>7)])
/* Mapping from a distance to a distance code. dist is the distance - 1 and
* must not have side effects. dist_code[256] and dist_code[257] are never
* used.
*/
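/*
 * Worked example (a hand-computed sketch; the values follow from the
 * tables built in tr_static_init() below, i.e. the standard DEFLATE
 * distance table): a match distance of 5 gives dist = 4; since 4 < 256,
 * d_code(4) = dist_code[4] = 4, a code with 1 extra bit. A match
 * distance of 300 gives dist = 299; since 299 >= 256,
 * d_code(299) = dist_code[256 + (299 >> 7)] = dist_code[258] = 16,
 * a code with 7 extra bits carrying 299 - base_dist[16] = 43.
 */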
/* ===========================================================================
* Send a value on a given number of bits.
* IN assertion: length <= 16 and value fits in length bits.
*/
#ifdef DEBUG_ZLIB
static void send_bits (deflate_state *s, int value, int length);
static void send_bits(
deflate_state *s,
int value, /* value to send */
int length /* number of bits */
)
{
Tracevv((stderr," l %2d v %4x ", length, value));
Assert(length > 0 && length <= 15, "invalid length");
s->bits_sent += (ulg)length;
/* If not enough room in bi_buf, use (valid) bits from bi_buf and
* (16 - bi_valid) bits from value, leaving (width - (16-bi_valid))
* unused bits in value.
*/
if (s->bi_valid > (int)Buf_size - length) {
s->bi_buf |= (value << s->bi_valid);
put_short(s, s->bi_buf);
s->bi_buf = (ush)value >> (Buf_size - s->bi_valid);
s->bi_valid += length - Buf_size;
} else {
s->bi_buf |= value << s->bi_valid;
s->bi_valid += length;
}
}
#else /* !DEBUG_ZLIB */
#define send_bits(s, value, length) \
{ int len = length;\
if (s->bi_valid > (int)Buf_size - len) {\
int val = value;\
s->bi_buf |= (val << s->bi_valid);\
put_short(s, s->bi_buf);\
s->bi_buf = (ush)val >> (Buf_size - s->bi_valid);\
s->bi_valid += len - Buf_size;\
} else {\
s->bi_buf |= (value) << s->bi_valid;\
s->bi_valid += len;\
}\
}
#endif /* DEBUG_ZLIB */
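/*
 * Illustrative trace of the bit buffer (Buf_size is 16 bits here; the
 * numbers are hand-computed, not taken from a real run): suppose
 * bi_valid == 14 and a 5-bit value is sent. Since 14 > 16 - 5, the low
 * 2 bits of the value are ORed into bi_buf, the full 16-bit bi_buf is
 * flushed with put_short(), the remaining 3 bits become the new bi_buf,
 * and bi_valid ends up as 14 + 5 - 16 = 3. In the other branch the value
 * is simply ORed in and bi_valid grows by the length.
 */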
/* ===========================================================================
* Initialize the various 'constant' tables. In a multi-threaded environment,
* this function may be called by two threads concurrently, but this is
* harmless since both invocations do exactly the same thing.
*/
static void tr_static_init(void)
{
static int static_init_done;
int n; /* iterates over tree elements */
int bits; /* bit counter */
int length; /* length value */
int code; /* code value */
int dist; /* distance index */
ush bl_count[MAX_BITS+1];
/* number of codes at each bit length for an optimal tree */
if (static_init_done) return;
/* Initialize the mapping length (0..255) -> length code (0..28) */
length = 0;
for (code = 0; code < LENGTH_CODES-1; code++) {
base_length[code] = length;
for (n = 0; n < (1<<extra_lbits[code]); n++) {
length_code[length++] = (uch)code;
}
}
Assert (length == 256, "tr_static_init: length != 256");
/* Note that the length 255 (match length 258) can be represented
* in two different ways: code 284 + 5 bits or code 285, so we
* overwrite length_code[255] to use the best encoding:
*/
length_code[length-1] = (uch)code;
/* Initialize the mapping dist (0..32K) -> dist code (0..29) */
dist = 0;
for (code = 0 ; code < 16; code++) {
base_dist[code] = dist;
for (n = 0; n < (1<<extra_dbits[code]); n++) {
dist_code[dist++] = (uch)code;
}
}
Assert (dist == 256, "tr_static_init: dist != 256");
dist >>= 7; /* from now on, all distances are divided by 128 */
for ( ; code < D_CODES; code++) {
base_dist[code] = dist << 7;
for (n = 0; n < (1<<(extra_dbits[code]-7)); n++) {
dist_code[256 + dist++] = (uch)code;
}
}
Assert (dist == 256, "tr_static_init: 256+dist != 512");
/* Construct the codes of the static literal tree */
for (bits = 0; bits <= MAX_BITS; bits++) bl_count[bits] = 0;
n = 0;
while (n <= 143) static_ltree[n++].Len = 8, bl_count[8]++;
while (n <= 255) static_ltree[n++].Len = 9, bl_count[9]++;
while (n <= 279) static_ltree[n++].Len = 7, bl_count[7]++;
while (n <= 287) static_ltree[n++].Len = 8, bl_count[8]++;
/* Codes 286 and 287 do not exist, but we must include them in the
* tree construction to get a canonical Huffman tree (longest code
* all ones)
*/
gen_codes((ct_data *)static_ltree, L_CODES+1, bl_count);
/* The static distance tree is trivial: */
for (n = 0; n < D_CODES; n++) {
static_dtree[n].Len = 5;
static_dtree[n].Code = bi_reverse((unsigned)n, 5);
}
static_init_done = 1;
}
/* ===========================================================================
* Initialize the tree data structures for a new zlib stream.
*/
void zlib_tr_init(
deflate_state *s
)
{
tr_static_init();
s->compressed_len = 0L;
s->l_desc.dyn_tree = s->dyn_ltree;
s->l_desc.stat_desc = &static_l_desc;
s->d_desc.dyn_tree = s->dyn_dtree;
s->d_desc.stat_desc = &static_d_desc;
s->bl_desc.dyn_tree = s->bl_tree;
s->bl_desc.stat_desc = &static_bl_desc;
s->bi_buf = 0;
s->bi_valid = 0;
s->last_eob_len = 8; /* enough lookahead for inflate */
#ifdef DEBUG_ZLIB
s->bits_sent = 0L;
#endif
/* Initialize the first block of the first file: */
init_block(s);
}
/* ===========================================================================
* Initialize a new block.
*/
static void init_block(
deflate_state *s
)
{
int n; /* iterates over tree elements */
/* Initialize the trees. */
for (n = 0; n < L_CODES; n++) s->dyn_ltree[n].Freq = 0;
for (n = 0; n < D_CODES; n++) s->dyn_dtree[n].Freq = 0;
for (n = 0; n < BL_CODES; n++) s->bl_tree[n].Freq = 0;
s->dyn_ltree[END_BLOCK].Freq = 1;
s->opt_len = s->static_len = 0L;
s->last_lit = s->matches = 0;
}
#define SMALLEST 1
/* Index within the heap array of least frequent node in the Huffman tree */
/* ===========================================================================
* Remove the smallest element from the heap and recreate the heap with
* one less element. Updates heap and heap_len.
*/
#define pqremove(s, tree, top) \
{\
top = s->heap[SMALLEST]; \
s->heap[SMALLEST] = s->heap[s->heap_len--]; \
pqdownheap(s, tree, SMALLEST); \
}
/* ===========================================================================
* Compares two subtrees, using the tree depth as tie breaker when
* the subtrees have equal frequency. This minimizes the worst case length.
*/
#define smaller(tree, n, m, depth) \
(tree[n].Freq < tree[m].Freq || \
(tree[n].Freq == tree[m].Freq && depth[n] <= depth[m]))
/* ===========================================================================
* Restore the heap property by moving down the tree starting at node k,
* exchanging a node with the smallest of its two sons if necessary, stopping
* when the heap property is re-established (each father smaller than its
* two sons).
*/
static void pqdownheap(
deflate_state *s,
ct_data *tree, /* the tree to restore */
int k /* node to move down */
)
{
int v = s->heap[k];
int j = k << 1; /* left son of k */
while (j <= s->heap_len) {
/* Set j to the smallest of the two sons: */
if (j < s->heap_len &&
smaller(tree, s->heap[j+1], s->heap[j], s->depth)) {
j++;
}
/* Exit if v is smaller than both sons */
if (smaller(tree, v, s->heap[j], s->depth)) break;
/* Exchange v with the smallest son */
s->heap[k] = s->heap[j]; k = j;
/* And continue down the tree, setting j to the left son of k */
j <<= 1;
}
s->heap[k] = v;
}
/* ===========================================================================
* Compute the optimal bit lengths for a tree and update the total bit length
* for the current block.
* IN assertion: the fields freq and dad are set, heap[heap_max] and
* above are the tree nodes sorted by increasing frequency.
* OUT assertions: the field len is set to the optimal bit length, the
* array bl_count contains the frequencies for each bit length.
* The length opt_len is updated; static_len is also updated if stree is
* not null.
*/
static void gen_bitlen(
deflate_state *s,
tree_desc *desc /* the tree descriptor */
)
{
ct_data *tree = desc->dyn_tree;
int max_code = desc->max_code;
const ct_data *stree = desc->stat_desc->static_tree;
const int *extra = desc->stat_desc->extra_bits;
int base = desc->stat_desc->extra_base;
int max_length = desc->stat_desc->max_length;
int h; /* heap index */
int n, m; /* iterate over the tree elements */
int bits; /* bit length */
int xbits; /* extra bits */
ush f; /* frequency */
int overflow = 0; /* number of elements with bit length too large */
for (bits = 0; bits <= MAX_BITS; bits++) s->bl_count[bits] = 0;
/* In a first pass, compute the optimal bit lengths (which may
* overflow in the case of the bit length tree).
*/
tree[s->heap[s->heap_max]].Len = 0; /* root of the heap */
for (h = s->heap_max+1; h < HEAP_SIZE; h++) {
n = s->heap[h];
bits = tree[tree[n].Dad].Len + 1;
if (bits > max_length) bits = max_length, overflow++;
tree[n].Len = (ush)bits;
/* We overwrite tree[n].Dad which is no longer needed */
if (n > max_code) continue; /* not a leaf node */
s->bl_count[bits]++;
xbits = 0;
if (n >= base) xbits = extra[n-base];
f = tree[n].Freq;
s->opt_len += (ulg)f * (bits + xbits);
if (stree) s->static_len += (ulg)f * (stree[n].Len + xbits);
}
if (overflow == 0) return;
Trace((stderr,"\nbit length overflow\n"));
/* This happens for example on obj2 and pic of the Calgary corpus */
/* Find the first bit length which could increase: */
do {
bits = max_length-1;
while (s->bl_count[bits] == 0) bits--;
s->bl_count[bits]--; /* move one leaf down the tree */
s->bl_count[bits+1] += 2; /* move one overflow item as its brother */
s->bl_count[max_length]--;
/* The brother of the overflow item also moves one step up,
* but this does not affect bl_count[max_length]
*/
overflow -= 2;
} while (overflow > 0);
/* Now recompute all bit lengths, scanning in increasing frequency.
* h is still equal to HEAP_SIZE. (It is simpler to reconstruct all
* lengths instead of fixing only the wrong ones. This idea is taken
* from 'ar' written by Haruhiko Okumura.)
*/
for (bits = max_length; bits != 0; bits--) {
n = s->bl_count[bits];
while (n != 0) {
m = s->heap[--h];
if (m > max_code) continue;
if (tree[m].Len != (unsigned) bits) {
Trace((stderr,"code %d bits %d->%d\n", m, tree[m].Len, bits));
s->opt_len += ((long)bits - (long)tree[m].Len)
*(long)tree[m].Freq;
tree[m].Len = (ush)bits;
}
n--;
}
}
}
/* ===========================================================================
* Generate the codes for a given tree and bit counts (which need not be
* optimal).
* IN assertion: the array bl_count contains the bit length statistics for
* the given tree and the field len is set for all tree elements.
* OUT assertion: the field code is set for all tree elements of non
* zero code length.
*/
static void gen_codes(
ct_data *tree, /* the tree to decorate */
int max_code, /* largest code with non zero frequency */
ush *bl_count /* number of codes at each bit length */
)
{
ush next_code[MAX_BITS+1]; /* next code value for each bit length */
ush code = 0; /* running code value */
int bits; /* bit index */
int n; /* code index */
/* The distribution counts are first used to generate the code values
* without bit reversal.
*/
for (bits = 1; bits <= MAX_BITS; bits++) {
next_code[bits] = code = (code + bl_count[bits-1]) << 1;
}
/* Check that the bit counts in bl_count are consistent. The last code
* must be all ones.
*/
Assert (code + bl_count[MAX_BITS]-1 == (1<<MAX_BITS)-1,
"inconsistent bit counts");
Tracev((stderr,"\ngen_codes: max_code %d ", max_code));
for (n = 0; n <= max_code; n++) {
int len = tree[n].Len;
if (len == 0) continue;
/* Now reverse the bits */
tree[n].Code = bi_reverse(next_code[len]++, len);
Tracecv(tree != static_ltree, (stderr,"\nn %3d %c l %2d c %4x (%x) ",
n, (isgraph(n) ? n : ' '), len, tree[n].Code, next_code[len]-1));
}
}
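/*
 * Small worked example of the canonical code assignment above, for a
 * hypothetical four-symbol alphabet A,B,C,D with bit lengths 2,1,3,3
 * (not data produced by this file): bl_count[1..3] = {1,1,2}, so
 * next_code[1..3] = {0,2,6}, and the codes assigned in symbol order are
 * A = "10", B = "0", C = "110", D = "111" (before bi_reverse() flips
 * them for LSB-first output).
 */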
/* ===========================================================================
* Construct one Huffman tree and assign the code bit strings and lengths.
* Update the total bit length for the current block.
* IN assertion: the field freq is set for all tree elements.
* OUT assertions: the fields len and code are set to the optimal bit length
* and corresponding code. The length opt_len is updated; static_len is
* also updated if stree is not null. The field max_code is set.
*/
static void build_tree(
deflate_state *s,
tree_desc *desc /* the tree descriptor */
)
{
ct_data *tree = desc->dyn_tree;
const ct_data *stree = desc->stat_desc->static_tree;
int elems = desc->stat_desc->elems;
int n, m; /* iterate over heap elements */
int max_code = -1; /* largest code with non zero frequency */
int node; /* new node being created */
/* Construct the initial heap, with least frequent element in
* heap[SMALLEST]. The sons of heap[n] are heap[2*n] and heap[2*n+1].
* heap[0] is not used.
*/
s->heap_len = 0, s->heap_max = HEAP_SIZE;
for (n = 0; n < elems; n++) {
if (tree[n].Freq != 0) {
s->heap[++(s->heap_len)] = max_code = n;
s->depth[n] = 0;
} else {
tree[n].Len = 0;
}
}
/* The pkzip format requires that at least one distance code exists,
* and that at least one bit should be sent even if there is only one
* possible code. So to avoid special checks later on we force at least
* two codes of non zero frequency.
*/
while (s->heap_len < 2) {
node = s->heap[++(s->heap_len)] = (max_code < 2 ? ++max_code : 0);
tree[node].Freq = 1;
s->depth[node] = 0;
s->opt_len--; if (stree) s->static_len -= stree[node].Len;
/* node is 0 or 1 so it does not have extra bits */
}
desc->max_code = max_code;
/* The elements heap[heap_len/2+1 .. heap_len] are leaves of the tree,
* establish sub-heaps of increasing lengths:
*/
for (n = s->heap_len/2; n >= 1; n--) pqdownheap(s, tree, n);
/* Construct the Huffman tree by repeatedly combining the least two
* frequent nodes.
*/
node = elems; /* next internal node of the tree */
do {
pqremove(s, tree, n); /* n = node of least frequency */
m = s->heap[SMALLEST]; /* m = node of next least frequency */
s->heap[--(s->heap_max)] = n; /* keep the nodes sorted by frequency */
s->heap[--(s->heap_max)] = m;
/* Create a new node father of n and m */
tree[node].Freq = tree[n].Freq + tree[m].Freq;
s->depth[node] = (uch) (max(s->depth[n], s->depth[m]) + 1);
tree[n].Dad = tree[m].Dad = (ush)node;
#ifdef DUMP_BL_TREE
if (tree == s->bl_tree) {
fprintf(stderr,"\nnode %d(%d), sons %d(%d) %d(%d)",
node, tree[node].Freq, n, tree[n].Freq, m, tree[m].Freq);
}
#endif
/* and insert the new node in the heap */
s->heap[SMALLEST] = node++;
pqdownheap(s, tree, SMALLEST);
} while (s->heap_len >= 2);
s->heap[--(s->heap_max)] = s->heap[SMALLEST];
/* At this point, the fields freq and dad are set. We can now
* generate the bit lengths.
*/
gen_bitlen(s, (tree_desc *)desc);
/* The field len is now set, we can generate the bit codes */
gen_codes ((ct_data *)tree, max_code, s->bl_count);
}
/* ===========================================================================
* Scan a literal or distance tree to determine the frequencies of the codes
* in the bit length tree.
*/
static void scan_tree(
deflate_state *s,
ct_data *tree, /* the tree to be scanned */
int max_code /* and its largest code of non zero frequency */
)
{
int n; /* iterates over all tree elements */
int prevlen = -1; /* last emitted length */
int curlen; /* length of current code */
int nextlen = tree[0].Len; /* length of next code */
int count = 0; /* repeat count of the current code */
int max_count = 7; /* max repeat count */
int min_count = 4; /* min repeat count */
if (nextlen == 0) max_count = 138, min_count = 3;
tree[max_code+1].Len = (ush)0xffff; /* guard */
for (n = 0; n <= max_code; n++) {
curlen = nextlen; nextlen = tree[n+1].Len;
if (++count < max_count && curlen == nextlen) {
continue;
} else if (count < min_count) {
s->bl_tree[curlen].Freq += count;
} else if (curlen != 0) {
if (curlen != prevlen) s->bl_tree[curlen].Freq++;
s->bl_tree[REP_3_6].Freq++;
} else if (count <= 10) {
s->bl_tree[REPZ_3_10].Freq++;
} else {
s->bl_tree[REPZ_11_138].Freq++;
}
count = 0; prevlen = curlen;
if (nextlen == 0) {
max_count = 138, min_count = 3;
} else if (curlen == nextlen) {
max_count = 6, min_count = 3;
} else {
max_count = 7, min_count = 4;
}
}
}
/* ===========================================================================
* Send a literal or distance tree in compressed form, using the codes in
* bl_tree.
*/
static void send_tree(
deflate_state *s,
ct_data *tree, /* the tree to be scanned */
int max_code /* and its largest code of non zero frequency */
)
{
int n; /* iterates over all tree elements */
int prevlen = -1; /* last emitted length */
int curlen; /* length of current code */
int nextlen = tree[0].Len; /* length of next code */
int count = 0; /* repeat count of the current code */
int max_count = 7; /* max repeat count */
int min_count = 4; /* min repeat count */
/* tree[max_code+1].Len = -1; */ /* guard already set */
if (nextlen == 0) max_count = 138, min_count = 3;
for (n = 0; n <= max_code; n++) {
curlen = nextlen; nextlen = tree[n+1].Len;
if (++count < max_count && curlen == nextlen) {
continue;
} else if (count < min_count) {
do { send_code(s, curlen, s->bl_tree); } while (--count != 0);
} else if (curlen != 0) {
if (curlen != prevlen) {
send_code(s, curlen, s->bl_tree); count--;
}
Assert(count >= 3 && count <= 6, " 3_6?");
send_code(s, REP_3_6, s->bl_tree); send_bits(s, count-3, 2);
} else if (count <= 10) {
send_code(s, REPZ_3_10, s->bl_tree); send_bits(s, count-3, 3);
} else {
send_code(s, REPZ_11_138, s->bl_tree); send_bits(s, count-11, 7);
}
count = 0; prevlen = curlen;
if (nextlen == 0) {
max_count = 138, min_count = 3;
} else if (curlen == nextlen) {
max_count = 6, min_count = 3;
} else {
max_count = 7, min_count = 4;
}
}
}
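/*
 * Two concrete cases of the run-length coding above, traced by hand from
 * the rules in this function: a run of exactly seven equal non-zero
 * lengths that differs from the previous length is sent as the length
 * code itself followed by REP_3_6 with a 2-bit repeat field of 6 - 3 = 3
 * (i.e. six more repeats); a run of 138 zero lengths is sent as a single
 * REPZ_11_138 code with 138 - 11 = 127 in its 7 extra bits.
 */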
/* ===========================================================================
* Construct the Huffman tree for the bit lengths and return the index in
* bl_order of the last bit length code to send.
*/
static int build_bl_tree(
deflate_state *s
)
{
int max_blindex; /* index of last bit length code of non zero freq */
/* Determine the bit length frequencies for literal and distance trees */
scan_tree(s, (ct_data *)s->dyn_ltree, s->l_desc.max_code);
scan_tree(s, (ct_data *)s->dyn_dtree, s->d_desc.max_code);
/* Build the bit length tree: */
build_tree(s, (tree_desc *)(&(s->bl_desc)));
/* opt_len now includes the length of the tree representations, except
* the lengths of the bit length codes and the 5+5+4 bits for the counts.
*/
/* Determine the number of bit length codes to send. The pkzip format
* requires that at least 4 bit length codes be sent. (appnote.txt says
* 3 but the actual value used is 4.)
*/
for (max_blindex = BL_CODES-1; max_blindex >= 3; max_blindex--) {
if (s->bl_tree[bl_order[max_blindex]].Len != 0) break;
}
/* Update opt_len to include the bit length tree and counts */
s->opt_len += 3*(max_blindex+1) + 5+5+4;
Tracev((stderr, "\ndyn trees: dyn %ld, stat %ld",
s->opt_len, s->static_len));
return max_blindex;
}
/* ===========================================================================
* Send the header for a block using dynamic Huffman trees: the counts, the
* lengths of the bit length codes, the literal tree and the distance tree.
* IN assertion: lcodes >= 257, dcodes >= 1, blcodes >= 4.
*/
static void send_all_trees(
deflate_state *s,
int lcodes, /* number of literal/length codes */
int dcodes, /* number of distance codes */
int blcodes /* number of bit length codes */
)
{
int rank; /* index in bl_order */
Assert (lcodes >= 257 && dcodes >= 1 && blcodes >= 4, "not enough codes");
Assert (lcodes <= L_CODES && dcodes <= D_CODES && blcodes <= BL_CODES,
"too many codes");
Tracev((stderr, "\nbl counts: "));
send_bits(s, lcodes-257, 5); /* not +255 as stated in appnote.txt */
send_bits(s, dcodes-1, 5);
send_bits(s, blcodes-4, 4); /* not -3 as stated in appnote.txt */
for (rank = 0; rank < blcodes; rank++) {
Tracev((stderr, "\nbl code %2d ", bl_order[rank]));
send_bits(s, s->bl_tree[bl_order[rank]].Len, 3);
}
Tracev((stderr, "\nbl tree: sent %ld", s->bits_sent));
send_tree(s, (ct_data *)s->dyn_ltree, lcodes-1); /* literal tree */
Tracev((stderr, "\nlit tree: sent %ld", s->bits_sent));
send_tree(s, (ct_data *)s->dyn_dtree, dcodes-1); /* distance tree */
Tracev((stderr, "\ndist tree: sent %ld", s->bits_sent));
}
/* ===========================================================================
* Send a stored block
*/
void zlib_tr_stored_block(
deflate_state *s,
char *buf, /* input block */
ulg stored_len, /* length of input block */
int eof /* true if this is the last block for a file */
)
{
send_bits(s, (STORED_BLOCK<<1)+eof, 3); /* send block type */
s->compressed_len = (s->compressed_len + 3 + 7) & (ulg)~7L;
s->compressed_len += (stored_len + 4) << 3;
copy_block(s, buf, (unsigned)stored_len, 1); /* with header */
}
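/*
 * Example of the length bookkeeping above (hand-computed): for a
 * 1000-byte stored block, compressed_len is first advanced past the
 * 3 block-type bits and rounded up to a byte boundary, then grows by
 * (1000 + 4) << 3 bits: 4 bytes of LEN/NLEN header plus the 1000
 * literal bytes emitted by copy_block().
 */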
/* Send just the `stored block' type code without any length bytes or data.
*/
void zlib_tr_stored_type_only(
deflate_state *s
)
{
send_bits(s, (STORED_BLOCK << 1), 3);
bi_windup(s);
s->compressed_len = (s->compressed_len + 3) & ~7L;
}
/* ===========================================================================
* Send one empty static block to give enough lookahead for inflate.
* This takes 10 bits, of which 7 may remain in the bit buffer.
* The current inflate code requires 9 bits of lookahead. If the
* last two codes for the previous block (real code plus EOB) were coded
* on 5 bits or less, inflate may have only 5+3 bits of lookahead to decode
* the last real code. In this case we send two empty static blocks instead
* of one. (There are no problems if the previous block is stored or fixed.)
* To simplify the code, we assume the worst case of last real code encoded
* on one bit only.
*/
void zlib_tr_align(
deflate_state *s
)
{
send_bits(s, STATIC_TREES<<1, 3);
send_code(s, END_BLOCK, static_ltree);
s->compressed_len += 10L; /* 3 for block type, 7 for EOB */
bi_flush(s);
/* Of the 10 bits for the empty block, we have already sent
* (10 - bi_valid) bits. The lookahead for the last real code (before
* the EOB of the previous block) was thus at least one plus the length
* of the EOB plus what we have just sent of the empty static block.
*/
if (1 + s->last_eob_len + 10 - s->bi_valid < 9) {
send_bits(s, STATIC_TREES<<1, 3);
send_code(s, END_BLOCK, static_ltree);
s->compressed_len += 10L;
bi_flush(s);
}
s->last_eob_len = 7;
}
/* ===========================================================================
* Determine the best encoding for the current block: dynamic trees, static
* trees or store, and output the encoded block to the zip file. This function
* returns the total compressed length for the file so far.
*/
ulg zlib_tr_flush_block(
deflate_state *s,
char *buf, /* input block, or NULL if too old */
ulg stored_len, /* length of input block */
int eof /* true if this is the last block for a file */
)
{
ulg opt_lenb, static_lenb; /* opt_len and static_len in bytes */
int max_blindex = 0; /* index of last bit length code of non zero freq */
/* Build the Huffman trees unless a stored block is forced */
if (s->level > 0) {
/* Check if the file is ascii or binary */
if (s->data_type == Z_UNKNOWN) set_data_type(s);
/* Construct the literal and distance trees */
build_tree(s, (tree_desc *)(&(s->l_desc)));
Tracev((stderr, "\nlit data: dyn %ld, stat %ld", s->opt_len,
s->static_len));
build_tree(s, (tree_desc *)(&(s->d_desc)));
Tracev((stderr, "\ndist data: dyn %ld, stat %ld", s->opt_len,
s->static_len));
/* At this point, opt_len and static_len are the total bit lengths of
* the compressed block data, excluding the tree representations.
*/
/* Build the bit length tree for the above two trees, and get the index
* in bl_order of the last bit length code to send.
*/
max_blindex = build_bl_tree(s);
/* Determine the best encoding. Compute first the block length in bytes*/
opt_lenb = (s->opt_len+3+7)>>3;
static_lenb = (s->static_len+3+7)>>3;
Tracev((stderr, "\nopt %lu(%lu) stat %lu(%lu) stored %lu lit %u ",
opt_lenb, s->opt_len, static_lenb, s->static_len, stored_len,
s->last_lit));
if (static_lenb <= opt_lenb) opt_lenb = static_lenb;
} else {
Assert(buf != (char*)0, "lost buf");
opt_lenb = static_lenb = stored_len + 5; /* force a stored block */
}
/* If compression failed and this is the first and last block,
* and if the .zip file can be seeked (to rewrite the local header),
* the whole file is transformed into a stored file:
*/
#ifdef STORED_FILE_OK
# ifdef FORCE_STORED_FILE
if (eof && s->compressed_len == 0L) { /* force stored file */
# else
if (stored_len <= opt_lenb && eof && s->compressed_len==0L && seekable()) {
# endif
/* Since LIT_BUFSIZE <= 2*WSIZE, the input data must be there: */
if (buf == (char*)0) error ("block vanished");
copy_block(s, buf, (unsigned)stored_len, 0); /* without header */
s->compressed_len = stored_len << 3;
s->method = STORED;
} else
#endif /* STORED_FILE_OK */
#ifdef FORCE_STORED
if (buf != (char*)0) { /* force stored block */
#else
if (stored_len+4 <= opt_lenb && buf != (char*)0) {
/* 4: two words for the lengths */
#endif
/* The test buf != NULL is only necessary if LIT_BUFSIZE > WSIZE.
* Otherwise we can't have processed more than WSIZE input bytes since
* the last block flush, because compression would have been
* successful. If LIT_BUFSIZE <= WSIZE, it is never too late to
* transform a block into a stored block.
*/
zlib_tr_stored_block(s, buf, stored_len, eof);
#ifdef FORCE_STATIC
} else if (static_lenb >= 0) { /* force static trees */
#else
} else if (static_lenb == opt_lenb) {
#endif
send_bits(s, (STATIC_TREES<<1)+eof, 3);
compress_block(s, (ct_data *)static_ltree, (ct_data *)static_dtree);
s->compressed_len += 3 + s->static_len;
} else {
send_bits(s, (DYN_TREES<<1)+eof, 3);
send_all_trees(s, s->l_desc.max_code+1, s->d_desc.max_code+1,
max_blindex+1);
compress_block(s, (ct_data *)s->dyn_ltree, (ct_data *)s->dyn_dtree);
s->compressed_len += 3 + s->opt_len;
}
Assert (s->compressed_len == s->bits_sent, "bad compressed size");
init_block(s);
if (eof) {
bi_windup(s);
s->compressed_len += 7; /* align on byte boundary */
}
Tracev((stderr,"\ncomprlen %lu(%lu) ", s->compressed_len>>3,
s->compressed_len-7*eof));
return s->compressed_len >> 3;
}
/* ===========================================================================
* Save the match info and tally the frequency counts. Return true if
* the current block must be flushed.
*/
int zlib_tr_tally(
deflate_state *s,
unsigned dist, /* distance of matched string */
unsigned lc /* match length-MIN_MATCH or unmatched char (if dist==0) */
)
{
s->d_buf[s->last_lit] = (ush)dist;
s->l_buf[s->last_lit++] = (uch)lc;
if (dist == 0) {
/* lc is the unmatched char */
s->dyn_ltree[lc].Freq++;
} else {
s->matches++;
/* Here, lc is the match length - MIN_MATCH */
dist--; /* dist = match distance - 1 */
Assert((ush)dist < (ush)MAX_DIST(s) &&
(ush)lc <= (ush)(MAX_MATCH-MIN_MATCH) &&
(ush)d_code(dist) < (ush)D_CODES, "zlib_tr_tally: bad match");
s->dyn_ltree[length_code[lc]+LITERALS+1].Freq++;
s->dyn_dtree[d_code(dist)].Freq++;
}
/* Try to guess if it is profitable to stop the current block here */
if ((s->last_lit & 0xfff) == 0 && s->level > 2) {
/* Compute an upper bound for the compressed length */
ulg out_length = (ulg)s->last_lit*8L;
ulg in_length = (ulg)((long)s->strstart - s->block_start);
int dcode;
for (dcode = 0; dcode < D_CODES; dcode++) {
out_length += (ulg)s->dyn_dtree[dcode].Freq *
(5L+extra_dbits[dcode]);
}
out_length >>= 3;
Tracev((stderr,"\nlast_lit %u, in %ld, out ~%ld(%ld%%) ",
s->last_lit, in_length, out_length,
100L - out_length*100L/in_length));
if (s->matches < s->last_lit/2 && out_length < in_length/2) return 1;
}
return (s->last_lit == s->lit_bufsize-1);
/* We avoid equality with lit_bufsize because of wraparound at 64K
* on 16 bit machines and because stored blocks are restricted to
* 64K-1 bytes.
*/
}
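/*
 * Rough numbers for the heuristic above (a hand-computed illustration,
 * not output of this code): the check only runs when last_lit is a
 * multiple of 4096 and the level is above 2. With 4096 symbols tallied,
 * the bound starts at 4096 * 8 = 32768 bits, each distance code adds
 * Freq * (5 + extra_dbits) bits, and the total is converted to bytes.
 * The block is flushed early only if fewer than half of the symbols were
 * matches and that byte estimate is below half of the input consumed.
 */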
/* ===========================================================================
* Send the block data compressed using the given Huffman trees
*/
static void compress_block(
deflate_state *s,
ct_data *ltree, /* literal tree */
ct_data *dtree /* distance tree */
)
{
unsigned dist; /* distance of matched string */
int lc; /* match length or unmatched char (if dist == 0) */
unsigned lx = 0; /* running index in l_buf */
unsigned code; /* the code to send */
int extra; /* number of extra bits to send */
if (s->last_lit != 0) do {
dist = s->d_buf[lx];
lc = s->l_buf[lx++];
if (dist == 0) {
send_code(s, lc, ltree); /* send a literal byte */
Tracecv(isgraph(lc), (stderr," '%c' ", lc));
} else {
/* Here, lc is the match length - MIN_MATCH */
code = length_code[lc];
send_code(s, code+LITERALS+1, ltree); /* send the length code */
extra = extra_lbits[code];
if (extra != 0) {
lc -= base_length[code];
send_bits(s, lc, extra); /* send the extra length bits */
}
dist--; /* dist is now the match distance - 1 */
code = d_code(dist);
Assert (code < D_CODES, "bad d_code");
send_code(s, code, dtree); /* send the distance code */
extra = extra_dbits[code];
if (extra != 0) {
dist -= base_dist[code];
send_bits(s, dist, extra); /* send the extra distance bits */
}
} /* literal or match pair ? */
/* Check that the overlay between pending_buf and d_buf+l_buf is ok: */
Assert(s->pending < s->lit_bufsize + 2*lx, "pendingBuf overflow");
} while (lx < s->last_lit);
send_code(s, END_BLOCK, ltree);
s->last_eob_len = ltree[END_BLOCK].Len;
}
/* ===========================================================================
* Set the data type to ASCII or BINARY, using a crude approximation:
* binary if more than 20% of the bytes are <= 6 or >= 128, ascii otherwise.
* IN assertion: the fields freq of dyn_ltree are set and the total of all
* frequencies does not exceed 64K (to fit in an int on 16 bit machines).
*/
static void set_data_type(
deflate_state *s
)
{
int n = 0;
unsigned ascii_freq = 0;
unsigned bin_freq = 0;
while (n < 7) bin_freq += s->dyn_ltree[n++].Freq;
while (n < 128) ascii_freq += s->dyn_ltree[n++].Freq;
while (n < LITERALS) bin_freq += s->dyn_ltree[n++].Freq;
s->data_type = (Byte)(bin_freq > (ascii_freq >> 2) ? Z_BINARY : Z_ASCII);
}
/* ===========================================================================
* Copy a stored block, storing first the length and its
* one's complement if requested.
*/
static void copy_block(
deflate_state *s,
char *buf, /* the input data */
unsigned len, /* its length */
int header /* true if block header must be written */
)
{
bi_windup(s); /* align on byte boundary */
s->last_eob_len = 8; /* enough lookahead for inflate */
if (header) {
put_short(s, (ush)len);
put_short(s, (ush)~len);
#ifdef DEBUG_ZLIB
s->bits_sent += 2*16;
#endif
}
#ifdef DEBUG_ZLIB
s->bits_sent += (ulg)len<<3;
#endif
/* bundle up the put_byte(s, *buf++) calls */
memcpy(&s->pending_buf[s->pending], buf, len);
s->pending += len;
}
| gpl-2.0 |
patholden/linux | drivers/tty/serial/omap-serial.c | 176 | 49943 | /*
* Driver for OMAP-UART controller.
* Based on drivers/serial/8250.c
*
* Copyright (C) 2010 Texas Instruments.
*
* Authors:
* Govindraj R <govindraj.raja@ti.com>
* Thara Gopinath <thara@ti.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* Note: This driver is kept separate from the 8250 driver because we
* cannot overload the 8250 driver with OMAP platform-specific
* configuration. A separate driver makes it easier to implement features
* like DMA, hardware flow control and software flow control as required
* on the OMAP platform.
*/
#if defined(CONFIG_SERIAL_OMAP_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
#define SUPPORT_SYSRQ
#endif
#include <linux/module.h>
#include <linux/init.h>
#include <linux/console.h>
#include <linux/serial_reg.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/tty.h>
#include <linux/tty_flip.h>
#include <linux/platform_device.h>
#include <linux/io.h>
#include <linux/clk.h>
#include <linux/serial_core.h>
#include <linux/irq.h>
#include <linux/pm_runtime.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/gpio.h>
#include <linux/of_gpio.h>
#include <linux/platform_data/serial-omap.h>
#include <dt-bindings/gpio/gpio.h>
#define OMAP_MAX_HSUART_PORTS 10
#define UART_BUILD_REVISION(x, y) (((x) << 8) | (y))
#define OMAP_UART_REV_42 0x0402
#define OMAP_UART_REV_46 0x0406
#define OMAP_UART_REV_52 0x0502
#define OMAP_UART_REV_63 0x0603
#define OMAP_UART_TX_WAKEUP_EN BIT(7)
/* Feature flags */
#define OMAP_UART_WER_HAS_TX_WAKEUP BIT(0)
#define UART_ERRATA_i202_MDR1_ACCESS BIT(0)
#define UART_ERRATA_i291_DMA_FORCEIDLE BIT(1)
#define DEFAULT_CLK_SPEED 48000000 /* 48Mhz */
/* SCR register bitmasks */
#define OMAP_UART_SCR_RX_TRIG_GRANU1_MASK (1 << 7)
#define OMAP_UART_SCR_TX_TRIG_GRANU1_MASK (1 << 6)
#define OMAP_UART_SCR_TX_EMPTY (1 << 3)
/* FCR register bitmasks */
#define OMAP_UART_FCR_RX_FIFO_TRIG_MASK (0x3 << 6)
#define OMAP_UART_FCR_TX_FIFO_TRIG_MASK (0x3 << 4)
/* MVR register bitmasks */
#define OMAP_UART_MVR_SCHEME_SHIFT 30
#define OMAP_UART_LEGACY_MVR_MAJ_MASK 0xf0
#define OMAP_UART_LEGACY_MVR_MAJ_SHIFT 4
#define OMAP_UART_LEGACY_MVR_MIN_MASK 0x0f
#define OMAP_UART_MVR_MAJ_MASK 0x700
#define OMAP_UART_MVR_MAJ_SHIFT 8
#define OMAP_UART_MVR_MIN_MASK 0x3f
#define OMAP_UART_DMA_CH_FREE -1
#define MSR_SAVE_FLAGS UART_MSR_ANY_DELTA
#define OMAP_MODE13X_SPEED 230400
/* WER = 0x7F
* Enable module level wakeup in WER reg
*/
#define OMAP_UART_WER_MOD_WKUP 0x7F
/* Enable XON/XOFF flow control on output */
#define OMAP_UART_SW_TX 0x08
/* Enable XON/XOFF flow control on input */
#define OMAP_UART_SW_RX 0x02
#define OMAP_UART_SW_CLR 0xF0
#define OMAP_UART_TCR_TRIG 0x0F
struct uart_omap_dma {
u8 uart_dma_tx;
u8 uart_dma_rx;
int rx_dma_channel;
int tx_dma_channel;
dma_addr_t rx_buf_dma_phys;
dma_addr_t tx_buf_dma_phys;
unsigned int uart_base;
/*
* Buffer for rx dma. It is not required for tx because the buffer
* comes from port structure.
*/
unsigned char *rx_buf;
unsigned int prev_rx_dma_pos;
int tx_buf_size;
int tx_dma_used;
int rx_dma_used;
spinlock_t tx_lock;
spinlock_t rx_lock;
/* timer to poll activity on rx dma */
struct timer_list rx_timer;
unsigned int rx_buf_size;
unsigned int rx_poll_rate;
unsigned int rx_timeout;
};
struct uart_omap_port {
struct uart_port port;
struct uart_omap_dma uart_dma;
struct device *dev;
int wakeirq;
unsigned char ier;
unsigned char lcr;
unsigned char mcr;
unsigned char fcr;
unsigned char efr;
unsigned char dll;
unsigned char dlh;
unsigned char mdr1;
unsigned char scr;
unsigned char wer;
int use_dma;
/*
* Some bits in registers are cleared on a read, so they must
* be saved whenever the register is read, but the bits will not
* be immediately processed.
*/
unsigned int lsr_break_flag;
unsigned char msr_saved_flags;
char name[20];
unsigned long port_activity;
int context_loss_cnt;
u32 errata;
u8 wakeups_enabled;
u32 features;
int rts_gpio;
struct pm_qos_request pm_qos_request;
u32 latency;
u32 calc_latency;
struct work_struct qos_work;
bool is_suspending;
};
#define to_uart_omap_port(p) ((container_of((p), struct uart_omap_port, port)))
static struct uart_omap_port *ui[OMAP_MAX_HSUART_PORTS];
/* Forward declaration of functions */
static void serial_omap_mdr1_errataset(struct uart_omap_port *up, u8 mdr1);
static inline unsigned int serial_in(struct uart_omap_port *up, int offset)
{
offset <<= up->port.regshift;
return readw(up->port.membase + offset);
}
static inline void serial_out(struct uart_omap_port *up, int offset, int value)
{
offset <<= up->port.regshift;
writew(value, up->port.membase + offset);
}
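/*
 * Note on the regshift scaling above (the shift value itself comes from
 * the probe/platform setup, not from these helpers): OMAP UART registers
 * sit on 32-bit boundaries, so with port.regshift == 2 the UART_LSR
 * offset of 5 is accessed at membase + (5 << 2) = membase + 0x14.
 */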
static inline void serial_omap_clear_fifos(struct uart_omap_port *up)
{
serial_out(up, UART_FCR, UART_FCR_ENABLE_FIFO);
serial_out(up, UART_FCR, UART_FCR_ENABLE_FIFO |
UART_FCR_CLEAR_RCVR | UART_FCR_CLEAR_XMIT);
serial_out(up, UART_FCR, 0);
}
static int serial_omap_get_context_loss_count(struct uart_omap_port *up)
{
struct omap_uart_port_info *pdata = dev_get_platdata(up->dev);
if (!pdata || !pdata->get_context_loss_count)
return -EINVAL;
return pdata->get_context_loss_count(up->dev);
}
static inline void serial_omap_enable_wakeirq(struct uart_omap_port *up,
bool enable)
{
if (!up->wakeirq)
return;
if (enable)
enable_irq(up->wakeirq);
else
disable_irq_nosync(up->wakeirq);
}
static void serial_omap_enable_wakeup(struct uart_omap_port *up, bool enable)
{
struct omap_uart_port_info *pdata = dev_get_platdata(up->dev);
if (enable == up->wakeups_enabled)
return;
serial_omap_enable_wakeirq(up, enable);
up->wakeups_enabled = enable;
if (!pdata || !pdata->enable_wakeup)
return;
pdata->enable_wakeup(up->dev, enable);
}
/*
* Calculate the absolute difference between the desired and actual baud
* rate for the given mode.
*/
static inline int calculate_baud_abs_diff(struct uart_port *port,
unsigned int baud, unsigned int mode)
{
unsigned int n = port->uartclk / (mode * baud);
int abs_diff;
if (n == 0)
n = 1;
abs_diff = baud - (port->uartclk / (mode * n));
if (abs_diff < 0)
abs_diff = -abs_diff;
return abs_diff;
}
/*
* serial_omap_baud_is_mode16 - check if baud rate is MODE16X
* @port: uart port info
* @baud: baudrate for which mode needs to be determined
*
* Returns true if baud rate is MODE16X and false if MODE13X
* The table in the OMAP TRM named "UART Mode Baud Rates, Divisor Values,
* and Error Rates" does not give the correct mode for all common baud
* rates. E.g. for a 1000000 baud rate the mode must be 16x, but according
* to that table it would be 13x.
*/
static bool
serial_omap_baud_is_mode16(struct uart_port *port, unsigned int baud)
{
int abs_diff_13 = calculate_baud_abs_diff(port, baud, 13);
int abs_diff_16 = calculate_baud_abs_diff(port, baud, 16);
return (abs_diff_13 >= abs_diff_16);
}
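/*
 * Worked example of the comparison above, assuming the default 48 MHz
 * functional clock (DEFAULT_CLK_SPEED): for 1000000 baud, mode 16 gives
 * n = 48000000 / (16 * 1000000) = 3 and an exact rate (abs_diff_16 = 0),
 * while mode 13 gives n = 3 and 48000000 / (13 * 3) = 1230769 baud
 * (abs_diff_13 = 230769), so the function returns true (MODE16X), as the
 * comment above requires.
 */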
/*
* serial_omap_get_divisor - calculate divisor value
* @port: uart port info
* @baud: baudrate for which divisor needs to be calculated.
*/
static unsigned int
serial_omap_get_divisor(struct uart_port *port, unsigned int baud)
{
unsigned int mode;
if (!serial_omap_baud_is_mode16(port, baud))
mode = 13;
else
mode = 16;
return port->uartclk/(mode * baud);
}
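/*
 * Divisor example under the same 48 MHz clock assumption: 115200 baud
 * resolves to 16x mode (the 13x and 16x errors tie and ties go to 16x),
 * so the divisor is 48000000 / (16 * 115200) = 26, giving an actual rate
 * of about 115385 baud (roughly 0.16% error).
 */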
static void serial_omap_enable_ms(struct uart_port *port)
{
struct uart_omap_port *up = to_uart_omap_port(port);
dev_dbg(up->port.dev, "serial_omap_enable_ms+%d\n", up->port.line);
pm_runtime_get_sync(up->dev);
up->ier |= UART_IER_MSI;
serial_out(up, UART_IER, up->ier);
pm_runtime_mark_last_busy(up->dev);
pm_runtime_put_autosuspend(up->dev);
}
static void serial_omap_stop_tx(struct uart_port *port)
{
struct uart_omap_port *up = to_uart_omap_port(port);
int res;
pm_runtime_get_sync(up->dev);
/* Handle RS-485 */
if (port->rs485.flags & SER_RS485_ENABLED) {
if (up->scr & OMAP_UART_SCR_TX_EMPTY) {
/* THR interrupt is fired when both TX FIFO and TX
* shift register are empty. This means there's nothing
* left to transmit now, so make sure the THR interrupt
* is fired when TX FIFO is below the trigger level,
* disable THR interrupts and toggle the RS-485 GPIO
* data direction pin if needed.
*/
up->scr &= ~OMAP_UART_SCR_TX_EMPTY;
serial_out(up, UART_OMAP_SCR, up->scr);
res = (port->rs485.flags & SER_RS485_RTS_AFTER_SEND) ?
1 : 0;
if (gpio_get_value(up->rts_gpio) != res) {
if (port->rs485.delay_rts_after_send > 0)
mdelay(
port->rs485.delay_rts_after_send);
gpio_set_value(up->rts_gpio, res);
}
} else {
/* We're asked to stop, but there's still stuff in the
* UART FIFO, so make sure the THR interrupt is fired
* when both TX FIFO and TX shift register are empty.
* The next THR interrupt (if no transmission is started
* in the meantime) will indicate the end of a
* transmission. Therefore we _don't_ disable THR
* interrupts in this situation.
*/
up->scr |= OMAP_UART_SCR_TX_EMPTY;
serial_out(up, UART_OMAP_SCR, up->scr);
return;
}
}
if (up->ier & UART_IER_THRI) {
up->ier &= ~UART_IER_THRI;
serial_out(up, UART_IER, up->ier);
}
if ((port->rs485.flags & SER_RS485_ENABLED) &&
!(port->rs485.flags & SER_RS485_RX_DURING_TX)) {
/*
* Empty the RX FIFO, we are not interested in anything
* received during the half-duplex transmission.
*/
serial_out(up, UART_FCR, up->fcr | UART_FCR_CLEAR_RCVR);
/* Re-enable RX interrupts */
up->ier |= UART_IER_RLSI | UART_IER_RDI;
up->port.read_status_mask |= UART_LSR_DR;
serial_out(up, UART_IER, up->ier);
}
pm_runtime_mark_last_busy(up->dev);
pm_runtime_put_autosuspend(up->dev);
}
static void serial_omap_stop_rx(struct uart_port *port)
{
struct uart_omap_port *up = to_uart_omap_port(port);
pm_runtime_get_sync(up->dev);
up->ier &= ~(UART_IER_RLSI | UART_IER_RDI);
up->port.read_status_mask &= ~UART_LSR_DR;
serial_out(up, UART_IER, up->ier);
pm_runtime_mark_last_busy(up->dev);
pm_runtime_put_autosuspend(up->dev);
}
static void transmit_chars(struct uart_omap_port *up, unsigned int lsr)
{
struct circ_buf *xmit = &up->port.state->xmit;
int count;
if (up->port.x_char) {
serial_out(up, UART_TX, up->port.x_char);
up->port.icount.tx++;
up->port.x_char = 0;
return;
}
if (uart_circ_empty(xmit) || uart_tx_stopped(&up->port)) {
serial_omap_stop_tx(&up->port);
return;
}
count = up->port.fifosize / 4;
do {
serial_out(up, UART_TX, xmit->buf[xmit->tail]);
xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
up->port.icount.tx++;
if (uart_circ_empty(xmit))
break;
} while (--count > 0);
if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
uart_write_wakeup(&up->port);
if (uart_circ_empty(xmit))
serial_omap_stop_tx(&up->port);
}
static inline void serial_omap_enable_ier_thri(struct uart_omap_port *up)
{
if (!(up->ier & UART_IER_THRI)) {
up->ier |= UART_IER_THRI;
serial_out(up, UART_IER, up->ier);
}
}
static void serial_omap_start_tx(struct uart_port *port)
{
struct uart_omap_port *up = to_uart_omap_port(port);
int res;
pm_runtime_get_sync(up->dev);
/* Handle RS-485 */
if (port->rs485.flags & SER_RS485_ENABLED) {
/* Fire THR interrupts when FIFO is below trigger level */
up->scr &= ~OMAP_UART_SCR_TX_EMPTY;
serial_out(up, UART_OMAP_SCR, up->scr);
/* if rts not already enabled */
res = (port->rs485.flags & SER_RS485_RTS_ON_SEND) ? 1 : 0;
if (gpio_get_value(up->rts_gpio) != res) {
gpio_set_value(up->rts_gpio, res);
if (port->rs485.delay_rts_before_send > 0)
mdelay(port->rs485.delay_rts_before_send);
}
}
if ((port->rs485.flags & SER_RS485_ENABLED) &&
!(port->rs485.flags & SER_RS485_RX_DURING_TX))
serial_omap_stop_rx(port);
serial_omap_enable_ier_thri(up);
pm_runtime_mark_last_busy(up->dev);
pm_runtime_put_autosuspend(up->dev);
}
static void serial_omap_throttle(struct uart_port *port)
{
struct uart_omap_port *up = to_uart_omap_port(port);
unsigned long flags;
pm_runtime_get_sync(up->dev);
spin_lock_irqsave(&up->port.lock, flags);
up->ier &= ~(UART_IER_RLSI | UART_IER_RDI);
serial_out(up, UART_IER, up->ier);
spin_unlock_irqrestore(&up->port.lock, flags);
pm_runtime_mark_last_busy(up->dev);
pm_runtime_put_autosuspend(up->dev);
}
static void serial_omap_unthrottle(struct uart_port *port)
{
struct uart_omap_port *up = to_uart_omap_port(port);
unsigned long flags;
pm_runtime_get_sync(up->dev);
spin_lock_irqsave(&up->port.lock, flags);
up->ier |= UART_IER_RLSI | UART_IER_RDI;
serial_out(up, UART_IER, up->ier);
spin_unlock_irqrestore(&up->port.lock, flags);
pm_runtime_mark_last_busy(up->dev);
pm_runtime_put_autosuspend(up->dev);
}
static unsigned int check_modem_status(struct uart_omap_port *up)
{
unsigned int status;
status = serial_in(up, UART_MSR);
status |= up->msr_saved_flags;
up->msr_saved_flags = 0;
if ((status & UART_MSR_ANY_DELTA) == 0)
return status;
if (status & UART_MSR_ANY_DELTA && up->ier & UART_IER_MSI &&
up->port.state != NULL) {
if (status & UART_MSR_TERI)
up->port.icount.rng++;
if (status & UART_MSR_DDSR)
up->port.icount.dsr++;
if (status & UART_MSR_DDCD)
uart_handle_dcd_change
(&up->port, status & UART_MSR_DCD);
if (status & UART_MSR_DCTS)
uart_handle_cts_change
(&up->port, status & UART_MSR_CTS);
wake_up_interruptible(&up->port.state->port.delta_msr_wait);
}
return status;
}
static void serial_omap_rlsi(struct uart_omap_port *up, unsigned int lsr)
{
unsigned int flag;
unsigned char ch = 0;
if (likely(lsr & UART_LSR_DR))
ch = serial_in(up, UART_RX);
up->port.icount.rx++;
flag = TTY_NORMAL;
if (lsr & UART_LSR_BI) {
flag = TTY_BREAK;
lsr &= ~(UART_LSR_FE | UART_LSR_PE);
up->port.icount.brk++;
/*
* We do the SysRQ and SAK checking
* here because otherwise the break
* may get masked by ignore_status_mask
* or read_status_mask.
*/
if (uart_handle_break(&up->port))
return;
}
if (lsr & UART_LSR_PE) {
flag = TTY_PARITY;
up->port.icount.parity++;
}
if (lsr & UART_LSR_FE) {
flag = TTY_FRAME;
up->port.icount.frame++;
}
if (lsr & UART_LSR_OE)
up->port.icount.overrun++;
#ifdef CONFIG_SERIAL_OMAP_CONSOLE
if (up->port.line == up->port.cons->index) {
/* Recover the break flag from console xmit */
lsr |= up->lsr_break_flag;
}
#endif
uart_insert_char(&up->port, lsr, UART_LSR_OE, 0, flag);
}
static void serial_omap_rdi(struct uart_omap_port *up, unsigned int lsr)
{
unsigned char ch = 0;
unsigned int flag;
if (!(lsr & UART_LSR_DR))
return;
ch = serial_in(up, UART_RX);
flag = TTY_NORMAL;
up->port.icount.rx++;
if (uart_handle_sysrq_char(&up->port, ch))
return;
uart_insert_char(&up->port, lsr, UART_LSR_OE, ch, flag);
}
/**
* serial_omap_irq() - This handles the interrupt from one port
* @irq: uart port irq number
* @dev_id: uart port info
*/
static irqreturn_t serial_omap_irq(int irq, void *dev_id)
{
struct uart_omap_port *up = dev_id;
unsigned int iir, lsr;
unsigned int type;
irqreturn_t ret = IRQ_NONE;
int max_count = 256;
spin_lock(&up->port.lock);
pm_runtime_get_sync(up->dev);
do {
iir = serial_in(up, UART_IIR);
if (iir & UART_IIR_NO_INT)
break;
ret = IRQ_HANDLED;
lsr = serial_in(up, UART_LSR);
/* extract IRQ type from IIR register */
type = iir & 0x3e;
switch (type) {
case UART_IIR_MSI:
check_modem_status(up);
break;
case UART_IIR_THRI:
transmit_chars(up, lsr);
break;
case UART_IIR_RX_TIMEOUT:
/* FALLTHROUGH */
case UART_IIR_RDI:
serial_omap_rdi(up, lsr);
break;
case UART_IIR_RLSI:
serial_omap_rlsi(up, lsr);
break;
case UART_IIR_CTS_RTS_DSR:
/* simply try again */
break;
case UART_IIR_XOFF:
/* FALLTHROUGH */
default:
break;
}
} while (!(iir & UART_IIR_NO_INT) && max_count--);
spin_unlock(&up->port.lock);
tty_flip_buffer_push(&up->port.state->port);
pm_runtime_mark_last_busy(up->dev);
pm_runtime_put_autosuspend(up->dev);
up->port_activity = jiffies;
return ret;
}
static unsigned int serial_omap_tx_empty(struct uart_port *port)
{
struct uart_omap_port *up = to_uart_omap_port(port);
unsigned long flags = 0;
unsigned int ret = 0;
pm_runtime_get_sync(up->dev);
dev_dbg(up->port.dev, "serial_omap_tx_empty+%d\n", up->port.line);
spin_lock_irqsave(&up->port.lock, flags);
ret = serial_in(up, UART_LSR) & UART_LSR_TEMT ? TIOCSER_TEMT : 0;
spin_unlock_irqrestore(&up->port.lock, flags);
pm_runtime_mark_last_busy(up->dev);
pm_runtime_put_autosuspend(up->dev);
return ret;
}
static unsigned int serial_omap_get_mctrl(struct uart_port *port)
{
struct uart_omap_port *up = to_uart_omap_port(port);
unsigned int status;
unsigned int ret = 0;
pm_runtime_get_sync(up->dev);
status = check_modem_status(up);
pm_runtime_mark_last_busy(up->dev);
pm_runtime_put_autosuspend(up->dev);
dev_dbg(up->port.dev, "serial_omap_get_mctrl+%d\n", up->port.line);
if (status & UART_MSR_DCD)
ret |= TIOCM_CAR;
if (status & UART_MSR_RI)
ret |= TIOCM_RNG;
if (status & UART_MSR_DSR)
ret |= TIOCM_DSR;
if (status & UART_MSR_CTS)
ret |= TIOCM_CTS;
return ret;
}
static void serial_omap_set_mctrl(struct uart_port *port, unsigned int mctrl)
{
struct uart_omap_port *up = to_uart_omap_port(port);
unsigned char mcr = 0, old_mcr, lcr;
dev_dbg(up->port.dev, "serial_omap_set_mctrl+%d\n", up->port.line);
if (mctrl & TIOCM_RTS)
mcr |= UART_MCR_RTS;
if (mctrl & TIOCM_DTR)
mcr |= UART_MCR_DTR;
if (mctrl & TIOCM_OUT1)
mcr |= UART_MCR_OUT1;
if (mctrl & TIOCM_OUT2)
mcr |= UART_MCR_OUT2;
if (mctrl & TIOCM_LOOP)
mcr |= UART_MCR_LOOP;
pm_runtime_get_sync(up->dev);
old_mcr = serial_in(up, UART_MCR);
old_mcr &= ~(UART_MCR_LOOP | UART_MCR_OUT2 | UART_MCR_OUT1 |
UART_MCR_DTR | UART_MCR_RTS);
up->mcr = old_mcr | mcr;
serial_out(up, UART_MCR, up->mcr);
/* Turn off autoRTS if RTS is lowered; restore autoRTS if RTS raised */
lcr = serial_in(up, UART_LCR);
serial_out(up, UART_LCR, UART_LCR_CONF_MODE_B);
if ((mctrl & TIOCM_RTS) && (port->status & UPSTAT_AUTORTS))
up->efr |= UART_EFR_RTS;
else
up->efr &= ~UART_EFR_RTS;
serial_out(up, UART_EFR, up->efr);
serial_out(up, UART_LCR, lcr);
pm_runtime_mark_last_busy(up->dev);
pm_runtime_put_autosuspend(up->dev);
}
static void serial_omap_break_ctl(struct uart_port *port, int break_state)
{
struct uart_omap_port *up = to_uart_omap_port(port);
unsigned long flags = 0;
dev_dbg(up->port.dev, "serial_omap_break_ctl+%d\n", up->port.line);
pm_runtime_get_sync(up->dev);
spin_lock_irqsave(&up->port.lock, flags);
if (break_state == -1)
up->lcr |= UART_LCR_SBC;
else
up->lcr &= ~UART_LCR_SBC;
serial_out(up, UART_LCR, up->lcr);
spin_unlock_irqrestore(&up->port.lock, flags);
pm_runtime_mark_last_busy(up->dev);
pm_runtime_put_autosuspend(up->dev);
}
static int serial_omap_startup(struct uart_port *port)
{
struct uart_omap_port *up = to_uart_omap_port(port);
unsigned long flags = 0;
int retval;
/*
* Allocate the IRQ
*/
retval = request_irq(up->port.irq, serial_omap_irq, up->port.irqflags,
up->name, up);
if (retval)
return retval;
/* Optional wake-up IRQ */
if (up->wakeirq) {
retval = request_irq(up->wakeirq, serial_omap_irq,
up->port.irqflags, up->name, up);
if (retval) {
free_irq(up->port.irq, up);
return retval;
}
disable_irq(up->wakeirq);
}
dev_dbg(up->port.dev, "serial_omap_startup+%d\n", up->port.line);
pm_runtime_get_sync(up->dev);
/*
* Clear the FIFO buffers and disable them.
* (they will be reenabled in set_termios())
*/
serial_omap_clear_fifos(up);
/*
* Clear the interrupt registers.
*/
(void) serial_in(up, UART_LSR);
if (serial_in(up, UART_LSR) & UART_LSR_DR)
(void) serial_in(up, UART_RX);
(void) serial_in(up, UART_IIR);
(void) serial_in(up, UART_MSR);
/*
* Now, initialize the UART
*/
serial_out(up, UART_LCR, UART_LCR_WLEN8);
spin_lock_irqsave(&up->port.lock, flags);
/*
* Most PC uarts need OUT2 raised to enable interrupts.
*/
up->port.mctrl |= TIOCM_OUT2;
serial_omap_set_mctrl(&up->port, up->port.mctrl);
spin_unlock_irqrestore(&up->port.lock, flags);
up->msr_saved_flags = 0;
/*
* Finally, enable interrupts. Note: Modem status interrupts
* are set via set_termios(), which will be occurring imminently
* anyway, so we don't enable them here.
*/
up->ier = UART_IER_RLSI | UART_IER_RDI;
serial_out(up, UART_IER, up->ier);
/* Enable module level wake up */
up->wer = OMAP_UART_WER_MOD_WKUP;
if (up->features & OMAP_UART_WER_HAS_TX_WAKEUP)
up->wer |= OMAP_UART_TX_WAKEUP_EN;
serial_out(up, UART_OMAP_WER, up->wer);
pm_runtime_mark_last_busy(up->dev);
pm_runtime_put_autosuspend(up->dev);
up->port_activity = jiffies;
return 0;
}
static void serial_omap_shutdown(struct uart_port *port)
{
struct uart_omap_port *up = to_uart_omap_port(port);
unsigned long flags = 0;
dev_dbg(up->port.dev, "serial_omap_shutdown+%d\n", up->port.line);
pm_runtime_get_sync(up->dev);
/*
* Disable interrupts from this port
*/
up->ier = 0;
serial_out(up, UART_IER, 0);
spin_lock_irqsave(&up->port.lock, flags);
up->port.mctrl &= ~TIOCM_OUT2;
serial_omap_set_mctrl(&up->port, up->port.mctrl);
spin_unlock_irqrestore(&up->port.lock, flags);
/*
* Disable break condition and FIFOs
*/
serial_out(up, UART_LCR, serial_in(up, UART_LCR) & ~UART_LCR_SBC);
serial_omap_clear_fifos(up);
/*
* Read data port to reset things, and then free the irq
*/
if (serial_in(up, UART_LSR) & UART_LSR_DR)
(void) serial_in(up, UART_RX);
pm_runtime_mark_last_busy(up->dev);
pm_runtime_put_autosuspend(up->dev);
free_irq(up->port.irq, up);
if (up->wakeirq)
free_irq(up->wakeirq, up);
}
static void serial_omap_uart_qos_work(struct work_struct *work)
{
struct uart_omap_port *up = container_of(work, struct uart_omap_port,
qos_work);
pm_qos_update_request(&up->pm_qos_request, up->latency);
}
static void
serial_omap_set_termios(struct uart_port *port, struct ktermios *termios,
struct ktermios *old)
{
struct uart_omap_port *up = to_uart_omap_port(port);
unsigned char cval = 0;
unsigned long flags = 0;
unsigned int baud, quot;
switch (termios->c_cflag & CSIZE) {
case CS5:
cval = UART_LCR_WLEN5;
break;
case CS6:
cval = UART_LCR_WLEN6;
break;
case CS7:
cval = UART_LCR_WLEN7;
break;
default:
case CS8:
cval = UART_LCR_WLEN8;
break;
}
if (termios->c_cflag & CSTOPB)
cval |= UART_LCR_STOP;
if (termios->c_cflag & PARENB)
cval |= UART_LCR_PARITY;
if (!(termios->c_cflag & PARODD))
cval |= UART_LCR_EPAR;
if (termios->c_cflag & CMSPAR)
cval |= UART_LCR_SPAR;
/*
* Ask the core to calculate the divisor for us.
*/
baud = uart_get_baud_rate(port, termios, old, 0, port->uartclk/13);
quot = serial_omap_get_divisor(port, baud);
/* calculate wakeup latency constraint */
up->calc_latency = (USEC_PER_SEC * up->port.fifosize) / (baud / 8);
up->latency = up->calc_latency;
schedule_work(&up->qos_work);
up->dll = quot & 0xff;
up->dlh = quot >> 8;
up->mdr1 = UART_OMAP_MDR1_DISABLE;
up->fcr = UART_FCR_R_TRIG_01 | UART_FCR_T_TRIG_01 |
UART_FCR_ENABLE_FIFO;
/*
* Ok, we're now changing the port state. Do it with
* interrupts disabled.
*/
pm_runtime_get_sync(up->dev);
spin_lock_irqsave(&up->port.lock, flags);
/*
* Update the per-port timeout.
*/
uart_update_timeout(port, termios->c_cflag, baud);
up->port.read_status_mask = UART_LSR_OE | UART_LSR_THRE | UART_LSR_DR;
if (termios->c_iflag & INPCK)
up->port.read_status_mask |= UART_LSR_FE | UART_LSR_PE;
if (termios->c_iflag & (BRKINT | PARMRK))
up->port.read_status_mask |= UART_LSR_BI;
/*
* Characters to ignore
*/
up->port.ignore_status_mask = 0;
if (termios->c_iflag & IGNPAR)
up->port.ignore_status_mask |= UART_LSR_PE | UART_LSR_FE;
if (termios->c_iflag & IGNBRK) {
up->port.ignore_status_mask |= UART_LSR_BI;
/*
* If we're ignoring parity and break indicators,
* ignore overruns too (for real raw support).
*/
if (termios->c_iflag & IGNPAR)
up->port.ignore_status_mask |= UART_LSR_OE;
}
/*
* ignore all characters if CREAD is not set
*/
if ((termios->c_cflag & CREAD) == 0)
up->port.ignore_status_mask |= UART_LSR_DR;
/*
* Modem status interrupts
*/
up->ier &= ~UART_IER_MSI;
if (UART_ENABLE_MS(&up->port, termios->c_cflag))
up->ier |= UART_IER_MSI;
serial_out(up, UART_IER, up->ier);
serial_out(up, UART_LCR, cval); /* reset DLAB */
up->lcr = cval;
up->scr = 0;
/* FIFOs and DMA Settings */
/* The FCR can be changed only when the baud clock is not
 * running, so DLL_REG and DLH_REG are set to 0 first.
*/
serial_out(up, UART_LCR, UART_LCR_CONF_MODE_A);
serial_out(up, UART_DLL, 0);
serial_out(up, UART_DLM, 0);
serial_out(up, UART_LCR, 0);
serial_out(up, UART_LCR, UART_LCR_CONF_MODE_B);
up->efr = serial_in(up, UART_EFR) & ~UART_EFR_ECB;
up->efr &= ~UART_EFR_SCD;
serial_out(up, UART_EFR, up->efr | UART_EFR_ECB);
serial_out(up, UART_LCR, UART_LCR_CONF_MODE_A);
up->mcr = serial_in(up, UART_MCR) & ~UART_MCR_TCRTLR;
serial_out(up, UART_MCR, up->mcr | UART_MCR_TCRTLR);
/* FIFO ENABLE, DMA MODE */
up->scr |= OMAP_UART_SCR_RX_TRIG_GRANU1_MASK;
/*
* NOTE: Setting OMAP_UART_SCR_RX_TRIG_GRANU1_MASK enables a
* granularity of 1 for the RX FIFO trigger level. Together with
* the RX FIFO trigger level set to 1 (nominally 16 characters,
* see below) and TLR[3:0] set to zero, this results in an RX
* FIFO threshold of 1 character instead of the 16 noted in the
* comment below.
*/
/* Set receive FIFO threshold to 16 characters and
* transmit FIFO threshold to 32 spaces
*/
up->fcr &= ~OMAP_UART_FCR_RX_FIFO_TRIG_MASK;
up->fcr &= ~OMAP_UART_FCR_TX_FIFO_TRIG_MASK;
up->fcr |= UART_FCR6_R_TRIGGER_16 | UART_FCR6_T_TRIGGER_24 |
UART_FCR_ENABLE_FIFO;
serial_out(up, UART_FCR, up->fcr);
serial_out(up, UART_LCR, UART_LCR_CONF_MODE_B);
serial_out(up, UART_OMAP_SCR, up->scr);
/* Reset UART_MCR_TCRTLR: this must be done with the EFR_ECB bit set */
serial_out(up, UART_LCR, UART_LCR_CONF_MODE_A);
serial_out(up, UART_MCR, up->mcr);
serial_out(up, UART_LCR, UART_LCR_CONF_MODE_B);
serial_out(up, UART_EFR, up->efr);
serial_out(up, UART_LCR, UART_LCR_CONF_MODE_A);
/* Protocol, Baud Rate, and Interrupt Settings */
if (up->errata & UART_ERRATA_i202_MDR1_ACCESS)
serial_omap_mdr1_errataset(up, up->mdr1);
else
serial_out(up, UART_OMAP_MDR1, up->mdr1);
serial_out(up, UART_LCR, UART_LCR_CONF_MODE_B);
serial_out(up, UART_EFR, up->efr | UART_EFR_ECB);
serial_out(up, UART_LCR, 0);
serial_out(up, UART_IER, 0);
serial_out(up, UART_LCR, UART_LCR_CONF_MODE_B);
serial_out(up, UART_DLL, up->dll); /* LS of divisor */
serial_out(up, UART_DLM, up->dlh); /* MS of divisor */
serial_out(up, UART_LCR, 0);
serial_out(up, UART_IER, up->ier);
serial_out(up, UART_LCR, UART_LCR_CONF_MODE_B);
serial_out(up, UART_EFR, up->efr);
serial_out(up, UART_LCR, cval);
if (!serial_omap_baud_is_mode16(port, baud))
up->mdr1 = UART_OMAP_MDR1_13X_MODE;
else
up->mdr1 = UART_OMAP_MDR1_16X_MODE;
if (up->errata & UART_ERRATA_i202_MDR1_ACCESS)
serial_omap_mdr1_errataset(up, up->mdr1);
else
serial_out(up, UART_OMAP_MDR1, up->mdr1);
/* Configure flow control */
serial_out(up, UART_LCR, UART_LCR_CONF_MODE_B);
/* XON1/XOFF1 accessible mode B, TCRTLR=0, ECB=0 */
serial_out(up, UART_XON1, termios->c_cc[VSTART]);
serial_out(up, UART_XOFF1, termios->c_cc[VSTOP]);
/* Enable access to TCR/TLR */
serial_out(up, UART_EFR, up->efr | UART_EFR_ECB);
serial_out(up, UART_LCR, UART_LCR_CONF_MODE_A);
serial_out(up, UART_MCR, up->mcr | UART_MCR_TCRTLR);
serial_out(up, UART_TI752_TCR, OMAP_UART_TCR_TRIG);
up->port.status &= ~(UPSTAT_AUTOCTS | UPSTAT_AUTORTS | UPSTAT_AUTOXOFF);
if (termios->c_cflag & CRTSCTS && up->port.flags & UPF_HARD_FLOW) {
/* Enable AUTOCTS (autoRTS is enabled when RTS is raised) */
up->port.status |= UPSTAT_AUTOCTS | UPSTAT_AUTORTS;
up->efr |= UART_EFR_CTS;
} else {
/* Disable AUTORTS and AUTOCTS */
up->efr &= ~(UART_EFR_CTS | UART_EFR_RTS);
}
if (up->port.flags & UPF_SOFT_FLOW) {
/* clear SW control mode bits */
up->efr &= OMAP_UART_SW_CLR;
/*
* IXON Flag:
* Enable XON/XOFF flow control on input.
* Receiver compares XON1, XOFF1.
*/
if (termios->c_iflag & IXON)
up->efr |= OMAP_UART_SW_RX;
/*
* IXOFF Flag:
* Enable XON/XOFF flow control on output.
* Transmit XON1, XOFF1
*/
if (termios->c_iflag & IXOFF) {
up->port.status |= UPSTAT_AUTOXOFF;
up->efr |= OMAP_UART_SW_TX;
}
/*
* IXANY Flag:
* Enable any character to restart output.
* Operation resumes after receiving any
* character after recognition of the XOFF character
*/
if (termios->c_iflag & IXANY)
up->mcr |= UART_MCR_XONANY;
else
up->mcr &= ~UART_MCR_XONANY;
}
serial_out(up, UART_MCR, up->mcr);
serial_out(up, UART_LCR, UART_LCR_CONF_MODE_B);
serial_out(up, UART_EFR, up->efr);
serial_out(up, UART_LCR, up->lcr);
serial_omap_set_mctrl(&up->port, up->port.mctrl);
spin_unlock_irqrestore(&up->port.lock, flags);
pm_runtime_mark_last_busy(up->dev);
pm_runtime_put_autosuspend(up->dev);
dev_dbg(up->port.dev, "serial_omap_set_termios+%d\n", up->port.line);
}
static void
serial_omap_pm(struct uart_port *port, unsigned int state,
unsigned int oldstate)
{
struct uart_omap_port *up = to_uart_omap_port(port);
unsigned char efr;
dev_dbg(up->port.dev, "serial_omap_pm+%d\n", up->port.line);
pm_runtime_get_sync(up->dev);
serial_out(up, UART_LCR, UART_LCR_CONF_MODE_B);
efr = serial_in(up, UART_EFR);
serial_out(up, UART_EFR, efr | UART_EFR_ECB);
serial_out(up, UART_LCR, 0);
serial_out(up, UART_IER, (state != 0) ? UART_IERX_SLEEP : 0);
serial_out(up, UART_LCR, UART_LCR_CONF_MODE_B);
serial_out(up, UART_EFR, efr);
serial_out(up, UART_LCR, 0);
if (!device_may_wakeup(up->dev)) {
if (!state)
pm_runtime_forbid(up->dev);
else
pm_runtime_allow(up->dev);
}
pm_runtime_mark_last_busy(up->dev);
pm_runtime_put_autosuspend(up->dev);
}
static void serial_omap_release_port(struct uart_port *port)
{
dev_dbg(port->dev, "serial_omap_release_port+\n");
}
static int serial_omap_request_port(struct uart_port *port)
{
dev_dbg(port->dev, "serial_omap_request_port+\n");
return 0;
}
static void serial_omap_config_port(struct uart_port *port, int flags)
{
struct uart_omap_port *up = to_uart_omap_port(port);
dev_dbg(up->port.dev, "serial_omap_config_port+%d\n",
up->port.line);
up->port.type = PORT_OMAP;
up->port.flags |= UPF_SOFT_FLOW | UPF_HARD_FLOW;
}
static int
serial_omap_verify_port(struct uart_port *port, struct serial_struct *ser)
{
/* we don't want the core code to modify any port params */
dev_dbg(port->dev, "serial_omap_verify_port+\n");
return -EINVAL;
}
static const char *
serial_omap_type(struct uart_port *port)
{
struct uart_omap_port *up = to_uart_omap_port(port);
dev_dbg(up->port.dev, "serial_omap_type+%d\n", up->port.line);
return up->name;
}
#define BOTH_EMPTY (UART_LSR_TEMT | UART_LSR_THRE)
static inline void wait_for_xmitr(struct uart_omap_port *up)
{
unsigned int status, tmout = 10000;
/* Wait up to 10ms for the character(s) to be sent. */
do {
status = serial_in(up, UART_LSR);
if (status & UART_LSR_BI)
up->lsr_break_flag = UART_LSR_BI;
if (--tmout == 0)
break;
udelay(1);
} while ((status & BOTH_EMPTY) != BOTH_EMPTY);
/* Wait up to 1s for flow control if necessary */
if (up->port.flags & UPF_CONS_FLOW) {
for (tmout = 1000000; tmout; tmout--) {
unsigned int msr = serial_in(up, UART_MSR);
up->msr_saved_flags |= msr & MSR_SAVE_FLAGS;
if (msr & UART_MSR_CTS)
break;
udelay(1);
}
}
}
#ifdef CONFIG_CONSOLE_POLL
static void serial_omap_poll_put_char(struct uart_port *port, unsigned char ch)
{
struct uart_omap_port *up = to_uart_omap_port(port);
pm_runtime_get_sync(up->dev);
wait_for_xmitr(up);
serial_out(up, UART_TX, ch);
pm_runtime_mark_last_busy(up->dev);
pm_runtime_put_autosuspend(up->dev);
}
static int serial_omap_poll_get_char(struct uart_port *port)
{
struct uart_omap_port *up = to_uart_omap_port(port);
unsigned int status;
pm_runtime_get_sync(up->dev);
status = serial_in(up, UART_LSR);
if (!(status & UART_LSR_DR)) {
status = NO_POLL_CHAR;
goto out;
}
status = serial_in(up, UART_RX);
out:
pm_runtime_mark_last_busy(up->dev);
pm_runtime_put_autosuspend(up->dev);
return status;
}
#endif /* CONFIG_CONSOLE_POLL */
#ifdef CONFIG_SERIAL_OMAP_CONSOLE
static struct uart_omap_port *serial_omap_console_ports[OMAP_MAX_HSUART_PORTS];
static struct uart_driver serial_omap_reg;
static void serial_omap_console_putchar(struct uart_port *port, int ch)
{
struct uart_omap_port *up = to_uart_omap_port(port);
wait_for_xmitr(up);
serial_out(up, UART_TX, ch);
}
static void
serial_omap_console_write(struct console *co, const char *s,
unsigned int count)
{
struct uart_omap_port *up = serial_omap_console_ports[co->index];
unsigned long flags;
unsigned int ier;
int locked = 1;
pm_runtime_get_sync(up->dev);
local_irq_save(flags);
if (up->port.sysrq)
locked = 0;
else if (oops_in_progress)
locked = spin_trylock(&up->port.lock);
else
spin_lock(&up->port.lock);
/*
* First save the IER then disable the interrupts
*/
ier = serial_in(up, UART_IER);
serial_out(up, UART_IER, 0);
uart_console_write(&up->port, s, count, serial_omap_console_putchar);
/*
* Finally, wait for transmitter to become empty
* and restore the IER
*/
wait_for_xmitr(up);
serial_out(up, UART_IER, ier);
/*
* The receive handling will happen properly because the
* receive-ready bit is still set; it is not cleared on read.
* Modem status handling, however, will not, so we must call
* check_modem_status() ourselves if anything was saved in
* msr_saved_flags while interrupts were off.
*/
if (up->msr_saved_flags)
check_modem_status(up);
pm_runtime_mark_last_busy(up->dev);
pm_runtime_put_autosuspend(up->dev);
if (locked)
spin_unlock(&up->port.lock);
local_irq_restore(flags);
}
static int __init
serial_omap_console_setup(struct console *co, char *options)
{
struct uart_omap_port *up;
int baud = 115200;
int bits = 8;
int parity = 'n';
int flow = 'n';
if (serial_omap_console_ports[co->index] == NULL)
return -ENODEV;
up = serial_omap_console_ports[co->index];
if (options)
uart_parse_options(options, &baud, &parity, &bits, &flow);
return uart_set_options(&up->port, co, baud, parity, bits, flow);
}
static struct console serial_omap_console = {
.name = OMAP_SERIAL_NAME,
.write = serial_omap_console_write,
.device = uart_console_device,
.setup = serial_omap_console_setup,
.flags = CON_PRINTBUFFER,
.index = -1,
.data = &serial_omap_reg,
};
static void serial_omap_add_console_port(struct uart_omap_port *up)
{
serial_omap_console_ports[up->port.line] = up;
}
#define OMAP_CONSOLE (&serial_omap_console)
#else
#define OMAP_CONSOLE NULL
static inline void serial_omap_add_console_port(struct uart_omap_port *up)
{}
#endif
/* Enable or disable the rs485 support */
static int
serial_omap_config_rs485(struct uart_port *port, struct serial_rs485 *rs485conf)
{
struct uart_omap_port *up = to_uart_omap_port(port);
unsigned int mode;
int val;
pm_runtime_get_sync(up->dev);
/* Disable interrupts from this port */
mode = up->ier;
up->ier = 0;
serial_out(up, UART_IER, 0);
/* store new config */
port->rs485 = *rs485conf;
/*
* Just as a precaution, only allow rs485
* to be enabled if the gpio pin is valid
*/
if (gpio_is_valid(up->rts_gpio)) {
/* enable / disable rts */
val = (port->rs485.flags & SER_RS485_ENABLED) ?
SER_RS485_RTS_AFTER_SEND : SER_RS485_RTS_ON_SEND;
val = (port->rs485.flags & val) ? 1 : 0;
gpio_set_value(up->rts_gpio, val);
} else
port->rs485.flags &= ~SER_RS485_ENABLED;
/* Enable interrupts */
up->ier = mode;
serial_out(up, UART_IER, up->ier);
/* If RS-485 is disabled, make sure the THR interrupt is fired when
* TX FIFO is below the trigger level.
*/
if (!(port->rs485.flags & SER_RS485_ENABLED) &&
(up->scr & OMAP_UART_SCR_TX_EMPTY)) {
up->scr &= ~OMAP_UART_SCR_TX_EMPTY;
serial_out(up, UART_OMAP_SCR, up->scr);
}
pm_runtime_mark_last_busy(up->dev);
pm_runtime_put_autosuspend(up->dev);
return 0;
}
static struct uart_ops serial_omap_pops = {
.tx_empty = serial_omap_tx_empty,
.set_mctrl = serial_omap_set_mctrl,
.get_mctrl = serial_omap_get_mctrl,
.stop_tx = serial_omap_stop_tx,
.start_tx = serial_omap_start_tx,
.throttle = serial_omap_throttle,
.unthrottle = serial_omap_unthrottle,
.stop_rx = serial_omap_stop_rx,
.enable_ms = serial_omap_enable_ms,
.break_ctl = serial_omap_break_ctl,
.startup = serial_omap_startup,
.shutdown = serial_omap_shutdown,
.set_termios = serial_omap_set_termios,
.pm = serial_omap_pm,
.type = serial_omap_type,
.release_port = serial_omap_release_port,
.request_port = serial_omap_request_port,
.config_port = serial_omap_config_port,
.verify_port = serial_omap_verify_port,
#ifdef CONFIG_CONSOLE_POLL
.poll_put_char = serial_omap_poll_put_char,
.poll_get_char = serial_omap_poll_get_char,
#endif
};
static struct uart_driver serial_omap_reg = {
.owner = THIS_MODULE,
.driver_name = "OMAP-SERIAL",
.dev_name = OMAP_SERIAL_NAME,
.nr = OMAP_MAX_HSUART_PORTS,
.cons = OMAP_CONSOLE,
};
#ifdef CONFIG_PM_SLEEP
static int serial_omap_prepare(struct device *dev)
{
struct uart_omap_port *up = dev_get_drvdata(dev);
up->is_suspending = true;
return 0;
}
static void serial_omap_complete(struct device *dev)
{
struct uart_omap_port *up = dev_get_drvdata(dev);
up->is_suspending = false;
}
static int serial_omap_suspend(struct device *dev)
{
struct uart_omap_port *up = dev_get_drvdata(dev);
uart_suspend_port(&serial_omap_reg, &up->port);
flush_work(&up->qos_work);
if (device_may_wakeup(dev))
serial_omap_enable_wakeup(up, true);
else
serial_omap_enable_wakeup(up, false);
return 0;
}
static int serial_omap_resume(struct device *dev)
{
struct uart_omap_port *up = dev_get_drvdata(dev);
if (device_may_wakeup(dev))
serial_omap_enable_wakeup(up, false);
uart_resume_port(&serial_omap_reg, &up->port);
return 0;
}
#else
#define serial_omap_prepare NULL
#define serial_omap_complete NULL
#endif /* CONFIG_PM_SLEEP */
static void omap_serial_fill_features_erratas(struct uart_omap_port *up)
{
u32 mvr, scheme;
u16 revision, major, minor;
mvr = readl(up->port.membase + (UART_OMAP_MVER << up->port.regshift));
/* Check revision register scheme */
scheme = mvr >> OMAP_UART_MVR_SCHEME_SHIFT;
switch (scheme) {
case 0: /* Legacy Scheme: OMAP2/3 */
/* MINOR_REV[0:4], MAJOR_REV[4:7] */
major = (mvr & OMAP_UART_LEGACY_MVR_MAJ_MASK) >>
OMAP_UART_LEGACY_MVR_MAJ_SHIFT;
minor = (mvr & OMAP_UART_LEGACY_MVR_MIN_MASK);
break;
case 1:
/* New Scheme: OMAP4+ */
/* MINOR_REV[0:5], MAJOR_REV[8:10] */
major = (mvr & OMAP_UART_MVR_MAJ_MASK) >>
OMAP_UART_MVR_MAJ_SHIFT;
minor = (mvr & OMAP_UART_MVR_MIN_MASK);
break;
default:
dev_warn(up->dev,
"Unknown %s revision, defaulting to highest\n",
up->name);
/* highest possible revision */
major = 0xff;
minor = 0xff;
}
/* normalize revision for the driver */
revision = UART_BUILD_REVISION(major, minor);
switch (revision) {
case OMAP_UART_REV_46:
up->errata |= (UART_ERRATA_i202_MDR1_ACCESS |
UART_ERRATA_i291_DMA_FORCEIDLE);
break;
case OMAP_UART_REV_52:
up->errata |= (UART_ERRATA_i202_MDR1_ACCESS |
UART_ERRATA_i291_DMA_FORCEIDLE);
up->features |= OMAP_UART_WER_HAS_TX_WAKEUP;
break;
case OMAP_UART_REV_63:
up->errata |= UART_ERRATA_i202_MDR1_ACCESS;
up->features |= OMAP_UART_WER_HAS_TX_WAKEUP;
break;
default:
break;
}
}
static struct omap_uart_port_info *of_get_uart_port_info(struct device *dev)
{
struct omap_uart_port_info *omap_up_info;
omap_up_info = devm_kzalloc(dev, sizeof(*omap_up_info), GFP_KERNEL);
if (!omap_up_info)
return NULL; /* out of memory */
of_property_read_u32(dev->of_node, "clock-frequency",
&omap_up_info->uartclk);
return omap_up_info;
}
static int serial_omap_probe_rs485(struct uart_omap_port *up,
struct device_node *np)
{
struct serial_rs485 *rs485conf = &up->port.rs485;
u32 rs485_delay[2];
enum of_gpio_flags flags;
int ret;
rs485conf->flags = 0;
up->rts_gpio = -EINVAL;
if (!np)
return 0;
if (of_property_read_bool(np, "rs485-rts-active-high"))
rs485conf->flags |= SER_RS485_RTS_ON_SEND;
else
rs485conf->flags |= SER_RS485_RTS_AFTER_SEND;
/* check for tx enable gpio */
up->rts_gpio = of_get_named_gpio_flags(np, "rts-gpio", 0, &flags);
if (gpio_is_valid(up->rts_gpio)) {
ret = devm_gpio_request(up->dev, up->rts_gpio, "omap-serial");
if (ret < 0)
return ret;
ret = gpio_direction_output(up->rts_gpio,
flags & SER_RS485_RTS_AFTER_SEND);
if (ret < 0)
return ret;
} else if (up->rts_gpio == -EPROBE_DEFER) {
return -EPROBE_DEFER;
} else {
up->rts_gpio = -EINVAL;
}
if (of_property_read_u32_array(np, "rs485-rts-delay",
rs485_delay, 2) == 0) {
rs485conf->delay_rts_before_send = rs485_delay[0];
rs485conf->delay_rts_after_send = rs485_delay[1];
}
if (of_property_read_bool(np, "rs485-rx-during-tx"))
rs485conf->flags |= SER_RS485_RX_DURING_TX;
if (of_property_read_bool(np, "linux,rs485-enabled-at-boot-time"))
rs485conf->flags |= SER_RS485_ENABLED;
return 0;
}
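/*
 * Illustrative device tree fragment (a sketch, not taken from any real
 * board file) exercising the optional RS-485 properties parsed above;
 * the GPIO specifier is hypothetical:
 *
 *	rts-gpio = <&gpio1 15 0>;
 *	rs485-rts-active-high;
 *	rs485-rts-delay = <10 10>;
 *	rs485-rx-during-tx;
 *	linux,rs485-enabled-at-boot-time;
 */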
static int serial_omap_probe(struct platform_device *pdev)
{
struct omap_uart_port_info *omap_up_info = dev_get_platdata(&pdev->dev);
struct uart_omap_port *up;
struct resource *mem;
void __iomem *base;
int uartirq = 0;
int wakeirq = 0;
int ret;
/* The optional wakeirq may be specified in the board dts file */
if (pdev->dev.of_node) {
uartirq = irq_of_parse_and_map(pdev->dev.of_node, 0);
if (!uartirq)
return -EPROBE_DEFER;
wakeirq = irq_of_parse_and_map(pdev->dev.of_node, 1);
omap_up_info = of_get_uart_port_info(&pdev->dev);
pdev->dev.platform_data = omap_up_info;
} else {
uartirq = platform_get_irq(pdev, 0);
if (uartirq < 0)
return -EPROBE_DEFER;
}
up = devm_kzalloc(&pdev->dev, sizeof(*up), GFP_KERNEL);
if (!up)
return -ENOMEM;
mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
base = devm_ioremap_resource(&pdev->dev, mem);
if (IS_ERR(base))
return PTR_ERR(base);
up->dev = &pdev->dev;
up->port.dev = &pdev->dev;
up->port.type = PORT_OMAP;
up->port.iotype = UPIO_MEM;
up->port.irq = uartirq;
up->port.regshift = 2;
up->port.fifosize = 64;
up->port.ops = &serial_omap_pops;
if (pdev->dev.of_node)
ret = of_alias_get_id(pdev->dev.of_node, "serial");
else
ret = pdev->id;
if (ret < 0) {
dev_err(&pdev->dev, "failed to get alias/pdev id, errno %d\n",
ret);
goto err_port_line;
}
up->port.line = ret;
if (up->port.line >= OMAP_MAX_HSUART_PORTS) {
dev_err(&pdev->dev, "uart ID %d > MAX %d.\n", up->port.line,
OMAP_MAX_HSUART_PORTS);
ret = -ENXIO;
goto err_port_line;
}
up->wakeirq = wakeirq;
if (!up->wakeirq)
dev_info(up->port.dev, "no wakeirq for uart%d\n",
up->port.line);
ret = serial_omap_probe_rs485(up, pdev->dev.of_node);
if (ret < 0)
goto err_rs485;
sprintf(up->name, "OMAP UART%d", up->port.line);
up->port.mapbase = mem->start;
up->port.membase = base;
up->port.flags = omap_up_info->flags;
up->port.uartclk = omap_up_info->uartclk;
up->port.rs485_config = serial_omap_config_rs485;
if (!up->port.uartclk) {
up->port.uartclk = DEFAULT_CLK_SPEED;
dev_warn(&pdev->dev,
"No clock speed specified: using default: %d\n",
DEFAULT_CLK_SPEED);
}
up->latency = PM_QOS_CPU_DMA_LAT_DEFAULT_VALUE;
up->calc_latency = PM_QOS_CPU_DMA_LAT_DEFAULT_VALUE;
pm_qos_add_request(&up->pm_qos_request,
PM_QOS_CPU_DMA_LATENCY, up->latency);
INIT_WORK(&up->qos_work, serial_omap_uart_qos_work);
platform_set_drvdata(pdev, up);
if (omap_up_info->autosuspend_timeout == 0)
omap_up_info->autosuspend_timeout = -1;
device_init_wakeup(up->dev, true);
pm_runtime_use_autosuspend(&pdev->dev);
pm_runtime_set_autosuspend_delay(&pdev->dev,
omap_up_info->autosuspend_timeout);
pm_runtime_irq_safe(&pdev->dev);
pm_runtime_enable(&pdev->dev);
pm_runtime_get_sync(&pdev->dev);
omap_serial_fill_features_erratas(up);
ui[up->port.line] = up;
serial_omap_add_console_port(up);
ret = uart_add_one_port(&serial_omap_reg, &up->port);
if (ret != 0)
goto err_add_port;
pm_runtime_mark_last_busy(up->dev);
pm_runtime_put_autosuspend(up->dev);
return 0;
err_add_port:
pm_runtime_put(&pdev->dev);
pm_runtime_disable(&pdev->dev);
pm_qos_remove_request(&up->pm_qos_request);
device_init_wakeup(up->dev, false);
err_rs485:
err_port_line:
return ret;
}
static int serial_omap_remove(struct platform_device *dev)
{
struct uart_omap_port *up = platform_get_drvdata(dev);
pm_runtime_put_sync(up->dev);
pm_runtime_disable(up->dev);
uart_remove_one_port(&serial_omap_reg, &up->port);
pm_qos_remove_request(&up->pm_qos_request);
device_init_wakeup(&dev->dev, false);
return 0;
}
/*
* Workaround for Errata i202 (2430, 3430, 3630, 4430 and 4460):
* accessing a UART register immediately after an MDR1 access can
* cause the UART to corrupt data.
*
* The required delay is 5 L4 clock cycles + 5 UART functional
* clock cycles (~0.2 us at 48 MHz); we allow ten times that,
* which the ~2 us delay below provides.
*/
static void serial_omap_mdr1_errataset(struct uart_omap_port *up, u8 mdr1)
{
u8 timeout = 255;
serial_out(up, UART_OMAP_MDR1, mdr1);
udelay(2);
serial_out(up, UART_FCR, up->fcr | UART_FCR_CLEAR_XMIT |
UART_FCR_CLEAR_RCVR);
/*
* Wait for FIFO to empty: when empty, RX_FIFO_E bit is 0 and
* TX_FIFO_E bit is 1.
*/
while (UART_LSR_THRE != (serial_in(up, UART_LSR) &
(UART_LSR_THRE | UART_LSR_DR))) {
timeout--;
if (!timeout) {
/* Should *never* happen; we warn and carry on. */
dev_crit(up->dev, "Errata i202: timedout %x\n",
serial_in(up, UART_LSR));
break;
}
udelay(1);
}
}
#ifdef CONFIG_PM
static void serial_omap_restore_context(struct uart_omap_port *up)
{
if (up->errata & UART_ERRATA_i202_MDR1_ACCESS)
serial_omap_mdr1_errataset(up, UART_OMAP_MDR1_DISABLE);
else
serial_out(up, UART_OMAP_MDR1, UART_OMAP_MDR1_DISABLE);
serial_out(up, UART_LCR, UART_LCR_CONF_MODE_B); /* Config B mode */
serial_out(up, UART_EFR, UART_EFR_ECB);
serial_out(up, UART_LCR, 0x0); /* Operational mode */
serial_out(up, UART_IER, 0x0);
serial_out(up, UART_LCR, UART_LCR_CONF_MODE_B); /* Config B mode */
serial_out(up, UART_DLL, up->dll);
serial_out(up, UART_DLM, up->dlh);
serial_out(up, UART_LCR, 0x0); /* Operational mode */
serial_out(up, UART_IER, up->ier);
serial_out(up, UART_FCR, up->fcr);
serial_out(up, UART_LCR, UART_LCR_CONF_MODE_A);
serial_out(up, UART_MCR, up->mcr);
serial_out(up, UART_LCR, UART_LCR_CONF_MODE_B); /* Config B mode */
serial_out(up, UART_OMAP_SCR, up->scr);
serial_out(up, UART_EFR, up->efr);
serial_out(up, UART_LCR, up->lcr);
if (up->errata & UART_ERRATA_i202_MDR1_ACCESS)
serial_omap_mdr1_errataset(up, up->mdr1);
else
serial_out(up, UART_OMAP_MDR1, up->mdr1);
serial_out(up, UART_OMAP_WER, up->wer);
}
static int serial_omap_runtime_suspend(struct device *dev)
{
struct uart_omap_port *up = dev_get_drvdata(dev);
if (!up)
return -EINVAL;
/*
* When using 'no_console_suspend', the console UART must not be
* suspended. Since driver suspend is managed by runtime suspend,
* preventing runtime suspend (by returning an error) will keep the device
* active during suspend.
*/
if (up->is_suspending && !console_suspend_enabled &&
uart_console(&up->port))
return -EBUSY;
up->context_loss_cnt = serial_omap_get_context_loss_count(up);
serial_omap_enable_wakeup(up, true);
up->latency = PM_QOS_CPU_DMA_LAT_DEFAULT_VALUE;
schedule_work(&up->qos_work);
return 0;
}
static int serial_omap_runtime_resume(struct device *dev)
{
struct uart_omap_port *up = dev_get_drvdata(dev);
int loss_cnt = serial_omap_get_context_loss_count(up);
serial_omap_enable_wakeup(up, false);
if (loss_cnt < 0) {
dev_dbg(dev, "serial_omap_get_context_loss_count failed : %d\n",
loss_cnt);
serial_omap_restore_context(up);
} else if (up->context_loss_cnt != loss_cnt) {
serial_omap_restore_context(up);
}
up->latency = up->calc_latency;
schedule_work(&up->qos_work);
return 0;
}
#endif
static const struct dev_pm_ops serial_omap_dev_pm_ops = {
SET_SYSTEM_SLEEP_PM_OPS(serial_omap_suspend, serial_omap_resume)
SET_RUNTIME_PM_OPS(serial_omap_runtime_suspend,
serial_omap_runtime_resume, NULL)
.prepare = serial_omap_prepare,
.complete = serial_omap_complete,
};
#if defined(CONFIG_OF)
static const struct of_device_id omap_serial_of_match[] = {
{ .compatible = "ti,omap2-uart" },
{ .compatible = "ti,omap3-uart" },
{ .compatible = "ti,omap4-uart" },
{},
};
MODULE_DEVICE_TABLE(of, omap_serial_of_match);
#endif
static struct platform_driver serial_omap_driver = {
.probe = serial_omap_probe,
.remove = serial_omap_remove,
.driver = {
.name = DRIVER_NAME,
.pm = &serial_omap_dev_pm_ops,
.of_match_table = of_match_ptr(omap_serial_of_match),
},
};
static int __init serial_omap_init(void)
{
int ret;
ret = uart_register_driver(&serial_omap_reg);
if (ret != 0)
return ret;
ret = platform_driver_register(&serial_omap_driver);
if (ret != 0)
uart_unregister_driver(&serial_omap_reg);
return ret;
}
static void __exit serial_omap_exit(void)
{
platform_driver_unregister(&serial_omap_driver);
uart_unregister_driver(&serial_omap_reg);
}
module_init(serial_omap_init);
module_exit(serial_omap_exit);
MODULE_DESCRIPTION("OMAP High Speed UART driver");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Texas Instruments Inc");
| gpl-2.0 |
nypdmax/NUMA | tools/qemu-xen/disas/ppc.c | 176 | 242713 | /* ppc-dis.c -- Disassemble PowerPC instructions
Copyright 1994, 1995, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007
Free Software Foundation, Inc.
Written by Ian Lance Taylor, Cygnus Support
This file is part of GDB, GAS, and the GNU binutils.
GDB, GAS, and the GNU binutils are free software; you can redistribute
them and/or modify them under the terms of the GNU General Public
License as published by the Free Software Foundation; either version
2, or (at your option) any later version.
GDB, GAS, and the GNU binutils are distributed in the hope that they
will be useful, but WITHOUT ANY WARRANTY; without even the implied
warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
the GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this file; see the file COPYING. If not,
see <http://www.gnu.org/licenses/>. */
#include "disas/bfd.h"
#define BFD_DEFAULT_TARGET_SIZE 64
/* ppc.h -- Header file for PowerPC opcode table
Copyright 1994, 1995, 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006,
2007 Free Software Foundation, Inc.
Written by Ian Lance Taylor, Cygnus Support
This file is part of GDB, GAS, and the GNU binutils.
GDB, GAS, and the GNU binutils are free software; you can redistribute
them and/or modify them under the terms of the GNU General Public
License as published by the Free Software Foundation; either version
1, or (at your option) any later version.
GDB, GAS, and the GNU binutils are distributed in the hope that they
will be useful, but WITHOUT ANY WARRANTY; without even the implied
warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
the GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this file; see the file COPYING. If not,
see <http://www.gnu.org/licenses/>. */
/* The opcode table is an array of struct powerpc_opcode. */
struct powerpc_opcode
{
/* The opcode name. */
const char *name;
/* The opcode itself. Those bits which will be filled in with
operands are zeroes. */
unsigned long opcode;
/* The opcode mask. This is used by the disassembler. This is a
mask containing ones indicating those bits which must match the
opcode field, and zeroes indicating those bits which need not
match (and are presumably filled in by operands). */
unsigned long mask;
/* One bit flags for the opcode. These are used to indicate which
specific processors support the instructions. The defined values
are listed below. */
unsigned long flags;
/* An array of operand codes. Each code is an index into the
operand table. They appear in the order which the operands must
appear in assembly code, and are terminated by a zero. */
unsigned char operands[8];
};
/* The table itself is sorted by major opcode number, and is otherwise
in the order in which the disassembler should consider
instructions. */
extern const struct powerpc_opcode powerpc_opcodes[];
extern const int powerpc_num_opcodes;
/* Values defined for the flags field of a struct powerpc_opcode. */
/* Opcode is defined for the PowerPC architecture. */
#define PPC_OPCODE_PPC 1
/* Opcode is defined for the POWER (RS/6000) architecture. */
#define PPC_OPCODE_POWER 2
/* Opcode is defined for the POWER2 (Rios 2) architecture. */
#define PPC_OPCODE_POWER2 4
/* Opcode is only defined on 32 bit architectures. */
#define PPC_OPCODE_32 8
/* Opcode is only defined on 64 bit architectures. */
#define PPC_OPCODE_64 0x10
/* Opcode is supported by the Motorola PowerPC 601 processor. The 601
is assumed to support all PowerPC (PPC_OPCODE_PPC) instructions,
but it also supports many additional POWER instructions. */
#define PPC_OPCODE_601 0x20
/* Opcode is supported in both the Power and PowerPC architectures
(ie, compiler's -mcpu=common or assembler's -mcom). */
#define PPC_OPCODE_COMMON 0x40
/* Opcode is supported for any Power or PowerPC platform (this is
for the assembler's -many option, and it eliminates duplicates). */
#define PPC_OPCODE_ANY 0x80
/* Opcode is supported as part of the 64-bit bridge. */
#define PPC_OPCODE_64_BRIDGE 0x100
/* Opcode is supported by Altivec Vector Unit */
#define PPC_OPCODE_ALTIVEC 0x200
/* Opcode is supported by PowerPC 403 processor. */
#define PPC_OPCODE_403 0x400
/* Opcode is supported by PowerPC BookE processor. */
#define PPC_OPCODE_BOOKE 0x800
/* Opcode is only supported by 64-bit PowerPC BookE processor. */
#define PPC_OPCODE_BOOKE64 0x1000
/* Opcode is supported by PowerPC 440 processor. */
#define PPC_OPCODE_440 0x2000
/* Opcode is only supported by Power4 architecture. */
#define PPC_OPCODE_POWER4 0x4000
/* Opcode isn't supported by Power4 architecture. */
#define PPC_OPCODE_NOPOWER4 0x8000
/* Opcode is only supported by POWERPC Classic architecture. */
#define PPC_OPCODE_CLASSIC 0x10000
/* Opcode is only supported by e500x2 Core. */
#define PPC_OPCODE_SPE 0x20000
/* Opcode is supported by e500x2 Integer select APU. */
#define PPC_OPCODE_ISEL 0x40000
/* Opcode is an e500 SPE floating point instruction. */
#define PPC_OPCODE_EFS 0x80000
/* Opcode is supported by branch locking APU. */
#define PPC_OPCODE_BRLOCK 0x100000
/* Opcode is supported by performance monitor APU. */
#define PPC_OPCODE_PMR 0x200000
/* Opcode is supported by cache locking APU. */
#define PPC_OPCODE_CACHELCK 0x400000
/* Opcode is supported by machine check APU. */
#define PPC_OPCODE_RFMCI 0x800000
/* Opcode is only supported by Power5 architecture. */
#define PPC_OPCODE_POWER5 0x1000000
/* Opcode is supported by PowerPC e300 family. */
#define PPC_OPCODE_E300 0x2000000
/* Opcode is only supported by Power6 architecture. */
#define PPC_OPCODE_POWER6 0x4000000
/* Opcode is only supported by PowerPC Cell family. */
#define PPC_OPCODE_CELL 0x8000000
/* A macro to extract the major opcode from an instruction. */
#define PPC_OP(i) (((i) >> 26) & 0x3f)
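/* Worked example (illustrative, not from the original file): the word
0x7c221a14 encodes "add r1,r2,r3"; PPC_OP (0x7c221a14) gives
(0x7c221a14 >> 26) & 0x3f = 31, the major opcode shared by the X and
XO form fixed-point instructions. */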
/* The operands table is an array of struct powerpc_operand. */
struct powerpc_operand
{
/* A bitmask of bits in the operand. */
unsigned int bitm;
/* How far the operand is left shifted in the instruction.
-1 to indicate that BITM and SHIFT cannot be used to determine
where the operand goes in the insn. */
int shift;
/* Insertion function. This is used by the assembler. To insert an
operand value into an instruction, check this field.
If it is NULL, execute
i |= (op & o->bitm) << o->shift;
(i is the instruction which we are filling in, o is a pointer to
this structure, and op is the operand value).
If this field is not NULL, then simply call it with the
instruction and the operand value. It will return the new value
of the instruction. If the ERRMSG argument is not NULL, then if
the operand value is illegal, *ERRMSG will be set to a warning
string (the operand will be inserted in any case). If the
operand value is legal, *ERRMSG will be unchanged (most operands
can accept any value). */
unsigned long (*insert)
(unsigned long instruction, long op, int dialect, const char **errmsg);
/* Extraction function. This is used by the disassembler. To
extract this operand type from an instruction, check this field.
If it is NULL, compute
op = (i >> o->shift) & o->bitm;
if ((o->flags & PPC_OPERAND_SIGNED) != 0)
sign_extend (op);
(i is the instruction, o is a pointer to this structure, and op
is the result).
If this field is not NULL, then simply call it with the
instruction value. It will return the value of the operand. If
the INVALID argument is not NULL, *INVALID will be set to
non-zero if this operand type can not actually be extracted from
this operand (i.e., the instruction does not match). If the
operand is valid, *INVALID will not be changed. */
long (*extract) (unsigned long instruction, int dialect, int *invalid);
/* One bit syntax flags. */
unsigned long flags;
};
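/* A minimal sketch, not part of the original file, of how a consumer of
this table could apply the default insert/extract rules described above
when the callbacks are NULL; the helper names are hypothetical and the
block is disabled so it cannot affect the build. */
#if 0
static unsigned long
default_insert_operand (unsigned long insn, long op,
const struct powerpc_operand *o)
{
return insn | ((op & o->bitm) << o->shift);
}
static long
default_extract_operand (unsigned long insn, const struct powerpc_operand *o)
{
long op = (insn >> o->shift) & o->bitm;
if ((o->flags & PPC_OPERAND_SIGNED) != 0)
{
/* BITM is a run of ones possibly followed by trailing zeros
(e.g. 0xfffc for branch displacements); fill the trailing
zeros, then fold the sign bit down. */
unsigned long top = o->bitm;
top |= (top & -top) - 1;
top = (top >> 1) + 1;
op = (op ^ top) - top;
}
return op;
}
#endif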
/* Elements in the table are retrieved by indexing with values from
the operands field of the powerpc_opcodes table. */
extern const struct powerpc_operand powerpc_operands[];
extern const unsigned int num_powerpc_operands;
/* Values defined for the flags field of a struct powerpc_operand. */
/* This operand takes signed values. */
#define PPC_OPERAND_SIGNED (0x1)
/* This operand takes signed values, but also accepts a full positive
range of values when running in 32 bit mode. That is, if bits is
16, it takes any value from -0x8000 to 0xffff. In 64 bit mode,
this flag is ignored. */
#define PPC_OPERAND_SIGNOPT (0x2)
/* This operand does not actually exist in the assembler input. This
is used to support extended mnemonics such as mr, for which two
operand fields are identical. The assembler should call the
insert function with any op value. The disassembler should call
the extract function, ignore the return value, and check the value
placed in the valid argument. */
#define PPC_OPERAND_FAKE (0x4)
/* The next operand should be wrapped in parentheses rather than
separated from this one by a comma. This is used for the load and
store instructions which want their operands to look like
reg,displacement(reg)
*/
#define PPC_OPERAND_PARENS (0x8)
/* This operand may use the symbolic names for the CR fields, which
are
lt 0 gt 1 eq 2 so 3 un 3
cr0 0 cr1 1 cr2 2 cr3 3
cr4 4 cr5 5 cr6 6 cr7 7
These may be combined arithmetically, as in cr2*4+gt. These are
only supported on the PowerPC, not the POWER. */
#define PPC_OPERAND_CR (0x10)
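/* For example (illustrative), "cr2*4+gt" evaluates to 2 * 4 + 1 = 9,
the BI value that selects the "greater than" bit of CR field 2. */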
/* This operand names a register. The disassembler uses this to print
register names with a leading 'r'. */
#define PPC_OPERAND_GPR (0x20)
/* Like PPC_OPERAND_GPR, but don't print a leading 'r' for r0. */
#define PPC_OPERAND_GPR_0 (0x40)
/* This operand names a floating point register. The disassembler
prints these with a leading 'f'. */
#define PPC_OPERAND_FPR (0x80)
/* This operand is a relative branch displacement. The disassembler
prints these symbolically if possible. */
#define PPC_OPERAND_RELATIVE (0x100)
/* This operand is an absolute branch address. The disassembler
prints these symbolically if possible. */
#define PPC_OPERAND_ABSOLUTE (0x200)
/* This operand is optional, and is zero if omitted. This is used for
example, in the optional BF field in the comparison instructions. The
assembler must count the number of operands remaining on the line,
and the number of operands remaining for the opcode, and decide
whether this operand is present or not. The disassembler should
print this operand out only if it is not zero. */
#define PPC_OPERAND_OPTIONAL (0x400)
/* This flag is only used with PPC_OPERAND_OPTIONAL. If this operand
is omitted, then for the next operand use this operand value plus
1, ignoring the next operand field for the opcode. This wretched
hack is needed because the Power rotate instructions can take
either 4 or 5 operands. The disassembler should print this operand
out regardless of the PPC_OPERAND_OPTIONAL field. */
#define PPC_OPERAND_NEXT (0x800)
/* This operand should be regarded as a negative number for the
purposes of overflow checking (i.e., the normal most negative
number is disallowed and one more than the normal most positive
number is allowed). This flag will only be set for a signed
operand. */
#define PPC_OPERAND_NEGATIVE (0x1000)
/* This operand names a vector unit register. The disassembler
prints these with a leading 'v'. */
#define PPC_OPERAND_VR (0x2000)
/* This operand is for the DS field in a DS form instruction. */
#define PPC_OPERAND_DS (0x4000)
/* This operand is for the DQ field in a DQ form instruction. */
#define PPC_OPERAND_DQ (0x8000)
/* Valid range of operand is 0..n rather than 0..n-1. */
#define PPC_OPERAND_PLUS1 (0x10000)
/* The POWER and PowerPC assemblers use a few macros. We keep them
with the operands table for simplicity. The macro table is an
array of struct powerpc_macro. */
struct powerpc_macro
{
/* The macro name. */
const char *name;
/* The number of operands the macro takes. */
unsigned int operands;
/* One bit flags for the opcode. These are used to indicate which
specific processors support the instructions. The values are the
same as those for the struct powerpc_opcode flags field. */
unsigned long flags;
/* A format string to turn the macro into a normal instruction.
Each %N in the string is replaced with operand number N (zero
based). */
const char *format;
};
extern const struct powerpc_macro powerpc_macros[];
extern const int powerpc_num_macros;
/* ppc-opc.c -- PowerPC opcode list
Copyright 1994, 1995, 1996, 1997, 1998, 2000, 2001, 2002, 2003, 2004,
2005, 2006, 2007 Free Software Foundation, Inc.
Written by Ian Lance Taylor, Cygnus Support
This file is part of GDB, GAS, and the GNU binutils.
GDB, GAS, and the GNU binutils are free software; you can redistribute
them and/or modify them under the terms of the GNU General Public
License as published by the Free Software Foundation; either version
2, or (at your option) any later version.
GDB, GAS, and the GNU binutils are distributed in the hope that they
will be useful, but WITHOUT ANY WARRANTY; without even the implied
warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
the GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this file; see the file COPYING.
If not, see <http://www.gnu.org/licenses/>. */
/* This file holds the PowerPC opcode table. The opcode table
includes almost all of the extended instruction mnemonics. This
permits the disassembler to use them, and simplifies the assembler
logic, at the cost of increasing the table size. The table is
strictly constant data, so the compiler should be able to put it in
the .text section.
This file also holds the operand table. All knowledge about
inserting operands into instructions and vice-versa is kept in this
file. */
/* Local insertion and extraction functions. */
static unsigned long insert_bat (unsigned long, long, int, const char **);
static long extract_bat (unsigned long, int, int *);
static unsigned long insert_bba (unsigned long, long, int, const char **);
static long extract_bba (unsigned long, int, int *);
static unsigned long insert_bdm (unsigned long, long, int, const char **);
static long extract_bdm (unsigned long, int, int *);
static unsigned long insert_bdp (unsigned long, long, int, const char **);
static long extract_bdp (unsigned long, int, int *);
static unsigned long insert_bo (unsigned long, long, int, const char **);
static long extract_bo (unsigned long, int, int *);
static unsigned long insert_boe (unsigned long, long, int, const char **);
static long extract_boe (unsigned long, int, int *);
static unsigned long insert_fxm (unsigned long, long, int, const char **);
static long extract_fxm (unsigned long, int, int *);
static unsigned long insert_mbe (unsigned long, long, int, const char **);
static long extract_mbe (unsigned long, int, int *);
static unsigned long insert_mb6 (unsigned long, long, int, const char **);
static long extract_mb6 (unsigned long, int, int *);
static long extract_nb (unsigned long, int, int *);
static unsigned long insert_nsi (unsigned long, long, int, const char **);
static long extract_nsi (unsigned long, int, int *);
static unsigned long insert_ral (unsigned long, long, int, const char **);
static unsigned long insert_ram (unsigned long, long, int, const char **);
static unsigned long insert_raq (unsigned long, long, int, const char **);
static unsigned long insert_ras (unsigned long, long, int, const char **);
static unsigned long insert_rbs (unsigned long, long, int, const char **);
static long extract_rbs (unsigned long, int, int *);
static unsigned long insert_sh6 (unsigned long, long, int, const char **);
static long extract_sh6 (unsigned long, int, int *);
static unsigned long insert_spr (unsigned long, long, int, const char **);
static long extract_spr (unsigned long, int, int *);
static unsigned long insert_sprg (unsigned long, long, int, const char **);
static long extract_sprg (unsigned long, int, int *);
static unsigned long insert_tbr (unsigned long, long, int, const char **);
static long extract_tbr (unsigned long, int, int *);
/* The operands table.
The fields are bitm, shift, insert, extract, flags.
We used to put parens around the various additions, like the one
for BA just below. However, that caused trouble with feeble
compilers with a limit on depth of a parenthesized expression, like
(reportedly) the compiler in Microsoft Developer Studio 5. So we
omit the parens, since the macros are never used in a context where
the addition will be ambiguous. */
const struct powerpc_operand powerpc_operands[] =
{
/* The zero index is used to indicate the end of the list of
operands. */
#define UNUSED 0
{ 0, 0, NULL, NULL, 0 },
/* The BA field in an XL form instruction. */
#define BA UNUSED + 1
/* The BI field in a B form or XL form instruction. */
#define BI BA
#define BI_MASK (0x1f << 16)
{ 0x1f, 16, NULL, NULL, PPC_OPERAND_CR },
/* The BA field in an XL form instruction when it must be the same
as the BT field in the same instruction. */
#define BAT BA + 1
{ 0x1f, 16, insert_bat, extract_bat, PPC_OPERAND_FAKE },
/* The BB field in an XL form instruction. */
#define BB BAT + 1
#define BB_MASK (0x1f << 11)
{ 0x1f, 11, NULL, NULL, PPC_OPERAND_CR },
/* The BB field in an XL form instruction when it must be the same
as the BA field in the same instruction. */
#define BBA BB + 1
{ 0x1f, 11, insert_bba, extract_bba, PPC_OPERAND_FAKE },
/* The BD field in a B form instruction. The lower two bits are
forced to zero. */
#define BD BBA + 1
{ 0xfffc, 0, NULL, NULL, PPC_OPERAND_RELATIVE | PPC_OPERAND_SIGNED },
/* The BD field in a B form instruction when absolute addressing is
used. */
#define BDA BD + 1
{ 0xfffc, 0, NULL, NULL, PPC_OPERAND_ABSOLUTE | PPC_OPERAND_SIGNED },
/* The BD field in a B form instruction when the - modifier is used.
This sets the y bit of the BO field appropriately. */
#define BDM BDA + 1
{ 0xfffc, 0, insert_bdm, extract_bdm,
PPC_OPERAND_RELATIVE | PPC_OPERAND_SIGNED },
/* The BD field in a B form instruction when the - modifier is used
and absolute address is used. */
#define BDMA BDM + 1
{ 0xfffc, 0, insert_bdm, extract_bdm,
PPC_OPERAND_ABSOLUTE | PPC_OPERAND_SIGNED },
/* The BD field in a B form instruction when the + modifier is used.
This sets the y bit of the BO field appropriately. */
#define BDP BDMA + 1
{ 0xfffc, 0, insert_bdp, extract_bdp,
PPC_OPERAND_RELATIVE | PPC_OPERAND_SIGNED },
/* The BD field in a B form instruction when the + modifier is used
and absolute addressing is used. */
#define BDPA BDP + 1
{ 0xfffc, 0, insert_bdp, extract_bdp,
PPC_OPERAND_ABSOLUTE | PPC_OPERAND_SIGNED },
/* The BF field in an X or XL form instruction. */
#define BF BDPA + 1
/* The CRFD field in an X form instruction. */
#define CRFD BF
{ 0x7, 23, NULL, NULL, PPC_OPERAND_CR },
/* The BF field in an X or XL form instruction. */
#define BFF BF + 1
{ 0x7, 23, NULL, NULL, 0 },
/* An optional BF field. This is used for comparison instructions,
in which an omitted BF field is taken as zero. */
#define OBF BFF + 1
{ 0x7, 23, NULL, NULL, PPC_OPERAND_CR | PPC_OPERAND_OPTIONAL },
/* The BFA field in an X or XL form instruction. */
#define BFA OBF + 1
{ 0x7, 18, NULL, NULL, PPC_OPERAND_CR },
/* The BO field in a B form instruction. Certain values are
illegal. */
#define BO BFA + 1
#define BO_MASK (0x1f << 21)
{ 0x1f, 21, insert_bo, extract_bo, 0 },
/* The BO field in a B form instruction when the + or - modifier is
used. This is like the BO field, but it must be even. */
#define BOE BO + 1
{ 0x1e, 21, insert_boe, extract_boe, 0 },
#define BH BOE + 1
{ 0x3, 11, NULL, NULL, PPC_OPERAND_OPTIONAL },
/* The BT field in an X or XL form instruction. */
#define BT BH + 1
{ 0x1f, 21, NULL, NULL, PPC_OPERAND_CR },
/* The condition register number portion of the BI field in a B form
or XL form instruction. This is used for the extended
conditional branch mnemonics, which set the lower two bits of the
BI field. This field is optional. */
#define CR BT + 1
{ 0x7, 18, NULL, NULL, PPC_OPERAND_CR | PPC_OPERAND_OPTIONAL },
/* The CRB field in an X form instruction. */
#define CRB CR + 1
/* The MB field in an M form instruction. */
#define MB CRB
#define MB_MASK (0x1f << 6)
{ 0x1f, 6, NULL, NULL, 0 },
/* The CRFS field in an X form instruction. */
#define CRFS CRB + 1
{ 0x7, 0, NULL, NULL, PPC_OPERAND_CR },
/* The CT field in an X form instruction. */
#define CT CRFS + 1
/* The MO field in an mbar instruction. */
#define MO CT
{ 0x1f, 21, NULL, NULL, PPC_OPERAND_OPTIONAL },
/* The D field in a D form instruction. This is a displacement off
a register, and implies that the next operand is a register in
parentheses. */
#define D CT + 1
{ 0xffff, 0, NULL, NULL, PPC_OPERAND_PARENS | PPC_OPERAND_SIGNED },
/* The DE field in a DE form instruction. This is like D, but is 12
bits only. */
#define DE D + 1
{ 0xfff, 4, NULL, NULL, PPC_OPERAND_PARENS | PPC_OPERAND_SIGNED },
/* The DES field in a DES form instruction. This is like DS, but is 14
bits only (12 stored.) */
#define DES DE + 1
{ 0x3ffc, 2, NULL, NULL, PPC_OPERAND_PARENS | PPC_OPERAND_SIGNED },
/* The DQ field in a DQ form instruction. This is like D, but the
lower four bits are forced to zero. */
#define DQ DES + 1
{ 0xfff0, 0, NULL, NULL,
PPC_OPERAND_PARENS | PPC_OPERAND_SIGNED | PPC_OPERAND_DQ },
/* The DS field in a DS form instruction. This is like D, but the
lower two bits are forced to zero. */
#undef DS
#define DS DQ + 1
{ 0xfffc, 0, NULL, NULL,
PPC_OPERAND_PARENS | PPC_OPERAND_SIGNED | PPC_OPERAND_DS },
/* The E field in a wrteei instruction. */
#define E DS + 1
{ 0x1, 15, NULL, NULL, 0 },
/* The FL1 field in a POWER SC form instruction. */
#define FL1 E + 1
/* The U field in an X form instruction. */
#define U FL1
{ 0xf, 12, NULL, NULL, 0 },
/* The FL2 field in a POWER SC form instruction. */
#define FL2 FL1 + 1
{ 0x7, 2, NULL, NULL, 0 },
/* The FLM field in an XFL form instruction. */
#define FLM FL2 + 1
{ 0xff, 17, NULL, NULL, 0 },
/* The FRA field in an X or A form instruction. */
#define FRA FLM + 1
#define FRA_MASK (0x1f << 16)
{ 0x1f, 16, NULL, NULL, PPC_OPERAND_FPR },
/* The FRB field in an X or A form instruction. */
#define FRB FRA + 1
#define FRB_MASK (0x1f << 11)
{ 0x1f, 11, NULL, NULL, PPC_OPERAND_FPR },
/* The FRC field in an A form instruction. */
#define FRC FRB + 1
#define FRC_MASK (0x1f << 6)
{ 0x1f, 6, NULL, NULL, PPC_OPERAND_FPR },
/* The FRS field in an X form instruction or the FRT field in a D, X
or A form instruction. */
#define FRS FRC + 1
#define FRT FRS
{ 0x1f, 21, NULL, NULL, PPC_OPERAND_FPR },
/* The FXM field in an XFX instruction. */
#define FXM FRS + 1
{ 0xff, 12, insert_fxm, extract_fxm, 0 },
/* Power4 version for mfcr. */
#define FXM4 FXM + 1
{ 0xff, 12, insert_fxm, extract_fxm, PPC_OPERAND_OPTIONAL },
/* The L field in a D or X form instruction. */
#define L FXM4 + 1
{ 0x1, 21, NULL, NULL, PPC_OPERAND_OPTIONAL },
/* The LEV field in a POWER SVC form instruction. */
#define SVC_LEV L + 1
{ 0x7f, 5, NULL, NULL, 0 },
/* The LEV field in an SC form instruction. */
#define LEV SVC_LEV + 1
{ 0x7f, 5, NULL, NULL, PPC_OPERAND_OPTIONAL },
/* The LI field in an I form instruction. The lower two bits are
forced to zero. */
#define LI LEV + 1
{ 0x3fffffc, 0, NULL, NULL, PPC_OPERAND_RELATIVE | PPC_OPERAND_SIGNED },
/* The LI field in an I form instruction when used as an absolute
address. */
#define LIA LI + 1
{ 0x3fffffc, 0, NULL, NULL, PPC_OPERAND_ABSOLUTE | PPC_OPERAND_SIGNED },
/* The LS field in an X (sync) form instruction. */
#define LS LIA + 1
{ 0x3, 21, NULL, NULL, PPC_OPERAND_OPTIONAL },
/* The ME field in an M form instruction. */
#define ME LS + 1
#define ME_MASK (0x1f << 1)
{ 0x1f, 1, NULL, NULL, 0 },
/* The MB and ME fields in an M form instruction expressed a single
operand which is a bitmask indicating which bits to select. This
is a two operand form using PPC_OPERAND_NEXT. See the
description in opcode/ppc.h for what this means. */
#define MBE ME + 1
{ 0x1f, 6, NULL, NULL, PPC_OPERAND_OPTIONAL | PPC_OPERAND_NEXT },
{ -1, 0, insert_mbe, extract_mbe, 0 },
/* The MB or ME field in an MD or MDS form instruction. The high
bit is wrapped to the low end. */
#define MB6 MBE + 2
#define ME6 MB6
#define MB6_MASK (0x3f << 5)
{ 0x3f, 5, insert_mb6, extract_mb6, 0 },
/* The NB field in an X form instruction. The value 32 is stored as
0. */
#define NB MB6 + 1
{ 0x1f, 11, NULL, extract_nb, PPC_OPERAND_PLUS1 },
/* The NSI field in a D form instruction. This is the same as the
SI field, only negated. */
#define NSI NB + 1
{ 0xffff, 0, insert_nsi, extract_nsi,
PPC_OPERAND_NEGATIVE | PPC_OPERAND_SIGNED },
/* The RA field in an D, DS, DQ, X, XO, M, or MDS form instruction. */
#define RA NSI + 1
#define RA_MASK (0x1f << 16)
{ 0x1f, 16, NULL, NULL, PPC_OPERAND_GPR },
/* As above, but 0 in the RA field means zero, not r0. */
#define RA0 RA + 1
{ 0x1f, 16, NULL, NULL, PPC_OPERAND_GPR_0 },
/* The RA field in the DQ form lq instruction, which has special
value restrictions. */
#define RAQ RA0 + 1
{ 0x1f, 16, insert_raq, NULL, PPC_OPERAND_GPR_0 },
/* The RA field in a D or X form instruction which is an updating
load, which means that the RA field may not be zero and may not
equal the RT field. */
#define RAL RAQ + 1
{ 0x1f, 16, insert_ral, NULL, PPC_OPERAND_GPR_0 },
/* The RA field in an lmw instruction, which has special value
restrictions. */
#define RAM RAL + 1
{ 0x1f, 16, insert_ram, NULL, PPC_OPERAND_GPR_0 },
/* The RA field in a D or X form instruction which is an updating
store or an updating floating point load, which means that the RA
field may not be zero. */
#define RAS RAM + 1
{ 0x1f, 16, insert_ras, NULL, PPC_OPERAND_GPR_0 },
/* The RA field of the tlbwe instruction, which is optional. */
#define RAOPT RAS + 1
{ 0x1f, 16, NULL, NULL, PPC_OPERAND_GPR | PPC_OPERAND_OPTIONAL },
/* The RB field in an X, XO, M, or MDS form instruction. */
#define RB RAOPT + 1
#define RB_MASK (0x1f << 11)
{ 0x1f, 11, NULL, NULL, PPC_OPERAND_GPR },
/* The RB field in an X form instruction when it must be the same as
the RS field in the instruction. This is used for extended
mnemonics like mr. */
#define RBS RB + 1
{ 0x1f, 11, insert_rbs, extract_rbs, PPC_OPERAND_FAKE },
/* The RS field in a D, DS, X, XFX, XS, M, MD or MDS form
instruction or the RT field in a D, DS, X, XFX or XO form
instruction. */
#define RS RBS + 1
#define RT RS
#define RT_MASK (0x1f << 21)
{ 0x1f, 21, NULL, NULL, PPC_OPERAND_GPR },
/* The RS and RT fields of the DS form stq instruction, which have
special value restrictions. */
#define RSQ RS + 1
#define RTQ RSQ
{ 0x1e, 21, NULL, NULL, PPC_OPERAND_GPR_0 },
/* The RS field of the tlbwe instruction, which is optional. */
#define RSO RSQ + 1
#define RTO RSO
{ 0x1f, 21, NULL, NULL, PPC_OPERAND_GPR | PPC_OPERAND_OPTIONAL },
/* The SH field in an X or M form instruction. */
#define SH RSO + 1
#define SH_MASK (0x1f << 11)
/* The other UIMM field in a EVX form instruction. */
#define EVUIMM SH
{ 0x1f, 11, NULL, NULL, 0 },
/* The SH field in an MD form instruction. This is split. */
#define SH6 SH + 1
#define SH6_MASK ((0x1f << 11) | (1 << 1))
{ 0x3f, -1, insert_sh6, extract_sh6, 0 },
/* The SH field of the tlbwe instruction, which is optional. */
#define SHO SH6 + 1
{ 0x1f, 11, NULL, NULL, PPC_OPERAND_OPTIONAL },
/* The SI field in a D form instruction. */
#define SI SHO + 1
{ 0xffff, 0, NULL, NULL, PPC_OPERAND_SIGNED },
/* The SI field in a D form instruction when we accept a wide range
of positive values. */
#define SISIGNOPT SI + 1
{ 0xffff, 0, NULL, NULL, PPC_OPERAND_SIGNED | PPC_OPERAND_SIGNOPT },
/* The SPR field in an XFX form instruction. This is flipped--the
lower 5 bits are stored in the upper 5 and vice versa. */
#define SPR SISIGNOPT + 1
#define PMR SPR
#define SPR_MASK (0x3ff << 11)
{ 0x3ff, 11, insert_spr, extract_spr, 0 },
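/* Example of the flipped encoding (illustrative): the count register is
SPR 9 = 0b00000_01001, so it is stored in the instruction's SPR field as
0b01001_00000 = 0x120, which is why "mfctr r0" assembles to 0x7c0902a6. */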
/* The BAT index number in an XFX form m[ft]ibat[lu] instruction. */
#define SPRBAT SPR + 1
#define SPRBAT_MASK (0x3 << 17)
{ 0x3, 17, NULL, NULL, 0 },
/* The SPRG register number in an XFX form m[ft]sprg instruction. */
#define SPRG SPRBAT + 1
{ 0x1f, 16, insert_sprg, extract_sprg, 0 },
/* The SR field in an X form instruction. */
#define SR SPRG + 1
{ 0xf, 16, NULL, NULL, 0 },
/* The STRM field in an X AltiVec form instruction. */
#define STRM SR + 1
{ 0x3, 21, NULL, NULL, 0 },
/* The SV field in a POWER SC form instruction. */
#define SV STRM + 1
{ 0x3fff, 2, NULL, NULL, 0 },
/* The TBR field in an XFX form instruction. This is like the SPR
field, but it is optional. */
#define TBR SV + 1
{ 0x3ff, 11, insert_tbr, extract_tbr, PPC_OPERAND_OPTIONAL },
/* The TO field in a D or X form instruction. */
#define TO TBR + 1
#define TO_MASK (0x1f << 21)
{ 0x1f, 21, NULL, NULL, 0 },
/* The UI field in a D form instruction. */
#define UI TO + 1
{ 0xffff, 0, NULL, NULL, 0 },
/* The VA field in a VA, VX or VXR form instruction. */
#define VA UI + 1
{ 0x1f, 16, NULL, NULL, PPC_OPERAND_VR },
/* The VB field in a VA, VX or VXR form instruction. */
#define VB VA + 1
{ 0x1f, 11, NULL, NULL, PPC_OPERAND_VR },
/* The VC field in a VA form instruction. */
#define VC VB + 1
{ 0x1f, 6, NULL, NULL, PPC_OPERAND_VR },
/* The VD or VS field in a VA, VX, VXR or X form instruction. */
#define VD VC + 1
#define VS VD
{ 0x1f, 21, NULL, NULL, PPC_OPERAND_VR },
/* The SIMM field in a VX form instruction. */
#define SIMM VD + 1
{ 0x1f, 16, NULL, NULL, PPC_OPERAND_SIGNED},
/* The UIMM field in a VX form instruction, and TE in Z form. */
#define UIMM SIMM + 1
#define TE UIMM
{ 0x1f, 16, NULL, NULL, 0 },
/* The SHB field in a VA form instruction. */
#define SHB UIMM + 1
{ 0xf, 6, NULL, NULL, 0 },
/* The other UIMM field in a half word EVX form instruction. */
#define EVUIMM_2 SHB + 1
{ 0x3e, 10, NULL, NULL, PPC_OPERAND_PARENS },
/* The other UIMM field in a word EVX form instruction. */
#define EVUIMM_4 EVUIMM_2 + 1
{ 0x7c, 9, NULL, NULL, PPC_OPERAND_PARENS },
/* The other UIMM field in a double EVX form instruction. */
#define EVUIMM_8 EVUIMM_4 + 1
{ 0xf8, 8, NULL, NULL, PPC_OPERAND_PARENS },
/* The WS field. */
#define WS EVUIMM_8 + 1
{ 0x7, 11, NULL, NULL, 0 },
/* The L field in an mtmsrd or A form instruction or W in an X form. */
#define A_L WS + 1
#define W A_L
{ 0x1, 16, NULL, NULL, PPC_OPERAND_OPTIONAL },
#define RMC A_L + 1
{ 0x3, 9, NULL, NULL, 0 },
#define R RMC + 1
{ 0x1, 16, NULL, NULL, 0 },
#define SP R + 1
{ 0x3, 19, NULL, NULL, 0 },
#define S SP + 1
{ 0x1, 20, NULL, NULL, 0 },
/* SH field starting at bit position 16. */
#define SH16 S + 1
/* The DCM and DGM fields in a Z form instruction. */
#define DCM SH16
#define DGM DCM
{ 0x3f, 10, NULL, NULL, 0 },
/* The EH field in larx instruction. */
#define EH SH16 + 1
{ 0x1, 0, NULL, NULL, PPC_OPERAND_OPTIONAL },
/* The L field in an mtfsf or XFL form instruction. */
#define XFL_L EH + 1
{ 0x1, 25, NULL, NULL, PPC_OPERAND_OPTIONAL},
};
const unsigned int num_powerpc_operands = (sizeof (powerpc_operands)
/ sizeof (powerpc_operands[0]));
/* The functions used to insert and extract complicated operands. */
/* The BA field in an XL form instruction when it must be the same as
the BT field in the same instruction. This operand is marked FAKE.
The insertion function just copies the BT field into the BA field,
and the extraction function just checks that the fields are the
same. */
static unsigned long
insert_bat (unsigned long insn,
long value ATTRIBUTE_UNUSED,
int dialect ATTRIBUTE_UNUSED,
const char **errmsg ATTRIBUTE_UNUSED)
{
return insn | (((insn >> 21) & 0x1f) << 16);
}
static long
extract_bat (unsigned long insn,
int dialect ATTRIBUTE_UNUSED,
int *invalid)
{
if (((insn >> 21) & 0x1f) != ((insn >> 16) & 0x1f))
*invalid = 1;
return 0;
}
/* The BB field in an XL form instruction when it must be the same as
the BA field in the same instruction. This operand is marked FAKE.
The insertion function just copies the BA field into the BB field,
and the extraction function just checks that the fields are the
same. */
static unsigned long
insert_bba (unsigned long insn,
long value ATTRIBUTE_UNUSED,
int dialect ATTRIBUTE_UNUSED,
const char **errmsg ATTRIBUTE_UNUSED)
{
return insn | (((insn >> 16) & 0x1f) << 11);
}
static long
extract_bba (unsigned long insn,
int dialect ATTRIBUTE_UNUSED,
int *invalid)
{
if (((insn >> 16) & 0x1f) != ((insn >> 11) & 0x1f))
*invalid = 1;
return 0;
}
/* The BD field in a B form instruction when the - modifier is used.
This modifier means that the branch is not expected to be taken.
For chips built to versions of the architecture prior to version 2
(ie. not Power4 compatible), we set the y bit of the BO field to 1
if the offset is negative. When extracting, we require that the y
bit be 1 and that the offset be positive, since if the y bit is 0
we just want to print the normal form of the instruction.
Power4 compatible targets use two bits, "a", and "t", instead of
the "y" bit. "at" == 00 => no hint, "at" == 01 => unpredictable,
"at" == 10 => not taken, "at" == 11 => taken. The "t" bit is 00001
in the BO field, the "a" bit is 00010 for branch on CR(BI) and 01000
for branch on CTR. We only handle the taken/not-taken hint here.
Note that we don't relax the conditions tested here when
disassembling with -Many because insns using extract_bdm and
extract_bdp always occur in pairs. One or the other will always
be valid. */
static unsigned long
insert_bdm (unsigned long insn,
long value,
int dialect,
const char **errmsg ATTRIBUTE_UNUSED)
{
if ((dialect & PPC_OPCODE_POWER4) == 0)
{
if ((value & 0x8000) != 0)
insn |= 1 << 21;
}
else
{
if ((insn & (0x14 << 21)) == (0x04 << 21))
insn |= 0x02 << 21;
else if ((insn & (0x14 << 21)) == (0x10 << 21))
insn |= 0x08 << 21;
}
return insn | (value & 0xfffc);
}
static long
extract_bdm (unsigned long insn,
int dialect,
int *invalid)
{
if ((dialect & PPC_OPCODE_POWER4) == 0)
{
if (((insn & (1 << 21)) == 0) != ((insn & (1 << 15)) == 0))
*invalid = 1;
}
else
{
if ((insn & (0x17 << 21)) != (0x06 << 21)
&& (insn & (0x1d << 21)) != (0x18 << 21))
*invalid = 1;
}
return ((insn & 0xfffc) ^ 0x8000) - 0x8000;
}
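/* Editorial illustration of the arithmetic above: on a pre-POWER4
target the "-" modifier with a negative displacement ORs 0x01 into
the BO field (the "y" hint bit, instruction bit 21). On a POWER4
target it instead ORs 0x02 into a 001at-style BO, giving "at" == 10
(not taken), or 0x08 into a 1a00t/1a01t-style BO for branches that
decrement the CTR. */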
/* The BD field in a B form instruction when the + modifier is used.
This is like BDM, above, except that the branch is expected to be
taken. */
static unsigned long
insert_bdp (unsigned long insn,
long value,
int dialect,
const char **errmsg ATTRIBUTE_UNUSED)
{
if ((dialect & PPC_OPCODE_POWER4) == 0)
{
if ((value & 0x8000) == 0)
insn |= 1 << 21;
}
else
{
if ((insn & (0x14 << 21)) == (0x04 << 21))
insn |= 0x03 << 21;
else if ((insn & (0x14 << 21)) == (0x10 << 21))
insn |= 0x09 << 21;
}
return insn | (value & 0xfffc);
}
static long
extract_bdp (unsigned long insn,
int dialect,
int *invalid)
{
if ((dialect & PPC_OPCODE_POWER4) == 0)
{
if (((insn & (1 << 21)) == 0) == ((insn & (1 << 15)) == 0))
*invalid = 1;
}
else
{
if ((insn & (0x17 << 21)) != (0x07 << 21)
&& (insn & (0x1d << 21)) != (0x19 << 21))
*invalid = 1;
}
return ((insn & 0xfffc) ^ 0x8000) - 0x8000;
}
/* Check for legal values of a BO field. */
static int
valid_bo (long value, int dialect, int extract)
{
if ((dialect & PPC_OPCODE_POWER4) == 0)
{
int valid;
/* Certain encodings have bits that are required to be zero.
These are (z must be zero, y may be anything):
001zy
011zy
1z00y
1z01y
1z1zz
*/
switch (value & 0x14)
{
default:
case 0:
valid = 1;
break;
case 0x4:
valid = (value & 0x2) == 0;
break;
case 0x10:
valid = (value & 0x8) == 0;
break;
case 0x14:
valid = value == 0x14;
break;
}
/* When disassembling with -Many, accept power4 encodings too. */
if (valid
|| (dialect & PPC_OPCODE_ANY) == 0
|| !extract)
return valid;
}
/* Certain encodings have bits that are required to be zero.
These are (z must be zero, a & t may be anything):
0000z
0001z
0100z
0101z
001at
011at
1a00t
1a01t
1z1zz
*/
if ((value & 0x14) == 0)
return (value & 0x1) == 0;
else if ((value & 0x14) == 0x14)
return value == 0x14;
else
return 1;
}
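/* Editorial example of the rules above: BO == 0x14 (1z1zz, branch
always) passes both rule sets, while BO == 0x15 fails both because
the z bits must be zero. BO == 0x06 (001zy with z == 1) is rejected
by the pre-POWER4 rules but accepted as a POWER4 hint encoding
(001at with a == 1, t == 0). */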
/* The BO field in a B form instruction. Warn about attempts to set
the field to an illegal value. */
static unsigned long
insert_bo (unsigned long insn,
long value,
int dialect,
const char **errmsg)
{
if (!valid_bo (value, dialect, 0))
*errmsg = _("invalid conditional option");
return insn | ((value & 0x1f) << 21);
}
static long
extract_bo (unsigned long insn,
int dialect,
int *invalid)
{
long value;
value = (insn >> 21) & 0x1f;
if (!valid_bo (value, dialect, 1))
*invalid = 1;
return value;
}
/* The BO field in a B form instruction when the + or - modifier is
used. This is like the BO field, but it must be even. When
extracting it, we force it to be even. */
static unsigned long
insert_boe (unsigned long insn,
long value,
int dialect,
const char **errmsg)
{
if (!valid_bo (value, dialect, 0))
*errmsg = _("invalid conditional option");
else if ((value & 1) != 0)
*errmsg = _("attempt to set y bit when using + or - modifier");
return insn | ((value & 0x1f) << 21);
}
static long
extract_boe (unsigned long insn,
int dialect,
int *invalid)
{
long value;
value = (insn >> 21) & 0x1f;
if (!valid_bo (value, dialect, 1))
*invalid = 1;
return value & 0x1e;
}
/* FXM mask in mfcr and mtcrf instructions. */
static unsigned long
insert_fxm (unsigned long insn,
long value,
int dialect,
const char **errmsg)
{
/* If we're handling the mfocrf and mtocrf insns ensure that exactly
one bit of the mask field is set. */
if ((insn & (1 << 20)) != 0)
{
if (value == 0 || (value & -value) != value)
{
*errmsg = _("invalid mask field");
value = 0;
}
}
/* If the optional field on mfcr is missing that means we want to use
the old form of the instruction that moves the whole cr. In that
case we'll have VALUE zero. There doesn't seem to be a way to
distinguish this from the case where someone writes mfcr %r3,0. */
else if (value == 0)
;
/* If only one bit of the FXM field is set, we can use the new form
of the instruction, which is faster. Unlike the Power4 branch hint
encoding, this is not backward compatible. Do not generate the
new form unless -mpower4 has been given, or -many and the two
operand form of mfcr was used. */
else if ((value & -value) == value
&& ((dialect & PPC_OPCODE_POWER4) != 0
|| ((dialect & PPC_OPCODE_ANY) != 0
&& (insn & (0x3ff << 1)) == 19 << 1)))
insn |= 1 << 20;
/* Any other value on mfcr is an error. */
else if ((insn & (0x3ff << 1)) == 19 << 1)
{
*errmsg = _("ignoring invalid mfcr mask");
value = 0;
}
return insn | ((value & 0xff) << 12);
}
static long
extract_fxm (unsigned long insn,
int dialect ATTRIBUTE_UNUSED,
int *invalid)
{
long mask = (insn >> 12) & 0xff;
/* Is this a Power4 insn? */
if ((insn & (1 << 20)) != 0)
{
/* Exactly one bit of MASK should be set. */
if (mask == 0 || (mask & -mask) != mask)
*invalid = 1;
}
/* Check that non-power4 form of mfcr has a zero MASK. */
else if ((insn & (0x3ff << 1)) == 19 << 1)
{
if (mask != 0)
*invalid = 1;
}
return mask;
}
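/* A worked example added for illustration: "mtcrf 0x80,rS" stores the
mask at bits 12..19, i.e. 0x80 << 12 == 0x00080000, and leaves bit 20
clear (the traditional form). Given -mpower4, the same single-bit
mask makes insert_fxm set bit 20 as well, selecting the mtocrf form
that updates only CR field 0. */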
/* The MB and ME fields in an M form instruction expressed as a single
operand which is itself a bitmask. The extraction function always
marks it as invalid, since we never want to recognize an
instruction which uses a field of this type. */
static unsigned long
insert_mbe (unsigned long insn,
long value,
int dialect ATTRIBUTE_UNUSED,
const char **errmsg)
{
unsigned long uval, mask;
int mb, me, mx, count, last;
uval = value;
if (uval == 0)
{
*errmsg = _("illegal bitmask");
return insn;
}
mb = 0;
me = 32;
if ((uval & 1) != 0)
last = 1;
else
last = 0;
count = 0;
/* mb: location of last 0->1 transition */
/* me: location of last 1->0 transition */
/* count: # transitions */
for (mx = 0, mask = 1L << 31; mx < 32; ++mx, mask >>= 1)
{
if ((uval & mask) && !last)
{
++count;
mb = mx;
last = 1;
}
else if (!(uval & mask) && last)
{
++count;
me = mx;
last = 0;
}
}
if (me == 0)
me = 32;
if (count != 2 && (count != 0 || ! last))
*errmsg = _("illegal bitmask");
return insn | (mb << 6) | ((me - 1) << 1);
}
static long
extract_mbe (unsigned long insn,
int dialect ATTRIBUTE_UNUSED,
int *invalid)
{
long ret;
int mb, me;
int i;
*invalid = 1;
mb = (insn >> 6) & 0x1f;
me = (insn >> 1) & 0x1f;
if (mb < me + 1)
{
ret = 0;
for (i = mb; i <= me; i++)
ret |= 1L << (31 - i);
}
else if (mb == me + 1)
ret = ~0;
else /* (mb > me + 1) */
{
ret = ~0;
for (i = me + 1; i < mb; i++)
ret &= ~(1L << (31 - i));
}
return ret;
}
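/* Editorial example of the conversion above: the mask 0x00ffff00 has
one 0->1 transition at bit position 8 and one 1->0 transition at
position 24 (counting from the most significant bit), so insert_mbe
stores MB = 8 and ME = 23, the same fields one would write by hand
for "rlwinm rA,rS,SH,8,23". */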
/* The MB or ME field in an MD or MDS form instruction. The high bit
is wrapped to the low end. */
static unsigned long
insert_mb6 (unsigned long insn,
long value,
int dialect ATTRIBUTE_UNUSED,
const char **errmsg ATTRIBUTE_UNUSED)
{
return insn | ((value & 0x1f) << 6) | (value & 0x20);
}
static long
extract_mb6 (unsigned long insn,
int dialect ATTRIBUTE_UNUSED,
int *invalid ATTRIBUTE_UNUSED)
{
return ((insn >> 6) & 0x1f) | (insn & 0x20);
}
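/* Editorial example: a 6-bit MB or ME value of 40 (0x28) is stored
with its low five bits at bit 6 and its high bit left in place at
bit 5, i.e. (0x08 << 6) | 0x20 == 0x220. */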
/* The NB field in an X form instruction. The value 32 is stored as
0. */
static long
extract_nb (unsigned long insn,
int dialect ATTRIBUTE_UNUSED,
int *invalid ATTRIBUTE_UNUSED)
{
long ret;
ret = (insn >> 11) & 0x1f;
if (ret == 0)
ret = 32;
return ret;
}
/* The NSI field in a D form instruction. This is the same as the SI
field, only negated. The extraction function always marks it as
invalid, since we never want to recognize an instruction which uses
a field of this type. */
static unsigned long
insert_nsi (unsigned long insn,
long value,
int dialect ATTRIBUTE_UNUSED,
const char **errmsg ATTRIBUTE_UNUSED)
{
return insn | (-value & 0xffff);
}
static long
extract_nsi (unsigned long insn,
int dialect ATTRIBUTE_UNUSED,
int *invalid)
{
*invalid = 1;
return -(((insn & 0xffff) ^ 0x8000) - 0x8000);
}
/* The RA field in a D or X form instruction which is an updating
load, which means that the RA field may not be zero and may not
equal the RT field. */
static unsigned long
insert_ral (unsigned long insn,
long value,
int dialect ATTRIBUTE_UNUSED,
const char **errmsg)
{
if (value == 0
|| (unsigned long) value == ((insn >> 21) & 0x1f))
*errmsg = "invalid register operand when updating";
return insn | ((value & 0x1f) << 16);
}
/* The RA field in an lmw instruction, which has special value
restrictions. */
static unsigned long
insert_ram (unsigned long insn,
long value,
int dialect ATTRIBUTE_UNUSED,
const char **errmsg)
{
if ((unsigned long) value >= ((insn >> 21) & 0x1f))
*errmsg = _("index register in load range");
return insn | ((value & 0x1f) << 16);
}
/* The RA field in the DQ form lq instruction, which has special
value restrictions. */
static unsigned long
insert_raq (unsigned long insn,
long value,
int dialect ATTRIBUTE_UNUSED,
const char **errmsg)
{
long rtvalue = (insn & RT_MASK) >> 21;
if (value == rtvalue)
*errmsg = _("source and target register operands must be different");
return insn | ((value & 0x1f) << 16);
}
/* The RA field in a D or X form instruction which is an updating
store or an updating floating point load, which means that the RA
field may not be zero. */
static unsigned long
insert_ras (unsigned long insn,
long value,
int dialect ATTRIBUTE_UNUSED,
const char **errmsg)
{
if (value == 0)
*errmsg = _("invalid register operand when updating");
return insn | ((value & 0x1f) << 16);
}
/* The RB field in an X form instruction when it must be the same as
the RS field in the instruction. This is used for extended
mnemonics like mr. This operand is marked FAKE. The insertion
function just copies the RS field into the RB field, and the
extraction function just checks that the fields are the same. */
static unsigned long
insert_rbs (unsigned long insn,
long value ATTRIBUTE_UNUSED,
int dialect ATTRIBUTE_UNUSED,
const char **errmsg ATTRIBUTE_UNUSED)
{
return insn | (((insn >> 21) & 0x1f) << 11);
}
static long
extract_rbs (unsigned long insn,
int dialect ATTRIBUTE_UNUSED,
int *invalid)
{
if (((insn >> 21) & 0x1f) != ((insn >> 11) & 0x1f))
*invalid = 1;
return 0;
}
/* The SH field in an MD form instruction. This is split. */
static unsigned long
insert_sh6 (unsigned long insn,
long value,
int dialect ATTRIBUTE_UNUSED,
const char **errmsg ATTRIBUTE_UNUSED)
{
return insn | ((value & 0x1f) << 11) | ((value & 0x20) >> 4);
}
static long
extract_sh6 (unsigned long insn,
int dialect ATTRIBUTE_UNUSED,
int *invalid ATTRIBUTE_UNUSED)
{
return ((insn >> 11) & 0x1f) | ((insn << 4) & 0x20);
}
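/* Editorial example: a shift count of 36 (0x24) is stored as
((0x24 & 0x1f) << 11) | ((0x24 & 0x20) >> 4) == 0x2002, i.e. the low
five bits at bit 11 and the sixth bit at bit 1. */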
/* The SPR field in an XFX form instruction. This is flipped--the
lower 5 bits are stored in the upper 5 and vice versa. */
static unsigned long
insert_spr (unsigned long insn,
long value,
int dialect ATTRIBUTE_UNUSED,
const char **errmsg ATTRIBUTE_UNUSED)
{
return insn | ((value & 0x1f) << 16) | ((value & 0x3e0) << 6);
}
static long
extract_spr (unsigned long insn,
int dialect ATTRIBUTE_UNUSED,
int *invalid ATTRIBUTE_UNUSED)
{
return ((insn >> 16) & 0x1f) | ((insn >> 6) & 0x3e0);
}
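/* A worked example added for illustration: the time base is SPR 268
(0x10c). insert_spr encodes it as
((268 & 0x1f) << 16) | ((268 & 0x3e0) << 6) == 0x000c4000, putting
the low five bits at bit 16 and the high five at bit 11;
extract_spr reverses the swap and recovers 268. */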
/* Some dialects have 8 SPRG registers instead of the standard 4. */
static unsigned long
insert_sprg (unsigned long insn,
long value,
int dialect,
const char **errmsg)
{
/* This check uses PPC_OPCODE_403 because PPC405 is later defined
as a synonym. If ever a 405 specific dialect is added this
check should use that instead. */
if (value > 7
|| (value > 3
&& (dialect & (PPC_OPCODE_BOOKE | PPC_OPCODE_403)) == 0))
*errmsg = _("invalid sprg number");
/* If this is mfsprg4..7 then use spr 260..263 which can be read in
user mode. Anything else must use spr 272..279. */
if (value <= 3 || (insn & 0x100) != 0)
value |= 0x10;
return insn | ((value & 0x17) << 16);
}
static long
extract_sprg (unsigned long insn,
int dialect,
int *invalid)
{
unsigned long val = (insn >> 16) & 0x1f;
/* mfsprg can use 260..263 and 272..279. mtsprg only uses spr 272..279.
If not BOOKE or 405, then both use only 272..275. */
if (val <= 3
|| (val < 0x10 && (insn & 0x100) != 0)
|| (val - 0x10 > 3
&& (dialect & (PPC_OPCODE_BOOKE | PPC_OPCODE_403)) == 0))
*invalid = 1;
return val & 7;
}
/* The TBR field in an XFX instruction. This is just like SPR, but it
is optional. When TBR is omitted, it must be inserted as 268 (the
magic number of the TB register). These functions treat 0
(indicating an omitted optional operand) as 268. This means that
``mftb 4,0'' is not handled correctly. This does not matter very
much, since the architecture manual does not define mftb as
accepting any values other than 268 or 269. */
#define TB (268)
static unsigned long
insert_tbr (unsigned long insn,
long value,
int dialect ATTRIBUTE_UNUSED,
const char **errmsg ATTRIBUTE_UNUSED)
{
if (value == 0)
value = TB;
return insn | ((value & 0x1f) << 16) | ((value & 0x3e0) << 6);
}
static long
extract_tbr (unsigned long insn,
int dialect ATTRIBUTE_UNUSED,
int *invalid ATTRIBUTE_UNUSED)
{
long ret;
ret = ((insn >> 16) & 0x1f) | ((insn >> 6) & 0x3e0);
if (ret == TB)
ret = 0;
return ret;
}
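/* Editorial note: writing "mftb rT" with the TBR operand omitted makes
insert_tbr substitute 268, so the encoding is the same as
"mftb rT,268", and extract_tbr maps 268 back to the omitted form when
disassembling. */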
/* Macros used to form opcodes. */
/* The main opcode. */
#define OP(x) ((((unsigned long)(x)) & 0x3f) << 26)
#define OP_MASK OP (0x3f)
/* The main opcode combined with a trap code in the TO field of a D
form instruction. Used for extended mnemonics for the trap
instructions. */
#define OPTO(x,to) (OP (x) | ((((unsigned long)(to)) & 0x1f) << 21))
#define OPTO_MASK (OP_MASK | TO_MASK)
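/* Editorial example: the "tdlgti" entry further down uses
OPTO (2, TOLGT), which expands to (2 << 26) | (0x1 << 21)
== 0x08200000, i.e. primary opcode 2 (tdi) with TO hard-wired to
"logically greater than". */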
/* The main opcode combined with a comparison size bit in the L field
of a D form or X form instruction. Used for extended mnemonics for
the comparison instructions. */
#define OPL(x,l) (OP (x) | ((((unsigned long)(l)) & 1) << 21))
#define OPL_MASK OPL (0x3f,1)
/* An A form instruction. */
#define A(op, xop, rc) (OP (op) | ((((unsigned long)(xop)) & 0x1f) << 1) | (((unsigned long)(rc)) & 1))
#define A_MASK A (0x3f, 0x1f, 1)
/* An A_MASK with the FRB field fixed. */
#define AFRB_MASK (A_MASK | FRB_MASK)
/* An A_MASK with the FRC field fixed. */
#define AFRC_MASK (A_MASK | FRC_MASK)
/* An A_MASK with the FRA and FRC fields fixed. */
#define AFRAFRC_MASK (A_MASK | FRA_MASK | FRC_MASK)
/* An AFRAFRC_MASK, but with L bit clear. */
#define AFRALFRC_MASK (AFRAFRC_MASK & ~((unsigned long) 1 << 16))
/* A B form instruction. */
#define B(op, aa, lk) (OP (op) | ((((unsigned long)(aa)) & 1) << 1) | ((lk) & 1))
#define B_MASK B (0x3f, 1, 1)
/* A B form instruction setting the BO field. */
#define BBO(op, bo, aa, lk) (B ((op), (aa), (lk)) | ((((unsigned long)(bo)) & 0x1f) << 21))
#define BBO_MASK BBO (0x3f, 0x1f, 1, 1)
/* A BBO_MASK with the y bit of the BO field removed. This permits
matching a conditional branch regardless of the setting of the y
bit. Similarly for the 'at' bits used for power4 branch hints. */
#define Y_MASK (((unsigned long) 1) << 21)
#define AT1_MASK (((unsigned long) 3) << 21)
#define AT2_MASK (((unsigned long) 9) << 21)
#define BBOY_MASK (BBO_MASK &~ Y_MASK)
#define BBOAT_MASK (BBO_MASK &~ AT1_MASK)
/* A B form instruction setting the BO field and the condition bits of
the BI field. */
#define BBOCB(op, bo, cb, aa, lk) \
(BBO ((op), (bo), (aa), (lk)) | ((((unsigned long)(cb)) & 0x3) << 16))
#define BBOCB_MASK BBOCB (0x3f, 0x1f, 0x3, 1, 1)
/* A BBOCB_MASK with the y bit of the BO field removed. */
#define BBOYCB_MASK (BBOCB_MASK &~ Y_MASK)
#define BBOATCB_MASK (BBOCB_MASK &~ AT1_MASK)
#define BBOAT2CB_MASK (BBOCB_MASK &~ AT2_MASK)
/* A BBOYCB_MASK in which the BI field is fixed. */
#define BBOYBI_MASK (BBOYCB_MASK | BI_MASK)
#define BBOATBI_MASK (BBOAT2CB_MASK | BI_MASK)
/* A Context form instruction. */
#define CTX(op, xop) (OP (op) | (((unsigned long)(xop)) & 0x7))
#define CTX_MASK CTX(0x3f, 0x7)
/* A User Context form instruction. */
#define UCTX(op, xop) (OP (op) | (((unsigned long)(xop)) & 0x1f))
#define UCTX_MASK UCTX(0x3f, 0x1f)
/* The main opcode mask with the RA field clear. */
#define DRA_MASK (OP_MASK | RA_MASK)
/* A DS form instruction. */
#define DSO(op, xop) (OP (op) | ((xop) & 0x3))
#define DS_MASK DSO (0x3f, 3)
/* A DE form instruction. */
#define DEO(op, xop) (OP (op) | ((xop) & 0xf))
#define DE_MASK DEO (0x3e, 0xf)
/* An EVSEL form instruction. */
#define EVSEL(op, xop) (OP (op) | (((unsigned long)(xop)) & 0xff) << 3)
#define EVSEL_MASK EVSEL(0x3f, 0xff)
/* An M form instruction. */
#define M(op, rc) (OP (op) | ((rc) & 1))
#define M_MASK M (0x3f, 1)
/* An M form instruction with the ME field specified. */
#define MME(op, me, rc) (M ((op), (rc)) | ((((unsigned long)(me)) & 0x1f) << 1))
/* An M_MASK with the MB and ME fields fixed. */
#define MMBME_MASK (M_MASK | MB_MASK | ME_MASK)
/* An M_MASK with the SH and ME fields fixed. */
#define MSHME_MASK (M_MASK | SH_MASK | ME_MASK)
/* An MD form instruction. */
#define MD(op, xop, rc) (OP (op) | ((((unsigned long)(xop)) & 0x7) << 2) | ((rc) & 1))
#define MD_MASK MD (0x3f, 0x7, 1)
/* An MD_MASK with the MB field fixed. */
#define MDMB_MASK (MD_MASK | MB6_MASK)
/* An MD_MASK with the SH field fixed. */
#define MDSH_MASK (MD_MASK | SH6_MASK)
/* An MDS form instruction. */
#define MDS(op, xop, rc) (OP (op) | ((((unsigned long)(xop)) & 0xf) << 1) | ((rc) & 1))
#define MDS_MASK MDS (0x3f, 0xf, 1)
/* An MDS_MASK with the MB field fixed. */
#define MDSMB_MASK (MDS_MASK | MB6_MASK)
/* An SC form instruction. */
#define SC(op, sa, lk) (OP (op) | ((((unsigned long)(sa)) & 1) << 1) | ((lk) & 1))
#define SC_MASK (OP_MASK | (((unsigned long)0x3ff) << 16) | (((unsigned long)1) << 1) | 1)
/* A VX form instruction. */
#define VX(op, xop) (OP (op) | (((unsigned long)(xop)) & 0x7ff))
/* The mask for a VX form instruction. */
#define VX_MASK VX(0x3f, 0x7ff)
/* A VA form instruction. */
#define VXA(op, xop) (OP (op) | (((unsigned long)(xop)) & 0x03f))
/* The mask for a VA form instruction. */
#define VXA_MASK VXA(0x3f, 0x3f)
/* A VXR form instruction. */
#define VXR(op, xop, rc) (OP (op) | (((rc) & 1) << 10) | (((unsigned long)(xop)) & 0x3ff))
/* The mask for a VXR form instruction. */
#define VXR_MASK VXR(0x3f, 0x3ff, 1)
/* An X form instruction. */
#define X(op, xop) (OP (op) | ((((unsigned long)(xop)) & 0x3ff) << 1))
/* A Z form instruction. */
#define Z(op, xop) (OP (op) | ((((unsigned long)(xop)) & 0x1ff) << 1))
/* An X form instruction with the RC bit specified. */
#define XRC(op, xop, rc) (X ((op), (xop)) | ((rc) & 1))
/* A Z form instruction with the RC bit specified. */
#define ZRC(op, xop, rc) (Z ((op), (xop)) | ((rc) & 1))
/* The mask for an X form instruction. */
#define X_MASK XRC (0x3f, 0x3ff, 1)
/* The mask for a Z form instruction. */
#define Z_MASK ZRC (0x3f, 0x1ff, 1)
#define Z2_MASK ZRC (0x3f, 0xff, 1)
/* An X_MASK with the RA field fixed. */
#define XRA_MASK (X_MASK | RA_MASK)
/* An XRA_MASK with the W field clear. */
#define XWRA_MASK (XRA_MASK & ~((unsigned long) 1 << 16))
/* An X_MASK with the RB field fixed. */
#define XRB_MASK (X_MASK | RB_MASK)
/* An X_MASK with the RT field fixed. */
#define XRT_MASK (X_MASK | RT_MASK)
/* An XRT_MASK mask with the L bits clear. */
#define XLRT_MASK (XRT_MASK & ~((unsigned long) 0x3 << 21))
/* An X_MASK with the RA and RB fields fixed. */
#define XRARB_MASK (X_MASK | RA_MASK | RB_MASK)
/* An XRARB_MASK, but with the L bit clear. */
#define XRLARB_MASK (XRARB_MASK & ~((unsigned long) 1 << 16))
/* An X_MASK with the RT and RA fields fixed. */
#define XRTRA_MASK (X_MASK | RT_MASK | RA_MASK)
/* An XRTRA_MASK, but with L bit clear. */
#define XRTLRA_MASK (XRTRA_MASK & ~((unsigned long) 1 << 21))
/* An X form instruction with the L bit specified. */
#define XOPL(op, xop, l) (X ((op), (xop)) | ((((unsigned long)(l)) & 1) << 21))
/* The mask for an X form comparison instruction. */
#define XCMP_MASK (X_MASK | (((unsigned long)1) << 22))
/* The mask for an X form comparison instruction with the L field
fixed. */
#define XCMPL_MASK (XCMP_MASK | (((unsigned long)1) << 21))
/* An X form trap instruction with the TO field specified. */
#define XTO(op, xop, to) (X ((op), (xop)) | ((((unsigned long)(to)) & 0x1f) << 21))
#define XTO_MASK (X_MASK | TO_MASK)
/* An X form tlb instruction with the SH field specified. */
#define XTLB(op, xop, sh) (X ((op), (xop)) | ((((unsigned long)(sh)) & 0x1f) << 11))
#define XTLB_MASK (X_MASK | SH_MASK)
/* An X form sync instruction. */
#define XSYNC(op, xop, l) (X ((op), (xop)) | ((((unsigned long)(l)) & 3) << 21))
/* An X form sync instruction with everything filled in except the LS field. */
#define XSYNC_MASK (0xff9fffff)
/* An X_MASK, but with the EH bit clear. */
#define XEH_MASK (X_MASK & ~((unsigned long )1))
/* An X form AltiVec dss instruction. */
#define XDSS(op, xop, a) (X ((op), (xop)) | ((((unsigned long)(a)) & 1) << 25))
#define XDSS_MASK XDSS(0x3f, 0x3ff, 1)
/* An XFL form instruction. */
#define XFL(op, xop, rc) (OP (op) | ((((unsigned long)(xop)) & 0x3ff) << 1) | (((unsigned long)(rc)) & 1))
#define XFL_MASK XFL (0x3f, 0x3ff, 1)
/* An X form isel instruction. */
#define XISEL(op, xop) (OP (op) | ((((unsigned long)(xop)) & 0x1f) << 1))
#define XISEL_MASK XISEL(0x3f, 0x1f)
/* An XL form instruction with the LK field set to 0. */
#define XL(op, xop) (OP (op) | ((((unsigned long)(xop)) & 0x3ff) << 1))
/* An XL form instruction which uses the LK field. */
#define XLLK(op, xop, lk) (XL ((op), (xop)) | ((lk) & 1))
/* The mask for an XL form instruction. */
#define XL_MASK XLLK (0x3f, 0x3ff, 1)
/* An XL form instruction which explicitly sets the BO field. */
#define XLO(op, bo, xop, lk) \
(XLLK ((op), (xop), (lk)) | ((((unsigned long)(bo)) & 0x1f) << 21))
#define XLO_MASK (XL_MASK | BO_MASK)
/* An XL form instruction which explicitly sets the y bit of the BO
field. */
#define XLYLK(op, xop, y, lk) (XLLK ((op), (xop), (lk)) | ((((unsigned long)(y)) & 1) << 21))
#define XLYLK_MASK (XL_MASK | Y_MASK)
/* An XL form instruction which sets the BO field and the condition
bits of the BI field. */
#define XLOCB(op, bo, cb, xop, lk) \
(XLO ((op), (bo), (xop), (lk)) | ((((unsigned long)(cb)) & 3) << 16))
#define XLOCB_MASK XLOCB (0x3f, 0x1f, 0x3, 0x3ff, 1)
/* An XL_MASK or XLYLK_MASK or XLOCB_MASK with the BB field fixed. */
#define XLBB_MASK (XL_MASK | BB_MASK)
#define XLYBB_MASK (XLYLK_MASK | BB_MASK)
#define XLBOCBBB_MASK (XLOCB_MASK | BB_MASK)
/* A mask for branch instructions using the BH field. */
#define XLBH_MASK (XL_MASK | (0x1c << 11))
/* An XL_MASK with the BO and BB fields fixed. */
#define XLBOBB_MASK (XL_MASK | BO_MASK | BB_MASK)
/* An XL_MASK with the BO, BI and BB fields fixed. */
#define XLBOBIBB_MASK (XL_MASK | BO_MASK | BI_MASK | BB_MASK)
/* An XO form instruction. */
#define XO(op, xop, oe, rc) \
(OP (op) | ((((unsigned long)(xop)) & 0x1ff) << 1) | ((((unsigned long)(oe)) & 1) << 10) | (((unsigned long)(rc)) & 1))
#define XO_MASK XO (0x3f, 0x1ff, 1, 1)
/* An XO_MASK with the RB field fixed. */
#define XORB_MASK (XO_MASK | RB_MASK)
/* An XS form instruction. */
#define XS(op, xop, rc) (OP (op) | ((((unsigned long)(xop)) & 0x1ff) << 2) | (((unsigned long)(rc)) & 1))
#define XS_MASK XS (0x3f, 0x1ff, 1)
/* A mask for the FXM version of an XFX form instruction. */
#define XFXFXM_MASK (X_MASK | (1 << 11) | (1 << 20))
/* An XFX form instruction with the FXM field filled in. */
#define XFXM(op, xop, fxm, p4) \
(X ((op), (xop)) | ((((unsigned long)(fxm)) & 0xff) << 12) \
| ((unsigned long)(p4) << 20))
/* An XFX form instruction with the SPR field filled in. */
#define XSPR(op, xop, spr) \
(X ((op), (xop)) | ((((unsigned long)(spr)) & 0x1f) << 16) | ((((unsigned long)(spr)) & 0x3e0) << 6))
#define XSPR_MASK (X_MASK | SPR_MASK)
/* An XFX form instruction with the SPR field filled in except for the
SPRBAT field. */
#define XSPRBAT_MASK (XSPR_MASK &~ SPRBAT_MASK)
/* An XFX form instruction with the SPR field filled in except for the
SPRG field. */
#define XSPRG_MASK (XSPR_MASK & ~(0x1f << 16))
/* An X form instruction with everything filled in except the E field. */
#define XE_MASK (0xffff7fff)
/* An X form user context instruction. */
#define XUC(op, xop) (OP (op) | (((unsigned long)(xop)) & 0x1f))
#define XUC_MASK XUC(0x3f, 0x1f)
/* The BO encodings used in extended conditional branch mnemonics. */
#define BODNZF (0x0)
#define BODNZFP (0x1)
#define BODZF (0x2)
#define BODZFP (0x3)
#define BODNZT (0x8)
#define BODNZTP (0x9)
#define BODZT (0xa)
#define BODZTP (0xb)
#define BOF (0x4)
#define BOFP (0x5)
#define BOFM4 (0x6)
#define BOFP4 (0x7)
#define BOT (0xc)
#define BOTP (0xd)
#define BOTM4 (0xe)
#define BOTP4 (0xf)
#define BODNZ (0x10)
#define BODNZP (0x11)
#define BODZ (0x12)
#define BODZP (0x13)
#define BODNZM4 (0x18)
#define BODNZP4 (0x19)
#define BODZM4 (0x1a)
#define BODZP4 (0x1b)
#define BOU (0x14)
/* The BI condition bit encodings used in extended conditional branch
mnemonics. */
#define CBLT (0)
#define CBGT (1)
#define CBEQ (2)
#define CBSO (3)
/* The TO encodings used in extended trap mnemonics. */
#define TOLGT (0x1)
#define TOLLT (0x2)
#define TOEQ (0x4)
#define TOLGE (0x5)
#define TOLNL (0x5)
#define TOLLE (0x6)
#define TOLNG (0x6)
#define TOGT (0x8)
#define TOGE (0xc)
#define TONL (0xc)
#define TOLT (0x10)
#define TOLE (0x14)
#define TONG (0x14)
#define TONE (0x18)
#define TOU (0x1f)
/* Smaller names for the flags so each entry in the opcodes table will
fit on a single line. */
#undef PPC
#define PPC PPC_OPCODE_PPC
#define PPCCOM PPC_OPCODE_PPC | PPC_OPCODE_COMMON
#define NOPOWER4 PPC_OPCODE_NOPOWER4 | PPCCOM
#define POWER4 PPC_OPCODE_POWER4
#define POWER5 PPC_OPCODE_POWER5
#define POWER6 PPC_OPCODE_POWER6
#define CELL PPC_OPCODE_CELL
#define PPC32 PPC_OPCODE_32 | PPC_OPCODE_PPC
#define PPC64 PPC_OPCODE_64 | PPC_OPCODE_PPC
#define PPC403 PPC_OPCODE_403
#define PPC405 PPC403
#define PPC440 PPC_OPCODE_440
#define PPC750 PPC
#define PPC860 PPC
#define PPCVEC PPC_OPCODE_ALTIVEC
#define POWER PPC_OPCODE_POWER
#define POWER2 PPC_OPCODE_POWER | PPC_OPCODE_POWER2
#define PPCPWR2 PPC_OPCODE_PPC | PPC_OPCODE_POWER | PPC_OPCODE_POWER2
#define POWER32 PPC_OPCODE_POWER | PPC_OPCODE_32
#define COM PPC_OPCODE_POWER | PPC_OPCODE_PPC | PPC_OPCODE_COMMON
#define COM32 PPC_OPCODE_POWER | PPC_OPCODE_PPC | PPC_OPCODE_COMMON | PPC_OPCODE_32
#define M601 PPC_OPCODE_POWER | PPC_OPCODE_601
#define PWRCOM PPC_OPCODE_POWER | PPC_OPCODE_601 | PPC_OPCODE_COMMON
#define MFDEC1 PPC_OPCODE_POWER
#define MFDEC2 PPC_OPCODE_PPC | PPC_OPCODE_601 | PPC_OPCODE_BOOKE
#define BOOKE PPC_OPCODE_BOOKE
#define BOOKE64 PPC_OPCODE_BOOKE64
#define CLASSIC PPC_OPCODE_CLASSIC
#define PPCE300 PPC_OPCODE_E300
#define PPCSPE PPC_OPCODE_SPE
#define PPCISEL PPC_OPCODE_ISEL
#define PPCEFS PPC_OPCODE_EFS
#define PPCBRLK PPC_OPCODE_BRLOCK
#define PPCPMR PPC_OPCODE_PMR
#define PPCCHLK PPC_OPCODE_CACHELCK
#define PPCCHLK64 PPC_OPCODE_CACHELCK | PPC_OPCODE_BOOKE64
#define PPCRFMCI PPC_OPCODE_RFMCI
/* The opcode table.
The format of the opcode table is:
NAME OPCODE MASK FLAGS { OPERANDS }
NAME is the name of the instruction.
OPCODE is the instruction opcode.
MASK is the opcode mask; this is used to tell the disassembler
which bits in the actual opcode must match OPCODE.
FLAGS are flags indicating which processors support the instruction.
OPERANDS is the list of operands.
The disassembler reads the table in order and prints the first
instruction which matches, so this table is sorted to put more
specific instructions before more general instructions. It is also
sorted by major opcode. */
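/* Editorial illustration of the format: the first entry below has
opcode X (0, 256) == 0x00000200 and mask X_MASK == 0xfc0007ff, so an
instruction word is printed as "attn" when its primary and extended
opcode bits match 0 and 256, whatever the unmasked bits hold, and the
entry is only considered when the POWER4 flag is part of the selected
dialect. */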
const struct powerpc_opcode powerpc_opcodes[] = {
{ "attn", X(0,256), X_MASK, POWER4, { 0 } },
{ "tdlgti", OPTO(2,TOLGT), OPTO_MASK, PPC64, { RA, SI } },
{ "tdllti", OPTO(2,TOLLT), OPTO_MASK, PPC64, { RA, SI } },
{ "tdeqi", OPTO(2,TOEQ), OPTO_MASK, PPC64, { RA, SI } },
{ "tdlgei", OPTO(2,TOLGE), OPTO_MASK, PPC64, { RA, SI } },
{ "tdlnli", OPTO(2,TOLNL), OPTO_MASK, PPC64, { RA, SI } },
{ "tdllei", OPTO(2,TOLLE), OPTO_MASK, PPC64, { RA, SI } },
{ "tdlngi", OPTO(2,TOLNG), OPTO_MASK, PPC64, { RA, SI } },
{ "tdgti", OPTO(2,TOGT), OPTO_MASK, PPC64, { RA, SI } },
{ "tdgei", OPTO(2,TOGE), OPTO_MASK, PPC64, { RA, SI } },
{ "tdnli", OPTO(2,TONL), OPTO_MASK, PPC64, { RA, SI } },
{ "tdlti", OPTO(2,TOLT), OPTO_MASK, PPC64, { RA, SI } },
{ "tdlei", OPTO(2,TOLE), OPTO_MASK, PPC64, { RA, SI } },
{ "tdngi", OPTO(2,TONG), OPTO_MASK, PPC64, { RA, SI } },
{ "tdnei", OPTO(2,TONE), OPTO_MASK, PPC64, { RA, SI } },
{ "tdi", OP(2), OP_MASK, PPC64, { TO, RA, SI } },
{ "twlgti", OPTO(3,TOLGT), OPTO_MASK, PPCCOM, { RA, SI } },
{ "tlgti", OPTO(3,TOLGT), OPTO_MASK, PWRCOM, { RA, SI } },
{ "twllti", OPTO(3,TOLLT), OPTO_MASK, PPCCOM, { RA, SI } },
{ "tllti", OPTO(3,TOLLT), OPTO_MASK, PWRCOM, { RA, SI } },
{ "tweqi", OPTO(3,TOEQ), OPTO_MASK, PPCCOM, { RA, SI } },
{ "teqi", OPTO(3,TOEQ), OPTO_MASK, PWRCOM, { RA, SI } },
{ "twlgei", OPTO(3,TOLGE), OPTO_MASK, PPCCOM, { RA, SI } },
{ "tlgei", OPTO(3,TOLGE), OPTO_MASK, PWRCOM, { RA, SI } },
{ "twlnli", OPTO(3,TOLNL), OPTO_MASK, PPCCOM, { RA, SI } },
{ "tlnli", OPTO(3,TOLNL), OPTO_MASK, PWRCOM, { RA, SI } },
{ "twllei", OPTO(3,TOLLE), OPTO_MASK, PPCCOM, { RA, SI } },
{ "tllei", OPTO(3,TOLLE), OPTO_MASK, PWRCOM, { RA, SI } },
{ "twlngi", OPTO(3,TOLNG), OPTO_MASK, PPCCOM, { RA, SI } },
{ "tlngi", OPTO(3,TOLNG), OPTO_MASK, PWRCOM, { RA, SI } },
{ "twgti", OPTO(3,TOGT), OPTO_MASK, PPCCOM, { RA, SI } },
{ "tgti", OPTO(3,TOGT), OPTO_MASK, PWRCOM, { RA, SI } },
{ "twgei", OPTO(3,TOGE), OPTO_MASK, PPCCOM, { RA, SI } },
{ "tgei", OPTO(3,TOGE), OPTO_MASK, PWRCOM, { RA, SI } },
{ "twnli", OPTO(3,TONL), OPTO_MASK, PPCCOM, { RA, SI } },
{ "tnli", OPTO(3,TONL), OPTO_MASK, PWRCOM, { RA, SI } },
{ "twlti", OPTO(3,TOLT), OPTO_MASK, PPCCOM, { RA, SI } },
{ "tlti", OPTO(3,TOLT), OPTO_MASK, PWRCOM, { RA, SI } },
{ "twlei", OPTO(3,TOLE), OPTO_MASK, PPCCOM, { RA, SI } },
{ "tlei", OPTO(3,TOLE), OPTO_MASK, PWRCOM, { RA, SI } },
{ "twngi", OPTO(3,TONG), OPTO_MASK, PPCCOM, { RA, SI } },
{ "tngi", OPTO(3,TONG), OPTO_MASK, PWRCOM, { RA, SI } },
{ "twnei", OPTO(3,TONE), OPTO_MASK, PPCCOM, { RA, SI } },
{ "tnei", OPTO(3,TONE), OPTO_MASK, PWRCOM, { RA, SI } },
{ "twi", OP(3), OP_MASK, PPCCOM, { TO, RA, SI } },
{ "ti", OP(3), OP_MASK, PWRCOM, { TO, RA, SI } },
{ "macchw", XO(4,172,0,0), XO_MASK, PPC405|PPC440, { RT, RA, RB } },
{ "macchw.", XO(4,172,0,1), XO_MASK, PPC405|PPC440, { RT, RA, RB } },
{ "macchwo", XO(4,172,1,0), XO_MASK, PPC405|PPC440, { RT, RA, RB } },
{ "macchwo.", XO(4,172,1,1), XO_MASK, PPC405|PPC440, { RT, RA, RB } },
{ "macchws", XO(4,236,0,0), XO_MASK, PPC405|PPC440, { RT, RA, RB } },
{ "macchws.", XO(4,236,0,1), XO_MASK, PPC405|PPC440, { RT, RA, RB } },
{ "macchwso", XO(4,236,1,0), XO_MASK, PPC405|PPC440, { RT, RA, RB } },
{ "macchwso.", XO(4,236,1,1), XO_MASK, PPC405|PPC440, { RT, RA, RB } },
{ "macchwsu", XO(4,204,0,0), XO_MASK, PPC405|PPC440, { RT, RA, RB } },
{ "macchwsu.", XO(4,204,0,1), XO_MASK, PPC405|PPC440, { RT, RA, RB } },
{ "macchwsuo", XO(4,204,1,0), XO_MASK, PPC405|PPC440, { RT, RA, RB } },
{ "macchwsuo.", XO(4,204,1,1), XO_MASK, PPC405|PPC440, { RT, RA, RB } },
{ "macchwu", XO(4,140,0,0), XO_MASK, PPC405|PPC440, { RT, RA, RB } },
{ "macchwu.", XO(4,140,0,1), XO_MASK, PPC405|PPC440, { RT, RA, RB } },
{ "macchwuo", XO(4,140,1,0), XO_MASK, PPC405|PPC440, { RT, RA, RB } },
{ "macchwuo.", XO(4,140,1,1), XO_MASK, PPC405|PPC440, { RT, RA, RB } },
{ "machhw", XO(4,44,0,0), XO_MASK, PPC405|PPC440, { RT, RA, RB } },
{ "machhw.", XO(4,44,0,1), XO_MASK, PPC405|PPC440, { RT, RA, RB } },
{ "machhwo", XO(4,44,1,0), XO_MASK, PPC405|PPC440, { RT, RA, RB } },
{ "machhwo.", XO(4,44,1,1), XO_MASK, PPC405|PPC440, { RT, RA, RB } },
{ "machhws", XO(4,108,0,0), XO_MASK, PPC405|PPC440, { RT, RA, RB } },
{ "machhws.", XO(4,108,0,1), XO_MASK, PPC405|PPC440, { RT, RA, RB } },
{ "machhwso", XO(4,108,1,0), XO_MASK, PPC405|PPC440, { RT, RA, RB } },
{ "machhwso.", XO(4,108,1,1), XO_MASK, PPC405|PPC440, { RT, RA, RB } },
{ "machhwsu", XO(4,76,0,0), XO_MASK, PPC405|PPC440, { RT, RA, RB } },
{ "machhwsu.", XO(4,76,0,1), XO_MASK, PPC405|PPC440, { RT, RA, RB } },
{ "machhwsuo", XO(4,76,1,0), XO_MASK, PPC405|PPC440, { RT, RA, RB } },
{ "machhwsuo.", XO(4,76,1,1), XO_MASK, PPC405|PPC440, { RT, RA, RB } },
{ "machhwu", XO(4,12,0,0), XO_MASK, PPC405|PPC440, { RT, RA, RB } },
{ "machhwu.", XO(4,12,0,1), XO_MASK, PPC405|PPC440, { RT, RA, RB } },
{ "machhwuo", XO(4,12,1,0), XO_MASK, PPC405|PPC440, { RT, RA, RB } },
{ "machhwuo.", XO(4,12,1,1), XO_MASK, PPC405|PPC440, { RT, RA, RB } },
{ "maclhw", XO(4,428,0,0), XO_MASK, PPC405|PPC440, { RT, RA, RB } },
{ "maclhw.", XO(4,428,0,1), XO_MASK, PPC405|PPC440, { RT, RA, RB } },
{ "maclhwo", XO(4,428,1,0), XO_MASK, PPC405|PPC440, { RT, RA, RB } },
{ "maclhwo.", XO(4,428,1,1), XO_MASK, PPC405|PPC440, { RT, RA, RB } },
{ "maclhws", XO(4,492,0,0), XO_MASK, PPC405|PPC440, { RT, RA, RB } },
{ "maclhws.", XO(4,492,0,1), XO_MASK, PPC405|PPC440, { RT, RA, RB } },
{ "maclhwso", XO(4,492,1,0), XO_MASK, PPC405|PPC440, { RT, RA, RB } },
{ "maclhwso.", XO(4,492,1,1), XO_MASK, PPC405|PPC440, { RT, RA, RB } },
{ "maclhwsu", XO(4,460,0,0), XO_MASK, PPC405|PPC440, { RT, RA, RB } },
{ "maclhwsu.", XO(4,460,0,1), XO_MASK, PPC405|PPC440, { RT, RA, RB } },
{ "maclhwsuo", XO(4,460,1,0), XO_MASK, PPC405|PPC440, { RT, RA, RB } },
{ "maclhwsuo.", XO(4,460,1,1), XO_MASK, PPC405|PPC440, { RT, RA, RB } },
{ "maclhwu", XO(4,396,0,0), XO_MASK, PPC405|PPC440, { RT, RA, RB } },
{ "maclhwu.", XO(4,396,0,1), XO_MASK, PPC405|PPC440, { RT, RA, RB } },
{ "maclhwuo", XO(4,396,1,0), XO_MASK, PPC405|PPC440, { RT, RA, RB } },
{ "maclhwuo.", XO(4,396,1,1), XO_MASK, PPC405|PPC440, { RT, RA, RB } },
{ "mulchw", XRC(4,168,0), X_MASK, PPC405|PPC440, { RT, RA, RB } },
{ "mulchw.", XRC(4,168,1), X_MASK, PPC405|PPC440, { RT, RA, RB } },
{ "mulchwu", XRC(4,136,0), X_MASK, PPC405|PPC440, { RT, RA, RB } },
{ "mulchwu.", XRC(4,136,1), X_MASK, PPC405|PPC440, { RT, RA, RB } },
{ "mulhhw", XRC(4,40,0), X_MASK, PPC405|PPC440, { RT, RA, RB } },
{ "mulhhw.", XRC(4,40,1), X_MASK, PPC405|PPC440, { RT, RA, RB } },
{ "mulhhwu", XRC(4,8,0), X_MASK, PPC405|PPC440, { RT, RA, RB } },
{ "mulhhwu.", XRC(4,8,1), X_MASK, PPC405|PPC440, { RT, RA, RB } },
{ "mullhw", XRC(4,424,0), X_MASK, PPC405|PPC440, { RT, RA, RB } },
{ "mullhw.", XRC(4,424,1), X_MASK, PPC405|PPC440, { RT, RA, RB } },
{ "mullhwu", XRC(4,392,0), X_MASK, PPC405|PPC440, { RT, RA, RB } },
{ "mullhwu.", XRC(4,392,1), X_MASK, PPC405|PPC440, { RT, RA, RB } },
{ "nmacchw", XO(4,174,0,0), XO_MASK, PPC405|PPC440, { RT, RA, RB } },
{ "nmacchw.", XO(4,174,0,1), XO_MASK, PPC405|PPC440, { RT, RA, RB } },
{ "nmacchwo", XO(4,174,1,0), XO_MASK, PPC405|PPC440, { RT, RA, RB } },
{ "nmacchwo.", XO(4,174,1,1), XO_MASK, PPC405|PPC440, { RT, RA, RB } },
{ "nmacchws", XO(4,238,0,0), XO_MASK, PPC405|PPC440, { RT, RA, RB } },
{ "nmacchws.", XO(4,238,0,1), XO_MASK, PPC405|PPC440, { RT, RA, RB } },
{ "nmacchwso", XO(4,238,1,0), XO_MASK, PPC405|PPC440, { RT, RA, RB } },
{ "nmacchwso.", XO(4,238,1,1), XO_MASK, PPC405|PPC440, { RT, RA, RB } },
{ "nmachhw", XO(4,46,0,0), XO_MASK, PPC405|PPC440, { RT, RA, RB } },
{ "nmachhw.", XO(4,46,0,1), XO_MASK, PPC405|PPC440, { RT, RA, RB } },
{ "nmachhwo", XO(4,46,1,0), XO_MASK, PPC405|PPC440, { RT, RA, RB } },
{ "nmachhwo.", XO(4,46,1,1), XO_MASK, PPC405|PPC440, { RT, RA, RB } },
{ "nmachhws", XO(4,110,0,0), XO_MASK, PPC405|PPC440, { RT, RA, RB } },
{ "nmachhws.", XO(4,110,0,1), XO_MASK, PPC405|PPC440, { RT, RA, RB } },
{ "nmachhwso", XO(4,110,1,0), XO_MASK, PPC405|PPC440, { RT, RA, RB } },
{ "nmachhwso.", XO(4,110,1,1), XO_MASK, PPC405|PPC440, { RT, RA, RB } },
{ "nmaclhw", XO(4,430,0,0), XO_MASK, PPC405|PPC440, { RT, RA, RB } },
{ "nmaclhw.", XO(4,430,0,1), XO_MASK, PPC405|PPC440, { RT, RA, RB } },
{ "nmaclhwo", XO(4,430,1,0), XO_MASK, PPC405|PPC440, { RT, RA, RB } },
{ "nmaclhwo.", XO(4,430,1,1), XO_MASK, PPC405|PPC440, { RT, RA, RB } },
{ "nmaclhws", XO(4,494,0,0), XO_MASK, PPC405|PPC440, { RT, RA, RB } },
{ "nmaclhws.", XO(4,494,0,1), XO_MASK, PPC405|PPC440, { RT, RA, RB } },
{ "nmaclhwso", XO(4,494,1,0), XO_MASK, PPC405|PPC440, { RT, RA, RB } },
{ "nmaclhwso.", XO(4,494,1,1), XO_MASK, PPC405|PPC440, { RT, RA, RB } },
{ "mfvscr", VX(4, 1540), VX_MASK, PPCVEC, { VD } },
{ "mtvscr", VX(4, 1604), VX_MASK, PPCVEC, { VB } },
/* Double-precision opcodes. */
/* Some of these conflict with AltiVec, so move them before, since
PPCVEC includes the PPC_OPCODE_PPC set. */
{ "efscfd", VX(4, 719), VX_MASK, PPCEFS, { RS, RB } },
{ "efdabs", VX(4, 740), VX_MASK, PPCEFS, { RS, RA } },
{ "efdnabs", VX(4, 741), VX_MASK, PPCEFS, { RS, RA } },
{ "efdneg", VX(4, 742), VX_MASK, PPCEFS, { RS, RA } },
{ "efdadd", VX(4, 736), VX_MASK, PPCEFS, { RS, RA, RB } },
{ "efdsub", VX(4, 737), VX_MASK, PPCEFS, { RS, RA, RB } },
{ "efdmul", VX(4, 744), VX_MASK, PPCEFS, { RS, RA, RB } },
{ "efddiv", VX(4, 745), VX_MASK, PPCEFS, { RS, RA, RB } },
{ "efdcmpgt", VX(4, 748), VX_MASK, PPCEFS, { CRFD, RA, RB } },
{ "efdcmplt", VX(4, 749), VX_MASK, PPCEFS, { CRFD, RA, RB } },
{ "efdcmpeq", VX(4, 750), VX_MASK, PPCEFS, { CRFD, RA, RB } },
{ "efdtstgt", VX(4, 764), VX_MASK, PPCEFS, { CRFD, RA, RB } },
{ "efdtstlt", VX(4, 765), VX_MASK, PPCEFS, { CRFD, RA, RB } },
{ "efdtsteq", VX(4, 766), VX_MASK, PPCEFS, { CRFD, RA, RB } },
{ "efdcfsi", VX(4, 753), VX_MASK, PPCEFS, { RS, RB } },
{ "efdcfsid", VX(4, 739), VX_MASK, PPCEFS, { RS, RB } },
{ "efdcfui", VX(4, 752), VX_MASK, PPCEFS, { RS, RB } },
{ "efdcfuid", VX(4, 738), VX_MASK, PPCEFS, { RS, RB } },
{ "efdcfsf", VX(4, 755), VX_MASK, PPCEFS, { RS, RB } },
{ "efdcfuf", VX(4, 754), VX_MASK, PPCEFS, { RS, RB } },
{ "efdctsi", VX(4, 757), VX_MASK, PPCEFS, { RS, RB } },
{ "efdctsidz",VX(4, 747), VX_MASK, PPCEFS, { RS, RB } },
{ "efdctsiz", VX(4, 762), VX_MASK, PPCEFS, { RS, RB } },
{ "efdctui", VX(4, 756), VX_MASK, PPCEFS, { RS, RB } },
{ "efdctuidz",VX(4, 746), VX_MASK, PPCEFS, { RS, RB } },
{ "efdctuiz", VX(4, 760), VX_MASK, PPCEFS, { RS, RB } },
{ "efdctsf", VX(4, 759), VX_MASK, PPCEFS, { RS, RB } },
{ "efdctuf", VX(4, 758), VX_MASK, PPCEFS, { RS, RB } },
{ "efdcfs", VX(4, 751), VX_MASK, PPCEFS, { RS, RB } },
/* End of double-precision opcodes. */
{ "vaddcuw", VX(4, 384), VX_MASK, PPCVEC, { VD, VA, VB } },
{ "vaddfp", VX(4, 10), VX_MASK, PPCVEC, { VD, VA, VB } },
{ "vaddsbs", VX(4, 768), VX_MASK, PPCVEC, { VD, VA, VB } },
{ "vaddshs", VX(4, 832), VX_MASK, PPCVEC, { VD, VA, VB } },
{ "vaddsws", VX(4, 896), VX_MASK, PPCVEC, { VD, VA, VB } },
{ "vaddubm", VX(4, 0), VX_MASK, PPCVEC, { VD, VA, VB } },
{ "vaddubs", VX(4, 512), VX_MASK, PPCVEC, { VD, VA, VB } },
{ "vadduhm", VX(4, 64), VX_MASK, PPCVEC, { VD, VA, VB } },
{ "vadduhs", VX(4, 576), VX_MASK, PPCVEC, { VD, VA, VB } },
{ "vadduwm", VX(4, 128), VX_MASK, PPCVEC, { VD, VA, VB } },
{ "vadduws", VX(4, 640), VX_MASK, PPCVEC, { VD, VA, VB } },
{ "vand", VX(4, 1028), VX_MASK, PPCVEC, { VD, VA, VB } },
{ "vandc", VX(4, 1092), VX_MASK, PPCVEC, { VD, VA, VB } },
{ "vavgsb", VX(4, 1282), VX_MASK, PPCVEC, { VD, VA, VB } },
{ "vavgsh", VX(4, 1346), VX_MASK, PPCVEC, { VD, VA, VB } },
{ "vavgsw", VX(4, 1410), VX_MASK, PPCVEC, { VD, VA, VB } },
{ "vavgub", VX(4, 1026), VX_MASK, PPCVEC, { VD, VA, VB } },
{ "vavguh", VX(4, 1090), VX_MASK, PPCVEC, { VD, VA, VB } },
{ "vavguw", VX(4, 1154), VX_MASK, PPCVEC, { VD, VA, VB } },
{ "vcfsx", VX(4, 842), VX_MASK, PPCVEC, { VD, VB, UIMM } },
{ "vcfux", VX(4, 778), VX_MASK, PPCVEC, { VD, VB, UIMM } },
{ "vcmpbfp", VXR(4, 966, 0), VXR_MASK, PPCVEC, { VD, VA, VB } },
{ "vcmpbfp.", VXR(4, 966, 1), VXR_MASK, PPCVEC, { VD, VA, VB } },
{ "vcmpeqfp", VXR(4, 198, 0), VXR_MASK, PPCVEC, { VD, VA, VB } },
{ "vcmpeqfp.", VXR(4, 198, 1), VXR_MASK, PPCVEC, { VD, VA, VB } },
{ "vcmpequb", VXR(4, 6, 0), VXR_MASK, PPCVEC, { VD, VA, VB } },
{ "vcmpequb.", VXR(4, 6, 1), VXR_MASK, PPCVEC, { VD, VA, VB } },
{ "vcmpequh", VXR(4, 70, 0), VXR_MASK, PPCVEC, { VD, VA, VB } },
{ "vcmpequh.", VXR(4, 70, 1), VXR_MASK, PPCVEC, { VD, VA, VB } },
{ "vcmpequw", VXR(4, 134, 0), VXR_MASK, PPCVEC, { VD, VA, VB } },
{ "vcmpequw.", VXR(4, 134, 1), VXR_MASK, PPCVEC, { VD, VA, VB } },
{ "vcmpgefp", VXR(4, 454, 0), VXR_MASK, PPCVEC, { VD, VA, VB } },
{ "vcmpgefp.", VXR(4, 454, 1), VXR_MASK, PPCVEC, { VD, VA, VB } },
{ "vcmpgtfp", VXR(4, 710, 0), VXR_MASK, PPCVEC, { VD, VA, VB } },
{ "vcmpgtfp.", VXR(4, 710, 1), VXR_MASK, PPCVEC, { VD, VA, VB } },
{ "vcmpgtsb", VXR(4, 774, 0), VXR_MASK, PPCVEC, { VD, VA, VB } },
{ "vcmpgtsb.", VXR(4, 774, 1), VXR_MASK, PPCVEC, { VD, VA, VB } },
{ "vcmpgtsh", VXR(4, 838, 0), VXR_MASK, PPCVEC, { VD, VA, VB } },
{ "vcmpgtsh.", VXR(4, 838, 1), VXR_MASK, PPCVEC, { VD, VA, VB } },
{ "vcmpgtsw", VXR(4, 902, 0), VXR_MASK, PPCVEC, { VD, VA, VB } },
{ "vcmpgtsw.", VXR(4, 902, 1), VXR_MASK, PPCVEC, { VD, VA, VB } },
{ "vcmpgtub", VXR(4, 518, 0), VXR_MASK, PPCVEC, { VD, VA, VB } },
{ "vcmpgtub.", VXR(4, 518, 1), VXR_MASK, PPCVEC, { VD, VA, VB } },
{ "vcmpgtuh", VXR(4, 582, 0), VXR_MASK, PPCVEC, { VD, VA, VB } },
{ "vcmpgtuh.", VXR(4, 582, 1), VXR_MASK, PPCVEC, { VD, VA, VB } },
{ "vcmpgtuw", VXR(4, 646, 0), VXR_MASK, PPCVEC, { VD, VA, VB } },
{ "vcmpgtuw.", VXR(4, 646, 1), VXR_MASK, PPCVEC, { VD, VA, VB } },
{ "vctsxs", VX(4, 970), VX_MASK, PPCVEC, { VD, VB, UIMM } },
{ "vctuxs", VX(4, 906), VX_MASK, PPCVEC, { VD, VB, UIMM } },
{ "vexptefp", VX(4, 394), VX_MASK, PPCVEC, { VD, VB } },
{ "vlogefp", VX(4, 458), VX_MASK, PPCVEC, { VD, VB } },
{ "vmaddfp", VXA(4, 46), VXA_MASK, PPCVEC, { VD, VA, VC, VB } },
{ "vmaxfp", VX(4, 1034), VX_MASK, PPCVEC, { VD, VA, VB } },
{ "vmaxsb", VX(4, 258), VX_MASK, PPCVEC, { VD, VA, VB } },
{ "vmaxsh", VX(4, 322), VX_MASK, PPCVEC, { VD, VA, VB } },
{ "vmaxsw", VX(4, 386), VX_MASK, PPCVEC, { VD, VA, VB } },
{ "vmaxub", VX(4, 2), VX_MASK, PPCVEC, { VD, VA, VB } },
{ "vmaxuh", VX(4, 66), VX_MASK, PPCVEC, { VD, VA, VB } },
{ "vmaxuw", VX(4, 130), VX_MASK, PPCVEC, { VD, VA, VB } },
{ "vmhaddshs", VXA(4, 32), VXA_MASK, PPCVEC, { VD, VA, VB, VC } },
{ "vmhraddshs", VXA(4, 33), VXA_MASK, PPCVEC, { VD, VA, VB, VC } },
{ "vminfp", VX(4, 1098), VX_MASK, PPCVEC, { VD, VA, VB } },
{ "vminsb", VX(4, 770), VX_MASK, PPCVEC, { VD, VA, VB } },
{ "vminsh", VX(4, 834), VX_MASK, PPCVEC, { VD, VA, VB } },
{ "vminsw", VX(4, 898), VX_MASK, PPCVEC, { VD, VA, VB } },
{ "vminub", VX(4, 514), VX_MASK, PPCVEC, { VD, VA, VB } },
{ "vminuh", VX(4, 578), VX_MASK, PPCVEC, { VD, VA, VB } },
{ "vminuw", VX(4, 642), VX_MASK, PPCVEC, { VD, VA, VB } },
{ "vmladduhm", VXA(4, 34), VXA_MASK, PPCVEC, { VD, VA, VB, VC } },
{ "vmrghb", VX(4, 12), VX_MASK, PPCVEC, { VD, VA, VB } },
{ "vmrghh", VX(4, 76), VX_MASK, PPCVEC, { VD, VA, VB } },
{ "vmrghw", VX(4, 140), VX_MASK, PPCVEC, { VD, VA, VB } },
{ "vmrglb", VX(4, 268), VX_MASK, PPCVEC, { VD, VA, VB } },
{ "vmrglh", VX(4, 332), VX_MASK, PPCVEC, { VD, VA, VB } },
{ "vmrglw", VX(4, 396), VX_MASK, PPCVEC, { VD, VA, VB } },
{ "vmsummbm", VXA(4, 37), VXA_MASK, PPCVEC, { VD, VA, VB, VC } },
{ "vmsumshm", VXA(4, 40), VXA_MASK, PPCVEC, { VD, VA, VB, VC } },
{ "vmsumshs", VXA(4, 41), VXA_MASK, PPCVEC, { VD, VA, VB, VC } },
{ "vmsumubm", VXA(4, 36), VXA_MASK, PPCVEC, { VD, VA, VB, VC } },
{ "vmsumuhm", VXA(4, 38), VXA_MASK, PPCVEC, { VD, VA, VB, VC } },
{ "vmsumuhs", VXA(4, 39), VXA_MASK, PPCVEC, { VD, VA, VB, VC } },
{ "vmulesb", VX(4, 776), VX_MASK, PPCVEC, { VD, VA, VB } },
{ "vmulesh", VX(4, 840), VX_MASK, PPCVEC, { VD, VA, VB } },
{ "vmuleub", VX(4, 520), VX_MASK, PPCVEC, { VD, VA, VB } },
{ "vmuleuh", VX(4, 584), VX_MASK, PPCVEC, { VD, VA, VB } },
{ "vmulosb", VX(4, 264), VX_MASK, PPCVEC, { VD, VA, VB } },
{ "vmulosh", VX(4, 328), VX_MASK, PPCVEC, { VD, VA, VB } },
{ "vmuloub", VX(4, 8), VX_MASK, PPCVEC, { VD, VA, VB } },
{ "vmulouh", VX(4, 72), VX_MASK, PPCVEC, { VD, VA, VB } },
{ "vnmsubfp", VXA(4, 47), VXA_MASK, PPCVEC, { VD, VA, VC, VB } },
{ "vnor", VX(4, 1284), VX_MASK, PPCVEC, { VD, VA, VB } },
{ "vor", VX(4, 1156), VX_MASK, PPCVEC, { VD, VA, VB } },
{ "vperm", VXA(4, 43), VXA_MASK, PPCVEC, { VD, VA, VB, VC } },
{ "vpkpx", VX(4, 782), VX_MASK, PPCVEC, { VD, VA, VB } },
{ "vpkshss", VX(4, 398), VX_MASK, PPCVEC, { VD, VA, VB } },
{ "vpkshus", VX(4, 270), VX_MASK, PPCVEC, { VD, VA, VB } },
{ "vpkswss", VX(4, 462), VX_MASK, PPCVEC, { VD, VA, VB } },
{ "vpkswus", VX(4, 334), VX_MASK, PPCVEC, { VD, VA, VB } },
{ "vpkuhum", VX(4, 14), VX_MASK, PPCVEC, { VD, VA, VB } },
{ "vpkuhus", VX(4, 142), VX_MASK, PPCVEC, { VD, VA, VB } },
{ "vpkuwum", VX(4, 78), VX_MASK, PPCVEC, { VD, VA, VB } },
{ "vpkuwus", VX(4, 206), VX_MASK, PPCVEC, { VD, VA, VB } },
{ "vrefp", VX(4, 266), VX_MASK, PPCVEC, { VD, VB } },
{ "vrfim", VX(4, 714), VX_MASK, PPCVEC, { VD, VB } },
{ "vrfin", VX(4, 522), VX_MASK, PPCVEC, { VD, VB } },
{ "vrfip", VX(4, 650), VX_MASK, PPCVEC, { VD, VB } },
{ "vrfiz", VX(4, 586), VX_MASK, PPCVEC, { VD, VB } },
{ "vrlb", VX(4, 4), VX_MASK, PPCVEC, { VD, VA, VB } },
{ "vrlh", VX(4, 68), VX_MASK, PPCVEC, { VD, VA, VB } },
{ "vrlw", VX(4, 132), VX_MASK, PPCVEC, { VD, VA, VB } },
{ "vrsqrtefp", VX(4, 330), VX_MASK, PPCVEC, { VD, VB } },
{ "vsel", VXA(4, 42), VXA_MASK, PPCVEC, { VD, VA, VB, VC } },
{ "vsl", VX(4, 452), VX_MASK, PPCVEC, { VD, VA, VB } },
{ "vslb", VX(4, 260), VX_MASK, PPCVEC, { VD, VA, VB } },
{ "vsldoi", VXA(4, 44), VXA_MASK, PPCVEC, { VD, VA, VB, SHB } },
{ "vslh", VX(4, 324), VX_MASK, PPCVEC, { VD, VA, VB } },
{ "vslo", VX(4, 1036), VX_MASK, PPCVEC, { VD, VA, VB } },
{ "vslw", VX(4, 388), VX_MASK, PPCVEC, { VD, VA, VB } },
{ "vspltb", VX(4, 524), VX_MASK, PPCVEC, { VD, VB, UIMM } },
{ "vsplth", VX(4, 588), VX_MASK, PPCVEC, { VD, VB, UIMM } },
{ "vspltisb", VX(4, 780), VX_MASK, PPCVEC, { VD, SIMM } },
{ "vspltish", VX(4, 844), VX_MASK, PPCVEC, { VD, SIMM } },
{ "vspltisw", VX(4, 908), VX_MASK, PPCVEC, { VD, SIMM } },
{ "vspltw", VX(4, 652), VX_MASK, PPCVEC, { VD, VB, UIMM } },
{ "vsr", VX(4, 708), VX_MASK, PPCVEC, { VD, VA, VB } },
{ "vsrab", VX(4, 772), VX_MASK, PPCVEC, { VD, VA, VB } },
{ "vsrah", VX(4, 836), VX_MASK, PPCVEC, { VD, VA, VB } },
{ "vsraw", VX(4, 900), VX_MASK, PPCVEC, { VD, VA, VB } },
{ "vsrb", VX(4, 516), VX_MASK, PPCVEC, { VD, VA, VB } },
{ "vsrh", VX(4, 580), VX_MASK, PPCVEC, { VD, VA, VB } },
{ "vsro", VX(4, 1100), VX_MASK, PPCVEC, { VD, VA, VB } },
{ "vsrw", VX(4, 644), VX_MASK, PPCVEC, { VD, VA, VB } },
{ "vsubcuw", VX(4, 1408), VX_MASK, PPCVEC, { VD, VA, VB } },
{ "vsubfp", VX(4, 74), VX_MASK, PPCVEC, { VD, VA, VB } },
{ "vsubsbs", VX(4, 1792), VX_MASK, PPCVEC, { VD, VA, VB } },
{ "vsubshs", VX(4, 1856), VX_MASK, PPCVEC, { VD, VA, VB } },
{ "vsubsws", VX(4, 1920), VX_MASK, PPCVEC, { VD, VA, VB } },
{ "vsububm", VX(4, 1024), VX_MASK, PPCVEC, { VD, VA, VB } },
{ "vsububs", VX(4, 1536), VX_MASK, PPCVEC, { VD, VA, VB } },
{ "vsubuhm", VX(4, 1088), VX_MASK, PPCVEC, { VD, VA, VB } },
{ "vsubuhs", VX(4, 1600), VX_MASK, PPCVEC, { VD, VA, VB } },
{ "vsubuwm", VX(4, 1152), VX_MASK, PPCVEC, { VD, VA, VB } },
{ "vsubuws", VX(4, 1664), VX_MASK, PPCVEC, { VD, VA, VB } },
{ "vsumsws", VX(4, 1928), VX_MASK, PPCVEC, { VD, VA, VB } },
{ "vsum2sws", VX(4, 1672), VX_MASK, PPCVEC, { VD, VA, VB } },
{ "vsum4sbs", VX(4, 1800), VX_MASK, PPCVEC, { VD, VA, VB } },
{ "vsum4shs", VX(4, 1608), VX_MASK, PPCVEC, { VD, VA, VB } },
{ "vsum4ubs", VX(4, 1544), VX_MASK, PPCVEC, { VD, VA, VB } },
{ "vupkhpx", VX(4, 846), VX_MASK, PPCVEC, { VD, VB } },
{ "vupkhsb", VX(4, 526), VX_MASK, PPCVEC, { VD, VB } },
{ "vupkhsh", VX(4, 590), VX_MASK, PPCVEC, { VD, VB } },
{ "vupklpx", VX(4, 974), VX_MASK, PPCVEC, { VD, VB } },
{ "vupklsb", VX(4, 654), VX_MASK, PPCVEC, { VD, VB } },
{ "vupklsh", VX(4, 718), VX_MASK, PPCVEC, { VD, VB } },
{ "vxor", VX(4, 1220), VX_MASK, PPCVEC, { VD, VA, VB } },
{ "evaddw", VX(4, 512), VX_MASK, PPCSPE, { RS, RA, RB } },
{ "evaddiw", VX(4, 514), VX_MASK, PPCSPE, { RS, RB, UIMM } },
{ "evsubfw", VX(4, 516), VX_MASK, PPCSPE, { RS, RA, RB } },
{ "evsubw", VX(4, 516), VX_MASK, PPCSPE, { RS, RB, RA } },
{ "evsubifw", VX(4, 518), VX_MASK, PPCSPE, { RS, UIMM, RB } },
{ "evsubiw", VX(4, 518), VX_MASK, PPCSPE, { RS, RB, UIMM } },
{ "evabs", VX(4, 520), VX_MASK, PPCSPE, { RS, RA } },
{ "evneg", VX(4, 521), VX_MASK, PPCSPE, { RS, RA } },
{ "evextsb", VX(4, 522), VX_MASK, PPCSPE, { RS, RA } },
{ "evextsh", VX(4, 523), VX_MASK, PPCSPE, { RS, RA } },
{ "evrndw", VX(4, 524), VX_MASK, PPCSPE, { RS, RA } },
{ "evcntlzw", VX(4, 525), VX_MASK, PPCSPE, { RS, RA } },
{ "evcntlsw", VX(4, 526), VX_MASK, PPCSPE, { RS, RA } },
{ "brinc", VX(4, 527), VX_MASK, PPCSPE, { RS, RA, RB } },
{ "evand", VX(4, 529), VX_MASK, PPCSPE, { RS, RA, RB } },
{ "evandc", VX(4, 530), VX_MASK, PPCSPE, { RS, RA, RB } },
{ "evmr", VX(4, 535), VX_MASK, PPCSPE, { RS, RA, BBA } },
{ "evor", VX(4, 535), VX_MASK, PPCSPE, { RS, RA, RB } },
{ "evorc", VX(4, 539), VX_MASK, PPCSPE, { RS, RA, RB } },
{ "evxor", VX(4, 534), VX_MASK, PPCSPE, { RS, RA, RB } },
{ "eveqv", VX(4, 537), VX_MASK, PPCSPE, { RS, RA, RB } },
{ "evnand", VX(4, 542), VX_MASK, PPCSPE, { RS, RA, RB } },
{ "evnot", VX(4, 536), VX_MASK, PPCSPE, { RS, RA, BBA } },
{ "evnor", VX(4, 536), VX_MASK, PPCSPE, { RS, RA, RB } },
{ "evrlw", VX(4, 552), VX_MASK, PPCSPE, { RS, RA, RB } },
{ "evrlwi", VX(4, 554), VX_MASK, PPCSPE, { RS, RA, EVUIMM } },
{ "evslw", VX(4, 548), VX_MASK, PPCSPE, { RS, RA, RB } },
{ "evslwi", VX(4, 550), VX_MASK, PPCSPE, { RS, RA, EVUIMM } },
{ "evsrws", VX(4, 545), VX_MASK, PPCSPE, { RS, RA, RB } },
{ "evsrwu", VX(4, 544), VX_MASK, PPCSPE, { RS, RA, RB } },
{ "evsrwis", VX(4, 547), VX_MASK, PPCSPE, { RS, RA, EVUIMM } },
{ "evsrwiu", VX(4, 546), VX_MASK, PPCSPE, { RS, RA, EVUIMM } },
{ "evsplati", VX(4, 553), VX_MASK, PPCSPE, { RS, SIMM } },
{ "evsplatfi", VX(4, 555), VX_MASK, PPCSPE, { RS, SIMM } },
{ "evmergehi", VX(4, 556), VX_MASK, PPCSPE, { RS, RA, RB } },
{ "evmergelo", VX(4, 557), VX_MASK, PPCSPE, { RS, RA, RB } },
{ "evmergehilo",VX(4,558), VX_MASK, PPCSPE, { RS, RA, RB } },
{ "evmergelohi",VX(4,559), VX_MASK, PPCSPE, { RS, RA, RB } },
{ "evcmpgts", VX(4, 561), VX_MASK, PPCSPE, { CRFD, RA, RB } },
{ "evcmpgtu", VX(4, 560), VX_MASK, PPCSPE, { CRFD, RA, RB } },
{ "evcmplts", VX(4, 563), VX_MASK, PPCSPE, { CRFD, RA, RB } },
{ "evcmpltu", VX(4, 562), VX_MASK, PPCSPE, { CRFD, RA, RB } },
{ "evcmpeq", VX(4, 564), VX_MASK, PPCSPE, { CRFD, RA, RB } },
{ "evsel", EVSEL(4,79),EVSEL_MASK, PPCSPE, { RS, RA, RB, CRFS } },
{ "evldd", VX(4, 769), VX_MASK, PPCSPE, { RS, EVUIMM_8, RA } },
{ "evlddx", VX(4, 768), VX_MASK, PPCSPE, { RS, RA, RB } },
{ "evldw", VX(4, 771), VX_MASK, PPCSPE, { RS, EVUIMM_8, RA } },
{ "evldwx", VX(4, 770), VX_MASK, PPCSPE, { RS, RA, RB } },
{ "evldh", VX(4, 773), VX_MASK, PPCSPE, { RS, EVUIMM_8, RA } },
{ "evldhx", VX(4, 772), VX_MASK, PPCSPE, { RS, RA, RB } },
{ "evlwhe", VX(4, 785), VX_MASK, PPCSPE, { RS, EVUIMM_4, RA } },
{ "evlwhex", VX(4, 784), VX_MASK, PPCSPE, { RS, RA, RB } },
{ "evlwhou", VX(4, 789), VX_MASK, PPCSPE, { RS, EVUIMM_4, RA } },
{ "evlwhoux", VX(4, 788), VX_MASK, PPCSPE, { RS, RA, RB } },
{ "evlwhos", VX(4, 791), VX_MASK, PPCSPE, { RS, EVUIMM_4, RA } },
{ "evlwhosx", VX(4, 790), VX_MASK, PPCSPE, { RS, RA, RB } },
{ "evlwwsplat",VX(4, 793), VX_MASK, PPCSPE, { RS, EVUIMM_4, RA } },
{ "evlwwsplatx",VX(4, 792), VX_MASK, PPCSPE, { RS, RA, RB } },
{ "evlwhsplat",VX(4, 797), VX_MASK, PPCSPE, { RS, EVUIMM_4, RA } },
{ "evlwhsplatx",VX(4, 796), VX_MASK, PPCSPE, { RS, RA, RB } },
{ "evlhhesplat",VX(4, 777), VX_MASK, PPCSPE, { RS, EVUIMM_2, RA } },
{ "evlhhesplatx",VX(4, 776), VX_MASK, PPCSPE, { RS, RA, RB } },
{ "evlhhousplat",VX(4, 781), VX_MASK, PPCSPE, { RS, EVUIMM_2, RA } },
{ "evlhhousplatx",VX(4, 780), VX_MASK, PPCSPE, { RS, RA, RB } },
{ "evlhhossplat",VX(4, 783), VX_MASK, PPCSPE, { RS, EVUIMM_2, RA } },
{ "evlhhossplatx",VX(4, 782), VX_MASK, PPCSPE, { RS, RA, RB } },
{ "evstdd", VX(4, 801), VX_MASK, PPCSPE, { RS, EVUIMM_8, RA } },
{ "evstddx", VX(4, 800), VX_MASK, PPCSPE, { RS, RA, RB } },
{ "evstdw", VX(4, 803), VX_MASK, PPCSPE, { RS, EVUIMM_8, RA } },
{ "evstdwx", VX(4, 802), VX_MASK, PPCSPE, { RS, RA, RB } },
{ "evstdh", VX(4, 805), VX_MASK, PPCSPE, { RS, EVUIMM_8, RA } },
{ "evstdhx", VX(4, 804), VX_MASK, PPCSPE, { RS, RA, RB } },
{ "evstwwe", VX(4, 825), VX_MASK, PPCSPE, { RS, EVUIMM_4, RA } },
{ "evstwwex", VX(4, 824), VX_MASK, PPCSPE, { RS, RA, RB } },
{ "evstwwo", VX(4, 829), VX_MASK, PPCSPE, { RS, EVUIMM_4, RA } },
{ "evstwwox", VX(4, 828), VX_MASK, PPCSPE, { RS, RA, RB } },
{ "evstwhe", VX(4, 817), VX_MASK, PPCSPE, { RS, EVUIMM_4, RA } },
{ "evstwhex", VX(4, 816), VX_MASK, PPCSPE, { RS, RA, RB } },
{ "evstwho", VX(4, 821), VX_MASK, PPCSPE, { RS, EVUIMM_4, RA } },
{ "evstwhox", VX(4, 820), VX_MASK, PPCSPE, { RS, RA, RB } },
{ "evfsabs", VX(4, 644), VX_MASK, PPCSPE, { RS, RA } },
{ "evfsnabs", VX(4, 645), VX_MASK, PPCSPE, { RS, RA } },
{ "evfsneg", VX(4, 646), VX_MASK, PPCSPE, { RS, RA } },
{ "evfsadd", VX(4, 640), VX_MASK, PPCSPE, { RS, RA, RB } },
{ "evfssub", VX(4, 641), VX_MASK, PPCSPE, { RS, RA, RB } },
{ "evfsmul", VX(4, 648), VX_MASK, PPCSPE, { RS, RA, RB } },
{ "evfsdiv", VX(4, 649), VX_MASK, PPCSPE, { RS, RA, RB } },
{ "evfscmpgt", VX(4, 652), VX_MASK, PPCSPE, { CRFD, RA, RB } },
{ "evfscmplt", VX(4, 653), VX_MASK, PPCSPE, { CRFD, RA, RB } },
{ "evfscmpeq", VX(4, 654), VX_MASK, PPCSPE, { CRFD, RA, RB } },
{ "evfststgt", VX(4, 668), VX_MASK, PPCSPE, { CRFD, RA, RB } },
{ "evfststlt", VX(4, 669), VX_MASK, PPCSPE, { CRFD, RA, RB } },
{ "evfststeq", VX(4, 670), VX_MASK, PPCSPE, { CRFD, RA, RB } },
{ "evfscfui", VX(4, 656), VX_MASK, PPCSPE, { RS, RB } },
{ "evfsctuiz", VX(4, 664), VX_MASK, PPCSPE, { RS, RB } },
{ "evfscfsi", VX(4, 657), VX_MASK, PPCSPE, { RS, RB } },
{ "evfscfuf", VX(4, 658), VX_MASK, PPCSPE, { RS, RB } },
{ "evfscfsf", VX(4, 659), VX_MASK, PPCSPE, { RS, RB } },
{ "evfsctui", VX(4, 660), VX_MASK, PPCSPE, { RS, RB } },
{ "evfsctsi", VX(4, 661), VX_MASK, PPCSPE, { RS, RB } },
{ "evfsctsiz", VX(4, 666), VX_MASK, PPCSPE, { RS, RB } },
{ "evfsctuf", VX(4, 662), VX_MASK, PPCSPE, { RS, RB } },
{ "evfsctsf", VX(4, 663), VX_MASK, PPCSPE, { RS, RB } },
{ "efsabs", VX(4, 708), VX_MASK, PPCEFS, { RS, RA } },
{ "efsnabs", VX(4, 709), VX_MASK, PPCEFS, { RS, RA } },
{ "efsneg", VX(4, 710), VX_MASK, PPCEFS, { RS, RA } },
{ "efsadd", VX(4, 704), VX_MASK, PPCEFS, { RS, RA, RB } },
{ "efssub", VX(4, 705), VX_MASK, PPCEFS, { RS, RA, RB } },
{ "efsmul", VX(4, 712), VX_MASK, PPCEFS, { RS, RA, RB } },
{ "efsdiv", VX(4, 713), VX_MASK, PPCEFS, { RS, RA, RB } },
{ "efscmpgt", VX(4, 716), VX_MASK, PPCEFS, { CRFD, RA, RB } },
{ "efscmplt", VX(4, 717), VX_MASK, PPCEFS, { CRFD, RA, RB } },
{ "efscmpeq", VX(4, 718), VX_MASK, PPCEFS, { CRFD, RA, RB } },
{ "efststgt", VX(4, 732), VX_MASK, PPCEFS, { CRFD, RA, RB } },
{ "efststlt", VX(4, 733), VX_MASK, PPCEFS, { CRFD, RA, RB } },
{ "efststeq", VX(4, 734), VX_MASK, PPCEFS, { CRFD, RA, RB } },
{ "efscfui", VX(4, 720), VX_MASK, PPCEFS, { RS, RB } },
{ "efsctuiz", VX(4, 728), VX_MASK, PPCEFS, { RS, RB } },
{ "efscfsi", VX(4, 721), VX_MASK, PPCEFS, { RS, RB } },
{ "efscfuf", VX(4, 722), VX_MASK, PPCEFS, { RS, RB } },
{ "efscfsf", VX(4, 723), VX_MASK, PPCEFS, { RS, RB } },
{ "efsctui", VX(4, 724), VX_MASK, PPCEFS, { RS, RB } },
{ "efsctsi", VX(4, 725), VX_MASK, PPCEFS, { RS, RB } },
{ "efsctsiz", VX(4, 730), VX_MASK, PPCEFS, { RS, RB } },
{ "efsctuf", VX(4, 726), VX_MASK, PPCEFS, { RS, RB } },
{ "efsctsf", VX(4, 727), VX_MASK, PPCEFS, { RS, RB } },
{ "evmhossf", VX(4, 1031), VX_MASK, PPCSPE, { RS, RA, RB } },
{ "evmhossfa", VX(4, 1063), VX_MASK, PPCSPE, { RS, RA, RB } },
{ "evmhosmf", VX(4, 1039), VX_MASK, PPCSPE, { RS, RA, RB } },
{ "evmhosmfa", VX(4, 1071), VX_MASK, PPCSPE, { RS, RA, RB } },
{ "evmhosmi", VX(4, 1037), VX_MASK, PPCSPE, { RS, RA, RB } },
{ "evmhosmia", VX(4, 1069), VX_MASK, PPCSPE, { RS, RA, RB } },
{ "evmhoumi", VX(4, 1036), VX_MASK, PPCSPE, { RS, RA, RB } },
{ "evmhoumia", VX(4, 1068), VX_MASK, PPCSPE, { RS, RA, RB } },
{ "evmhessf", VX(4, 1027), VX_MASK, PPCSPE, { RS, RA, RB } },
{ "evmhessfa", VX(4, 1059), VX_MASK, PPCSPE, { RS, RA, RB } },
{ "evmhesmf", VX(4, 1035), VX_MASK, PPCSPE, { RS, RA, RB } },
{ "evmhesmfa", VX(4, 1067), VX_MASK, PPCSPE, { RS, RA, RB } },
{ "evmhesmi", VX(4, 1033), VX_MASK, PPCSPE, { RS, RA, RB } },
{ "evmhesmia", VX(4, 1065), VX_MASK, PPCSPE, { RS, RA, RB } },
{ "evmheumi", VX(4, 1032), VX_MASK, PPCSPE, { RS, RA, RB } },
{ "evmheumia", VX(4, 1064), VX_MASK, PPCSPE, { RS, RA, RB } },
{ "evmhossfaaw",VX(4, 1287), VX_MASK, PPCSPE, { RS, RA, RB } },
{ "evmhossiaaw",VX(4, 1285), VX_MASK, PPCSPE, { RS, RA, RB } },
{ "evmhosmfaaw",VX(4, 1295), VX_MASK, PPCSPE, { RS, RA, RB } },
{ "evmhosmiaaw",VX(4, 1293), VX_MASK, PPCSPE, { RS, RA, RB } },
{ "evmhousiaaw",VX(4, 1284), VX_MASK, PPCSPE, { RS, RA, RB } },
{ "evmhoumiaaw",VX(4, 1292), VX_MASK, PPCSPE, { RS, RA, RB } },
{ "evmhessfaaw",VX(4, 1283), VX_MASK, PPCSPE, { RS, RA, RB } },
{ "evmhessiaaw",VX(4, 1281), VX_MASK, PPCSPE, { RS, RA, RB } },
{ "evmhesmfaaw",VX(4, 1291), VX_MASK, PPCSPE, { RS, RA, RB } },
{ "evmhesmiaaw",VX(4, 1289), VX_MASK, PPCSPE, { RS, RA, RB } },
{ "evmheusiaaw",VX(4, 1280), VX_MASK, PPCSPE, { RS, RA, RB } },
{ "evmheumiaaw",VX(4, 1288), VX_MASK, PPCSPE, { RS, RA, RB } },
{ "evmhossfanw",VX(4, 1415), VX_MASK, PPCSPE, { RS, RA, RB } },
{ "evmhossianw",VX(4, 1413), VX_MASK, PPCSPE, { RS, RA, RB } },
{ "evmhosmfanw",VX(4, 1423), VX_MASK, PPCSPE, { RS, RA, RB } },
{ "evmhosmianw",VX(4, 1421), VX_MASK, PPCSPE, { RS, RA, RB } },
{ "evmhousianw",VX(4, 1412), VX_MASK, PPCSPE, { RS, RA, RB } },
{ "evmhoumianw",VX(4, 1420), VX_MASK, PPCSPE, { RS, RA, RB } },
{ "evmhessfanw",VX(4, 1411), VX_MASK, PPCSPE, { RS, RA, RB } },
{ "evmhessianw",VX(4, 1409), VX_MASK, PPCSPE, { RS, RA, RB } },
{ "evmhesmfanw",VX(4, 1419), VX_MASK, PPCSPE, { RS, RA, RB } },
{ "evmhesmianw",VX(4, 1417), VX_MASK, PPCSPE, { RS, RA, RB } },
{ "evmheusianw",VX(4, 1408), VX_MASK, PPCSPE, { RS, RA, RB } },
{ "evmheumianw",VX(4, 1416), VX_MASK, PPCSPE, { RS, RA, RB } },
{ "evmhogsmfaa",VX(4, 1327), VX_MASK, PPCSPE, { RS, RA, RB } },
{ "evmhogsmiaa",VX(4, 1325), VX_MASK, PPCSPE, { RS, RA, RB } },
{ "evmhogumiaa",VX(4, 1324), VX_MASK, PPCSPE, { RS, RA, RB } },
{ "evmhegsmfaa",VX(4, 1323), VX_MASK, PPCSPE, { RS, RA, RB } },
{ "evmhegsmiaa",VX(4, 1321), VX_MASK, PPCSPE, { RS, RA, RB } },
{ "evmhegumiaa",VX(4, 1320), VX_MASK, PPCSPE, { RS, RA, RB } },
{ "evmhogsmfan",VX(4, 1455), VX_MASK, PPCSPE, { RS, RA, RB } },
{ "evmhogsmian",VX(4, 1453), VX_MASK, PPCSPE, { RS, RA, RB } },
{ "evmhogumian",VX(4, 1452), VX_MASK, PPCSPE, { RS, RA, RB } },
{ "evmhegsmfan",VX(4, 1451), VX_MASK, PPCSPE, { RS, RA, RB } },
{ "evmhegsmian",VX(4, 1449), VX_MASK, PPCSPE, { RS, RA, RB } },
{ "evmhegumian",VX(4, 1448), VX_MASK, PPCSPE, { RS, RA, RB } },
{ "evmwhssf", VX(4, 1095), VX_MASK, PPCSPE, { RS, RA, RB } },
{ "evmwhssfa", VX(4, 1127), VX_MASK, PPCSPE, { RS, RA, RB } },
{ "evmwhsmf", VX(4, 1103), VX_MASK, PPCSPE, { RS, RA, RB } },
{ "evmwhsmfa", VX(4, 1135), VX_MASK, PPCSPE, { RS, RA, RB } },
{ "evmwhsmi", VX(4, 1101), VX_MASK, PPCSPE, { RS, RA, RB } },
{ "evmwhsmia", VX(4, 1133), VX_MASK, PPCSPE, { RS, RA, RB } },
{ "evmwhumi", VX(4, 1100), VX_MASK, PPCSPE, { RS, RA, RB } },
{ "evmwhumia", VX(4, 1132), VX_MASK, PPCSPE, { RS, RA, RB } },
{ "evmwlumi", VX(4, 1096), VX_MASK, PPCSPE, { RS, RA, RB } },
{ "evmwlumia", VX(4, 1128), VX_MASK, PPCSPE, { RS, RA, RB } },
{ "evmwlssiaaw",VX(4, 1345), VX_MASK, PPCSPE, { RS, RA, RB } },
{ "evmwlsmiaaw",VX(4, 1353), VX_MASK, PPCSPE, { RS, RA, RB } },
{ "evmwlusiaaw",VX(4, 1344), VX_MASK, PPCSPE, { RS, RA, RB } },
{ "evmwlumiaaw",VX(4, 1352), VX_MASK, PPCSPE, { RS, RA, RB } },
{ "evmwlssianw",VX(4, 1473), VX_MASK, PPCSPE, { RS, RA, RB } },
{ "evmwlsmianw",VX(4, 1481), VX_MASK, PPCSPE, { RS, RA, RB } },
{ "evmwlusianw",VX(4, 1472), VX_MASK, PPCSPE, { RS, RA, RB } },
{ "evmwlumianw",VX(4, 1480), VX_MASK, PPCSPE, { RS, RA, RB } },
{ "evmwssf", VX(4, 1107), VX_MASK, PPCSPE, { RS, RA, RB } },
{ "evmwssfa", VX(4, 1139), VX_MASK, PPCSPE, { RS, RA, RB } },
{ "evmwsmf", VX(4, 1115), VX_MASK, PPCSPE, { RS, RA, RB } },
{ "evmwsmfa", VX(4, 1147), VX_MASK, PPCSPE, { RS, RA, RB } },
{ "evmwsmi", VX(4, 1113), VX_MASK, PPCSPE, { RS, RA, RB } },
{ "evmwsmia", VX(4, 1145), VX_MASK, PPCSPE, { RS, RA, RB } },
{ "evmwumi", VX(4, 1112), VX_MASK, PPCSPE, { RS, RA, RB } },
{ "evmwumia", VX(4, 1144), VX_MASK, PPCSPE, { RS, RA, RB } },
{ "evmwssfaa", VX(4, 1363), VX_MASK, PPCSPE, { RS, RA, RB } },
{ "evmwsmfaa", VX(4, 1371), VX_MASK, PPCSPE, { RS, RA, RB } },
{ "evmwsmiaa", VX(4, 1369), VX_MASK, PPCSPE, { RS, RA, RB } },
{ "evmwumiaa", VX(4, 1368), VX_MASK, PPCSPE, { RS, RA, RB } },
{ "evmwssfan", VX(4, 1491), VX_MASK, PPCSPE, { RS, RA, RB } },
{ "evmwsmfan", VX(4, 1499), VX_MASK, PPCSPE, { RS, RA, RB } },
{ "evmwsmian", VX(4, 1497), VX_MASK, PPCSPE, { RS, RA, RB } },
{ "evmwumian", VX(4, 1496), VX_MASK, PPCSPE, { RS, RA, RB } },
{ "evaddssiaaw",VX(4, 1217), VX_MASK, PPCSPE, { RS, RA } },
{ "evaddsmiaaw",VX(4, 1225), VX_MASK, PPCSPE, { RS, RA } },
{ "evaddusiaaw",VX(4, 1216), VX_MASK, PPCSPE, { RS, RA } },
{ "evaddumiaaw",VX(4, 1224), VX_MASK, PPCSPE, { RS, RA } },
{ "evsubfssiaaw",VX(4, 1219), VX_MASK, PPCSPE, { RS, RA } },
{ "evsubfsmiaaw",VX(4, 1227), VX_MASK, PPCSPE, { RS, RA } },
{ "evsubfusiaaw",VX(4, 1218), VX_MASK, PPCSPE, { RS, RA } },
{ "evsubfumiaaw",VX(4, 1226), VX_MASK, PPCSPE, { RS, RA } },
{ "evmra", VX(4, 1220), VX_MASK, PPCSPE, { RS, RA } },
{ "evdivws", VX(4, 1222), VX_MASK, PPCSPE, { RS, RA, RB } },
{ "evdivwu", VX(4, 1223), VX_MASK, PPCSPE, { RS, RA, RB } },
{ "mulli", OP(7), OP_MASK, PPCCOM, { RT, RA, SI } },
{ "muli", OP(7), OP_MASK, PWRCOM, { RT, RA, SI } },
{ "subfic", OP(8), OP_MASK, PPCCOM, { RT, RA, SI } },
{ "sfi", OP(8), OP_MASK, PWRCOM, { RT, RA, SI } },
{ "dozi", OP(9), OP_MASK, M601, { RT, RA, SI } },
{ "bce", B(9,0,0), B_MASK, BOOKE64, { BO, BI, BD } },
{ "bcel", B(9,0,1), B_MASK, BOOKE64, { BO, BI, BD } },
{ "bcea", B(9,1,0), B_MASK, BOOKE64, { BO, BI, BDA } },
{ "bcela", B(9,1,1), B_MASK, BOOKE64, { BO, BI, BDA } },
{ "cmplwi", OPL(10,0), OPL_MASK, PPCCOM, { OBF, RA, UI } },
{ "cmpldi", OPL(10,1), OPL_MASK, PPC64, { OBF, RA, UI } },
{ "cmpli", OP(10), OP_MASK, PPC, { BF, L, RA, UI } },
{ "cmpli", OP(10), OP_MASK, PWRCOM, { BF, RA, UI } },
{ "cmpwi", OPL(11,0), OPL_MASK, PPCCOM, { OBF, RA, SI } },
{ "cmpdi", OPL(11,1), OPL_MASK, PPC64, { OBF, RA, SI } },
{ "cmpi", OP(11), OP_MASK, PPC, { BF, L, RA, SI } },
{ "cmpi", OP(11), OP_MASK, PWRCOM, { BF, RA, SI } },
{ "addic", OP(12), OP_MASK, PPCCOM, { RT, RA, SI } },
{ "ai", OP(12), OP_MASK, PWRCOM, { RT, RA, SI } },
{ "subic", OP(12), OP_MASK, PPCCOM, { RT, RA, NSI } },
{ "addic.", OP(13), OP_MASK, PPCCOM, { RT, RA, SI } },
{ "ai.", OP(13), OP_MASK, PWRCOM, { RT, RA, SI } },
{ "subic.", OP(13), OP_MASK, PPCCOM, { RT, RA, NSI } },
{ "li", OP(14), DRA_MASK, PPCCOM, { RT, SI } },
{ "lil", OP(14), DRA_MASK, PWRCOM, { RT, SI } },
{ "addi", OP(14), OP_MASK, PPCCOM, { RT, RA0, SI } },
{ "cal", OP(14), OP_MASK, PWRCOM, { RT, D, RA0 } },
{ "subi", OP(14), OP_MASK, PPCCOM, { RT, RA0, NSI } },
{ "la", OP(14), OP_MASK, PPCCOM, { RT, D, RA0 } },
{ "lis", OP(15), DRA_MASK, PPCCOM, { RT, SISIGNOPT } },
{ "liu", OP(15), DRA_MASK, PWRCOM, { RT, SISIGNOPT } },
{ "addis", OP(15), OP_MASK, PPCCOM, { RT,RA0,SISIGNOPT } },
{ "cau", OP(15), OP_MASK, PWRCOM, { RT,RA0,SISIGNOPT } },
{ "subis", OP(15), OP_MASK, PPCCOM, { RT, RA0, NSI } },
{ "bdnz-", BBO(16,BODNZ,0,0), BBOATBI_MASK, PPCCOM, { BDM } },
{ "bdnz+", BBO(16,BODNZ,0,0), BBOATBI_MASK, PPCCOM, { BDP } },
{ "bdnz", BBO(16,BODNZ,0,0), BBOATBI_MASK, PPCCOM, { BD } },
{ "bdn", BBO(16,BODNZ,0,0), BBOATBI_MASK, PWRCOM, { BD } },
{ "bdnzl-", BBO(16,BODNZ,0,1), BBOATBI_MASK, PPCCOM, { BDM } },
{ "bdnzl+", BBO(16,BODNZ,0,1), BBOATBI_MASK, PPCCOM, { BDP } },
{ "bdnzl", BBO(16,BODNZ,0,1), BBOATBI_MASK, PPCCOM, { BD } },
{ "bdnl", BBO(16,BODNZ,0,1), BBOATBI_MASK, PWRCOM, { BD } },
{ "bdnza-", BBO(16,BODNZ,1,0), BBOATBI_MASK, PPCCOM, { BDMA } },
{ "bdnza+", BBO(16,BODNZ,1,0), BBOATBI_MASK, PPCCOM, { BDPA } },
{ "bdnza", BBO(16,BODNZ,1,0), BBOATBI_MASK, PPCCOM, { BDA } },
{ "bdna", BBO(16,BODNZ,1,0), BBOATBI_MASK, PWRCOM, { BDA } },
{ "bdnzla-", BBO(16,BODNZ,1,1), BBOATBI_MASK, PPCCOM, { BDMA } },
{ "bdnzla+", BBO(16,BODNZ,1,1), BBOATBI_MASK, PPCCOM, { BDPA } },
{ "bdnzla", BBO(16,BODNZ,1,1), BBOATBI_MASK, PPCCOM, { BDA } },
{ "bdnla", BBO(16,BODNZ,1,1), BBOATBI_MASK, PWRCOM, { BDA } },
{ "bdz-", BBO(16,BODZ,0,0), BBOATBI_MASK, PPCCOM, { BDM } },
{ "bdz+", BBO(16,BODZ,0,0), BBOATBI_MASK, PPCCOM, { BDP } },
{ "bdz", BBO(16,BODZ,0,0), BBOATBI_MASK, COM, { BD } },
{ "bdzl-", BBO(16,BODZ,0,1), BBOATBI_MASK, PPCCOM, { BDM } },
{ "bdzl+", BBO(16,BODZ,0,1), BBOATBI_MASK, PPCCOM, { BDP } },
{ "bdzl", BBO(16,BODZ,0,1), BBOATBI_MASK, COM, { BD } },
{ "bdza-", BBO(16,BODZ,1,0), BBOATBI_MASK, PPCCOM, { BDMA } },
{ "bdza+", BBO(16,BODZ,1,0), BBOATBI_MASK, PPCCOM, { BDPA } },
{ "bdza", BBO(16,BODZ,1,0), BBOATBI_MASK, COM, { BDA } },
{ "bdzla-", BBO(16,BODZ,1,1), BBOATBI_MASK, PPCCOM, { BDMA } },
{ "bdzla+", BBO(16,BODZ,1,1), BBOATBI_MASK, PPCCOM, { BDPA } },
{ "bdzla", BBO(16,BODZ,1,1), BBOATBI_MASK, COM, { BDA } },
{ "blt-", BBOCB(16,BOT,CBLT,0,0), BBOATCB_MASK, PPCCOM, { CR, BDM } },
{ "blt+", BBOCB(16,BOT,CBLT,0,0), BBOATCB_MASK, PPCCOM, { CR, BDP } },
{ "blt", BBOCB(16,BOT,CBLT,0,0), BBOATCB_MASK, COM, { CR, BD } },
{ "bltl-", BBOCB(16,BOT,CBLT,0,1), BBOATCB_MASK, PPCCOM, { CR, BDM } },
{ "bltl+", BBOCB(16,BOT,CBLT,0,1), BBOATCB_MASK, PPCCOM, { CR, BDP } },
{ "bltl", BBOCB(16,BOT,CBLT,0,1), BBOATCB_MASK, COM, { CR, BD } },
{ "blta-", BBOCB(16,BOT,CBLT,1,0), BBOATCB_MASK, PPCCOM, { CR, BDMA } },
{ "blta+", BBOCB(16,BOT,CBLT,1,0), BBOATCB_MASK, PPCCOM, { CR, BDPA } },
{ "blta", BBOCB(16,BOT,CBLT,1,0), BBOATCB_MASK, COM, { CR, BDA } },
{ "bltla-", BBOCB(16,BOT,CBLT,1,1), BBOATCB_MASK, PPCCOM, { CR, BDMA } },
{ "bltla+", BBOCB(16,BOT,CBLT,1,1), BBOATCB_MASK, PPCCOM, { CR, BDPA } },
{ "bltla", BBOCB(16,BOT,CBLT,1,1), BBOATCB_MASK, COM, { CR, BDA } },
{ "bgt-", BBOCB(16,BOT,CBGT,0,0), BBOATCB_MASK, PPCCOM, { CR, BDM } },
{ "bgt+", BBOCB(16,BOT,CBGT,0,0), BBOATCB_MASK, PPCCOM, { CR, BDP } },
{ "bgt", BBOCB(16,BOT,CBGT,0,0), BBOATCB_MASK, COM, { CR, BD } },
{ "bgtl-", BBOCB(16,BOT,CBGT,0,1), BBOATCB_MASK, PPCCOM, { CR, BDM } },
{ "bgtl+", BBOCB(16,BOT,CBGT,0,1), BBOATCB_MASK, PPCCOM, { CR, BDP } },
{ "bgtl", BBOCB(16,BOT,CBGT,0,1), BBOATCB_MASK, COM, { CR, BD } },
{ "bgta-", BBOCB(16,BOT,CBGT,1,0), BBOATCB_MASK, PPCCOM, { CR, BDMA } },
{ "bgta+", BBOCB(16,BOT,CBGT,1,0), BBOATCB_MASK, PPCCOM, { CR, BDPA } },
{ "bgta", BBOCB(16,BOT,CBGT,1,0), BBOATCB_MASK, COM, { CR, BDA } },
{ "bgtla-", BBOCB(16,BOT,CBGT,1,1), BBOATCB_MASK, PPCCOM, { CR, BDMA } },
{ "bgtla+", BBOCB(16,BOT,CBGT,1,1), BBOATCB_MASK, PPCCOM, { CR, BDPA } },
{ "bgtla", BBOCB(16,BOT,CBGT,1,1), BBOATCB_MASK, COM, { CR, BDA } },
{ "beq-", BBOCB(16,BOT,CBEQ,0,0), BBOATCB_MASK, PPCCOM, { CR, BDM } },
{ "beq+", BBOCB(16,BOT,CBEQ,0,0), BBOATCB_MASK, PPCCOM, { CR, BDP } },
{ "beq", BBOCB(16,BOT,CBEQ,0,0), BBOATCB_MASK, COM, { CR, BD } },
{ "beql-", BBOCB(16,BOT,CBEQ,0,1), BBOATCB_MASK, PPCCOM, { CR, BDM } },
{ "beql+", BBOCB(16,BOT,CBEQ,0,1), BBOATCB_MASK, PPCCOM, { CR, BDP } },
{ "beql", BBOCB(16,BOT,CBEQ,0,1), BBOATCB_MASK, COM, { CR, BD } },
{ "beqa-", BBOCB(16,BOT,CBEQ,1,0), BBOATCB_MASK, PPCCOM, { CR, BDMA } },
{ "beqa+", BBOCB(16,BOT,CBEQ,1,0), BBOATCB_MASK, PPCCOM, { CR, BDPA } },
{ "beqa", BBOCB(16,BOT,CBEQ,1,0), BBOATCB_MASK, COM, { CR, BDA } },
{ "beqla-", BBOCB(16,BOT,CBEQ,1,1), BBOATCB_MASK, PPCCOM, { CR, BDMA } },
{ "beqla+", BBOCB(16,BOT,CBEQ,1,1), BBOATCB_MASK, PPCCOM, { CR, BDPA } },
{ "beqla", BBOCB(16,BOT,CBEQ,1,1), BBOATCB_MASK, COM, { CR, BDA } },
{ "bso-", BBOCB(16,BOT,CBSO,0,0), BBOATCB_MASK, PPCCOM, { CR, BDM } },
{ "bso+", BBOCB(16,BOT,CBSO,0,0), BBOATCB_MASK, PPCCOM, { CR, BDP } },
{ "bso", BBOCB(16,BOT,CBSO,0,0), BBOATCB_MASK, COM, { CR, BD } },
{ "bsol-", BBOCB(16,BOT,CBSO,0,1), BBOATCB_MASK, PPCCOM, { CR, BDM } },
{ "bsol+", BBOCB(16,BOT,CBSO,0,1), BBOATCB_MASK, PPCCOM, { CR, BDP } },
{ "bsol", BBOCB(16,BOT,CBSO,0,1), BBOATCB_MASK, COM, { CR, BD } },
{ "bsoa-", BBOCB(16,BOT,CBSO,1,0), BBOATCB_MASK, PPCCOM, { CR, BDMA } },
{ "bsoa+", BBOCB(16,BOT,CBSO,1,0), BBOATCB_MASK, PPCCOM, { CR, BDPA } },
{ "bsoa", BBOCB(16,BOT,CBSO,1,0), BBOATCB_MASK, COM, { CR, BDA } },
{ "bsola-", BBOCB(16,BOT,CBSO,1,1), BBOATCB_MASK, PPCCOM, { CR, BDMA } },
{ "bsola+", BBOCB(16,BOT,CBSO,1,1), BBOATCB_MASK, PPCCOM, { CR, BDPA } },
{ "bsola", BBOCB(16,BOT,CBSO,1,1), BBOATCB_MASK, COM, { CR, BDA } },
{ "bun-", BBOCB(16,BOT,CBSO,0,0), BBOATCB_MASK, PPCCOM, { CR, BDM } },
{ "bun+", BBOCB(16,BOT,CBSO,0,0), BBOATCB_MASK, PPCCOM, { CR, BDP } },
{ "bun", BBOCB(16,BOT,CBSO,0,0), BBOATCB_MASK, PPCCOM, { CR, BD } },
{ "bunl-", BBOCB(16,BOT,CBSO,0,1), BBOATCB_MASK, PPCCOM, { CR, BDM } },
{ "bunl+", BBOCB(16,BOT,CBSO,0,1), BBOATCB_MASK, PPCCOM, { CR, BDP } },
{ "bunl", BBOCB(16,BOT,CBSO,0,1), BBOATCB_MASK, PPCCOM, { CR, BD } },
{ "buna-", BBOCB(16,BOT,CBSO,1,0), BBOATCB_MASK, PPCCOM, { CR, BDMA } },
{ "buna+", BBOCB(16,BOT,CBSO,1,0), BBOATCB_MASK, PPCCOM, { CR, BDPA } },
{ "buna", BBOCB(16,BOT,CBSO,1,0), BBOATCB_MASK, PPCCOM, { CR, BDA } },
{ "bunla-", BBOCB(16,BOT,CBSO,1,1), BBOATCB_MASK, PPCCOM, { CR, BDMA } },
{ "bunla+", BBOCB(16,BOT,CBSO,1,1), BBOATCB_MASK, PPCCOM, { CR, BDPA } },
{ "bunla", BBOCB(16,BOT,CBSO,1,1), BBOATCB_MASK, PPCCOM, { CR, BDA } },
{ "bge-", BBOCB(16,BOF,CBLT,0,0), BBOATCB_MASK, PPCCOM, { CR, BDM } },
{ "bge+", BBOCB(16,BOF,CBLT,0,0), BBOATCB_MASK, PPCCOM, { CR, BDP } },
{ "bge", BBOCB(16,BOF,CBLT,0,0), BBOATCB_MASK, COM, { CR, BD } },
{ "bgel-", BBOCB(16,BOF,CBLT,0,1), BBOATCB_MASK, PPCCOM, { CR, BDM } },
{ "bgel+", BBOCB(16,BOF,CBLT,0,1), BBOATCB_MASK, PPCCOM, { CR, BDP } },
{ "bgel", BBOCB(16,BOF,CBLT,0,1), BBOATCB_MASK, COM, { CR, BD } },
{ "bgea-", BBOCB(16,BOF,CBLT,1,0), BBOATCB_MASK, PPCCOM, { CR, BDMA } },
{ "bgea+", BBOCB(16,BOF,CBLT,1,0), BBOATCB_MASK, PPCCOM, { CR, BDPA } },
{ "bgea", BBOCB(16,BOF,CBLT,1,0), BBOATCB_MASK, COM, { CR, BDA } },
{ "bgela-", BBOCB(16,BOF,CBLT,1,1), BBOATCB_MASK, PPCCOM, { CR, BDMA } },
{ "bgela+", BBOCB(16,BOF,CBLT,1,1), BBOATCB_MASK, PPCCOM, { CR, BDPA } },
{ "bgela", BBOCB(16,BOF,CBLT,1,1), BBOATCB_MASK, COM, { CR, BDA } },
{ "bnl-", BBOCB(16,BOF,CBLT,0,0), BBOATCB_MASK, PPCCOM, { CR, BDM } },
{ "bnl+", BBOCB(16,BOF,CBLT,0,0), BBOATCB_MASK, PPCCOM, { CR, BDP } },
{ "bnl", BBOCB(16,BOF,CBLT,0,0), BBOATCB_MASK, COM, { CR, BD } },
{ "bnll-", BBOCB(16,BOF,CBLT,0,1), BBOATCB_MASK, PPCCOM, { CR, BDM } },
{ "bnll+", BBOCB(16,BOF,CBLT,0,1), BBOATCB_MASK, PPCCOM, { CR, BDP } },
{ "bnll", BBOCB(16,BOF,CBLT,0,1), BBOATCB_MASK, COM, { CR, BD } },
{ "bnla-", BBOCB(16,BOF,CBLT,1,0), BBOATCB_MASK, PPCCOM, { CR, BDMA } },
{ "bnla+", BBOCB(16,BOF,CBLT,1,0), BBOATCB_MASK, PPCCOM, { CR, BDPA } },
{ "bnla", BBOCB(16,BOF,CBLT,1,0), BBOATCB_MASK, COM, { CR, BDA } },
{ "bnlla-", BBOCB(16,BOF,CBLT,1,1), BBOATCB_MASK, PPCCOM, { CR, BDMA } },
{ "bnlla+", BBOCB(16,BOF,CBLT,1,1), BBOATCB_MASK, PPCCOM, { CR, BDPA } },
{ "bnlla", BBOCB(16,BOF,CBLT,1,1), BBOATCB_MASK, COM, { CR, BDA } },
{ "ble-", BBOCB(16,BOF,CBGT,0,0), BBOATCB_MASK, PPCCOM, { CR, BDM } },
{ "ble+", BBOCB(16,BOF,CBGT,0,0), BBOATCB_MASK, PPCCOM, { CR, BDP } },
{ "ble", BBOCB(16,BOF,CBGT,0,0), BBOATCB_MASK, COM, { CR, BD } },
{ "blel-", BBOCB(16,BOF,CBGT,0,1), BBOATCB_MASK, PPCCOM, { CR, BDM } },
{ "blel+", BBOCB(16,BOF,CBGT,0,1), BBOATCB_MASK, PPCCOM, { CR, BDP } },
{ "blel", BBOCB(16,BOF,CBGT,0,1), BBOATCB_MASK, COM, { CR, BD } },
{ "blea-", BBOCB(16,BOF,CBGT,1,0), BBOATCB_MASK, PPCCOM, { CR, BDMA } },
{ "blea+", BBOCB(16,BOF,CBGT,1,0), BBOATCB_MASK, PPCCOM, { CR, BDPA } },
{ "blea", BBOCB(16,BOF,CBGT,1,0), BBOATCB_MASK, COM, { CR, BDA } },
{ "blela-", BBOCB(16,BOF,CBGT,1,1), BBOATCB_MASK, PPCCOM, { CR, BDMA } },
{ "blela+", BBOCB(16,BOF,CBGT,1,1), BBOATCB_MASK, PPCCOM, { CR, BDPA } },
{ "blela", BBOCB(16,BOF,CBGT,1,1), BBOATCB_MASK, COM, { CR, BDA } },
{ "bng-", BBOCB(16,BOF,CBGT,0,0), BBOATCB_MASK, PPCCOM, { CR, BDM } },
{ "bng+", BBOCB(16,BOF,CBGT,0,0), BBOATCB_MASK, PPCCOM, { CR, BDP } },
{ "bng", BBOCB(16,BOF,CBGT,0,0), BBOATCB_MASK, COM, { CR, BD } },
{ "bngl-", BBOCB(16,BOF,CBGT,0,1), BBOATCB_MASK, PPCCOM, { CR, BDM } },
{ "bngl+", BBOCB(16,BOF,CBGT,0,1), BBOATCB_MASK, PPCCOM, { CR, BDP } },
{ "bngl", BBOCB(16,BOF,CBGT,0,1), BBOATCB_MASK, COM, { CR, BD } },
{ "bnga-", BBOCB(16,BOF,CBGT,1,0), BBOATCB_MASK, PPCCOM, { CR, BDMA } },
{ "bnga+", BBOCB(16,BOF,CBGT,1,0), BBOATCB_MASK, PPCCOM, { CR, BDPA } },
{ "bnga", BBOCB(16,BOF,CBGT,1,0), BBOATCB_MASK, COM, { CR, BDA } },
{ "bngla-", BBOCB(16,BOF,CBGT,1,1), BBOATCB_MASK, PPCCOM, { CR, BDMA } },
{ "bngla+", BBOCB(16,BOF,CBGT,1,1), BBOATCB_MASK, PPCCOM, { CR, BDPA } },
{ "bngla", BBOCB(16,BOF,CBGT,1,1), BBOATCB_MASK, COM, { CR, BDA } },
{ "bne-", BBOCB(16,BOF,CBEQ,0,0), BBOATCB_MASK, PPCCOM, { CR, BDM } },
{ "bne+", BBOCB(16,BOF,CBEQ,0,0), BBOATCB_MASK, PPCCOM, { CR, BDP } },
{ "bne", BBOCB(16,BOF,CBEQ,0,0), BBOATCB_MASK, COM, { CR, BD } },
{ "bnel-", BBOCB(16,BOF,CBEQ,0,1), BBOATCB_MASK, PPCCOM, { CR, BDM } },
{ "bnel+", BBOCB(16,BOF,CBEQ,0,1), BBOATCB_MASK, PPCCOM, { CR, BDP } },
{ "bnel", BBOCB(16,BOF,CBEQ,0,1), BBOATCB_MASK, COM, { CR, BD } },
{ "bnea-", BBOCB(16,BOF,CBEQ,1,0), BBOATCB_MASK, PPCCOM, { CR, BDMA } },
{ "bnea+", BBOCB(16,BOF,CBEQ,1,0), BBOATCB_MASK, PPCCOM, { CR, BDPA } },
{ "bnea", BBOCB(16,BOF,CBEQ,1,0), BBOATCB_MASK, COM, { CR, BDA } },
{ "bnela-", BBOCB(16,BOF,CBEQ,1,1), BBOATCB_MASK, PPCCOM, { CR, BDMA } },
{ "bnela+", BBOCB(16,BOF,CBEQ,1,1), BBOATCB_MASK, PPCCOM, { CR, BDPA } },
{ "bnela", BBOCB(16,BOF,CBEQ,1,1), BBOATCB_MASK, COM, { CR, BDA } },
{ "bns-", BBOCB(16,BOF,CBSO,0,0), BBOATCB_MASK, PPCCOM, { CR, BDM } },
{ "bns+", BBOCB(16,BOF,CBSO,0,0), BBOATCB_MASK, PPCCOM, { CR, BDP } },
{ "bns", BBOCB(16,BOF,CBSO,0,0), BBOATCB_MASK, COM, { CR, BD } },
{ "bnsl-", BBOCB(16,BOF,CBSO,0,1), BBOATCB_MASK, PPCCOM, { CR, BDM } },
{ "bnsl+", BBOCB(16,BOF,CBSO,0,1), BBOATCB_MASK, PPCCOM, { CR, BDP } },
{ "bnsl", BBOCB(16,BOF,CBSO,0,1), BBOATCB_MASK, COM, { CR, BD } },
{ "bnsa-", BBOCB(16,BOF,CBSO,1,0), BBOATCB_MASK, PPCCOM, { CR, BDMA } },
{ "bnsa+", BBOCB(16,BOF,CBSO,1,0), BBOATCB_MASK, PPCCOM, { CR, BDPA } },
{ "bnsa", BBOCB(16,BOF,CBSO,1,0), BBOATCB_MASK, COM, { CR, BDA } },
{ "bnsla-", BBOCB(16,BOF,CBSO,1,1), BBOATCB_MASK, PPCCOM, { CR, BDMA } },
{ "bnsla+", BBOCB(16,BOF,CBSO,1,1), BBOATCB_MASK, PPCCOM, { CR, BDPA } },
{ "bnsla", BBOCB(16,BOF,CBSO,1,1), BBOATCB_MASK, COM, { CR, BDA } },
{ "bnu-", BBOCB(16,BOF,CBSO,0,0), BBOATCB_MASK, PPCCOM, { CR, BDM } },
{ "bnu+", BBOCB(16,BOF,CBSO,0,0), BBOATCB_MASK, PPCCOM, { CR, BDP } },
{ "bnu", BBOCB(16,BOF,CBSO,0,0), BBOATCB_MASK, PPCCOM, { CR, BD } },
{ "bnul-", BBOCB(16,BOF,CBSO,0,1), BBOATCB_MASK, PPCCOM, { CR, BDM } },
{ "bnul+", BBOCB(16,BOF,CBSO,0,1), BBOATCB_MASK, PPCCOM, { CR, BDP } },
{ "bnul", BBOCB(16,BOF,CBSO,0,1), BBOATCB_MASK, PPCCOM, { CR, BD } },
{ "bnua-", BBOCB(16,BOF,CBSO,1,0), BBOATCB_MASK, PPCCOM, { CR, BDMA } },
{ "bnua+", BBOCB(16,BOF,CBSO,1,0), BBOATCB_MASK, PPCCOM, { CR, BDPA } },
{ "bnua", BBOCB(16,BOF,CBSO,1,0), BBOATCB_MASK, PPCCOM, { CR, BDA } },
{ "bnula-", BBOCB(16,BOF,CBSO,1,1), BBOATCB_MASK, PPCCOM, { CR, BDMA } },
{ "bnula+", BBOCB(16,BOF,CBSO,1,1), BBOATCB_MASK, PPCCOM, { CR, BDPA } },
{ "bnula", BBOCB(16,BOF,CBSO,1,1), BBOATCB_MASK, PPCCOM, { CR, BDA } },
{ "bdnzt-", BBO(16,BODNZT,0,0), BBOY_MASK, NOPOWER4, { BI, BDM } },
{ "bdnzt+", BBO(16,BODNZT,0,0), BBOY_MASK, NOPOWER4, { BI, BDP } },
{ "bdnzt", BBO(16,BODNZT,0,0), BBOY_MASK, PPCCOM, { BI, BD } },
{ "bdnztl-", BBO(16,BODNZT,0,1), BBOY_MASK, NOPOWER4, { BI, BDM } },
{ "bdnztl+", BBO(16,BODNZT,0,1), BBOY_MASK, NOPOWER4, { BI, BDP } },
{ "bdnztl", BBO(16,BODNZT,0,1), BBOY_MASK, PPCCOM, { BI, BD } },
{ "bdnzta-", BBO(16,BODNZT,1,0), BBOY_MASK, NOPOWER4, { BI, BDMA } },
{ "bdnzta+", BBO(16,BODNZT,1,0), BBOY_MASK, NOPOWER4, { BI, BDPA } },
{ "bdnzta", BBO(16,BODNZT,1,0), BBOY_MASK, PPCCOM, { BI, BDA } },
{ "bdnztla-",BBO(16,BODNZT,1,1), BBOY_MASK, NOPOWER4, { BI, BDMA } },
{ "bdnztla+",BBO(16,BODNZT,1,1), BBOY_MASK, NOPOWER4, { BI, BDPA } },
{ "bdnztla", BBO(16,BODNZT,1,1), BBOY_MASK, PPCCOM, { BI, BDA } },
{ "bdnzf-", BBO(16,BODNZF,0,0), BBOY_MASK, NOPOWER4, { BI, BDM } },
{ "bdnzf+", BBO(16,BODNZF,0,0), BBOY_MASK, NOPOWER4, { BI, BDP } },
{ "bdnzf", BBO(16,BODNZF,0,0), BBOY_MASK, PPCCOM, { BI, BD } },
{ "bdnzfl-", BBO(16,BODNZF,0,1), BBOY_MASK, NOPOWER4, { BI, BDM } },
{ "bdnzfl+", BBO(16,BODNZF,0,1), BBOY_MASK, NOPOWER4, { BI, BDP } },
{ "bdnzfl", BBO(16,BODNZF,0,1), BBOY_MASK, PPCCOM, { BI, BD } },
{ "bdnzfa-", BBO(16,BODNZF,1,0), BBOY_MASK, NOPOWER4, { BI, BDMA } },
{ "bdnzfa+", BBO(16,BODNZF,1,0), BBOY_MASK, NOPOWER4, { BI, BDPA } },
{ "bdnzfa", BBO(16,BODNZF,1,0), BBOY_MASK, PPCCOM, { BI, BDA } },
{ "bdnzfla-",BBO(16,BODNZF,1,1), BBOY_MASK, NOPOWER4, { BI, BDMA } },
{ "bdnzfla+",BBO(16,BODNZF,1,1), BBOY_MASK, NOPOWER4, { BI, BDPA } },
{ "bdnzfla", BBO(16,BODNZF,1,1), BBOY_MASK, PPCCOM, { BI, BDA } },
{ "bt-", BBO(16,BOT,0,0), BBOAT_MASK, PPCCOM, { BI, BDM } },
{ "bt+", BBO(16,BOT,0,0), BBOAT_MASK, PPCCOM, { BI, BDP } },
{ "bt", BBO(16,BOT,0,0), BBOAT_MASK, PPCCOM, { BI, BD } },
{ "bbt", BBO(16,BOT,0,0), BBOAT_MASK, PWRCOM, { BI, BD } },
{ "btl-", BBO(16,BOT,0,1), BBOAT_MASK, PPCCOM, { BI, BDM } },
{ "btl+", BBO(16,BOT,0,1), BBOAT_MASK, PPCCOM, { BI, BDP } },
{ "btl", BBO(16,BOT,0,1), BBOAT_MASK, PPCCOM, { BI, BD } },
{ "bbtl", BBO(16,BOT,0,1), BBOAT_MASK, PWRCOM, { BI, BD } },
{ "bta-", BBO(16,BOT,1,0), BBOAT_MASK, PPCCOM, { BI, BDMA } },
{ "bta+", BBO(16,BOT,1,0), BBOAT_MASK, PPCCOM, { BI, BDPA } },
{ "bta", BBO(16,BOT,1,0), BBOAT_MASK, PPCCOM, { BI, BDA } },
{ "bbta", BBO(16,BOT,1,0), BBOAT_MASK, PWRCOM, { BI, BDA } },
{ "btla-", BBO(16,BOT,1,1), BBOAT_MASK, PPCCOM, { BI, BDMA } },
{ "btla+", BBO(16,BOT,1,1), BBOAT_MASK, PPCCOM, { BI, BDPA } },
{ "btla", BBO(16,BOT,1,1), BBOAT_MASK, PPCCOM, { BI, BDA } },
{ "bbtla", BBO(16,BOT,1,1), BBOAT_MASK, PWRCOM, { BI, BDA } },
{ "bf-", BBO(16,BOF,0,0), BBOAT_MASK, PPCCOM, { BI, BDM } },
{ "bf+", BBO(16,BOF,0,0), BBOAT_MASK, PPCCOM, { BI, BDP } },
{ "bf", BBO(16,BOF,0,0), BBOAT_MASK, PPCCOM, { BI, BD } },
{ "bbf", BBO(16,BOF,0,0), BBOAT_MASK, PWRCOM, { BI, BD } },
{ "bfl-", BBO(16,BOF,0,1), BBOAT_MASK, PPCCOM, { BI, BDM } },
{ "bfl+", BBO(16,BOF,0,1), BBOAT_MASK, PPCCOM, { BI, BDP } },
{ "bfl", BBO(16,BOF,0,1), BBOAT_MASK, PPCCOM, { BI, BD } },
{ "bbfl", BBO(16,BOF,0,1), BBOAT_MASK, PWRCOM, { BI, BD } },
{ "bfa-", BBO(16,BOF,1,0), BBOAT_MASK, PPCCOM, { BI, BDMA } },
{ "bfa+", BBO(16,BOF,1,0), BBOAT_MASK, PPCCOM, { BI, BDPA } },
{ "bfa", BBO(16,BOF,1,0), BBOAT_MASK, PPCCOM, { BI, BDA } },
{ "bbfa", BBO(16,BOF,1,0), BBOAT_MASK, PWRCOM, { BI, BDA } },
{ "bfla-", BBO(16,BOF,1,1), BBOAT_MASK, PPCCOM, { BI, BDMA } },
{ "bfla+", BBO(16,BOF,1,1), BBOAT_MASK, PPCCOM, { BI, BDPA } },
{ "bfla", BBO(16,BOF,1,1), BBOAT_MASK, PPCCOM, { BI, BDA } },
{ "bbfla", BBO(16,BOF,1,1), BBOAT_MASK, PWRCOM, { BI, BDA } },
{ "bdzt-", BBO(16,BODZT,0,0), BBOY_MASK, NOPOWER4, { BI, BDM } },
{ "bdzt+", BBO(16,BODZT,0,0), BBOY_MASK, NOPOWER4, { BI, BDP } },
{ "bdzt", BBO(16,BODZT,0,0), BBOY_MASK, PPCCOM, { BI, BD } },
{ "bdztl-", BBO(16,BODZT,0,1), BBOY_MASK, NOPOWER4, { BI, BDM } },
{ "bdztl+", BBO(16,BODZT,0,1), BBOY_MASK, NOPOWER4, { BI, BDP } },
{ "bdztl", BBO(16,BODZT,0,1), BBOY_MASK, PPCCOM, { BI, BD } },
{ "bdzta-", BBO(16,BODZT,1,0), BBOY_MASK, NOPOWER4, { BI, BDMA } },
{ "bdzta+", BBO(16,BODZT,1,0), BBOY_MASK, NOPOWER4, { BI, BDPA } },
{ "bdzta", BBO(16,BODZT,1,0), BBOY_MASK, PPCCOM, { BI, BDA } },
{ "bdztla-", BBO(16,BODZT,1,1), BBOY_MASK, NOPOWER4, { BI, BDMA } },
{ "bdztla+", BBO(16,BODZT,1,1), BBOY_MASK, NOPOWER4, { BI, BDPA } },
{ "bdztla", BBO(16,BODZT,1,1), BBOY_MASK, PPCCOM, { BI, BDA } },
{ "bdzf-", BBO(16,BODZF,0,0), BBOY_MASK, NOPOWER4, { BI, BDM } },
{ "bdzf+", BBO(16,BODZF,0,0), BBOY_MASK, NOPOWER4, { BI, BDP } },
{ "bdzf", BBO(16,BODZF,0,0), BBOY_MASK, PPCCOM, { BI, BD } },
{ "bdzfl-", BBO(16,BODZF,0,1), BBOY_MASK, NOPOWER4, { BI, BDM } },
{ "bdzfl+", BBO(16,BODZF,0,1), BBOY_MASK, NOPOWER4, { BI, BDP } },
{ "bdzfl", BBO(16,BODZF,0,1), BBOY_MASK, PPCCOM, { BI, BD } },
{ "bdzfa-", BBO(16,BODZF,1,0), BBOY_MASK, NOPOWER4, { BI, BDMA } },
{ "bdzfa+", BBO(16,BODZF,1,0), BBOY_MASK, NOPOWER4, { BI, BDPA } },
{ "bdzfa", BBO(16,BODZF,1,0), BBOY_MASK, PPCCOM, { BI, BDA } },
{ "bdzfla-", BBO(16,BODZF,1,1), BBOY_MASK, NOPOWER4, { BI, BDMA } },
{ "bdzfla+", BBO(16,BODZF,1,1), BBOY_MASK, NOPOWER4, { BI, BDPA } },
{ "bdzfla", BBO(16,BODZF,1,1), BBOY_MASK, PPCCOM, { BI, BDA } },
{ "bc-", B(16,0,0), B_MASK, PPCCOM, { BOE, BI, BDM } },
{ "bc+", B(16,0,0), B_MASK, PPCCOM, { BOE, BI, BDP } },
{ "bc", B(16,0,0), B_MASK, COM, { BO, BI, BD } },
{ "bcl-", B(16,0,1), B_MASK, PPCCOM, { BOE, BI, BDM } },
{ "bcl+", B(16,0,1), B_MASK, PPCCOM, { BOE, BI, BDP } },
{ "bcl", B(16,0,1), B_MASK, COM, { BO, BI, BD } },
{ "bca-", B(16,1,0), B_MASK, PPCCOM, { BOE, BI, BDMA } },
{ "bca+", B(16,1,0), B_MASK, PPCCOM, { BOE, BI, BDPA } },
{ "bca", B(16,1,0), B_MASK, COM, { BO, BI, BDA } },
{ "bcla-", B(16,1,1), B_MASK, PPCCOM, { BOE, BI, BDMA } },
{ "bcla+", B(16,1,1), B_MASK, PPCCOM, { BOE, BI, BDPA } },
{ "bcla", B(16,1,1), B_MASK, COM, { BO, BI, BDA } },
{ "sc", SC(17,1,0), SC_MASK, PPC, { LEV } },
{ "svc", SC(17,0,0), SC_MASK, POWER, { SVC_LEV, FL1, FL2 } },
{ "svcl", SC(17,0,1), SC_MASK, POWER, { SVC_LEV, FL1, FL2 } },
{ "svca", SC(17,1,0), SC_MASK, PWRCOM, { SV } },
{ "svcla", SC(17,1,1), SC_MASK, POWER, { SV } },
{ "b", B(18,0,0), B_MASK, COM, { LI } },
{ "bl", B(18,0,1), B_MASK, COM, { LI } },
{ "ba", B(18,1,0), B_MASK, COM, { LIA } },
{ "bla", B(18,1,1), B_MASK, COM, { LIA } },
{ "mcrf", XL(19,0), XLBB_MASK|(3 << 21)|(3 << 16), COM, { BF, BFA } },
{ "blr", XLO(19,BOU,16,0), XLBOBIBB_MASK, PPCCOM, { 0 } },
{ "br", XLO(19,BOU,16,0), XLBOBIBB_MASK, PWRCOM, { 0 } },
{ "blrl", XLO(19,BOU,16,1), XLBOBIBB_MASK, PPCCOM, { 0 } },
{ "brl", XLO(19,BOU,16,1), XLBOBIBB_MASK, PWRCOM, { 0 } },
{ "bdnzlr", XLO(19,BODNZ,16,0), XLBOBIBB_MASK, PPCCOM, { 0 } },
{ "bdnzlr-", XLO(19,BODNZ,16,0), XLBOBIBB_MASK, NOPOWER4, { 0 } },
{ "bdnzlr-", XLO(19,BODNZM4,16,0), XLBOBIBB_MASK, POWER4, { 0 } },
{ "bdnzlr+", XLO(19,BODNZP,16,0), XLBOBIBB_MASK, NOPOWER4, { 0 } },
{ "bdnzlr+", XLO(19,BODNZP4,16,0), XLBOBIBB_MASK, POWER4, { 0 } },
{ "bdnzlrl", XLO(19,BODNZ,16,1), XLBOBIBB_MASK, PPCCOM, { 0 } },
{ "bdnzlrl-",XLO(19,BODNZ,16,1), XLBOBIBB_MASK, NOPOWER4, { 0 } },
{ "bdnzlrl-",XLO(19,BODNZM4,16,1), XLBOBIBB_MASK, POWER4, { 0 } },
{ "bdnzlrl+",XLO(19,BODNZP,16,1), XLBOBIBB_MASK, NOPOWER4, { 0 } },
{ "bdnzlrl+",XLO(19,BODNZP4,16,1), XLBOBIBB_MASK, POWER4, { 0 } },
{ "bdzlr", XLO(19,BODZ,16,0), XLBOBIBB_MASK, PPCCOM, { 0 } },
{ "bdzlr-", XLO(19,BODZ,16,0), XLBOBIBB_MASK, NOPOWER4, { 0 } },
{ "bdzlr-", XLO(19,BODZM4,16,0), XLBOBIBB_MASK, POWER4, { 0 } },
{ "bdzlr+", XLO(19,BODZP,16,0), XLBOBIBB_MASK, NOPOWER4, { 0 } },
{ "bdzlr+", XLO(19,BODZP4,16,0), XLBOBIBB_MASK, POWER4, { 0 } },
{ "bdzlrl", XLO(19,BODZ,16,1), XLBOBIBB_MASK, PPCCOM, { 0 } },
{ "bdzlrl-", XLO(19,BODZ,16,1), XLBOBIBB_MASK, NOPOWER4, { 0 } },
{ "bdzlrl-", XLO(19,BODZM4,16,1), XLBOBIBB_MASK, POWER4, { 0 } },
{ "bdzlrl+", XLO(19,BODZP,16,1), XLBOBIBB_MASK, NOPOWER4, { 0 } },
{ "bdzlrl+", XLO(19,BODZP4,16,1), XLBOBIBB_MASK, POWER4, { 0 } },
{ "bltlr", XLOCB(19,BOT,CBLT,16,0), XLBOCBBB_MASK, PPCCOM, { CR } },
{ "bltlr-", XLOCB(19,BOT,CBLT,16,0), XLBOCBBB_MASK, NOPOWER4, { CR } },
{ "bltlr-", XLOCB(19,BOTM4,CBLT,16,0), XLBOCBBB_MASK, POWER4, { CR } },
{ "bltlr+", XLOCB(19,BOTP,CBLT,16,0), XLBOCBBB_MASK, NOPOWER4, { CR } },
{ "bltlr+", XLOCB(19,BOTP4,CBLT,16,0), XLBOCBBB_MASK, POWER4, { CR } },
{ "bltr", XLOCB(19,BOT,CBLT,16,0), XLBOCBBB_MASK, PWRCOM, { CR } },
{ "bltlrl", XLOCB(19,BOT,CBLT,16,1), XLBOCBBB_MASK, PPCCOM, { CR } },
{ "bltlrl-", XLOCB(19,BOT,CBLT,16,1), XLBOCBBB_MASK, NOPOWER4, { CR } },
{ "bltlrl-", XLOCB(19,BOTM4,CBLT,16,1), XLBOCBBB_MASK, POWER4, { CR } },
{ "bltlrl+", XLOCB(19,BOTP,CBLT,16,1), XLBOCBBB_MASK, NOPOWER4, { CR } },
{ "bltlrl+", XLOCB(19,BOTP4,CBLT,16,1), XLBOCBBB_MASK, POWER4, { CR } },
{ "bltrl", XLOCB(19,BOT,CBLT,16,1), XLBOCBBB_MASK, PWRCOM, { CR } },
{ "bgtlr", XLOCB(19,BOT,CBGT,16,0), XLBOCBBB_MASK, PPCCOM, { CR } },
{ "bgtlr-", XLOCB(19,BOT,CBGT,16,0), XLBOCBBB_MASK, NOPOWER4, { CR } },
{ "bgtlr-", XLOCB(19,BOTM4,CBGT,16,0), XLBOCBBB_MASK, POWER4, { CR } },
{ "bgtlr+", XLOCB(19,BOTP,CBGT,16,0), XLBOCBBB_MASK, NOPOWER4, { CR } },
{ "bgtlr+", XLOCB(19,BOTP4,CBGT,16,0), XLBOCBBB_MASK, POWER4, { CR } },
{ "bgtr", XLOCB(19,BOT,CBGT,16,0), XLBOCBBB_MASK, PWRCOM, { CR } },
{ "bgtlrl", XLOCB(19,BOT,CBGT,16,1), XLBOCBBB_MASK, PPCCOM, { CR } },
{ "bgtlrl-", XLOCB(19,BOT,CBGT,16,1), XLBOCBBB_MASK, NOPOWER4, { CR } },
{ "bgtlrl-", XLOCB(19,BOTM4,CBGT,16,1), XLBOCBBB_MASK, POWER4, { CR } },
{ "bgtlrl+", XLOCB(19,BOTP,CBGT,16,1), XLBOCBBB_MASK, NOPOWER4, { CR } },
{ "bgtlrl+", XLOCB(19,BOTP4,CBGT,16,1), XLBOCBBB_MASK, POWER4, { CR } },
{ "bgtrl", XLOCB(19,BOT,CBGT,16,1), XLBOCBBB_MASK, PWRCOM, { CR } },
{ "beqlr", XLOCB(19,BOT,CBEQ,16,0), XLBOCBBB_MASK, PPCCOM, { CR } },
{ "beqlr-", XLOCB(19,BOT,CBEQ,16,0), XLBOCBBB_MASK, NOPOWER4, { CR } },
{ "beqlr-", XLOCB(19,BOTM4,CBEQ,16,0), XLBOCBBB_MASK, POWER4, { CR } },
{ "beqlr+", XLOCB(19,BOTP,CBEQ,16,0), XLBOCBBB_MASK, NOPOWER4, { CR } },
{ "beqlr+", XLOCB(19,BOTP4,CBEQ,16,0), XLBOCBBB_MASK, POWER4, { CR } },
{ "beqr", XLOCB(19,BOT,CBEQ,16,0), XLBOCBBB_MASK, PWRCOM, { CR } },
{ "beqlrl", XLOCB(19,BOT,CBEQ,16,1), XLBOCBBB_MASK, PPCCOM, { CR } },
{ "beqlrl-", XLOCB(19,BOT,CBEQ,16,1), XLBOCBBB_MASK, NOPOWER4, { CR } },
{ "beqlrl-", XLOCB(19,BOTM4,CBEQ,16,1), XLBOCBBB_MASK, POWER4, { CR } },
{ "beqlrl+", XLOCB(19,BOTP,CBEQ,16,1), XLBOCBBB_MASK, NOPOWER4, { CR } },
{ "beqlrl+", XLOCB(19,BOTP4,CBEQ,16,1), XLBOCBBB_MASK, POWER4, { CR } },
{ "beqrl", XLOCB(19,BOT,CBEQ,16,1), XLBOCBBB_MASK, PWRCOM, { CR } },
{ "bsolr", XLOCB(19,BOT,CBSO,16,0), XLBOCBBB_MASK, PPCCOM, { CR } },
{ "bsolr-", XLOCB(19,BOT,CBSO,16,0), XLBOCBBB_MASK, NOPOWER4, { CR } },
{ "bsolr-", XLOCB(19,BOTM4,CBSO,16,0), XLBOCBBB_MASK, POWER4, { CR } },
{ "bsolr+", XLOCB(19,BOTP,CBSO,16,0), XLBOCBBB_MASK, NOPOWER4, { CR } },
{ "bsolr+", XLOCB(19,BOTP4,CBSO,16,0), XLBOCBBB_MASK, POWER4, { CR } },
{ "bsor", XLOCB(19,BOT,CBSO,16,0), XLBOCBBB_MASK, PWRCOM, { CR } },
{ "bsolrl", XLOCB(19,BOT,CBSO,16,1), XLBOCBBB_MASK, PPCCOM, { CR } },
{ "bsolrl-", XLOCB(19,BOT,CBSO,16,1), XLBOCBBB_MASK, NOPOWER4, { CR } },
{ "bsolrl-", XLOCB(19,BOTM4,CBSO,16,1), XLBOCBBB_MASK, POWER4, { CR } },
{ "bsolrl+", XLOCB(19,BOTP,CBSO,16,1), XLBOCBBB_MASK, NOPOWER4, { CR } },
{ "bsolrl+", XLOCB(19,BOTP4,CBSO,16,1), XLBOCBBB_MASK, POWER4, { CR } },
{ "bsorl", XLOCB(19,BOT,CBSO,16,1), XLBOCBBB_MASK, PWRCOM, { CR } },
{ "bunlr", XLOCB(19,BOT,CBSO,16,0), XLBOCBBB_MASK, PPCCOM, { CR } },
{ "bunlr-", XLOCB(19,BOT,CBSO,16,0), XLBOCBBB_MASK, NOPOWER4, { CR } },
{ "bunlr-", XLOCB(19,BOTM4,CBSO,16,0), XLBOCBBB_MASK, POWER4, { CR } },
{ "bunlr+", XLOCB(19,BOTP,CBSO,16,0), XLBOCBBB_MASK, NOPOWER4, { CR } },
{ "bunlr+", XLOCB(19,BOTP4,CBSO,16,0), XLBOCBBB_MASK, POWER4, { CR } },
{ "bunlrl", XLOCB(19,BOT,CBSO,16,1), XLBOCBBB_MASK, PPCCOM, { CR } },
{ "bunlrl-", XLOCB(19,BOT,CBSO,16,1), XLBOCBBB_MASK, NOPOWER4, { CR } },
{ "bunlrl-", XLOCB(19,BOTM4,CBSO,16,1), XLBOCBBB_MASK, POWER4, { CR } },
{ "bunlrl+", XLOCB(19,BOTP,CBSO,16,1), XLBOCBBB_MASK, NOPOWER4, { CR } },
{ "bunlrl+", XLOCB(19,BOTP4,CBSO,16,1), XLBOCBBB_MASK, POWER4, { CR } },
{ "bgelr", XLOCB(19,BOF,CBLT,16,0), XLBOCBBB_MASK, PPCCOM, { CR } },
{ "bgelr-", XLOCB(19,BOF,CBLT,16,0), XLBOCBBB_MASK, NOPOWER4, { CR } },
{ "bgelr-", XLOCB(19,BOFM4,CBLT,16,0), XLBOCBBB_MASK, POWER4, { CR } },
{ "bgelr+", XLOCB(19,BOFP,CBLT,16,0), XLBOCBBB_MASK, NOPOWER4, { CR } },
{ "bgelr+", XLOCB(19,BOFP4,CBLT,16,0), XLBOCBBB_MASK, POWER4, { CR } },
{ "bger", XLOCB(19,BOF,CBLT,16,0), XLBOCBBB_MASK, PWRCOM, { CR } },
{ "bgelrl", XLOCB(19,BOF,CBLT,16,1), XLBOCBBB_MASK, PPCCOM, { CR } },
{ "bgelrl-", XLOCB(19,BOF,CBLT,16,1), XLBOCBBB_MASK, NOPOWER4, { CR } },
{ "bgelrl-", XLOCB(19,BOFM4,CBLT,16,1), XLBOCBBB_MASK, POWER4, { CR } },
{ "bgelrl+", XLOCB(19,BOFP,CBLT,16,1), XLBOCBBB_MASK, NOPOWER4, { CR } },
{ "bgelrl+", XLOCB(19,BOFP4,CBLT,16,1), XLBOCBBB_MASK, POWER4, { CR } },
{ "bgerl", XLOCB(19,BOF,CBLT,16,1), XLBOCBBB_MASK, PWRCOM, { CR } },
{ "bnllr", XLOCB(19,BOF,CBLT,16,0), XLBOCBBB_MASK, PPCCOM, { CR } },
{ "bnllr-", XLOCB(19,BOF,CBLT,16,0), XLBOCBBB_MASK, NOPOWER4, { CR } },
{ "bnllr-", XLOCB(19,BOFM4,CBLT,16,0), XLBOCBBB_MASK, POWER4, { CR } },
{ "bnllr+", XLOCB(19,BOFP,CBLT,16,0), XLBOCBBB_MASK, NOPOWER4, { CR } },
{ "bnllr+", XLOCB(19,BOFP4,CBLT,16,0), XLBOCBBB_MASK, POWER4, { CR } },
{ "bnlr", XLOCB(19,BOF,CBLT,16,0), XLBOCBBB_MASK, PWRCOM, { CR } },
{ "bnllrl", XLOCB(19,BOF,CBLT,16,1), XLBOCBBB_MASK, PPCCOM, { CR } },
{ "bnllrl-", XLOCB(19,BOF,CBLT,16,1), XLBOCBBB_MASK, NOPOWER4, { CR } },
{ "bnllrl-", XLOCB(19,BOFM4,CBLT,16,1), XLBOCBBB_MASK, POWER4, { CR } },
{ "bnllrl+", XLOCB(19,BOFP,CBLT,16,1), XLBOCBBB_MASK, NOPOWER4, { CR } },
{ "bnllrl+", XLOCB(19,BOFP4,CBLT,16,1), XLBOCBBB_MASK, POWER4, { CR } },
{ "bnlrl", XLOCB(19,BOF,CBLT,16,1), XLBOCBBB_MASK, PWRCOM, { CR } },
{ "blelr", XLOCB(19,BOF,CBGT,16,0), XLBOCBBB_MASK, PPCCOM, { CR } },
{ "blelr-", XLOCB(19,BOF,CBGT,16,0), XLBOCBBB_MASK, NOPOWER4, { CR } },
{ "blelr-", XLOCB(19,BOFM4,CBGT,16,0), XLBOCBBB_MASK, POWER4, { CR } },
{ "blelr+", XLOCB(19,BOFP,CBGT,16,0), XLBOCBBB_MASK, NOPOWER4, { CR } },
{ "blelr+", XLOCB(19,BOFP4,CBGT,16,0), XLBOCBBB_MASK, POWER4, { CR } },
{ "bler", XLOCB(19,BOF,CBGT,16,0), XLBOCBBB_MASK, PWRCOM, { CR } },
{ "blelrl", XLOCB(19,BOF,CBGT,16,1), XLBOCBBB_MASK, PPCCOM, { CR } },
{ "blelrl-", XLOCB(19,BOF,CBGT,16,1), XLBOCBBB_MASK, NOPOWER4, { CR } },
{ "blelrl-", XLOCB(19,BOFM4,CBGT,16,1), XLBOCBBB_MASK, POWER4, { CR } },
{ "blelrl+", XLOCB(19,BOFP,CBGT,16,1), XLBOCBBB_MASK, NOPOWER4, { CR } },
{ "blelrl+", XLOCB(19,BOFP4,CBGT,16,1), XLBOCBBB_MASK, POWER4, { CR } },
{ "blerl", XLOCB(19,BOF,CBGT,16,1), XLBOCBBB_MASK, PWRCOM, { CR } },
{ "bnglr", XLOCB(19,BOF,CBGT,16,0), XLBOCBBB_MASK, PPCCOM, { CR } },
{ "bnglr-", XLOCB(19,BOF,CBGT,16,0), XLBOCBBB_MASK, NOPOWER4, { CR } },
{ "bnglr-", XLOCB(19,BOFM4,CBGT,16,0), XLBOCBBB_MASK, POWER4, { CR } },
{ "bnglr+", XLOCB(19,BOFP,CBGT,16,0), XLBOCBBB_MASK, NOPOWER4, { CR } },
{ "bnglr+", XLOCB(19,BOFP4,CBGT,16,0), XLBOCBBB_MASK, POWER4, { CR } },
{ "bngr", XLOCB(19,BOF,CBGT,16,0), XLBOCBBB_MASK, PWRCOM, { CR } },
{ "bnglrl", XLOCB(19,BOF,CBGT,16,1), XLBOCBBB_MASK, PPCCOM, { CR } },
{ "bnglrl-", XLOCB(19,BOF,CBGT,16,1), XLBOCBBB_MASK, NOPOWER4, { CR } },
{ "bnglrl-", XLOCB(19,BOFM4,CBGT,16,1), XLBOCBBB_MASK, POWER4, { CR } },
{ "bnglrl+", XLOCB(19,BOFP,CBGT,16,1), XLBOCBBB_MASK, NOPOWER4, { CR } },
{ "bnglrl+", XLOCB(19,BOFP4,CBGT,16,1), XLBOCBBB_MASK, POWER4, { CR } },
{ "bngrl", XLOCB(19,BOF,CBGT,16,1), XLBOCBBB_MASK, PWRCOM, { CR } },
{ "bnelr", XLOCB(19,BOF,CBEQ,16,0), XLBOCBBB_MASK, PPCCOM, { CR } },
{ "bnelr-", XLOCB(19,BOF,CBEQ,16,0), XLBOCBBB_MASK, NOPOWER4, { CR } },
{ "bnelr-", XLOCB(19,BOFM4,CBEQ,16,0), XLBOCBBB_MASK, POWER4, { CR } },
{ "bnelr+", XLOCB(19,BOFP,CBEQ,16,0), XLBOCBBB_MASK, NOPOWER4, { CR } },
{ "bnelr+", XLOCB(19,BOFP4,CBEQ,16,0), XLBOCBBB_MASK, POWER4, { CR } },
{ "bner", XLOCB(19,BOF,CBEQ,16,0), XLBOCBBB_MASK, PWRCOM, { CR } },
{ "bnelrl", XLOCB(19,BOF,CBEQ,16,1), XLBOCBBB_MASK, PPCCOM, { CR } },
{ "bnelrl-", XLOCB(19,BOF,CBEQ,16,1), XLBOCBBB_MASK, NOPOWER4, { CR } },
{ "bnelrl-", XLOCB(19,BOFM4,CBEQ,16,1), XLBOCBBB_MASK, POWER4, { CR } },
{ "bnelrl+", XLOCB(19,BOFP,CBEQ,16,1), XLBOCBBB_MASK, NOPOWER4, { CR } },
{ "bnelrl+", XLOCB(19,BOFP4,CBEQ,16,1), XLBOCBBB_MASK, POWER4, { CR } },
{ "bnerl", XLOCB(19,BOF,CBEQ,16,1), XLBOCBBB_MASK, PWRCOM, { CR } },
{ "bnslr", XLOCB(19,BOF,CBSO,16,0), XLBOCBBB_MASK, PPCCOM, { CR } },
{ "bnslr-", XLOCB(19,BOF,CBSO,16,0), XLBOCBBB_MASK, NOPOWER4, { CR } },
{ "bnslr-", XLOCB(19,BOFM4,CBSO,16,0), XLBOCBBB_MASK, POWER4, { CR } },
{ "bnslr+", XLOCB(19,BOFP,CBSO,16,0), XLBOCBBB_MASK, NOPOWER4, { CR } },
{ "bnslr+", XLOCB(19,BOFP4,CBSO,16,0), XLBOCBBB_MASK, POWER4, { CR } },
{ "bnsr", XLOCB(19,BOF,CBSO,16,0), XLBOCBBB_MASK, PWRCOM, { CR } },
{ "bnslrl", XLOCB(19,BOF,CBSO,16,1), XLBOCBBB_MASK, PPCCOM, { CR } },
{ "bnslrl-", XLOCB(19,BOF,CBSO,16,1), XLBOCBBB_MASK, NOPOWER4, { CR } },
{ "bnslrl-", XLOCB(19,BOFM4,CBSO,16,1), XLBOCBBB_MASK, POWER4, { CR } },
{ "bnslrl+", XLOCB(19,BOFP,CBSO,16,1), XLBOCBBB_MASK, NOPOWER4, { CR } },
{ "bnslrl+", XLOCB(19,BOFP4,CBSO,16,1), XLBOCBBB_MASK, POWER4, { CR } },
{ "bnsrl", XLOCB(19,BOF,CBSO,16,1), XLBOCBBB_MASK, PWRCOM, { CR } },
{ "bnulr", XLOCB(19,BOF,CBSO,16,0), XLBOCBBB_MASK, PPCCOM, { CR } },
{ "bnulr-", XLOCB(19,BOF,CBSO,16,0), XLBOCBBB_MASK, NOPOWER4, { CR } },
{ "bnulr-", XLOCB(19,BOFM4,CBSO,16,0), XLBOCBBB_MASK, POWER4, { CR } },
{ "bnulr+", XLOCB(19,BOFP,CBSO,16,0), XLBOCBBB_MASK, NOPOWER4, { CR } },
{ "bnulr+", XLOCB(19,BOFP4,CBSO,16,0), XLBOCBBB_MASK, POWER4, { CR } },
{ "bnulrl", XLOCB(19,BOF,CBSO,16,1), XLBOCBBB_MASK, PPCCOM, { CR } },
{ "bnulrl-", XLOCB(19,BOF,CBSO,16,1), XLBOCBBB_MASK, NOPOWER4, { CR } },
{ "bnulrl-", XLOCB(19,BOFM4,CBSO,16,1), XLBOCBBB_MASK, POWER4, { CR } },
{ "bnulrl+", XLOCB(19,BOFP,CBSO,16,1), XLBOCBBB_MASK, NOPOWER4, { CR } },
{ "bnulrl+", XLOCB(19,BOFP4,CBSO,16,1), XLBOCBBB_MASK, POWER4, { CR } },
{ "btlr", XLO(19,BOT,16,0), XLBOBB_MASK, PPCCOM, { BI } },
{ "btlr-", XLO(19,BOT,16,0), XLBOBB_MASK, NOPOWER4, { BI } },
{ "btlr-", XLO(19,BOTM4,16,0), XLBOBB_MASK, POWER4, { BI } },
{ "btlr+", XLO(19,BOTP,16,0), XLBOBB_MASK, NOPOWER4, { BI } },
{ "btlr+", XLO(19,BOTP4,16,0), XLBOBB_MASK, POWER4, { BI } },
{ "bbtr", XLO(19,BOT,16,0), XLBOBB_MASK, PWRCOM, { BI } },
{ "btlrl", XLO(19,BOT,16,1), XLBOBB_MASK, PPCCOM, { BI } },
{ "btlrl-", XLO(19,BOT,16,1), XLBOBB_MASK, NOPOWER4, { BI } },
{ "btlrl-", XLO(19,BOTM4,16,1), XLBOBB_MASK, POWER4, { BI } },
{ "btlrl+", XLO(19,BOTP,16,1), XLBOBB_MASK, NOPOWER4, { BI } },
{ "btlrl+", XLO(19,BOTP4,16,1), XLBOBB_MASK, POWER4, { BI } },
{ "bbtrl", XLO(19,BOT,16,1), XLBOBB_MASK, PWRCOM, { BI } },
{ "bflr", XLO(19,BOF,16,0), XLBOBB_MASK, PPCCOM, { BI } },
{ "bflr-", XLO(19,BOF,16,0), XLBOBB_MASK, NOPOWER4, { BI } },
{ "bflr-", XLO(19,BOFM4,16,0), XLBOBB_MASK, POWER4, { BI } },
{ "bflr+", XLO(19,BOFP,16,0), XLBOBB_MASK, NOPOWER4, { BI } },
{ "bflr+", XLO(19,BOFP4,16,0), XLBOBB_MASK, POWER4, { BI } },
{ "bbfr", XLO(19,BOF,16,0), XLBOBB_MASK, PWRCOM, { BI } },
{ "bflrl", XLO(19,BOF,16,1), XLBOBB_MASK, PPCCOM, { BI } },
{ "bflrl-", XLO(19,BOF,16,1), XLBOBB_MASK, NOPOWER4, { BI } },
{ "bflrl-", XLO(19,BOFM4,16,1), XLBOBB_MASK, POWER4, { BI } },
{ "bflrl+", XLO(19,BOFP,16,1), XLBOBB_MASK, NOPOWER4, { BI } },
{ "bflrl+", XLO(19,BOFP4,16,1), XLBOBB_MASK, POWER4, { BI } },
{ "bbfrl", XLO(19,BOF,16,1), XLBOBB_MASK, PWRCOM, { BI } },
{ "bdnztlr", XLO(19,BODNZT,16,0), XLBOBB_MASK, PPCCOM, { BI } },
{ "bdnztlr-",XLO(19,BODNZT,16,0), XLBOBB_MASK, NOPOWER4, { BI } },
{ "bdnztlr+",XLO(19,BODNZTP,16,0), XLBOBB_MASK, NOPOWER4, { BI } },
{ "bdnztlrl",XLO(19,BODNZT,16,1), XLBOBB_MASK, PPCCOM, { BI } },
{ "bdnztlrl-",XLO(19,BODNZT,16,1), XLBOBB_MASK, NOPOWER4, { BI } },
{ "bdnztlrl+",XLO(19,BODNZTP,16,1), XLBOBB_MASK, NOPOWER4, { BI } },
{ "bdnzflr", XLO(19,BODNZF,16,0), XLBOBB_MASK, PPCCOM, { BI } },
{ "bdnzflr-",XLO(19,BODNZF,16,0), XLBOBB_MASK, NOPOWER4, { BI } },
{ "bdnzflr+",XLO(19,BODNZFP,16,0), XLBOBB_MASK, NOPOWER4, { BI } },
{ "bdnzflrl",XLO(19,BODNZF,16,1), XLBOBB_MASK, PPCCOM, { BI } },
{ "bdnzflrl-",XLO(19,BODNZF,16,1), XLBOBB_MASK, NOPOWER4, { BI } },
{ "bdnzflrl+",XLO(19,BODNZFP,16,1), XLBOBB_MASK, NOPOWER4, { BI } },
{ "bdztlr", XLO(19,BODZT,16,0), XLBOBB_MASK, PPCCOM, { BI } },
{ "bdztlr-", XLO(19,BODZT,16,0), XLBOBB_MASK, NOPOWER4, { BI } },
{ "bdztlr+", XLO(19,BODZTP,16,0), XLBOBB_MASK, NOPOWER4, { BI } },
{ "bdztlrl", XLO(19,BODZT,16,1), XLBOBB_MASK, PPCCOM, { BI } },
{ "bdztlrl-",XLO(19,BODZT,16,1), XLBOBB_MASK, NOPOWER4, { BI } },
{ "bdztlrl+",XLO(19,BODZTP,16,1), XLBOBB_MASK, NOPOWER4, { BI } },
{ "bdzflr", XLO(19,BODZF,16,0), XLBOBB_MASK, PPCCOM, { BI } },
{ "bdzflr-", XLO(19,BODZF,16,0), XLBOBB_MASK, NOPOWER4, { BI } },
{ "bdzflr+", XLO(19,BODZFP,16,0), XLBOBB_MASK, NOPOWER4, { BI } },
{ "bdzflrl", XLO(19,BODZF,16,1), XLBOBB_MASK, PPCCOM, { BI } },
{ "bdzflrl-",XLO(19,BODZF,16,1), XLBOBB_MASK, NOPOWER4, { BI } },
{ "bdzflrl+",XLO(19,BODZFP,16,1), XLBOBB_MASK, NOPOWER4, { BI } },
{ "bclr+", XLYLK(19,16,1,0), XLYBB_MASK, PPCCOM, { BOE, BI } },
{ "bclrl+", XLYLK(19,16,1,1), XLYBB_MASK, PPCCOM, { BOE, BI } },
{ "bclr-", XLYLK(19,16,0,0), XLYBB_MASK, PPCCOM, { BOE, BI } },
{ "bclrl-", XLYLK(19,16,0,1), XLYBB_MASK, PPCCOM, { BOE, BI } },
{ "bclr", XLLK(19,16,0), XLBH_MASK, PPCCOM, { BO, BI, BH } },
{ "bclrl", XLLK(19,16,1), XLBH_MASK, PPCCOM, { BO, BI, BH } },
{ "bcr", XLLK(19,16,0), XLBB_MASK, PWRCOM, { BO, BI } },
{ "bcrl", XLLK(19,16,1), XLBB_MASK, PWRCOM, { BO, BI } },
{ "bclre", XLLK(19,17,0), XLBB_MASK, BOOKE64, { BO, BI } },
{ "bclrel", XLLK(19,17,1), XLBB_MASK, BOOKE64, { BO, BI } },
{ "rfid", XL(19,18), 0xffffffff, PPC64, { 0 } },
{ "crnot", XL(19,33), XL_MASK, PPCCOM, { BT, BA, BBA } },
{ "crnor", XL(19,33), XL_MASK, COM, { BT, BA, BB } },
{ "rfmci", X(19,38), 0xffffffff, PPCRFMCI, { 0 } },
{ "rfi", XL(19,50), 0xffffffff, COM, { 0 } },
{ "rfci", XL(19,51), 0xffffffff, PPC403 | BOOKE, { 0 } },
{ "rfsvc", XL(19,82), 0xffffffff, POWER, { 0 } },
{ "crandc", XL(19,129), XL_MASK, COM, { BT, BA, BB } },
{ "isync", XL(19,150), 0xffffffff, PPCCOM, { 0 } },
{ "ics", XL(19,150), 0xffffffff, PWRCOM, { 0 } },
{ "crclr", XL(19,193), XL_MASK, PPCCOM, { BT, BAT, BBA } },
{ "crxor", XL(19,193), XL_MASK, COM, { BT, BA, BB } },
{ "crnand", XL(19,225), XL_MASK, COM, { BT, BA, BB } },
{ "crand", XL(19,257), XL_MASK, COM, { BT, BA, BB } },
{ "hrfid", XL(19,274), 0xffffffff, POWER5 | CELL, { 0 } },
{ "crset", XL(19,289), XL_MASK, PPCCOM, { BT, BAT, BBA } },
{ "creqv", XL(19,289), XL_MASK, COM, { BT, BA, BB } },
{ "doze", XL(19,402), 0xffffffff, POWER6, { 0 } },
{ "crorc", XL(19,417), XL_MASK, COM, { BT, BA, BB } },
{ "nap", XL(19,434), 0xffffffff, POWER6, { 0 } },
{ "crmove", XL(19,449), XL_MASK, PPCCOM, { BT, BA, BBA } },
{ "cror", XL(19,449), XL_MASK, COM, { BT, BA, BB } },
{ "sleep", XL(19,466), 0xffffffff, POWER6, { 0 } },
{ "rvwinkle", XL(19,498), 0xffffffff, POWER6, { 0 } },
{ "bctr", XLO(19,BOU,528,0), XLBOBIBB_MASK, COM, { 0 } },
{ "bctrl", XLO(19,BOU,528,1), XLBOBIBB_MASK, COM, { 0 } },
{ "bltctr", XLOCB(19,BOT,CBLT,528,0), XLBOCBBB_MASK, PPCCOM, { CR } },
{ "bltctr-", XLOCB(19,BOT,CBLT,528,0), XLBOCBBB_MASK, NOPOWER4, { CR } },
{ "bltctr-", XLOCB(19,BOTM4,CBLT,528,0), XLBOCBBB_MASK, POWER4, { CR } },
{ "bltctr+", XLOCB(19,BOTP,CBLT,528,0), XLBOCBBB_MASK, NOPOWER4, { CR } },
{ "bltctr+", XLOCB(19,BOTP4,CBLT,528,0), XLBOCBBB_MASK, POWER4, { CR } },
{ "bltctrl", XLOCB(19,BOT,CBLT,528,1), XLBOCBBB_MASK, PPCCOM, { CR } },
{ "bltctrl-",XLOCB(19,BOT,CBLT,528,1), XLBOCBBB_MASK, NOPOWER4, { CR } },
{ "bltctrl-",XLOCB(19,BOTM4,CBLT,528,1), XLBOCBBB_MASK, POWER4, { CR } },
{ "bltctrl+",XLOCB(19,BOTP,CBLT,528,1), XLBOCBBB_MASK, NOPOWER4, { CR } },
{ "bltctrl+",XLOCB(19,BOTP4,CBLT,528,1), XLBOCBBB_MASK, POWER4, { CR } },
{ "bgtctr", XLOCB(19,BOT,CBGT,528,0), XLBOCBBB_MASK, PPCCOM, { CR } },
{ "bgtctr-", XLOCB(19,BOT,CBGT,528,0), XLBOCBBB_MASK, NOPOWER4, { CR } },
{ "bgtctr-", XLOCB(19,BOTM4,CBGT,528,0), XLBOCBBB_MASK, POWER4, { CR } },
{ "bgtctr+", XLOCB(19,BOTP,CBGT,528,0), XLBOCBBB_MASK, NOPOWER4, { CR } },
{ "bgtctr+", XLOCB(19,BOTP4,CBGT,528,0), XLBOCBBB_MASK, POWER4, { CR } },
{ "bgtctrl", XLOCB(19,BOT,CBGT,528,1), XLBOCBBB_MASK, PPCCOM, { CR } },
{ "bgtctrl-",XLOCB(19,BOT,CBGT,528,1), XLBOCBBB_MASK, NOPOWER4, { CR } },
{ "bgtctrl-",XLOCB(19,BOTM4,CBGT,528,1), XLBOCBBB_MASK, POWER4, { CR } },
{ "bgtctrl+",XLOCB(19,BOTP,CBGT,528,1), XLBOCBBB_MASK, NOPOWER4, { CR } },
{ "bgtctrl+",XLOCB(19,BOTP4,CBGT,528,1), XLBOCBBB_MASK, POWER4, { CR } },
{ "beqctr", XLOCB(19,BOT,CBEQ,528,0), XLBOCBBB_MASK, PPCCOM, { CR } },
{ "beqctr-", XLOCB(19,BOT,CBEQ,528,0), XLBOCBBB_MASK, NOPOWER4, { CR } },
{ "beqctr-", XLOCB(19,BOTM4,CBEQ,528,0), XLBOCBBB_MASK, POWER4, { CR } },
{ "beqctr+", XLOCB(19,BOTP,CBEQ,528,0), XLBOCBBB_MASK, NOPOWER4, { CR } },
{ "beqctr+", XLOCB(19,BOTP4,CBEQ,528,0), XLBOCBBB_MASK, POWER4, { CR } },
{ "beqctrl", XLOCB(19,BOT,CBEQ,528,1), XLBOCBBB_MASK, PPCCOM, { CR } },
{ "beqctrl-",XLOCB(19,BOT,CBEQ,528,1), XLBOCBBB_MASK, NOPOWER4, { CR } },
{ "beqctrl-",XLOCB(19,BOTM4,CBEQ,528,1), XLBOCBBB_MASK, POWER4, { CR } },
{ "beqctrl+",XLOCB(19,BOTP,CBEQ,528,1), XLBOCBBB_MASK, NOPOWER4, { CR } },
{ "beqctrl+",XLOCB(19,BOTP4,CBEQ,528,1), XLBOCBBB_MASK, POWER4, { CR } },
{ "bsoctr", XLOCB(19,BOT,CBSO,528,0), XLBOCBBB_MASK, PPCCOM, { CR } },
{ "bsoctr-", XLOCB(19,BOT,CBSO,528,0), XLBOCBBB_MASK, NOPOWER4, { CR } },
{ "bsoctr-", XLOCB(19,BOTM4,CBSO,528,0), XLBOCBBB_MASK, POWER4, { CR } },
{ "bsoctr+", XLOCB(19,BOTP,CBSO,528,0), XLBOCBBB_MASK, NOPOWER4, { CR } },
{ "bsoctr+", XLOCB(19,BOTP4,CBSO,528,0), XLBOCBBB_MASK, POWER4, { CR } },
{ "bsoctrl", XLOCB(19,BOT,CBSO,528,1), XLBOCBBB_MASK, PPCCOM, { CR } },
{ "bsoctrl-",XLOCB(19,BOT,CBSO,528,1), XLBOCBBB_MASK, NOPOWER4, { CR } },
{ "bsoctrl-",XLOCB(19,BOTM4,CBSO,528,1), XLBOCBBB_MASK, POWER4, { CR } },
{ "bsoctrl+",XLOCB(19,BOTP,CBSO,528,1), XLBOCBBB_MASK, NOPOWER4, { CR } },
{ "bsoctrl+",XLOCB(19,BOTP4,CBSO,528,1), XLBOCBBB_MASK, POWER4, { CR } },
{ "bunctr", XLOCB(19,BOT,CBSO,528,0), XLBOCBBB_MASK, PPCCOM, { CR } },
{ "bunctr-", XLOCB(19,BOT,CBSO,528,0), XLBOCBBB_MASK, NOPOWER4, { CR } },
{ "bunctr-", XLOCB(19,BOTM4,CBSO,528,0), XLBOCBBB_MASK, POWER4, { CR } },
{ "bunctr+", XLOCB(19,BOTP,CBSO,528,0), XLBOCBBB_MASK, NOPOWER4, { CR } },
{ "bunctr+", XLOCB(19,BOTP4,CBSO,528,0), XLBOCBBB_MASK, POWER4, { CR } },
{ "bunctrl", XLOCB(19,BOT,CBSO,528,1), XLBOCBBB_MASK, PPCCOM, { CR } },
{ "bunctrl-",XLOCB(19,BOT,CBSO,528,1), XLBOCBBB_MASK, NOPOWER4, { CR } },
{ "bunctrl-",XLOCB(19,BOTM4,CBSO,528,1), XLBOCBBB_MASK, POWER4, { CR } },
{ "bunctrl+",XLOCB(19,BOTP,CBSO,528,1), XLBOCBBB_MASK, NOPOWER4, { CR } },
{ "bunctrl+",XLOCB(19,BOTP4,CBSO,528,1), XLBOCBBB_MASK, POWER4, { CR } },
{ "bgectr", XLOCB(19,BOF,CBLT,528,0), XLBOCBBB_MASK, PPCCOM, { CR } },
{ "bgectr-", XLOCB(19,BOF,CBLT,528,0), XLBOCBBB_MASK, NOPOWER4, { CR } },
{ "bgectr-", XLOCB(19,BOFM4,CBLT,528,0), XLBOCBBB_MASK, POWER4, { CR } },
{ "bgectr+", XLOCB(19,BOFP,CBLT,528,0), XLBOCBBB_MASK, NOPOWER4, { CR } },
{ "bgectr+", XLOCB(19,BOFP4,CBLT,528,0), XLBOCBBB_MASK, POWER4, { CR } },
{ "bgectrl", XLOCB(19,BOF,CBLT,528,1), XLBOCBBB_MASK, PPCCOM, { CR } },
{ "bgectrl-",XLOCB(19,BOF,CBLT,528,1), XLBOCBBB_MASK, NOPOWER4, { CR } },
{ "bgectrl-",XLOCB(19,BOFM4,CBLT,528,1), XLBOCBBB_MASK, POWER4, { CR } },
{ "bgectrl+",XLOCB(19,BOFP,CBLT,528,1), XLBOCBBB_MASK, NOPOWER4, { CR } },
{ "bgectrl+",XLOCB(19,BOFP4,CBLT,528,1), XLBOCBBB_MASK, POWER4, { CR } },
{ "bnlctr", XLOCB(19,BOF,CBLT,528,0), XLBOCBBB_MASK, PPCCOM, { CR } },
{ "bnlctr-", XLOCB(19,BOF,CBLT,528,0), XLBOCBBB_MASK, NOPOWER4, { CR } },
{ "bnlctr-", XLOCB(19,BOFM4,CBLT,528,0), XLBOCBBB_MASK, POWER4, { CR } },
{ "bnlctr+", XLOCB(19,BOFP,CBLT,528,0), XLBOCBBB_MASK, NOPOWER4, { CR } },
{ "bnlctr+", XLOCB(19,BOFP4,CBLT,528,0), XLBOCBBB_MASK, POWER4, { CR } },
{ "bnlctrl", XLOCB(19,BOF,CBLT,528,1), XLBOCBBB_MASK, PPCCOM, { CR } },
{ "bnlctrl-",XLOCB(19,BOF,CBLT,528,1), XLBOCBBB_MASK, NOPOWER4, { CR } },
{ "bnlctrl-",XLOCB(19,BOFM4,CBLT,528,1), XLBOCBBB_MASK, POWER4, { CR } },
{ "bnlctrl+",XLOCB(19,BOFP,CBLT,528,1), XLBOCBBB_MASK, NOPOWER4, { CR } },
{ "bnlctrl+",XLOCB(19,BOFP4,CBLT,528,1), XLBOCBBB_MASK, POWER4, { CR } },
{ "blectr", XLOCB(19,BOF,CBGT,528,0), XLBOCBBB_MASK, PPCCOM, { CR } },
{ "blectr-", XLOCB(19,BOF,CBGT,528,0), XLBOCBBB_MASK, NOPOWER4, { CR } },
{ "blectr-", XLOCB(19,BOFM4,CBGT,528,0), XLBOCBBB_MASK, POWER4, { CR } },
{ "blectr+", XLOCB(19,BOFP,CBGT,528,0), XLBOCBBB_MASK, NOPOWER4, { CR } },
{ "blectr+", XLOCB(19,BOFP4,CBGT,528,0), XLBOCBBB_MASK, POWER4, { CR } },
{ "blectrl", XLOCB(19,BOF,CBGT,528,1), XLBOCBBB_MASK, PPCCOM, { CR } },
{ "blectrl-",XLOCB(19,BOF,CBGT,528,1), XLBOCBBB_MASK, NOPOWER4, { CR } },
{ "blectrl-",XLOCB(19,BOFM4,CBGT,528,1), XLBOCBBB_MASK, POWER4, { CR } },
{ "blectrl+",XLOCB(19,BOFP,CBGT,528,1), XLBOCBBB_MASK, NOPOWER4, { CR } },
{ "blectrl+",XLOCB(19,BOFP4,CBGT,528,1), XLBOCBBB_MASK, POWER4, { CR } },
{ "bngctr", XLOCB(19,BOF,CBGT,528,0), XLBOCBBB_MASK, PPCCOM, { CR } },
{ "bngctr-", XLOCB(19,BOF,CBGT,528,0), XLBOCBBB_MASK, NOPOWER4, { CR } },
{ "bngctr-", XLOCB(19,BOFM4,CBGT,528,0), XLBOCBBB_MASK, POWER4, { CR } },
{ "bngctr+", XLOCB(19,BOFP,CBGT,528,0), XLBOCBBB_MASK, NOPOWER4, { CR } },
{ "bngctr+", XLOCB(19,BOFP4,CBGT,528,0), XLBOCBBB_MASK, POWER4, { CR } },
{ "bngctrl", XLOCB(19,BOF,CBGT,528,1), XLBOCBBB_MASK, PPCCOM, { CR } },
{ "bngctrl-",XLOCB(19,BOF,CBGT,528,1), XLBOCBBB_MASK, NOPOWER4, { CR } },
{ "bngctrl-",XLOCB(19,BOFM4,CBGT,528,1), XLBOCBBB_MASK, POWER4, { CR } },
{ "bngctrl+",XLOCB(19,BOFP,CBGT,528,1), XLBOCBBB_MASK, NOPOWER4, { CR } },
{ "bngctrl+",XLOCB(19,BOFP4,CBGT,528,1), XLBOCBBB_MASK, POWER4, { CR } },
{ "bnectr", XLOCB(19,BOF,CBEQ,528,0), XLBOCBBB_MASK, PPCCOM, { CR } },
{ "bnectr-", XLOCB(19,BOF,CBEQ,528,0), XLBOCBBB_MASK, NOPOWER4, { CR } },
{ "bnectr-", XLOCB(19,BOFM4,CBEQ,528,0), XLBOCBBB_MASK, POWER4, { CR } },
{ "bnectr+", XLOCB(19,BOFP,CBEQ,528,0), XLBOCBBB_MASK, NOPOWER4, { CR } },
{ "bnectr+", XLOCB(19,BOFP4,CBEQ,528,0), XLBOCBBB_MASK, POWER4, { CR } },
{ "bnectrl", XLOCB(19,BOF,CBEQ,528,1), XLBOCBBB_MASK, PPCCOM, { CR } },
{ "bnectrl-",XLOCB(19,BOF,CBEQ,528,1), XLBOCBBB_MASK, NOPOWER4, { CR } },
{ "bnectrl-",XLOCB(19,BOFM4,CBEQ,528,1), XLBOCBBB_MASK, POWER4, { CR } },
{ "bnectrl+",XLOCB(19,BOFP,CBEQ,528,1), XLBOCBBB_MASK, NOPOWER4, { CR } },
{ "bnectrl+",XLOCB(19,BOFP4,CBEQ,528,1), XLBOCBBB_MASK, POWER4, { CR } },
{ "bnsctr", XLOCB(19,BOF,CBSO,528,0), XLBOCBBB_MASK, PPCCOM, { CR } },
{ "bnsctr-", XLOCB(19,BOF,CBSO,528,0), XLBOCBBB_MASK, NOPOWER4, { CR } },
{ "bnsctr-", XLOCB(19,BOFM4,CBSO,528,0), XLBOCBBB_MASK, POWER4, { CR } },
{ "bnsctr+", XLOCB(19,BOFP,CBSO,528,0), XLBOCBBB_MASK, NOPOWER4, { CR } },
{ "bnsctr+", XLOCB(19,BOFP4,CBSO,528,0), XLBOCBBB_MASK, POWER4, { CR } },
{ "bnsctrl", XLOCB(19,BOF,CBSO,528,1), XLBOCBBB_MASK, PPCCOM, { CR } },
{ "bnsctrl-",XLOCB(19,BOF,CBSO,528,1), XLBOCBBB_MASK, NOPOWER4, { CR } },
{ "bnsctrl-",XLOCB(19,BOFM4,CBSO,528,1), XLBOCBBB_MASK, POWER4, { CR } },
{ "bnsctrl+",XLOCB(19,BOFP,CBSO,528,1), XLBOCBBB_MASK, NOPOWER4, { CR } },
{ "bnsctrl+",XLOCB(19,BOFP4,CBSO,528,1), XLBOCBBB_MASK, POWER4, { CR } },
{ "bnuctr", XLOCB(19,BOF,CBSO,528,0), XLBOCBBB_MASK, PPCCOM, { CR } },
{ "bnuctr-", XLOCB(19,BOF,CBSO,528,0), XLBOCBBB_MASK, NOPOWER4, { CR } },
{ "bnuctr-", XLOCB(19,BOFM4,CBSO,528,0), XLBOCBBB_MASK, POWER4, { CR } },
{ "bnuctr+", XLOCB(19,BOFP,CBSO,528,0), XLBOCBBB_MASK, NOPOWER4, { CR } },
{ "bnuctr+", XLOCB(19,BOFP4,CBSO,528,0), XLBOCBBB_MASK, POWER4, { CR } },
{ "bnuctrl", XLOCB(19,BOF,CBSO,528,1), XLBOCBBB_MASK, PPCCOM, { CR } },
{ "bnuctrl-",XLOCB(19,BOF,CBSO,528,1), XLBOCBBB_MASK, NOPOWER4, { CR } },
{ "bnuctrl-",XLOCB(19,BOFM4,CBSO,528,1), XLBOCBBB_MASK, POWER4, { CR } },
{ "bnuctrl+",XLOCB(19,BOFP,CBSO,528,1), XLBOCBBB_MASK, NOPOWER4, { CR } },
{ "bnuctrl+",XLOCB(19,BOFP4,CBSO,528,1), XLBOCBBB_MASK, POWER4, { CR } },
{ "btctr", XLO(19,BOT,528,0), XLBOBB_MASK, PPCCOM, { BI } },
{ "btctr-", XLO(19,BOT,528,0), XLBOBB_MASK, NOPOWER4, { BI } },
{ "btctr-", XLO(19,BOTM4,528,0), XLBOBB_MASK, POWER4, { BI } },
{ "btctr+", XLO(19,BOTP,528,0), XLBOBB_MASK, NOPOWER4, { BI } },
{ "btctr+", XLO(19,BOTP4,528,0), XLBOBB_MASK, POWER4, { BI } },
{ "btctrl", XLO(19,BOT,528,1), XLBOBB_MASK, PPCCOM, { BI } },
{ "btctrl-", XLO(19,BOT,528,1), XLBOBB_MASK, NOPOWER4, { BI } },
{ "btctrl-", XLO(19,BOTM4,528,1), XLBOBB_MASK, POWER4, { BI } },
{ "btctrl+", XLO(19,BOTP,528,1), XLBOBB_MASK, NOPOWER4, { BI } },
{ "btctrl+", XLO(19,BOTP4,528,1), XLBOBB_MASK, POWER4, { BI } },
{ "bfctr", XLO(19,BOF,528,0), XLBOBB_MASK, PPCCOM, { BI } },
{ "bfctr-", XLO(19,BOF,528,0), XLBOBB_MASK, NOPOWER4, { BI } },
{ "bfctr-", XLO(19,BOFM4,528,0), XLBOBB_MASK, POWER4, { BI } },
{ "bfctr+", XLO(19,BOFP,528,0), XLBOBB_MASK, NOPOWER4, { BI } },
{ "bfctr+", XLO(19,BOFP4,528,0), XLBOBB_MASK, POWER4, { BI } },
{ "bfctrl", XLO(19,BOF,528,1), XLBOBB_MASK, PPCCOM, { BI } },
{ "bfctrl-", XLO(19,BOF,528,1), XLBOBB_MASK, NOPOWER4, { BI } },
{ "bfctrl-", XLO(19,BOFM4,528,1), XLBOBB_MASK, POWER4, { BI } },
{ "bfctrl+", XLO(19,BOFP,528,1), XLBOBB_MASK, NOPOWER4, { BI } },
{ "bfctrl+", XLO(19,BOFP4,528,1), XLBOBB_MASK, POWER4, { BI } },
{ "bcctr-", XLYLK(19,528,0,0), XLYBB_MASK, PPCCOM, { BOE, BI } },
{ "bcctr+", XLYLK(19,528,1,0), XLYBB_MASK, PPCCOM, { BOE, BI } },
{ "bcctrl-", XLYLK(19,528,0,1), XLYBB_MASK, PPCCOM, { BOE, BI } },
{ "bcctrl+", XLYLK(19,528,1,1), XLYBB_MASK, PPCCOM, { BOE, BI } },
{ "bcctr", XLLK(19,528,0), XLBH_MASK, PPCCOM, { BO, BI, BH } },
{ "bcctrl", XLLK(19,528,1), XLBH_MASK, PPCCOM, { BO, BI, BH } },
{ "bcc", XLLK(19,528,0), XLBB_MASK, PWRCOM, { BO, BI } },
{ "bccl", XLLK(19,528,1), XLBB_MASK, PWRCOM, { BO, BI } },
{ "bcctre", XLLK(19,529,0), XLBB_MASK, BOOKE64, { BO, BI } },
{ "bcctrel", XLLK(19,529,1), XLBB_MASK, BOOKE64, { BO, BI } },
{ "rlwimi", M(20,0), M_MASK, PPCCOM, { RA,RS,SH,MBE,ME } },
{ "rlimi", M(20,0), M_MASK, PWRCOM, { RA,RS,SH,MBE,ME } },
{ "rlwimi.", M(20,1), M_MASK, PPCCOM, { RA,RS,SH,MBE,ME } },
{ "rlimi.", M(20,1), M_MASK, PWRCOM, { RA,RS,SH,MBE,ME } },
{ "rotlwi", MME(21,31,0), MMBME_MASK, PPCCOM, { RA, RS, SH } },
{ "clrlwi", MME(21,31,0), MSHME_MASK, PPCCOM, { RA, RS, MB } },
{ "rlwinm", M(21,0), M_MASK, PPCCOM, { RA,RS,SH,MBE,ME } },
{ "rlinm", M(21,0), M_MASK, PWRCOM, { RA,RS,SH,MBE,ME } },
{ "rotlwi.", MME(21,31,1), MMBME_MASK, PPCCOM, { RA,RS,SH } },
{ "clrlwi.", MME(21,31,1), MSHME_MASK, PPCCOM, { RA, RS, MB } },
{ "rlwinm.", M(21,1), M_MASK, PPCCOM, { RA,RS,SH,MBE,ME } },
{ "rlinm.", M(21,1), M_MASK, PWRCOM, { RA,RS,SH,MBE,ME } },
{ "rlmi", M(22,0), M_MASK, M601, { RA,RS,RB,MBE,ME } },
{ "rlmi.", M(22,1), M_MASK, M601, { RA,RS,RB,MBE,ME } },
{ "be", B(22,0,0), B_MASK, BOOKE64, { LI } },
{ "bel", B(22,0,1), B_MASK, BOOKE64, { LI } },
{ "bea", B(22,1,0), B_MASK, BOOKE64, { LIA } },
{ "bela", B(22,1,1), B_MASK, BOOKE64, { LIA } },
{ "rotlw", MME(23,31,0), MMBME_MASK, PPCCOM, { RA, RS, RB } },
{ "rlwnm", M(23,0), M_MASK, PPCCOM, { RA,RS,RB,MBE,ME } },
{ "rlnm", M(23,0), M_MASK, PWRCOM, { RA,RS,RB,MBE,ME } },
{ "rotlw.", MME(23,31,1), MMBME_MASK, PPCCOM, { RA, RS, RB } },
{ "rlwnm.", M(23,1), M_MASK, PPCCOM, { RA,RS,RB,MBE,ME } },
{ "rlnm.", M(23,1), M_MASK, PWRCOM, { RA,RS,RB,MBE,ME } },
{ "nop", OP(24), 0xffffffff, PPCCOM, { 0 } },
{ "ori", OP(24), OP_MASK, PPCCOM, { RA, RS, UI } },
{ "oril", OP(24), OP_MASK, PWRCOM, { RA, RS, UI } },
{ "oris", OP(25), OP_MASK, PPCCOM, { RA, RS, UI } },
{ "oriu", OP(25), OP_MASK, PWRCOM, { RA, RS, UI } },
{ "xori", OP(26), OP_MASK, PPCCOM, { RA, RS, UI } },
{ "xoril", OP(26), OP_MASK, PWRCOM, { RA, RS, UI } },
{ "xoris", OP(27), OP_MASK, PPCCOM, { RA, RS, UI } },
{ "xoriu", OP(27), OP_MASK, PWRCOM, { RA, RS, UI } },
{ "andi.", OP(28), OP_MASK, PPCCOM, { RA, RS, UI } },
{ "andil.", OP(28), OP_MASK, PWRCOM, { RA, RS, UI } },
{ "andis.", OP(29), OP_MASK, PPCCOM, { RA, RS, UI } },
{ "andiu.", OP(29), OP_MASK, PWRCOM, { RA, RS, UI } },
{ "rotldi", MD(30,0,0), MDMB_MASK, PPC64, { RA, RS, SH6 } },
{ "clrldi", MD(30,0,0), MDSH_MASK, PPC64, { RA, RS, MB6 } },
{ "rldicl", MD(30,0,0), MD_MASK, PPC64, { RA, RS, SH6, MB6 } },
{ "rotldi.", MD(30,0,1), MDMB_MASK, PPC64, { RA, RS, SH6 } },
{ "clrldi.", MD(30,0,1), MDSH_MASK, PPC64, { RA, RS, MB6 } },
{ "rldicl.", MD(30,0,1), MD_MASK, PPC64, { RA, RS, SH6, MB6 } },
{ "rldicr", MD(30,1,0), MD_MASK, PPC64, { RA, RS, SH6, ME6 } },
{ "rldicr.", MD(30,1,1), MD_MASK, PPC64, { RA, RS, SH6, ME6 } },
{ "rldic", MD(30,2,0), MD_MASK, PPC64, { RA, RS, SH6, MB6 } },
{ "rldic.", MD(30,2,1), MD_MASK, PPC64, { RA, RS, SH6, MB6 } },
{ "rldimi", MD(30,3,0), MD_MASK, PPC64, { RA, RS, SH6, MB6 } },
{ "rldimi.", MD(30,3,1), MD_MASK, PPC64, { RA, RS, SH6, MB6 } },
{ "rotld", MDS(30,8,0), MDSMB_MASK, PPC64, { RA, RS, RB } },
{ "rldcl", MDS(30,8,0), MDS_MASK, PPC64, { RA, RS, RB, MB6 } },
{ "rotld.", MDS(30,8,1), MDSMB_MASK, PPC64, { RA, RS, RB } },
{ "rldcl.", MDS(30,8,1), MDS_MASK, PPC64, { RA, RS, RB, MB6 } },
{ "rldcr", MDS(30,9,0), MDS_MASK, PPC64, { RA, RS, RB, ME6 } },
{ "rldcr.", MDS(30,9,1), MDS_MASK, PPC64, { RA, RS, RB, ME6 } },
{ "cmpw", XOPL(31,0,0), XCMPL_MASK, PPCCOM, { OBF, RA, RB } },
{ "cmpd", XOPL(31,0,1), XCMPL_MASK, PPC64, { OBF, RA, RB } },
{ "cmp", X(31,0), XCMP_MASK, PPC, { BF, L, RA, RB } },
{ "cmp", X(31,0), XCMPL_MASK, PWRCOM, { BF, RA, RB } },
{ "twlgt", XTO(31,4,TOLGT), XTO_MASK, PPCCOM, { RA, RB } },
{ "tlgt", XTO(31,4,TOLGT), XTO_MASK, PWRCOM, { RA, RB } },
{ "twllt", XTO(31,4,TOLLT), XTO_MASK, PPCCOM, { RA, RB } },
{ "tllt", XTO(31,4,TOLLT), XTO_MASK, PWRCOM, { RA, RB } },
{ "tweq", XTO(31,4,TOEQ), XTO_MASK, PPCCOM, { RA, RB } },
{ "teq", XTO(31,4,TOEQ), XTO_MASK, PWRCOM, { RA, RB } },
{ "twlge", XTO(31,4,TOLGE), XTO_MASK, PPCCOM, { RA, RB } },
{ "tlge", XTO(31,4,TOLGE), XTO_MASK, PWRCOM, { RA, RB } },
{ "twlnl", XTO(31,4,TOLNL), XTO_MASK, PPCCOM, { RA, RB } },
{ "tlnl", XTO(31,4,TOLNL), XTO_MASK, PWRCOM, { RA, RB } },
{ "twlle", XTO(31,4,TOLLE), XTO_MASK, PPCCOM, { RA, RB } },
{ "tlle", XTO(31,4,TOLLE), XTO_MASK, PWRCOM, { RA, RB } },
{ "twlng", XTO(31,4,TOLNG), XTO_MASK, PPCCOM, { RA, RB } },
{ "tlng", XTO(31,4,TOLNG), XTO_MASK, PWRCOM, { RA, RB } },
{ "twgt", XTO(31,4,TOGT), XTO_MASK, PPCCOM, { RA, RB } },
{ "tgt", XTO(31,4,TOGT), XTO_MASK, PWRCOM, { RA, RB } },
{ "twge", XTO(31,4,TOGE), XTO_MASK, PPCCOM, { RA, RB } },
{ "tge", XTO(31,4,TOGE), XTO_MASK, PWRCOM, { RA, RB } },
{ "twnl", XTO(31,4,TONL), XTO_MASK, PPCCOM, { RA, RB } },
{ "tnl", XTO(31,4,TONL), XTO_MASK, PWRCOM, { RA, RB } },
{ "twlt", XTO(31,4,TOLT), XTO_MASK, PPCCOM, { RA, RB } },
{ "tlt", XTO(31,4,TOLT), XTO_MASK, PWRCOM, { RA, RB } },
{ "twle", XTO(31,4,TOLE), XTO_MASK, PPCCOM, { RA, RB } },
{ "tle", XTO(31,4,TOLE), XTO_MASK, PWRCOM, { RA, RB } },
{ "twng", XTO(31,4,TONG), XTO_MASK, PPCCOM, { RA, RB } },
{ "tng", XTO(31,4,TONG), XTO_MASK, PWRCOM, { RA, RB } },
{ "twne", XTO(31,4,TONE), XTO_MASK, PPCCOM, { RA, RB } },
{ "tne", XTO(31,4,TONE), XTO_MASK, PWRCOM, { RA, RB } },
{ "trap", XTO(31,4,TOU), 0xffffffff, PPCCOM, { 0 } },
{ "tw", X(31,4), X_MASK, PPCCOM, { TO, RA, RB } },
{ "t", X(31,4), X_MASK, PWRCOM, { TO, RA, RB } },
{ "subfc", XO(31,8,0,0), XO_MASK, PPCCOM, { RT, RA, RB } },
{ "sf", XO(31,8,0,0), XO_MASK, PWRCOM, { RT, RA, RB } },
{ "subc", XO(31,8,0,0), XO_MASK, PPC, { RT, RB, RA } },
{ "subfc.", XO(31,8,0,1), XO_MASK, PPCCOM, { RT, RA, RB } },
{ "sf.", XO(31,8,0,1), XO_MASK, PWRCOM, { RT, RA, RB } },
{ "subc.", XO(31,8,0,1), XO_MASK, PPCCOM, { RT, RB, RA } },
{ "subfco", XO(31,8,1,0), XO_MASK, PPCCOM, { RT, RA, RB } },
{ "sfo", XO(31,8,1,0), XO_MASK, PWRCOM, { RT, RA, RB } },
{ "subco", XO(31,8,1,0), XO_MASK, PPC, { RT, RB, RA } },
{ "subfco.", XO(31,8,1,1), XO_MASK, PPCCOM, { RT, RA, RB } },
{ "sfo.", XO(31,8,1,1), XO_MASK, PWRCOM, { RT, RA, RB } },
{ "subco.", XO(31,8,1,1), XO_MASK, PPC, { RT, RB, RA } },
{ "mulhdu", XO(31,9,0,0), XO_MASK, PPC64, { RT, RA, RB } },
{ "mulhdu.", XO(31,9,0,1), XO_MASK, PPC64, { RT, RA, RB } },
{ "addc", XO(31,10,0,0), XO_MASK, PPCCOM, { RT, RA, RB } },
{ "a", XO(31,10,0,0), XO_MASK, PWRCOM, { RT, RA, RB } },
{ "addc.", XO(31,10,0,1), XO_MASK, PPCCOM, { RT, RA, RB } },
{ "a.", XO(31,10,0,1), XO_MASK, PWRCOM, { RT, RA, RB } },
{ "addco", XO(31,10,1,0), XO_MASK, PPCCOM, { RT, RA, RB } },
{ "ao", XO(31,10,1,0), XO_MASK, PWRCOM, { RT, RA, RB } },
{ "addco.", XO(31,10,1,1), XO_MASK, PPCCOM, { RT, RA, RB } },
{ "ao.", XO(31,10,1,1), XO_MASK, PWRCOM, { RT, RA, RB } },
{ "mulhwu", XO(31,11,0,0), XO_MASK, PPC, { RT, RA, RB } },
{ "mulhwu.", XO(31,11,0,1), XO_MASK, PPC, { RT, RA, RB } },
{ "isellt", X(31,15), X_MASK, PPCISEL, { RT, RA, RB } },
{ "iselgt", X(31,47), X_MASK, PPCISEL, { RT, RA, RB } },
{ "iseleq", X(31,79), X_MASK, PPCISEL, { RT, RA, RB } },
{ "isel", XISEL(31,15), XISEL_MASK, PPCISEL, { RT, RA, RB, CRB } },
{ "mfocrf", XFXM(31,19,0,1), XFXFXM_MASK, COM, { RT, FXM } },
{ "mfcr", X(31,19), XRARB_MASK, NOPOWER4 | COM, { RT } },
{ "mfcr", X(31,19), XFXFXM_MASK, POWER4, { RT, FXM4 } },
{ "lwarx", X(31,20), XEH_MASK, PPC, { RT, RA0, RB, EH } },
{ "ldx", X(31,21), X_MASK, PPC64, { RT, RA0, RB } },
{ "icbt", X(31,22), X_MASK, BOOKE|PPCE300, { CT, RA, RB } },
{ "icbt", X(31,262), XRT_MASK, PPC403, { RA, RB } },
{ "lwzx", X(31,23), X_MASK, PPCCOM, { RT, RA0, RB } },
{ "lx", X(31,23), X_MASK, PWRCOM, { RT, RA, RB } },
{ "slw", XRC(31,24,0), X_MASK, PPCCOM, { RA, RS, RB } },
{ "sl", XRC(31,24,0), X_MASK, PWRCOM, { RA, RS, RB } },
{ "slw.", XRC(31,24,1), X_MASK, PPCCOM, { RA, RS, RB } },
{ "sl.", XRC(31,24,1), X_MASK, PWRCOM, { RA, RS, RB } },
{ "cntlzw", XRC(31,26,0), XRB_MASK, PPCCOM, { RA, RS } },
{ "cntlz", XRC(31,26,0), XRB_MASK, PWRCOM, { RA, RS } },
{ "cntlzw.", XRC(31,26,1), XRB_MASK, PPCCOM, { RA, RS } },
{ "cntlz.", XRC(31,26,1), XRB_MASK, PWRCOM, { RA, RS } },
{ "sld", XRC(31,27,0), X_MASK, PPC64, { RA, RS, RB } },
{ "sld.", XRC(31,27,1), X_MASK, PPC64, { RA, RS, RB } },
{ "and", XRC(31,28,0), X_MASK, COM, { RA, RS, RB } },
{ "and.", XRC(31,28,1), X_MASK, COM, { RA, RS, RB } },
{ "maskg", XRC(31,29,0), X_MASK, M601, { RA, RS, RB } },
{ "maskg.", XRC(31,29,1), X_MASK, M601, { RA, RS, RB } },
{ "icbte", X(31,30), X_MASK, BOOKE64, { CT, RA, RB } },
{ "lwzxe", X(31,31), X_MASK, BOOKE64, { RT, RA0, RB } },
{ "cmplw", XOPL(31,32,0), XCMPL_MASK, PPCCOM, { OBF, RA, RB } },
{ "cmpld", XOPL(31,32,1), XCMPL_MASK, PPC64, { OBF, RA, RB } },
{ "cmpl", X(31,32), XCMP_MASK, PPC, { BF, L, RA, RB } },
{ "cmpl", X(31,32), XCMPL_MASK, PWRCOM, { BF, RA, RB } },
{ "subf", XO(31,40,0,0), XO_MASK, PPC, { RT, RA, RB } },
{ "sub", XO(31,40,0,0), XO_MASK, PPC, { RT, RB, RA } },
{ "subf.", XO(31,40,0,1), XO_MASK, PPC, { RT, RA, RB } },
{ "sub.", XO(31,40,0,1), XO_MASK, PPC, { RT, RB, RA } },
{ "subfo", XO(31,40,1,0), XO_MASK, PPC, { RT, RA, RB } },
{ "subo", XO(31,40,1,0), XO_MASK, PPC, { RT, RB, RA } },
{ "subfo.", XO(31,40,1,1), XO_MASK, PPC, { RT, RA, RB } },
{ "subo.", XO(31,40,1,1), XO_MASK, PPC, { RT, RB, RA } },
{ "ldux", X(31,53), X_MASK, PPC64, { RT, RAL, RB } },
{ "dcbst", X(31,54), XRT_MASK, PPC, { RA, RB } },
{ "lwzux", X(31,55), X_MASK, PPCCOM, { RT, RAL, RB } },
{ "lux", X(31,55), X_MASK, PWRCOM, { RT, RA, RB } },
{ "dcbste", X(31,62), XRT_MASK, BOOKE64, { RA, RB } },
{ "lwzuxe", X(31,63), X_MASK, BOOKE64, { RT, RAL, RB } },
{ "cntlzd", XRC(31,58,0), XRB_MASK, PPC64, { RA, RS } },
{ "cntlzd.", XRC(31,58,1), XRB_MASK, PPC64, { RA, RS } },
{ "andc", XRC(31,60,0), X_MASK, COM, { RA, RS, RB } },
{ "andc.", XRC(31,60,1), X_MASK, COM, { RA, RS, RB } },
{ "tdlgt", XTO(31,68,TOLGT), XTO_MASK, PPC64, { RA, RB } },
{ "tdllt", XTO(31,68,TOLLT), XTO_MASK, PPC64, { RA, RB } },
{ "tdeq", XTO(31,68,TOEQ), XTO_MASK, PPC64, { RA, RB } },
{ "tdlge", XTO(31,68,TOLGE), XTO_MASK, PPC64, { RA, RB } },
{ "tdlnl", XTO(31,68,TOLNL), XTO_MASK, PPC64, { RA, RB } },
{ "tdlle", XTO(31,68,TOLLE), XTO_MASK, PPC64, { RA, RB } },
{ "tdlng", XTO(31,68,TOLNG), XTO_MASK, PPC64, { RA, RB } },
{ "tdgt", XTO(31,68,TOGT), XTO_MASK, PPC64, { RA, RB } },
{ "tdge", XTO(31,68,TOGE), XTO_MASK, PPC64, { RA, RB } },
{ "tdnl", XTO(31,68,TONL), XTO_MASK, PPC64, { RA, RB } },
{ "tdlt", XTO(31,68,TOLT), XTO_MASK, PPC64, { RA, RB } },
{ "tdle", XTO(31,68,TOLE), XTO_MASK, PPC64, { RA, RB } },
{ "tdng", XTO(31,68,TONG), XTO_MASK, PPC64, { RA, RB } },
{ "tdne", XTO(31,68,TONE), XTO_MASK, PPC64, { RA, RB } },
{ "td", X(31,68), X_MASK, PPC64, { TO, RA, RB } },
{ "mulhd", XO(31,73,0,0), XO_MASK, PPC64, { RT, RA, RB } },
{ "mulhd.", XO(31,73,0,1), XO_MASK, PPC64, { RT, RA, RB } },
{ "mulhw", XO(31,75,0,0), XO_MASK, PPC, { RT, RA, RB } },
{ "mulhw.", XO(31,75,0,1), XO_MASK, PPC, { RT, RA, RB } },
{ "dlmzb", XRC(31,78,0), X_MASK, PPC403|PPC440, { RA, RS, RB } },
{ "dlmzb.", XRC(31,78,1), X_MASK, PPC403|PPC440, { RA, RS, RB } },
{ "mtsrd", X(31,82), XRB_MASK|(1<<20), PPC64, { SR, RS } },
{ "mfmsr", X(31,83), XRARB_MASK, COM, { RT } },
{ "ldarx", X(31,84), XEH_MASK, PPC64, { RT, RA0, RB, EH } },
{ "dcbfl", XOPL(31,86,1), XRT_MASK, POWER5, { RA, RB } },
{ "dcbf", X(31,86), XLRT_MASK, PPC, { RA, RB, L } },
{ "lbzx", X(31,87), X_MASK, COM, { RT, RA0, RB } },
{ "dcbfe", X(31,94), XRT_MASK, BOOKE64, { RA, RB } },
{ "lbzxe", X(31,95), X_MASK, BOOKE64, { RT, RA0, RB } },
{ "neg", XO(31,104,0,0), XORB_MASK, COM, { RT, RA } },
{ "neg.", XO(31,104,0,1), XORB_MASK, COM, { RT, RA } },
{ "nego", XO(31,104,1,0), XORB_MASK, COM, { RT, RA } },
{ "nego.", XO(31,104,1,1), XORB_MASK, COM, { RT, RA } },
{ "mul", XO(31,107,0,0), XO_MASK, M601, { RT, RA, RB } },
{ "mul.", XO(31,107,0,1), XO_MASK, M601, { RT, RA, RB } },
{ "mulo", XO(31,107,1,0), XO_MASK, M601, { RT, RA, RB } },
{ "mulo.", XO(31,107,1,1), XO_MASK, M601, { RT, RA, RB } },
{ "mtsrdin", X(31,114), XRA_MASK, PPC64, { RS, RB } },
{ "clf", X(31,118), XTO_MASK, POWER, { RA, RB } },
{ "lbzux", X(31,119), X_MASK, COM, { RT, RAL, RB } },
{ "popcntb", X(31,122), XRB_MASK, POWER5, { RA, RS } },
{ "not", XRC(31,124,0), X_MASK, COM, { RA, RS, RBS } },
{ "nor", XRC(31,124,0), X_MASK, COM, { RA, RS, RB } },
{ "not.", XRC(31,124,1), X_MASK, COM, { RA, RS, RBS } },
{ "nor.", XRC(31,124,1), X_MASK, COM, { RA, RS, RB } },
{ "lwarxe", X(31,126), X_MASK, BOOKE64, { RT, RA0, RB } },
{ "lbzuxe", X(31,127), X_MASK, BOOKE64, { RT, RAL, RB } },
{ "wrtee", X(31,131), XRARB_MASK, PPC403 | BOOKE, { RS } },
{ "dcbtstls",X(31,134), X_MASK, PPCCHLK, { CT, RA, RB }},
{ "subfe", XO(31,136,0,0), XO_MASK, PPCCOM, { RT, RA, RB } },
{ "sfe", XO(31,136,0,0), XO_MASK, PWRCOM, { RT, RA, RB } },
{ "subfe.", XO(31,136,0,1), XO_MASK, PPCCOM, { RT, RA, RB } },
{ "sfe.", XO(31,136,0,1), XO_MASK, PWRCOM, { RT, RA, RB } },
{ "subfeo", XO(31,136,1,0), XO_MASK, PPCCOM, { RT, RA, RB } },
{ "sfeo", XO(31,136,1,0), XO_MASK, PWRCOM, { RT, RA, RB } },
{ "subfeo.", XO(31,136,1,1), XO_MASK, PPCCOM, { RT, RA, RB } },
{ "sfeo.", XO(31,136,1,1), XO_MASK, PWRCOM, { RT, RA, RB } },
{ "adde", XO(31,138,0,0), XO_MASK, PPCCOM, { RT, RA, RB } },
{ "ae", XO(31,138,0,0), XO_MASK, PWRCOM, { RT, RA, RB } },
{ "adde.", XO(31,138,0,1), XO_MASK, PPCCOM, { RT, RA, RB } },
{ "ae.", XO(31,138,0,1), XO_MASK, PWRCOM, { RT, RA, RB } },
{ "addeo", XO(31,138,1,0), XO_MASK, PPCCOM, { RT, RA, RB } },
{ "aeo", XO(31,138,1,0), XO_MASK, PWRCOM, { RT, RA, RB } },
{ "addeo.", XO(31,138,1,1), XO_MASK, PPCCOM, { RT, RA, RB } },
{ "aeo.", XO(31,138,1,1), XO_MASK, PWRCOM, { RT, RA, RB } },
{ "dcbtstlse",X(31,142),X_MASK, PPCCHLK64, { CT, RA, RB }},
{ "mtocrf", XFXM(31,144,0,1), XFXFXM_MASK, COM, { FXM, RS } },
{ "mtcr", XFXM(31,144,0xff,0), XRARB_MASK, COM, { RS }},
{ "mtcrf", X(31,144), XFXFXM_MASK, COM, { FXM, RS } },
{ "mtmsr", X(31,146), XRARB_MASK, COM, { RS } },
{ "stdx", X(31,149), X_MASK, PPC64, { RS, RA0, RB } },
{ "stwcx.", XRC(31,150,1), X_MASK, PPC, { RS, RA0, RB } },
{ "stwx", X(31,151), X_MASK, PPCCOM, { RS, RA0, RB } },
{ "stx", X(31,151), X_MASK, PWRCOM, { RS, RA, RB } },
{ "stwcxe.", XRC(31,158,1), X_MASK, BOOKE64, { RS, RA0, RB } },
{ "stwxe", X(31,159), X_MASK, BOOKE64, { RS, RA0, RB } },
{ "slq", XRC(31,152,0), X_MASK, M601, { RA, RS, RB } },
{ "slq.", XRC(31,152,1), X_MASK, M601, { RA, RS, RB } },
{ "sle", XRC(31,153,0), X_MASK, M601, { RA, RS, RB } },
{ "sle.", XRC(31,153,1), X_MASK, M601, { RA, RS, RB } },
{ "prtyw", X(31,154), XRB_MASK, POWER6, { RA, RS } },
{ "wrteei", X(31,163), XE_MASK, PPC403 | BOOKE, { E } },
{ "dcbtls", X(31,166), X_MASK, PPCCHLK, { CT, RA, RB }},
{ "dcbtlse", X(31,174), X_MASK, PPCCHLK64, { CT, RA, RB }},
{ "mtmsrd", X(31,178), XRLARB_MASK, PPC64, { RS, A_L } },
{ "stdux", X(31,181), X_MASK, PPC64, { RS, RAS, RB } },
{ "stwux", X(31,183), X_MASK, PPCCOM, { RS, RAS, RB } },
{ "stux", X(31,183), X_MASK, PWRCOM, { RS, RA0, RB } },
{ "sliq", XRC(31,184,0), X_MASK, M601, { RA, RS, SH } },
{ "sliq.", XRC(31,184,1), X_MASK, M601, { RA, RS, SH } },
{ "prtyd", X(31,186), XRB_MASK, POWER6, { RA, RS } },
{ "stwuxe", X(31,191), X_MASK, BOOKE64, { RS, RAS, RB } },
{ "subfze", XO(31,200,0,0), XORB_MASK, PPCCOM, { RT, RA } },
{ "sfze", XO(31,200,0,0), XORB_MASK, PWRCOM, { RT, RA } },
{ "subfze.", XO(31,200,0,1), XORB_MASK, PPCCOM, { RT, RA } },
{ "sfze.", XO(31,200,0,1), XORB_MASK, PWRCOM, { RT, RA } },
{ "subfzeo", XO(31,200,1,0), XORB_MASK, PPCCOM, { RT, RA } },
{ "sfzeo", XO(31,200,1,0), XORB_MASK, PWRCOM, { RT, RA } },
{ "subfzeo.",XO(31,200,1,1), XORB_MASK, PPCCOM, { RT, RA } },
{ "sfzeo.", XO(31,200,1,1), XORB_MASK, PWRCOM, { RT, RA } },
{ "addze", XO(31,202,0,0), XORB_MASK, PPCCOM, { RT, RA } },
{ "aze", XO(31,202,0,0), XORB_MASK, PWRCOM, { RT, RA } },
{ "addze.", XO(31,202,0,1), XORB_MASK, PPCCOM, { RT, RA } },
{ "aze.", XO(31,202,0,1), XORB_MASK, PWRCOM, { RT, RA } },
{ "addzeo", XO(31,202,1,0), XORB_MASK, PPCCOM, { RT, RA } },
{ "azeo", XO(31,202,1,0), XORB_MASK, PWRCOM, { RT, RA } },
{ "addzeo.", XO(31,202,1,1), XORB_MASK, PPCCOM, { RT, RA } },
{ "azeo.", XO(31,202,1,1), XORB_MASK, PWRCOM, { RT, RA } },
{ "mtsr", X(31,210), XRB_MASK|(1<<20), COM32, { SR, RS } },
{ "stdcx.", XRC(31,214,1), X_MASK, PPC64, { RS, RA0, RB } },
{ "stbx", X(31,215), X_MASK, COM, { RS, RA0, RB } },
{ "sllq", XRC(31,216,0), X_MASK, M601, { RA, RS, RB } },
{ "sllq.", XRC(31,216,1), X_MASK, M601, { RA, RS, RB } },
{ "sleq", XRC(31,217,0), X_MASK, M601, { RA, RS, RB } },
{ "sleq.", XRC(31,217,1), X_MASK, M601, { RA, RS, RB } },
{ "stbxe", X(31,223), X_MASK, BOOKE64, { RS, RA0, RB } },
{ "icblc", X(31,230), X_MASK, PPCCHLK, { CT, RA, RB }},
{ "subfme", XO(31,232,0,0), XORB_MASK, PPCCOM, { RT, RA } },
{ "sfme", XO(31,232,0,0), XORB_MASK, PWRCOM, { RT, RA } },
{ "subfme.", XO(31,232,0,1), XORB_MASK, PPCCOM, { RT, RA } },
{ "sfme.", XO(31,232,0,1), XORB_MASK, PWRCOM, { RT, RA } },
{ "subfmeo", XO(31,232,1,0), XORB_MASK, PPCCOM, { RT, RA } },
{ "sfmeo", XO(31,232,1,0), XORB_MASK, PWRCOM, { RT, RA } },
{ "subfmeo.",XO(31,232,1,1), XORB_MASK, PPCCOM, { RT, RA } },
{ "sfmeo.", XO(31,232,1,1), XORB_MASK, PWRCOM, { RT, RA } },
{ "mulld", XO(31,233,0,0), XO_MASK, PPC64, { RT, RA, RB } },
{ "mulld.", XO(31,233,0,1), XO_MASK, PPC64, { RT, RA, RB } },
{ "mulldo", XO(31,233,1,0), XO_MASK, PPC64, { RT, RA, RB } },
{ "mulldo.", XO(31,233,1,1), XO_MASK, PPC64, { RT, RA, RB } },
{ "addme", XO(31,234,0,0), XORB_MASK, PPCCOM, { RT, RA } },
{ "ame", XO(31,234,0,0), XORB_MASK, PWRCOM, { RT, RA } },
{ "addme.", XO(31,234,0,1), XORB_MASK, PPCCOM, { RT, RA } },
{ "ame.", XO(31,234,0,1), XORB_MASK, PWRCOM, { RT, RA } },
{ "addmeo", XO(31,234,1,0), XORB_MASK, PPCCOM, { RT, RA } },
{ "ameo", XO(31,234,1,0), XORB_MASK, PWRCOM, { RT, RA } },
{ "addmeo.", XO(31,234,1,1), XORB_MASK, PPCCOM, { RT, RA } },
{ "ameo.", XO(31,234,1,1), XORB_MASK, PWRCOM, { RT, RA } },
{ "mullw", XO(31,235,0,0), XO_MASK, PPCCOM, { RT, RA, RB } },
{ "muls", XO(31,235,0,0), XO_MASK, PWRCOM, { RT, RA, RB } },
{ "mullw.", XO(31,235,0,1), XO_MASK, PPCCOM, { RT, RA, RB } },
{ "muls.", XO(31,235,0,1), XO_MASK, PWRCOM, { RT, RA, RB } },
{ "mullwo", XO(31,235,1,0), XO_MASK, PPCCOM, { RT, RA, RB } },
{ "mulso", XO(31,235,1,0), XO_MASK, PWRCOM, { RT, RA, RB } },
{ "mullwo.", XO(31,235,1,1), XO_MASK, PPCCOM, { RT, RA, RB } },
{ "mulso.", XO(31,235,1,1), XO_MASK, PWRCOM, { RT, RA, RB } },
{ "icblce", X(31,238), X_MASK, PPCCHLK64, { CT, RA, RB }},
{ "mtsrin", X(31,242), XRA_MASK, PPC32, { RS, RB } },
{ "mtsri", X(31,242), XRA_MASK, POWER32, { RS, RB } },
{ "dcbtst", X(31,246), X_MASK, PPC, { CT, RA, RB } },
{ "stbux", X(31,247), X_MASK, COM, { RS, RAS, RB } },
{ "slliq", XRC(31,248,0), X_MASK, M601, { RA, RS, SH } },
{ "slliq.", XRC(31,248,1), X_MASK, M601, { RA, RS, SH } },
{ "dcbtste", X(31,253), X_MASK, BOOKE64, { CT, RA, RB } },
{ "stbuxe", X(31,255), X_MASK, BOOKE64, { RS, RAS, RB } },
{ "mfdcrx", X(31,259), X_MASK, BOOKE, { RS, RA } },
{ "doz", XO(31,264,0,0), XO_MASK, M601, { RT, RA, RB } },
{ "doz.", XO(31,264,0,1), XO_MASK, M601, { RT, RA, RB } },
{ "dozo", XO(31,264,1,0), XO_MASK, M601, { RT, RA, RB } },
{ "dozo.", XO(31,264,1,1), XO_MASK, M601, { RT, RA, RB } },
{ "add", XO(31,266,0,0), XO_MASK, PPCCOM, { RT, RA, RB } },
{ "cax", XO(31,266,0,0), XO_MASK, PWRCOM, { RT, RA, RB } },
{ "add.", XO(31,266,0,1), XO_MASK, PPCCOM, { RT, RA, RB } },
{ "cax.", XO(31,266,0,1), XO_MASK, PWRCOM, { RT, RA, RB } },
{ "addo", XO(31,266,1,0), XO_MASK, PPCCOM, { RT, RA, RB } },
{ "caxo", XO(31,266,1,0), XO_MASK, PWRCOM, { RT, RA, RB } },
{ "addo.", XO(31,266,1,1), XO_MASK, PPCCOM, { RT, RA, RB } },
{ "caxo.", XO(31,266,1,1), XO_MASK, PWRCOM, { RT, RA, RB } },
{ "tlbiel", X(31,274), XRTLRA_MASK, POWER4, { RB, L } },
{ "mfapidi", X(31,275), X_MASK, BOOKE, { RT, RA } },
{ "lscbx", XRC(31,277,0), X_MASK, M601, { RT, RA, RB } },
{ "lscbx.", XRC(31,277,1), X_MASK, M601, { RT, RA, RB } },
{ "dcbt", X(31,278), X_MASK, PPC, { CT, RA, RB } },
{ "lhzx", X(31,279), X_MASK, COM, { RT, RA0, RB } },
{ "eqv", XRC(31,284,0), X_MASK, COM, { RA, RS, RB } },
{ "eqv.", XRC(31,284,1), X_MASK, COM, { RA, RS, RB } },
{ "dcbte", X(31,286), X_MASK, BOOKE64, { CT, RA, RB } },
{ "lhzxe", X(31,287), X_MASK, BOOKE64, { RT, RA0, RB } },
{ "tlbie", X(31,306), XRTLRA_MASK, PPC, { RB, L } },
{ "tlbi", X(31,306), XRT_MASK, POWER, { RA0, RB } },
{ "eciwx", X(31,310), X_MASK, PPC, { RT, RA, RB } },
{ "lhzux", X(31,311), X_MASK, COM, { RT, RAL, RB } },
{ "xor", XRC(31,316,0), X_MASK, COM, { RA, RS, RB } },
{ "xor.", XRC(31,316,1), X_MASK, COM, { RA, RS, RB } },
{ "lhzuxe", X(31,319), X_MASK, BOOKE64, { RT, RAL, RB } },
{ "mfexisr", XSPR(31,323,64), XSPR_MASK, PPC403, { RT } },
{ "mfexier", XSPR(31,323,66), XSPR_MASK, PPC403, { RT } },
{ "mfbr0", XSPR(31,323,128), XSPR_MASK, PPC403, { RT } },
{ "mfbr1", XSPR(31,323,129), XSPR_MASK, PPC403, { RT } },
{ "mfbr2", XSPR(31,323,130), XSPR_MASK, PPC403, { RT } },
{ "mfbr3", XSPR(31,323,131), XSPR_MASK, PPC403, { RT } },
{ "mfbr4", XSPR(31,323,132), XSPR_MASK, PPC403, { RT } },
{ "mfbr5", XSPR(31,323,133), XSPR_MASK, PPC403, { RT } },
{ "mfbr6", XSPR(31,323,134), XSPR_MASK, PPC403, { RT } },
{ "mfbr7", XSPR(31,323,135), XSPR_MASK, PPC403, { RT } },
{ "mfbear", XSPR(31,323,144), XSPR_MASK, PPC403, { RT } },
{ "mfbesr", XSPR(31,323,145), XSPR_MASK, PPC403, { RT } },
{ "mfiocr", XSPR(31,323,160), XSPR_MASK, PPC403, { RT } },
{ "mfdmacr0", XSPR(31,323,192), XSPR_MASK, PPC403, { RT } },
{ "mfdmact0", XSPR(31,323,193), XSPR_MASK, PPC403, { RT } },
{ "mfdmada0", XSPR(31,323,194), XSPR_MASK, PPC403, { RT } },
{ "mfdmasa0", XSPR(31,323,195), XSPR_MASK, PPC403, { RT } },
{ "mfdmacc0", XSPR(31,323,196), XSPR_MASK, PPC403, { RT } },
{ "mfdmacr1", XSPR(31,323,200), XSPR_MASK, PPC403, { RT } },
{ "mfdmact1", XSPR(31,323,201), XSPR_MASK, PPC403, { RT } },
{ "mfdmada1", XSPR(31,323,202), XSPR_MASK, PPC403, { RT } },
{ "mfdmasa1", XSPR(31,323,203), XSPR_MASK, PPC403, { RT } },
{ "mfdmacc1", XSPR(31,323,204), XSPR_MASK, PPC403, { RT } },
{ "mfdmacr2", XSPR(31,323,208), XSPR_MASK, PPC403, { RT } },
{ "mfdmact2", XSPR(31,323,209), XSPR_MASK, PPC403, { RT } },
{ "mfdmada2", XSPR(31,323,210), XSPR_MASK, PPC403, { RT } },
{ "mfdmasa2", XSPR(31,323,211), XSPR_MASK, PPC403, { RT } },
{ "mfdmacc2", XSPR(31,323,212), XSPR_MASK, PPC403, { RT } },
{ "mfdmacr3", XSPR(31,323,216), XSPR_MASK, PPC403, { RT } },
{ "mfdmact3", XSPR(31,323,217), XSPR_MASK, PPC403, { RT } },
{ "mfdmada3", XSPR(31,323,218), XSPR_MASK, PPC403, { RT } },
{ "mfdmasa3", XSPR(31,323,219), XSPR_MASK, PPC403, { RT } },
{ "mfdmacc3", XSPR(31,323,220), XSPR_MASK, PPC403, { RT } },
{ "mfdmasr", XSPR(31,323,224), XSPR_MASK, PPC403, { RT } },
{ "mfdcr", X(31,323), X_MASK, PPC403 | BOOKE, { RT, SPR } },
{ "div", XO(31,331,0,0), XO_MASK, M601, { RT, RA, RB } },
{ "div.", XO(31,331,0,1), XO_MASK, M601, { RT, RA, RB } },
{ "divo", XO(31,331,1,0), XO_MASK, M601, { RT, RA, RB } },
{ "divo.", XO(31,331,1,1), XO_MASK, M601, { RT, RA, RB } },
{ "mfpmr", X(31,334), X_MASK, PPCPMR, { RT, PMR }},
{ "mfmq", XSPR(31,339,0), XSPR_MASK, M601, { RT } },
{ "mfxer", XSPR(31,339,1), XSPR_MASK, COM, { RT } },
{ "mfrtcu", XSPR(31,339,4), XSPR_MASK, COM, { RT } },
{ "mfrtcl", XSPR(31,339,5), XSPR_MASK, COM, { RT } },
{ "mfdec", XSPR(31,339,6), XSPR_MASK, MFDEC1, { RT } },
{ "mfdec", XSPR(31,339,22), XSPR_MASK, MFDEC2, { RT } },
{ "mflr", XSPR(31,339,8), XSPR_MASK, COM, { RT } },
{ "mfctr", XSPR(31,339,9), XSPR_MASK, COM, { RT } },
{ "mftid", XSPR(31,339,17), XSPR_MASK, POWER, { RT } },
{ "mfdsisr", XSPR(31,339,18), XSPR_MASK, COM, { RT } },
{ "mfdar", XSPR(31,339,19), XSPR_MASK, COM, { RT } },
{ "mfsdr0", XSPR(31,339,24), XSPR_MASK, POWER, { RT } },
{ "mfsdr1", XSPR(31,339,25), XSPR_MASK, COM, { RT } },
{ "mfsrr0", XSPR(31,339,26), XSPR_MASK, COM, { RT } },
{ "mfsrr1", XSPR(31,339,27), XSPR_MASK, COM, { RT } },
{ "mfcfar", XSPR(31,339,28), XSPR_MASK, POWER6, { RT } },
{ "mfpid", XSPR(31,339,48), XSPR_MASK, BOOKE, { RT } },
{ "mfpid", XSPR(31,339,945), XSPR_MASK, PPC403, { RT } },
{ "mfcsrr0", XSPR(31,339,58), XSPR_MASK, BOOKE, { RT } },
{ "mfcsrr1", XSPR(31,339,59), XSPR_MASK, BOOKE, { RT } },
{ "mfdear", XSPR(31,339,61), XSPR_MASK, BOOKE, { RT } },
{ "mfdear", XSPR(31,339,981), XSPR_MASK, PPC403, { RT } },
{ "mfesr", XSPR(31,339,62), XSPR_MASK, BOOKE, { RT } },
{ "mfesr", XSPR(31,339,980), XSPR_MASK, PPC403, { RT } },
{ "mfivpr", XSPR(31,339,63), XSPR_MASK, BOOKE, { RT } },
{ "mfcmpa", XSPR(31,339,144), XSPR_MASK, PPC860, { RT } },
{ "mfcmpb", XSPR(31,339,145), XSPR_MASK, PPC860, { RT } },
{ "mfcmpc", XSPR(31,339,146), XSPR_MASK, PPC860, { RT } },
{ "mfcmpd", XSPR(31,339,147), XSPR_MASK, PPC860, { RT } },
{ "mficr", XSPR(31,339,148), XSPR_MASK, PPC860, { RT } },
{ "mfder", XSPR(31,339,149), XSPR_MASK, PPC860, { RT } },
{ "mfcounta", XSPR(31,339,150), XSPR_MASK, PPC860, { RT } },
{ "mfcountb", XSPR(31,339,151), XSPR_MASK, PPC860, { RT } },
{ "mfcmpe", XSPR(31,339,152), XSPR_MASK, PPC860, { RT } },
{ "mfcmpf", XSPR(31,339,153), XSPR_MASK, PPC860, { RT } },
{ "mfcmpg", XSPR(31,339,154), XSPR_MASK, PPC860, { RT } },
{ "mfcmph", XSPR(31,339,155), XSPR_MASK, PPC860, { RT } },
{ "mflctrl1", XSPR(31,339,156), XSPR_MASK, PPC860, { RT } },
{ "mflctrl2", XSPR(31,339,157), XSPR_MASK, PPC860, { RT } },
{ "mfictrl", XSPR(31,339,158), XSPR_MASK, PPC860, { RT } },
{ "mfbar", XSPR(31,339,159), XSPR_MASK, PPC860, { RT } },
{ "mfvrsave", XSPR(31,339,256), XSPR_MASK, PPCVEC, { RT } },
{ "mfusprg0", XSPR(31,339,256), XSPR_MASK, BOOKE, { RT } },
{ "mftb", X(31,371), X_MASK, CLASSIC, { RT, TBR } },
{ "mftb", XSPR(31,339,268), XSPR_MASK, BOOKE, { RT } },
{ "mftbl", XSPR(31,371,268), XSPR_MASK, CLASSIC, { RT } },
{ "mftbl", XSPR(31,339,268), XSPR_MASK, BOOKE, { RT } },
{ "mftbu", XSPR(31,371,269), XSPR_MASK, CLASSIC, { RT } },
{ "mftbu", XSPR(31,339,269), XSPR_MASK, BOOKE, { RT } },
{ "mfsprg", XSPR(31,339,256), XSPRG_MASK, PPC, { RT, SPRG } },
{ "mfsprg0", XSPR(31,339,272), XSPR_MASK, PPC, { RT } },
{ "mfsprg1", XSPR(31,339,273), XSPR_MASK, PPC, { RT } },
{ "mfsprg2", XSPR(31,339,274), XSPR_MASK, PPC, { RT } },
{ "mfsprg3", XSPR(31,339,275), XSPR_MASK, PPC, { RT } },
{ "mfsprg4", XSPR(31,339,260), XSPR_MASK, PPC405 | BOOKE, { RT } },
{ "mfsprg5", XSPR(31,339,261), XSPR_MASK, PPC405 | BOOKE, { RT } },
{ "mfsprg6", XSPR(31,339,262), XSPR_MASK, PPC405 | BOOKE, { RT } },
{ "mfsprg7", XSPR(31,339,263), XSPR_MASK, PPC405 | BOOKE, { RT } },
{ "mfasr", XSPR(31,339,280), XSPR_MASK, PPC64, { RT } },
{ "mfear", XSPR(31,339,282), XSPR_MASK, PPC, { RT } },
{ "mfpir", XSPR(31,339,286), XSPR_MASK, BOOKE, { RT } },
{ "mfpvr", XSPR(31,339,287), XSPR_MASK, PPC, { RT } },
{ "mfdbsr", XSPR(31,339,304), XSPR_MASK, BOOKE, { RT } },
{ "mfdbsr", XSPR(31,339,1008), XSPR_MASK, PPC403, { RT } },
{ "mfdbcr0", XSPR(31,339,308), XSPR_MASK, BOOKE, { RT } },
{ "mfdbcr0", XSPR(31,339,1010), XSPR_MASK, PPC405, { RT } },
{ "mfdbcr1", XSPR(31,339,309), XSPR_MASK, BOOKE, { RT } },
{ "mfdbcr1", XSPR(31,339,957), XSPR_MASK, PPC405, { RT } },
{ "mfdbcr2", XSPR(31,339,310), XSPR_MASK, BOOKE, { RT } },
{ "mfiac1", XSPR(31,339,312), XSPR_MASK, BOOKE, { RT } },
{ "mfiac1", XSPR(31,339,1012), XSPR_MASK, PPC403, { RT } },
{ "mfiac2", XSPR(31,339,313), XSPR_MASK, BOOKE, { RT } },
{ "mfiac2", XSPR(31,339,1013), XSPR_MASK, PPC403, { RT } },
{ "mfiac3", XSPR(31,339,314), XSPR_MASK, BOOKE, { RT } },
{ "mfiac3", XSPR(31,339,948), XSPR_MASK, PPC405, { RT } },
{ "mfiac4", XSPR(31,339,315), XSPR_MASK, BOOKE, { RT } },
{ "mfiac4", XSPR(31,339,949), XSPR_MASK, PPC405, { RT } },
{ "mfdac1", XSPR(31,339,316), XSPR_MASK, BOOKE, { RT } },
{ "mfdac1", XSPR(31,339,1014), XSPR_MASK, PPC403, { RT } },
{ "mfdac2", XSPR(31,339,317), XSPR_MASK, BOOKE, { RT } },
{ "mfdac2", XSPR(31,339,1015), XSPR_MASK, PPC403, { RT } },
{ "mfdvc1", XSPR(31,339,318), XSPR_MASK, BOOKE, { RT } },
{ "mfdvc1", XSPR(31,339,950), XSPR_MASK, PPC405, { RT } },
{ "mfdvc2", XSPR(31,339,319), XSPR_MASK, BOOKE, { RT } },
{ "mfdvc2", XSPR(31,339,951), XSPR_MASK, PPC405, { RT } },
{ "mftsr", XSPR(31,339,336), XSPR_MASK, BOOKE, { RT } },
{ "mftsr", XSPR(31,339,984), XSPR_MASK, PPC403, { RT } },
{ "mftcr", XSPR(31,339,340), XSPR_MASK, BOOKE, { RT } },
{ "mftcr", XSPR(31,339,986), XSPR_MASK, PPC403, { RT } },
{ "mfivor0", XSPR(31,339,400), XSPR_MASK, BOOKE, { RT } },
{ "mfivor1", XSPR(31,339,401), XSPR_MASK, BOOKE, { RT } },
{ "mfivor2", XSPR(31,339,402), XSPR_MASK, BOOKE, { RT } },
{ "mfivor3", XSPR(31,339,403), XSPR_MASK, BOOKE, { RT } },
{ "mfivor4", XSPR(31,339,404), XSPR_MASK, BOOKE, { RT } },
{ "mfivor5", XSPR(31,339,405), XSPR_MASK, BOOKE, { RT } },
{ "mfivor6", XSPR(31,339,406), XSPR_MASK, BOOKE, { RT } },
{ "mfivor7", XSPR(31,339,407), XSPR_MASK, BOOKE, { RT } },
{ "mfivor8", XSPR(31,339,408), XSPR_MASK, BOOKE, { RT } },
{ "mfivor9", XSPR(31,339,409), XSPR_MASK, BOOKE, { RT } },
{ "mfivor10", XSPR(31,339,410), XSPR_MASK, BOOKE, { RT } },
{ "mfivor11", XSPR(31,339,411), XSPR_MASK, BOOKE, { RT } },
{ "mfivor12", XSPR(31,339,412), XSPR_MASK, BOOKE, { RT } },
{ "mfivor13", XSPR(31,339,413), XSPR_MASK, BOOKE, { RT } },
{ "mfivor14", XSPR(31,339,414), XSPR_MASK, BOOKE, { RT } },
{ "mfivor15", XSPR(31,339,415), XSPR_MASK, BOOKE, { RT } },
{ "mfspefscr", XSPR(31,339,512), XSPR_MASK, PPCSPE, { RT } },
{ "mfbbear", XSPR(31,339,513), XSPR_MASK, PPCBRLK, { RT } },
{ "mfbbtar", XSPR(31,339,514), XSPR_MASK, PPCBRLK, { RT } },
{ "mfivor32", XSPR(31,339,528), XSPR_MASK, PPCSPE, { RT } },
{ "mfivor33", XSPR(31,339,529), XSPR_MASK, PPCSPE, { RT } },
{ "mfivor34", XSPR(31,339,530), XSPR_MASK, PPCSPE, { RT } },
{ "mfivor35", XSPR(31,339,531), XSPR_MASK, PPCPMR, { RT } },
{ "mfibatu", XSPR(31,339,528), XSPRBAT_MASK, PPC, { RT, SPRBAT } },
{ "mfibatl", XSPR(31,339,529), XSPRBAT_MASK, PPC, { RT, SPRBAT } },
{ "mfdbatu", XSPR(31,339,536), XSPRBAT_MASK, PPC, { RT, SPRBAT } },
{ "mfdbatl", XSPR(31,339,537), XSPRBAT_MASK, PPC, { RT, SPRBAT } },
{ "mfic_cst", XSPR(31,339,560), XSPR_MASK, PPC860, { RT } },
{ "mfic_adr", XSPR(31,339,561), XSPR_MASK, PPC860, { RT } },
{ "mfic_dat", XSPR(31,339,562), XSPR_MASK, PPC860, { RT } },
{ "mfdc_cst", XSPR(31,339,568), XSPR_MASK, PPC860, { RT } },
{ "mfdc_adr", XSPR(31,339,569), XSPR_MASK, PPC860, { RT } },
{ "mfmcsrr0", XSPR(31,339,570), XSPR_MASK, PPCRFMCI, { RT } },
{ "mfdc_dat", XSPR(31,339,570), XSPR_MASK, PPC860, { RT } },
{ "mfmcsrr1", XSPR(31,339,571), XSPR_MASK, PPCRFMCI, { RT } },
{ "mfmcsr", XSPR(31,339,572), XSPR_MASK, PPCRFMCI, { RT } },
{ "mfmcar", XSPR(31,339,573), XSPR_MASK, PPCRFMCI, { RT } },
{ "mfdpdr", XSPR(31,339,630), XSPR_MASK, PPC860, { RT } },
{ "mfdpir", XSPR(31,339,631), XSPR_MASK, PPC860, { RT } },
{ "mfimmr", XSPR(31,339,638), XSPR_MASK, PPC860, { RT } },
{ "mfmi_ctr", XSPR(31,339,784), XSPR_MASK, PPC860, { RT } },
{ "mfmi_ap", XSPR(31,339,786), XSPR_MASK, PPC860, { RT } },
{ "mfmi_epn", XSPR(31,339,787), XSPR_MASK, PPC860, { RT } },
{ "mfmi_twc", XSPR(31,339,789), XSPR_MASK, PPC860, { RT } },
{ "mfmi_rpn", XSPR(31,339,790), XSPR_MASK, PPC860, { RT } },
{ "mfmd_ctr", XSPR(31,339,792), XSPR_MASK, PPC860, { RT } },
{ "mfm_casid", XSPR(31,339,793), XSPR_MASK, PPC860, { RT } },
{ "mfmd_ap", XSPR(31,339,794), XSPR_MASK, PPC860, { RT } },
{ "mfmd_epn", XSPR(31,339,795), XSPR_MASK, PPC860, { RT } },
{ "mfmd_twb", XSPR(31,339,796), XSPR_MASK, PPC860, { RT } },
{ "mfmd_twc", XSPR(31,339,797), XSPR_MASK, PPC860, { RT } },
{ "mfmd_rpn", XSPR(31,339,798), XSPR_MASK, PPC860, { RT } },
{ "mfm_tw", XSPR(31,339,799), XSPR_MASK, PPC860, { RT } },
{ "mfmi_dbcam", XSPR(31,339,816), XSPR_MASK, PPC860, { RT } },
{ "mfmi_dbram0",XSPR(31,339,817), XSPR_MASK, PPC860, { RT } },
{ "mfmi_dbram1",XSPR(31,339,818), XSPR_MASK, PPC860, { RT } },
{ "mfmd_dbcam", XSPR(31,339,824), XSPR_MASK, PPC860, { RT } },
{ "mfmd_dbram0",XSPR(31,339,825), XSPR_MASK, PPC860, { RT } },
{ "mfmd_dbram1",XSPR(31,339,826), XSPR_MASK, PPC860, { RT } },
{ "mfummcr0", XSPR(31,339,936), XSPR_MASK, PPC750, { RT } },
{ "mfupmc1", XSPR(31,339,937), XSPR_MASK, PPC750, { RT } },
{ "mfupmc2", XSPR(31,339,938), XSPR_MASK, PPC750, { RT } },
{ "mfusia", XSPR(31,339,939), XSPR_MASK, PPC750, { RT } },
{ "mfummcr1", XSPR(31,339,940), XSPR_MASK, PPC750, { RT } },
{ "mfupmc3", XSPR(31,339,941), XSPR_MASK, PPC750, { RT } },
{ "mfupmc4", XSPR(31,339,942), XSPR_MASK, PPC750, { RT } },
{ "mfzpr", XSPR(31,339,944), XSPR_MASK, PPC403, { RT } },
{ "mfccr0", XSPR(31,339,947), XSPR_MASK, PPC405, { RT } },
{ "mfmmcr0", XSPR(31,339,952), XSPR_MASK, PPC750, { RT } },
{ "mfpmc1", XSPR(31,339,953), XSPR_MASK, PPC750, { RT } },
{ "mfsgr", XSPR(31,339,953), XSPR_MASK, PPC403, { RT } },
{ "mfpmc2", XSPR(31,339,954), XSPR_MASK, PPC750, { RT } },
{ "mfdcwr", XSPR(31,339,954), XSPR_MASK, PPC403, { RT } },
{ "mfsia", XSPR(31,339,955), XSPR_MASK, PPC750, { RT } },
{ "mfsler", XSPR(31,339,955), XSPR_MASK, PPC405, { RT } },
{ "mfmmcr1", XSPR(31,339,956), XSPR_MASK, PPC750, { RT } },
{ "mfsu0r", XSPR(31,339,956), XSPR_MASK, PPC405, { RT } },
{ "mfpmc3", XSPR(31,339,957), XSPR_MASK, PPC750, { RT } },
{ "mfpmc4", XSPR(31,339,958), XSPR_MASK, PPC750, { RT } },
{ "mficdbdr", XSPR(31,339,979), XSPR_MASK, PPC403, { RT } },
{ "mfevpr", XSPR(31,339,982), XSPR_MASK, PPC403, { RT } },
{ "mfcdbcr", XSPR(31,339,983), XSPR_MASK, PPC403, { RT } },
{ "mfpit", XSPR(31,339,987), XSPR_MASK, PPC403, { RT } },
{ "mftbhi", XSPR(31,339,988), XSPR_MASK, PPC403, { RT } },
{ "mftblo", XSPR(31,339,989), XSPR_MASK, PPC403, { RT } },
{ "mfsrr2", XSPR(31,339,990), XSPR_MASK, PPC403, { RT } },
{ "mfsrr3", XSPR(31,339,991), XSPR_MASK, PPC403, { RT } },
{ "mfl2cr", XSPR(31,339,1017), XSPR_MASK, PPC750, { RT } },
{ "mfdccr", XSPR(31,339,1018), XSPR_MASK, PPC403, { RT } },
{ "mficcr", XSPR(31,339,1019), XSPR_MASK, PPC403, { RT } },
{ "mfictc", XSPR(31,339,1019), XSPR_MASK, PPC750, { RT } },
{ "mfpbl1", XSPR(31,339,1020), XSPR_MASK, PPC403, { RT } },
{ "mfthrm1", XSPR(31,339,1020), XSPR_MASK, PPC750, { RT } },
{ "mfpbu1", XSPR(31,339,1021), XSPR_MASK, PPC403, { RT } },
{ "mfthrm2", XSPR(31,339,1021), XSPR_MASK, PPC750, { RT } },
{ "mfpbl2", XSPR(31,339,1022), XSPR_MASK, PPC403, { RT } },
{ "mfthrm3", XSPR(31,339,1022), XSPR_MASK, PPC750, { RT } },
{ "mfpbu2", XSPR(31,339,1023), XSPR_MASK, PPC403, { RT } },
{ "mfspr", X(31,339), X_MASK, COM, { RT, SPR } },
{ "lwax", X(31,341), X_MASK, PPC64, { RT, RA0, RB } },
{ "dst", XDSS(31,342,0), XDSS_MASK, PPCVEC, { RA, RB, STRM } },
{ "dstt", XDSS(31,342,1), XDSS_MASK, PPCVEC, { RA, RB, STRM } },
{ "lhax", X(31,343), X_MASK, COM, { RT, RA0, RB } },
{ "lhaxe", X(31,351), X_MASK, BOOKE64, { RT, RA0, RB } },
{ "dstst", XDSS(31,374,0), XDSS_MASK, PPCVEC, { RA, RB, STRM } },
{ "dststt", XDSS(31,374,1), XDSS_MASK, PPCVEC, { RA, RB, STRM } },
{ "dccci", X(31,454), XRT_MASK, PPC403|PPC440, { RA, RB } },
{ "abs", XO(31,360,0,0), XORB_MASK, M601, { RT, RA } },
{ "abs.", XO(31,360,0,1), XORB_MASK, M601, { RT, RA } },
{ "abso", XO(31,360,1,0), XORB_MASK, M601, { RT, RA } },
{ "abso.", XO(31,360,1,1), XORB_MASK, M601, { RT, RA } },
{ "divs", XO(31,363,0,0), XO_MASK, M601, { RT, RA, RB } },
{ "divs.", XO(31,363,0,1), XO_MASK, M601, { RT, RA, RB } },
{ "divso", XO(31,363,1,0), XO_MASK, M601, { RT, RA, RB } },
{ "divso.", XO(31,363,1,1), XO_MASK, M601, { RT, RA, RB } },
{ "tlbia", X(31,370), 0xffffffff, PPC, { 0 } },
{ "lwaux", X(31,373), X_MASK, PPC64, { RT, RAL, RB } },
{ "lhaux", X(31,375), X_MASK, COM, { RT, RAL, RB } },
{ "lhauxe", X(31,383), X_MASK, BOOKE64, { RT, RAL, RB } },
{ "mtdcrx", X(31,387), X_MASK, BOOKE, { RA, RS } },
{ "dcblc", X(31,390), X_MASK, PPCCHLK, { CT, RA, RB }},
{ "subfe64", XO(31,392,0,0), XO_MASK, BOOKE64, { RT, RA, RB } },
{ "subfe64o",XO(31,392,1,0), XO_MASK, BOOKE64, { RT, RA, RB } },
{ "adde64", XO(31,394,0,0), XO_MASK, BOOKE64, { RT, RA, RB } },
{ "adde64o", XO(31,394,1,0), XO_MASK, BOOKE64, { RT, RA, RB } },
{ "dcblce", X(31,398), X_MASK, PPCCHLK64, { CT, RA, RB }},
{ "slbmte", X(31,402), XRA_MASK, PPC64, { RS, RB } },
{ "sthx", X(31,407), X_MASK, COM, { RS, RA0, RB } },
{ "cmpb", X(31,508), X_MASK, POWER6, { RA, RS, RB } },
{ "lfqx", X(31,791), X_MASK, POWER2, { FRT, RA, RB } },
{ "lfdpx", X(31,791), X_MASK, POWER6, { FRT, RA, RB } },
{ "lfqux", X(31,823), X_MASK, POWER2, { FRT, RA, RB } },
{ "stfqx", X(31,919), X_MASK, POWER2, { FRS, RA, RB } },
{ "stfdpx", X(31,919), X_MASK, POWER6, { FRS, RA, RB } },
{ "stfqux", X(31,951), X_MASK, POWER2, { FRS, RA, RB } },
{ "orc", XRC(31,412,0), X_MASK, COM, { RA, RS, RB } },
{ "orc.", XRC(31,412,1), X_MASK, COM, { RA, RS, RB } },
{ "sradi", XS(31,413,0), XS_MASK, PPC64, { RA, RS, SH6 } },
{ "sradi.", XS(31,413,1), XS_MASK, PPC64, { RA, RS, SH6 } },
{ "sthxe", X(31,415), X_MASK, BOOKE64, { RS, RA0, RB } },
{ "slbie", X(31,434), XRTRA_MASK, PPC64, { RB } },
{ "ecowx", X(31,438), X_MASK, PPC, { RT, RA, RB } },
{ "sthux", X(31,439), X_MASK, COM, { RS, RAS, RB } },
{ "sthuxe", X(31,447), X_MASK, BOOKE64, { RS, RAS, RB } },
{ "cctpl", 0x7c210b78, 0xffffffff, CELL, { 0 }},
{ "cctpm", 0x7c421378, 0xffffffff, CELL, { 0 }},
{ "cctph", 0x7c631b78, 0xffffffff, CELL, { 0 }},
{ "db8cyc", 0x7f9ce378, 0xffffffff, CELL, { 0 }},
{ "db10cyc", 0x7fbdeb78, 0xffffffff, CELL, { 0 }},
{ "db12cyc", 0x7fdef378, 0xffffffff, CELL, { 0 }},
{ "db16cyc", 0x7ffffb78, 0xffffffff, CELL, { 0 }},
{ "mr", XRC(31,444,0), X_MASK, COM, { RA, RS, RBS } },
{ "or", XRC(31,444,0), X_MASK, COM, { RA, RS, RB } },
{ "mr.", XRC(31,444,1), X_MASK, COM, { RA, RS, RBS } },
{ "or.", XRC(31,444,1), X_MASK, COM, { RA, RS, RB } },
{ "mtexisr", XSPR(31,451,64), XSPR_MASK, PPC403, { RS } },
{ "mtexier", XSPR(31,451,66), XSPR_MASK, PPC403, { RS } },
{ "mtbr0", XSPR(31,451,128), XSPR_MASK, PPC403, { RS } },
{ "mtbr1", XSPR(31,451,129), XSPR_MASK, PPC403, { RS } },
{ "mtbr2", XSPR(31,451,130), XSPR_MASK, PPC403, { RS } },
{ "mtbr3", XSPR(31,451,131), XSPR_MASK, PPC403, { RS } },
{ "mtbr4", XSPR(31,451,132), XSPR_MASK, PPC403, { RS } },
{ "mtbr5", XSPR(31,451,133), XSPR_MASK, PPC403, { RS } },
{ "mtbr6", XSPR(31,451,134), XSPR_MASK, PPC403, { RS } },
{ "mtbr7", XSPR(31,451,135), XSPR_MASK, PPC403, { RS } },
{ "mtbear", XSPR(31,451,144), XSPR_MASK, PPC403, { RS } },
{ "mtbesr", XSPR(31,451,145), XSPR_MASK, PPC403, { RS } },
{ "mtiocr", XSPR(31,451,160), XSPR_MASK, PPC403, { RS } },
{ "mtdmacr0", XSPR(31,451,192), XSPR_MASK, PPC403, { RS } },
{ "mtdmact0", XSPR(31,451,193), XSPR_MASK, PPC403, { RS } },
{ "mtdmada0", XSPR(31,451,194), XSPR_MASK, PPC403, { RS } },
{ "mtdmasa0", XSPR(31,451,195), XSPR_MASK, PPC403, { RS } },
{ "mtdmacc0", XSPR(31,451,196), XSPR_MASK, PPC403, { RS } },
{ "mtdmacr1", XSPR(31,451,200), XSPR_MASK, PPC403, { RS } },
{ "mtdmact1", XSPR(31,451,201), XSPR_MASK, PPC403, { RS } },
{ "mtdmada1", XSPR(31,451,202), XSPR_MASK, PPC403, { RS } },
{ "mtdmasa1", XSPR(31,451,203), XSPR_MASK, PPC403, { RS } },
{ "mtdmacc1", XSPR(31,451,204), XSPR_MASK, PPC403, { RS } },
{ "mtdmacr2", XSPR(31,451,208), XSPR_MASK, PPC403, { RS } },
{ "mtdmact2", XSPR(31,451,209), XSPR_MASK, PPC403, { RS } },
{ "mtdmada2", XSPR(31,451,210), XSPR_MASK, PPC403, { RS } },
{ "mtdmasa2", XSPR(31,451,211), XSPR_MASK, PPC403, { RS } },
{ "mtdmacc2", XSPR(31,451,212), XSPR_MASK, PPC403, { RS } },
{ "mtdmacr3", XSPR(31,451,216), XSPR_MASK, PPC403, { RS } },
{ "mtdmact3", XSPR(31,451,217), XSPR_MASK, PPC403, { RS } },
{ "mtdmada3", XSPR(31,451,218), XSPR_MASK, PPC403, { RS } },
{ "mtdmasa3", XSPR(31,451,219), XSPR_MASK, PPC403, { RS } },
{ "mtdmacc3", XSPR(31,451,220), XSPR_MASK, PPC403, { RS } },
{ "mtdmasr", XSPR(31,451,224), XSPR_MASK, PPC403, { RS } },
{ "mtdcr", X(31,451), X_MASK, PPC403 | BOOKE, { SPR, RS } },
{ "subfze64",XO(31,456,0,0), XORB_MASK, BOOKE64, { RT, RA } },
{ "subfze64o",XO(31,456,1,0), XORB_MASK, BOOKE64, { RT, RA } },
{ "divdu", XO(31,457,0,0), XO_MASK, PPC64, { RT, RA, RB } },
{ "divdu.", XO(31,457,0,1), XO_MASK, PPC64, { RT, RA, RB } },
{ "divduo", XO(31,457,1,0), XO_MASK, PPC64, { RT, RA, RB } },
{ "divduo.", XO(31,457,1,1), XO_MASK, PPC64, { RT, RA, RB } },
{ "addze64", XO(31,458,0,0), XORB_MASK, BOOKE64, { RT, RA } },
{ "addze64o",XO(31,458,1,0), XORB_MASK, BOOKE64, { RT, RA } },
{ "divwu", XO(31,459,0,0), XO_MASK, PPC, { RT, RA, RB } },
{ "divwu.", XO(31,459,0,1), XO_MASK, PPC, { RT, RA, RB } },
{ "divwuo", XO(31,459,1,0), XO_MASK, PPC, { RT, RA, RB } },
{ "divwuo.", XO(31,459,1,1), XO_MASK, PPC, { RT, RA, RB } },
{ "mtmq", XSPR(31,467,0), XSPR_MASK, M601, { RS } },
{ "mtxer", XSPR(31,467,1), XSPR_MASK, COM, { RS } },
{ "mtlr", XSPR(31,467,8), XSPR_MASK, COM, { RS } },
{ "mtctr", XSPR(31,467,9), XSPR_MASK, COM, { RS } },
{ "mttid", XSPR(31,467,17), XSPR_MASK, POWER, { RS } },
{ "mtdsisr", XSPR(31,467,18), XSPR_MASK, COM, { RS } },
{ "mtdar", XSPR(31,467,19), XSPR_MASK, COM, { RS } },
{ "mtrtcu", XSPR(31,467,20), XSPR_MASK, COM, { RS } },
{ "mtrtcl", XSPR(31,467,21), XSPR_MASK, COM, { RS } },
{ "mtdec", XSPR(31,467,22), XSPR_MASK, COM, { RS } },
{ "mtsdr0", XSPR(31,467,24), XSPR_MASK, POWER, { RS } },
{ "mtsdr1", XSPR(31,467,25), XSPR_MASK, COM, { RS } },
{ "mtsrr0", XSPR(31,467,26), XSPR_MASK, COM, { RS } },
{ "mtsrr1", XSPR(31,467,27), XSPR_MASK, COM, { RS } },
{ "mtcfar", XSPR(31,467,28), XSPR_MASK, POWER6, { RS } },
{ "mtpid", XSPR(31,467,48), XSPR_MASK, BOOKE, { RS } },
{ "mtpid", XSPR(31,467,945), XSPR_MASK, PPC403, { RS } },
{ "mtdecar", XSPR(31,467,54), XSPR_MASK, BOOKE, { RS } },
{ "mtcsrr0", XSPR(31,467,58), XSPR_MASK, BOOKE, { RS } },
{ "mtcsrr1", XSPR(31,467,59), XSPR_MASK, BOOKE, { RS } },
{ "mtdear", XSPR(31,467,61), XSPR_MASK, BOOKE, { RS } },
{ "mtdear", XSPR(31,467,981), XSPR_MASK, PPC403, { RS } },
{ "mtesr", XSPR(31,467,62), XSPR_MASK, BOOKE, { RS } },
{ "mtesr", XSPR(31,467,980), XSPR_MASK, PPC403, { RS } },
{ "mtivpr", XSPR(31,467,63), XSPR_MASK, BOOKE, { RS } },
{ "mtcmpa", XSPR(31,467,144), XSPR_MASK, PPC860, { RS } },
{ "mtcmpb", XSPR(31,467,145), XSPR_MASK, PPC860, { RS } },
{ "mtcmpc", XSPR(31,467,146), XSPR_MASK, PPC860, { RS } },
{ "mtcmpd", XSPR(31,467,147), XSPR_MASK, PPC860, { RS } },
{ "mticr", XSPR(31,467,148), XSPR_MASK, PPC860, { RS } },
{ "mtder", XSPR(31,467,149), XSPR_MASK, PPC860, { RS } },
{ "mtcounta", XSPR(31,467,150), XSPR_MASK, PPC860, { RS } },
{ "mtcountb", XSPR(31,467,151), XSPR_MASK, PPC860, { RS } },
{ "mtcmpe", XSPR(31,467,152), XSPR_MASK, PPC860, { RS } },
{ "mtcmpf", XSPR(31,467,153), XSPR_MASK, PPC860, { RS } },
{ "mtcmpg", XSPR(31,467,154), XSPR_MASK, PPC860, { RS } },
{ "mtcmph", XSPR(31,467,155), XSPR_MASK, PPC860, { RS } },
{ "mtlctrl1", XSPR(31,467,156), XSPR_MASK, PPC860, { RS } },
{ "mtlctrl2", XSPR(31,467,157), XSPR_MASK, PPC860, { RS } },
{ "mtictrl", XSPR(31,467,158), XSPR_MASK, PPC860, { RS } },
{ "mtbar", XSPR(31,467,159), XSPR_MASK, PPC860, { RS } },
{ "mtvrsave", XSPR(31,467,256), XSPR_MASK, PPCVEC, { RS } },
{ "mtusprg0", XSPR(31,467,256), XSPR_MASK, BOOKE, { RS } },
{ "mtsprg", XSPR(31,467,256), XSPRG_MASK,PPC, { SPRG, RS } },
{ "mtsprg0", XSPR(31,467,272), XSPR_MASK, PPC, { RS } },
{ "mtsprg1", XSPR(31,467,273), XSPR_MASK, PPC, { RS } },
{ "mtsprg2", XSPR(31,467,274), XSPR_MASK, PPC, { RS } },
{ "mtsprg3", XSPR(31,467,275), XSPR_MASK, PPC, { RS } },
{ "mtsprg4", XSPR(31,467,276), XSPR_MASK, PPC405 | BOOKE, { RS } },
{ "mtsprg5", XSPR(31,467,277), XSPR_MASK, PPC405 | BOOKE, { RS } },
{ "mtsprg6", XSPR(31,467,278), XSPR_MASK, PPC405 | BOOKE, { RS } },
{ "mtsprg7", XSPR(31,467,279), XSPR_MASK, PPC405 | BOOKE, { RS } },
{ "mtasr", XSPR(31,467,280), XSPR_MASK, PPC64, { RS } },
{ "mtear", XSPR(31,467,282), XSPR_MASK, PPC, { RS } },
{ "mttbl", XSPR(31,467,284), XSPR_MASK, PPC, { RS } },
{ "mttbu", XSPR(31,467,285), XSPR_MASK, PPC, { RS } },
{ "mtdbsr", XSPR(31,467,304), XSPR_MASK, BOOKE, { RS } },
{ "mtdbsr", XSPR(31,467,1008), XSPR_MASK, PPC403, { RS } },
{ "mtdbcr0", XSPR(31,467,308), XSPR_MASK, BOOKE, { RS } },
{ "mtdbcr0", XSPR(31,467,1010), XSPR_MASK, PPC405, { RS } },
{ "mtdbcr1", XSPR(31,467,309), XSPR_MASK, BOOKE, { RS } },
{ "mtdbcr1", XSPR(31,467,957), XSPR_MASK, PPC405, { RS } },
{ "mtdbcr2", XSPR(31,467,310), XSPR_MASK, BOOKE, { RS } },
{ "mtiac1", XSPR(31,467,312), XSPR_MASK, BOOKE, { RS } },
{ "mtiac1", XSPR(31,467,1012), XSPR_MASK, PPC403, { RS } },
{ "mtiac2", XSPR(31,467,313), XSPR_MASK, BOOKE, { RS } },
{ "mtiac2", XSPR(31,467,1013), XSPR_MASK, PPC403, { RS } },
{ "mtiac3", XSPR(31,467,314), XSPR_MASK, BOOKE, { RS } },
{ "mtiac3", XSPR(31,467,948), XSPR_MASK, PPC405, { RS } },
{ "mtiac4", XSPR(31,467,315), XSPR_MASK, BOOKE, { RS } },
{ "mtiac4", XSPR(31,467,949), XSPR_MASK, PPC405, { RS } },
{ "mtdac1", XSPR(31,467,316), XSPR_MASK, BOOKE, { RS } },
{ "mtdac1", XSPR(31,467,1014), XSPR_MASK, PPC403, { RS } },
{ "mtdac2", XSPR(31,467,317), XSPR_MASK, BOOKE, { RS } },
{ "mtdac2", XSPR(31,467,1015), XSPR_MASK, PPC403, { RS } },
{ "mtdvc1", XSPR(31,467,318), XSPR_MASK, BOOKE, { RS } },
{ "mtdvc1", XSPR(31,467,950), XSPR_MASK, PPC405, { RS } },
{ "mtdvc2", XSPR(31,467,319), XSPR_MASK, BOOKE, { RS } },
{ "mtdvc2", XSPR(31,467,951), XSPR_MASK, PPC405, { RS } },
{ "mttsr", XSPR(31,467,336), XSPR_MASK, BOOKE, { RS } },
{ "mttsr", XSPR(31,467,984), XSPR_MASK, PPC403, { RS } },
{ "mttcr", XSPR(31,467,340), XSPR_MASK, BOOKE, { RS } },
{ "mttcr", XSPR(31,467,986), XSPR_MASK, PPC403, { RS } },
{ "mtivor0", XSPR(31,467,400), XSPR_MASK, BOOKE, { RS } },
{ "mtivor1", XSPR(31,467,401), XSPR_MASK, BOOKE, { RS } },
{ "mtivor2", XSPR(31,467,402), XSPR_MASK, BOOKE, { RS } },
{ "mtivor3", XSPR(31,467,403), XSPR_MASK, BOOKE, { RS } },
{ "mtivor4", XSPR(31,467,404), XSPR_MASK, BOOKE, { RS } },
{ "mtivor5", XSPR(31,467,405), XSPR_MASK, BOOKE, { RS } },
{ "mtivor6", XSPR(31,467,406), XSPR_MASK, BOOKE, { RS } },
{ "mtivor7", XSPR(31,467,407), XSPR_MASK, BOOKE, { RS } },
{ "mtivor8", XSPR(31,467,408), XSPR_MASK, BOOKE, { RS } },
{ "mtivor9", XSPR(31,467,409), XSPR_MASK, BOOKE, { RS } },
{ "mtivor10", XSPR(31,467,410), XSPR_MASK, BOOKE, { RS } },
{ "mtivor11", XSPR(31,467,411), XSPR_MASK, BOOKE, { RS } },
{ "mtivor12", XSPR(31,467,412), XSPR_MASK, BOOKE, { RS } },
{ "mtivor13", XSPR(31,467,413), XSPR_MASK, BOOKE, { RS } },
{ "mtivor14", XSPR(31,467,414), XSPR_MASK, BOOKE, { RS } },
{ "mtivor15", XSPR(31,467,415), XSPR_MASK, BOOKE, { RS } },
{ "mtspefscr", XSPR(31,467,512), XSPR_MASK, PPCSPE, { RS } },
{ "mtbbear", XSPR(31,467,513), XSPR_MASK, PPCBRLK, { RS } },
{ "mtbbtar", XSPR(31,467,514), XSPR_MASK, PPCBRLK, { RS } },
{ "mtivor32", XSPR(31,467,528), XSPR_MASK, PPCSPE, { RS } },
{ "mtivor33", XSPR(31,467,529), XSPR_MASK, PPCSPE, { RS } },
{ "mtivor34", XSPR(31,467,530), XSPR_MASK, PPCSPE, { RS } },
{ "mtivor35", XSPR(31,467,531), XSPR_MASK, PPCPMR, { RS } },
{ "mtibatu", XSPR(31,467,528), XSPRBAT_MASK, PPC, { SPRBAT, RS } },
{ "mtibatl", XSPR(31,467,529), XSPRBAT_MASK, PPC, { SPRBAT, RS } },
{ "mtdbatu", XSPR(31,467,536), XSPRBAT_MASK, PPC, { SPRBAT, RS } },
{ "mtdbatl", XSPR(31,467,537), XSPRBAT_MASK, PPC, { SPRBAT, RS } },
{ "mtmcsrr0", XSPR(31,467,570), XSPR_MASK, PPCRFMCI, { RS } },
{ "mtmcsrr1", XSPR(31,467,571), XSPR_MASK, PPCRFMCI, { RS } },
{ "mtmcsr", XSPR(31,467,572), XSPR_MASK, PPCRFMCI, { RS } },
{ "mtummcr0", XSPR(31,467,936), XSPR_MASK, PPC750, { RS } },
{ "mtupmc1", XSPR(31,467,937), XSPR_MASK, PPC750, { RS } },
{ "mtupmc2", XSPR(31,467,938), XSPR_MASK, PPC750, { RS } },
{ "mtusia", XSPR(31,467,939), XSPR_MASK, PPC750, { RS } },
{ "mtummcr1", XSPR(31,467,940), XSPR_MASK, PPC750, { RS } },
{ "mtupmc3", XSPR(31,467,941), XSPR_MASK, PPC750, { RS } },
{ "mtupmc4", XSPR(31,467,942), XSPR_MASK, PPC750, { RS } },
{ "mtzpr", XSPR(31,467,944), XSPR_MASK, PPC403, { RS } },
{ "mtccr0", XSPR(31,467,947), XSPR_MASK, PPC405, { RS } },
{ "mtmmcr0", XSPR(31,467,952), XSPR_MASK, PPC750, { RS } },
{ "mtsgr", XSPR(31,467,953), XSPR_MASK, PPC403, { RS } },
{ "mtpmc1", XSPR(31,467,953), XSPR_MASK, PPC750, { RS } },
{ "mtdcwr", XSPR(31,467,954), XSPR_MASK, PPC403, { RS } },
{ "mtpmc2", XSPR(31,467,954), XSPR_MASK, PPC750, { RS } },
{ "mtsler", XSPR(31,467,955), XSPR_MASK, PPC405, { RS } },
{ "mtsia", XSPR(31,467,955), XSPR_MASK, PPC750, { RS } },
{ "mtsu0r", XSPR(31,467,956), XSPR_MASK, PPC405, { RS } },
{ "mtmmcr1", XSPR(31,467,956), XSPR_MASK, PPC750, { RS } },
{ "mtpmc3", XSPR(31,467,957), XSPR_MASK, PPC750, { RS } },
{ "mtpmc4", XSPR(31,467,958), XSPR_MASK, PPC750, { RS } },
{ "mticdbdr", XSPR(31,467,979), XSPR_MASK, PPC403, { RS } },
{ "mtevpr", XSPR(31,467,982), XSPR_MASK, PPC403, { RS } },
{ "mtcdbcr", XSPR(31,467,983), XSPR_MASK, PPC403, { RS } },
{ "mtpit", XSPR(31,467,987), XSPR_MASK, PPC403, { RS } },
{ "mttbhi", XSPR(31,467,988), XSPR_MASK, PPC403, { RS } },
{ "mttblo", XSPR(31,467,989), XSPR_MASK, PPC403, { RS } },
{ "mtsrr2", XSPR(31,467,990), XSPR_MASK, PPC403, { RS } },
{ "mtsrr3", XSPR(31,467,991), XSPR_MASK, PPC403, { RS } },
{ "mtl2cr", XSPR(31,467,1017), XSPR_MASK, PPC750, { RS } },
{ "mtdccr", XSPR(31,467,1018), XSPR_MASK, PPC403, { RS } },
{ "mticcr", XSPR(31,467,1019), XSPR_MASK, PPC403, { RS } },
{ "mtictc", XSPR(31,467,1019), XSPR_MASK, PPC750, { RS } },
{ "mtpbl1", XSPR(31,467,1020), XSPR_MASK, PPC403, { RS } },
{ "mtthrm1", XSPR(31,467,1020), XSPR_MASK, PPC750, { RS } },
{ "mtpbu1", XSPR(31,467,1021), XSPR_MASK, PPC403, { RS } },
{ "mtthrm2", XSPR(31,467,1021), XSPR_MASK, PPC750, { RS } },
{ "mtpbl2", XSPR(31,467,1022), XSPR_MASK, PPC403, { RS } },
{ "mtthrm3", XSPR(31,467,1022), XSPR_MASK, PPC750, { RS } },
{ "mtpbu2", XSPR(31,467,1023), XSPR_MASK, PPC403, { RS } },
{ "mtspr", X(31,467), X_MASK, COM, { SPR, RS } },
{ "dcbi", X(31,470), XRT_MASK, PPC, { RA, RB } },
{ "nand", XRC(31,476,0), X_MASK, COM, { RA, RS, RB } },
{ "nand.", XRC(31,476,1), X_MASK, COM, { RA, RS, RB } },
{ "dcbie", X(31,478), XRT_MASK, BOOKE64, { RA, RB } },
{ "dcread", X(31,486), X_MASK, PPC403|PPC440, { RT, RA, RB }},
{ "mtpmr", X(31,462), X_MASK, PPCPMR, { PMR, RS }},
{ "icbtls", X(31,486), X_MASK, PPCCHLK, { CT, RA, RB }},
{ "nabs", XO(31,488,0,0), XORB_MASK, M601, { RT, RA } },
{ "subfme64",XO(31,488,0,0), XORB_MASK, BOOKE64, { RT, RA } },
{ "nabs.", XO(31,488,0,1), XORB_MASK, M601, { RT, RA } },
{ "nabso", XO(31,488,1,0), XORB_MASK, M601, { RT, RA } },
{ "subfme64o",XO(31,488,1,0), XORB_MASK, BOOKE64, { RT, RA } },
{ "nabso.", XO(31,488,1,1), XORB_MASK, M601, { RT, RA } },
{ "divd", XO(31,489,0,0), XO_MASK, PPC64, { RT, RA, RB } },
{ "divd.", XO(31,489,0,1), XO_MASK, PPC64, { RT, RA, RB } },
{ "divdo", XO(31,489,1,0), XO_MASK, PPC64, { RT, RA, RB } },
{ "divdo.", XO(31,489,1,1), XO_MASK, PPC64, { RT, RA, RB } },
{ "addme64", XO(31,490,0,0), XORB_MASK, BOOKE64, { RT, RA } },
{ "addme64o",XO(31,490,1,0), XORB_MASK, BOOKE64, { RT, RA } },
{ "divw", XO(31,491,0,0), XO_MASK, PPC, { RT, RA, RB } },
{ "divw.", XO(31,491,0,1), XO_MASK, PPC, { RT, RA, RB } },
{ "divwo", XO(31,491,1,0), XO_MASK, PPC, { RT, RA, RB } },
{ "divwo.", XO(31,491,1,1), XO_MASK, PPC, { RT, RA, RB } },
{ "icbtlse", X(31,494), X_MASK, PPCCHLK64, { CT, RA, RB }},
{ "slbia", X(31,498), 0xffffffff, PPC64, { 0 } },
{ "cli", X(31,502), XRB_MASK, POWER, { RT, RA } },
{ "stdcxe.", XRC(31,511,1), X_MASK, BOOKE64, { RS, RA, RB } },
{ "mcrxr", X(31,512), XRARB_MASK|(3<<21), COM, { BF } },
{ "bblels", X(31,518), X_MASK, PPCBRLK, { 0 }},
{ "mcrxr64", X(31,544), XRARB_MASK|(3<<21), BOOKE64, { BF } },
{ "clcs", X(31,531), XRB_MASK, M601, { RT, RA } },
{ "ldbrx", X(31,532), X_MASK, CELL, { RT, RA0, RB } },
{ "lswx", X(31,533), X_MASK, PPCCOM, { RT, RA0, RB } },
{ "lsx", X(31,533), X_MASK, PWRCOM, { RT, RA, RB } },
{ "lwbrx", X(31,534), X_MASK, PPCCOM, { RT, RA0, RB } },
{ "lbrx", X(31,534), X_MASK, PWRCOM, { RT, RA, RB } },
{ "lfsx", X(31,535), X_MASK, COM, { FRT, RA0, RB } },
{ "srw", XRC(31,536,0), X_MASK, PPCCOM, { RA, RS, RB } },
{ "sr", XRC(31,536,0), X_MASK, PWRCOM, { RA, RS, RB } },
{ "srw.", XRC(31,536,1), X_MASK, PPCCOM, { RA, RS, RB } },
{ "sr.", XRC(31,536,1), X_MASK, PWRCOM, { RA, RS, RB } },
{ "rrib", XRC(31,537,0), X_MASK, M601, { RA, RS, RB } },
{ "rrib.", XRC(31,537,1), X_MASK, M601, { RA, RS, RB } },
{ "srd", XRC(31,539,0), X_MASK, PPC64, { RA, RS, RB } },
{ "srd.", XRC(31,539,1), X_MASK, PPC64, { RA, RS, RB } },
{ "maskir", XRC(31,541,0), X_MASK, M601, { RA, RS, RB } },
{ "maskir.", XRC(31,541,1), X_MASK, M601, { RA, RS, RB } },
{ "lwbrxe", X(31,542), X_MASK, BOOKE64, { RT, RA0, RB } },
{ "lfsxe", X(31,543), X_MASK, BOOKE64, { FRT, RA0, RB } },
{ "bbelr", X(31,550), X_MASK, PPCBRLK, { 0 }},
{ "tlbsync", X(31,566), 0xffffffff, PPC, { 0 } },
{ "lfsux", X(31,567), X_MASK, COM, { FRT, RAS, RB } },
{ "lfsuxe", X(31,575), X_MASK, BOOKE64, { FRT, RAS, RB } },
{ "mfsr", X(31,595), XRB_MASK|(1<<20), COM32, { RT, SR } },
{ "lswi", X(31,597), X_MASK, PPCCOM, { RT, RA0, NB } },
{ "lsi", X(31,597), X_MASK, PWRCOM, { RT, RA0, NB } },
{ "lwsync", XSYNC(31,598,1), 0xffffffff, PPC, { 0 } },
{ "ptesync", XSYNC(31,598,2), 0xffffffff, PPC64, { 0 } },
{ "msync", X(31,598), 0xffffffff, BOOKE, { 0 } },
{ "sync", X(31,598), XSYNC_MASK, PPCCOM, { LS } },
{ "dcs", X(31,598), 0xffffffff, PWRCOM, { 0 } },
{ "lfdx", X(31,599), X_MASK, COM, { FRT, RA0, RB } },
{ "lfdxe", X(31,607), X_MASK, BOOKE64, { FRT, RA0, RB } },
{ "mffgpr", XRC(31,607,0), XRA_MASK, POWER6, { FRT, RB } },
{ "mfsri", X(31,627), X_MASK, PWRCOM, { RT, RA, RB } },
{ "dclst", X(31,630), XRB_MASK, PWRCOM, { RS, RA } },
{ "lfdux", X(31,631), X_MASK, COM, { FRT, RAS, RB } },
{ "lfduxe", X(31,639), X_MASK, BOOKE64, { FRT, RAS, RB } },
{ "mfsrin", X(31,659), XRA_MASK, PPC32, { RT, RB } },
{ "stdbrx", X(31,660), X_MASK, CELL, { RS, RA0, RB } },
{ "stswx", X(31,661), X_MASK, PPCCOM, { RS, RA0, RB } },
{ "stsx", X(31,661), X_MASK, PWRCOM, { RS, RA0, RB } },
{ "stwbrx", X(31,662), X_MASK, PPCCOM, { RS, RA0, RB } },
{ "stbrx", X(31,662), X_MASK, PWRCOM, { RS, RA0, RB } },
{ "stfsx", X(31,663), X_MASK, COM, { FRS, RA0, RB } },
{ "srq", XRC(31,664,0), X_MASK, M601, { RA, RS, RB } },
{ "srq.", XRC(31,664,1), X_MASK, M601, { RA, RS, RB } },
{ "sre", XRC(31,665,0), X_MASK, M601, { RA, RS, RB } },
{ "sre.", XRC(31,665,1), X_MASK, M601, { RA, RS, RB } },
{ "stwbrxe", X(31,670), X_MASK, BOOKE64, { RS, RA0, RB } },
{ "stfsxe", X(31,671), X_MASK, BOOKE64, { FRS, RA0, RB } },
{ "stfsux", X(31,695), X_MASK, COM, { FRS, RAS, RB } },
{ "sriq", XRC(31,696,0), X_MASK, M601, { RA, RS, SH } },
{ "sriq.", XRC(31,696,1), X_MASK, M601, { RA, RS, SH } },
{ "stfsuxe", X(31,703), X_MASK, BOOKE64, { FRS, RAS, RB } },
{ "stswi", X(31,725), X_MASK, PPCCOM, { RS, RA0, NB } },
{ "stsi", X(31,725), X_MASK, PWRCOM, { RS, RA0, NB } },
{ "stfdx", X(31,727), X_MASK, COM, { FRS, RA0, RB } },
{ "srlq", XRC(31,728,0), X_MASK, M601, { RA, RS, RB } },
{ "srlq.", XRC(31,728,1), X_MASK, M601, { RA, RS, RB } },
{ "sreq", XRC(31,729,0), X_MASK, M601, { RA, RS, RB } },
{ "sreq.", XRC(31,729,1), X_MASK, M601, { RA, RS, RB } },
{ "stfdxe", X(31,735), X_MASK, BOOKE64, { FRS, RA0, RB } },
{ "mftgpr", XRC(31,735,0), XRA_MASK, POWER6, { RT, FRB } },
{ "dcba", X(31,758), XRT_MASK, PPC405 | BOOKE, { RA, RB } },
{ "stfdux", X(31,759), X_MASK, COM, { FRS, RAS, RB } },
{ "srliq", XRC(31,760,0), X_MASK, M601, { RA, RS, SH } },
{ "srliq.", XRC(31,760,1), X_MASK, M601, { RA, RS, SH } },
{ "dcbae", X(31,766), XRT_MASK, BOOKE64, { RA, RB } },
{ "stfduxe", X(31,767), X_MASK, BOOKE64, { FRS, RAS, RB } },
{ "tlbivax", X(31,786), XRT_MASK, BOOKE, { RA, RB } },
{ "tlbivaxe",X(31,787), XRT_MASK, BOOKE64, { RA, RB } },
{ "lwzcix", X(31,789), X_MASK, POWER6, { RT, RA0, RB } },
{ "lhbrx", X(31,790), X_MASK, COM, { RT, RA0, RB } },
{ "sraw", XRC(31,792,0), X_MASK, PPCCOM, { RA, RS, RB } },
{ "sra", XRC(31,792,0), X_MASK, PWRCOM, { RA, RS, RB } },
{ "sraw.", XRC(31,792,1), X_MASK, PPCCOM, { RA, RS, RB } },
{ "sra.", XRC(31,792,1), X_MASK, PWRCOM, { RA, RS, RB } },
{ "srad", XRC(31,794,0), X_MASK, PPC64, { RA, RS, RB } },
{ "srad.", XRC(31,794,1), X_MASK, PPC64, { RA, RS, RB } },
{ "lhbrxe", X(31,798), X_MASK, BOOKE64, { RT, RA0, RB } },
{ "ldxe", X(31,799), X_MASK, BOOKE64, { RT, RA0, RB } },
{ "lduxe", X(31,831), X_MASK, BOOKE64, { RT, RA0, RB } },
{ "rac", X(31,818), X_MASK, PWRCOM, { RT, RA, RB } },
{ "lhzcix", X(31,821), X_MASK, POWER6, { RT, RA0, RB } },
{ "dss", XDSS(31,822,0), XDSS_MASK, PPCVEC, { STRM } },
{ "dssall", XDSS(31,822,1), XDSS_MASK, PPCVEC, { 0 } },
{ "srawi", XRC(31,824,0), X_MASK, PPCCOM, { RA, RS, SH } },
{ "srai", XRC(31,824,0), X_MASK, PWRCOM, { RA, RS, SH } },
{ "srawi.", XRC(31,824,1), X_MASK, PPCCOM, { RA, RS, SH } },
{ "srai.", XRC(31,824,1), X_MASK, PWRCOM, { RA, RS, SH } },
{ "slbmfev", X(31,851), XRA_MASK, PPC64, { RT, RB } },
{ "lbzcix", X(31,853), X_MASK, POWER6, { RT, RA0, RB } },
{ "mbar", X(31,854), X_MASK, BOOKE, { MO } },
{ "eieio", X(31,854), 0xffffffff, PPC, { 0 } },
{ "lfiwax", X(31,855), X_MASK, POWER6, { FRT, RA0, RB } },
{ "ldcix", X(31,885), X_MASK, POWER6, { RT, RA0, RB } },
{ "tlbsx", XRC(31,914,0), X_MASK, PPC403|BOOKE, { RTO, RA, RB } },
{ "tlbsx.", XRC(31,914,1), X_MASK, PPC403|BOOKE, { RTO, RA, RB } },
{ "tlbsxe", XRC(31,915,0), X_MASK, BOOKE64, { RTO, RA, RB } },
{ "tlbsxe.", XRC(31,915,1), X_MASK, BOOKE64, { RTO, RA, RB } },
{ "slbmfee", X(31,915), XRA_MASK, PPC64, { RT, RB } },
{ "stwcix", X(31,917), X_MASK, POWER6, { RS, RA0, RB } },
{ "sthbrx", X(31,918), X_MASK, COM, { RS, RA0, RB } },
{ "sraq", XRC(31,920,0), X_MASK, M601, { RA, RS, RB } },
{ "sraq.", XRC(31,920,1), X_MASK, M601, { RA, RS, RB } },
{ "srea", XRC(31,921,0), X_MASK, M601, { RA, RS, RB } },
{ "srea.", XRC(31,921,1), X_MASK, M601, { RA, RS, RB } },
{ "extsh", XRC(31,922,0), XRB_MASK, PPCCOM, { RA, RS } },
{ "exts", XRC(31,922,0), XRB_MASK, PWRCOM, { RA, RS } },
{ "extsh.", XRC(31,922,1), XRB_MASK, PPCCOM, { RA, RS } },
{ "exts.", XRC(31,922,1), XRB_MASK, PWRCOM, { RA, RS } },
{ "sthbrxe", X(31,926), X_MASK, BOOKE64, { RS, RA0, RB } },
{ "stdxe", X(31,927), X_MASK, BOOKE64, { RS, RA0, RB } },
{ "tlbrehi", XTLB(31,946,0), XTLB_MASK, PPC403, { RT, RA } },
{ "tlbrelo", XTLB(31,946,1), XTLB_MASK, PPC403, { RT, RA } },
{ "tlbre", X(31,946), X_MASK, PPC403|BOOKE, { RSO, RAOPT, SHO } },
{ "sthcix", X(31,949), X_MASK, POWER6, { RS, RA0, RB } },
{ "sraiq", XRC(31,952,0), X_MASK, M601, { RA, RS, SH } },
{ "sraiq.", XRC(31,952,1), X_MASK, M601, { RA, RS, SH } },
{ "extsb", XRC(31,954,0), XRB_MASK, PPC, { RA, RS} },
{ "extsb.", XRC(31,954,1), XRB_MASK, PPC, { RA, RS} },
{ "stduxe", X(31,959), X_MASK, BOOKE64, { RS, RAS, RB } },
{ "iccci", X(31,966), XRT_MASK, PPC403|PPC440, { RA, RB } },
{ "tlbwehi", XTLB(31,978,0), XTLB_MASK, PPC403, { RT, RA } },
{ "tlbwelo", XTLB(31,978,1), XTLB_MASK, PPC403, { RT, RA } },
{ "tlbwe", X(31,978), X_MASK, PPC403|BOOKE, { RSO, RAOPT, SHO } },
{ "tlbld", X(31,978), XRTRA_MASK, PPC, { RB } },
{ "stbcix", X(31,981), X_MASK, POWER6, { RS, RA0, RB } },
{ "icbi", X(31,982), XRT_MASK, PPC, { RA, RB } },
{ "stfiwx", X(31,983), X_MASK, PPC, { FRS, RA0, RB } },
{ "extsw", XRC(31,986,0), XRB_MASK, PPC64 | BOOKE64,{ RA, RS } },
{ "extsw.", XRC(31,986,1), XRB_MASK, PPC64, { RA, RS } },
{ "icread", X(31,998), XRT_MASK, PPC403|PPC440, { RA, RB } },
{ "icbie", X(31,990), XRT_MASK, BOOKE64, { RA, RB } },
{ "stfiwxe", X(31,991), X_MASK, BOOKE64, { FRS, RA0, RB } },
{ "tlbli", X(31,1010), XRTRA_MASK, PPC, { RB } },
{ "stdcix", X(31,1013), X_MASK, POWER6, { RS, RA0, RB } },
{ "dcbzl", XOPL(31,1014,1), XRT_MASK,POWER4, { RA, RB } },
{ "dcbz", X(31,1014), XRT_MASK, PPC, { RA, RB } },
{ "dclz", X(31,1014), XRT_MASK, PPC, { RA, RB } },
{ "dcbze", X(31,1022), XRT_MASK, BOOKE64, { RA, RB } },
{ "lvebx", X(31, 7), X_MASK, PPCVEC, { VD, RA, RB } },
{ "lvehx", X(31, 39), X_MASK, PPCVEC, { VD, RA, RB } },
{ "lvewx", X(31, 71), X_MASK, PPCVEC, { VD, RA, RB } },
{ "lvsl", X(31, 6), X_MASK, PPCVEC, { VD, RA, RB } },
{ "lvsr", X(31, 38), X_MASK, PPCVEC, { VD, RA, RB } },
{ "lvx", X(31, 103), X_MASK, PPCVEC, { VD, RA, RB } },
{ "lvxl", X(31, 359), X_MASK, PPCVEC, { VD, RA, RB } },
{ "stvebx", X(31, 135), X_MASK, PPCVEC, { VS, RA, RB } },
{ "stvehx", X(31, 167), X_MASK, PPCVEC, { VS, RA, RB } },
{ "stvewx", X(31, 199), X_MASK, PPCVEC, { VS, RA, RB } },
{ "stvx", X(31, 231), X_MASK, PPCVEC, { VS, RA, RB } },
{ "stvxl", X(31, 487), X_MASK, PPCVEC, { VS, RA, RB } },
/* New load/store left/right index vector instructions that are in the Cell only. */
{ "lvlx", X(31, 519), X_MASK, CELL, { VD, RA0, RB } },
{ "lvlxl", X(31, 775), X_MASK, CELL, { VD, RA0, RB } },
{ "lvrx", X(31, 551), X_MASK, CELL, { VD, RA0, RB } },
{ "lvrxl", X(31, 807), X_MASK, CELL, { VD, RA0, RB } },
{ "stvlx", X(31, 647), X_MASK, CELL, { VS, RA0, RB } },
{ "stvlxl", X(31, 903), X_MASK, CELL, { VS, RA0, RB } },
{ "stvrx", X(31, 679), X_MASK, CELL, { VS, RA0, RB } },
{ "stvrxl", X(31, 935), X_MASK, CELL, { VS, RA0, RB } },
{ "lwz", OP(32), OP_MASK, PPCCOM, { RT, D, RA0 } },
{ "l", OP(32), OP_MASK, PWRCOM, { RT, D, RA0 } },
{ "lwzu", OP(33), OP_MASK, PPCCOM, { RT, D, RAL } },
{ "lu", OP(33), OP_MASK, PWRCOM, { RT, D, RA0 } },
{ "lbz", OP(34), OP_MASK, COM, { RT, D, RA0 } },
{ "lbzu", OP(35), OP_MASK, COM, { RT, D, RAL } },
{ "stw", OP(36), OP_MASK, PPCCOM, { RS, D, RA0 } },
{ "st", OP(36), OP_MASK, PWRCOM, { RS, D, RA0 } },
{ "stwu", OP(37), OP_MASK, PPCCOM, { RS, D, RAS } },
{ "stu", OP(37), OP_MASK, PWRCOM, { RS, D, RA0 } },
{ "stb", OP(38), OP_MASK, COM, { RS, D, RA0 } },
{ "stbu", OP(39), OP_MASK, COM, { RS, D, RAS } },
{ "lhz", OP(40), OP_MASK, COM, { RT, D, RA0 } },
{ "lhzu", OP(41), OP_MASK, COM, { RT, D, RAL } },
{ "lha", OP(42), OP_MASK, COM, { RT, D, RA0 } },
{ "lhau", OP(43), OP_MASK, COM, { RT, D, RAL } },
{ "sth", OP(44), OP_MASK, COM, { RS, D, RA0 } },
{ "sthu", OP(45), OP_MASK, COM, { RS, D, RAS } },
{ "lmw", OP(46), OP_MASK, PPCCOM, { RT, D, RAM } },
{ "lm", OP(46), OP_MASK, PWRCOM, { RT, D, RA0 } },
{ "stmw", OP(47), OP_MASK, PPCCOM, { RS, D, RA0 } },
{ "stm", OP(47), OP_MASK, PWRCOM, { RS, D, RA0 } },
{ "lfs", OP(48), OP_MASK, COM, { FRT, D, RA0 } },
{ "lfsu", OP(49), OP_MASK, COM, { FRT, D, RAS } },
{ "lfd", OP(50), OP_MASK, COM, { FRT, D, RA0 } },
{ "lfdu", OP(51), OP_MASK, COM, { FRT, D, RAS } },
{ "stfs", OP(52), OP_MASK, COM, { FRS, D, RA0 } },
{ "stfsu", OP(53), OP_MASK, COM, { FRS, D, RAS } },
{ "stfd", OP(54), OP_MASK, COM, { FRS, D, RA0 } },
{ "stfdu", OP(55), OP_MASK, COM, { FRS, D, RAS } },
{ "lq", OP(56), OP_MASK, POWER4, { RTQ, DQ, RAQ } },
{ "lfq", OP(56), OP_MASK, POWER2, { FRT, D, RA0 } },
{ "lfqu", OP(57), OP_MASK, POWER2, { FRT, D, RA0 } },
{ "lfdp", OP(57), OP_MASK, POWER6, { FRT, D, RA0 } },
{ "lbze", DEO(58,0), DE_MASK, BOOKE64, { RT, DE, RA0 } },
{ "lbzue", DEO(58,1), DE_MASK, BOOKE64, { RT, DE, RAL } },
{ "lhze", DEO(58,2), DE_MASK, BOOKE64, { RT, DE, RA0 } },
{ "lhzue", DEO(58,3), DE_MASK, BOOKE64, { RT, DE, RAL } },
{ "lhae", DEO(58,4), DE_MASK, BOOKE64, { RT, DE, RA0 } },
{ "lhaue", DEO(58,5), DE_MASK, BOOKE64, { RT, DE, RAL } },
{ "lwze", DEO(58,6), DE_MASK, BOOKE64, { RT, DE, RA0 } },
{ "lwzue", DEO(58,7), DE_MASK, BOOKE64, { RT, DE, RAL } },
{ "stbe", DEO(58,8), DE_MASK, BOOKE64, { RS, DE, RA0 } },
{ "stbue", DEO(58,9), DE_MASK, BOOKE64, { RS, DE, RAS } },
{ "sthe", DEO(58,10), DE_MASK, BOOKE64, { RS, DE, RA0 } },
{ "sthue", DEO(58,11), DE_MASK, BOOKE64, { RS, DE, RAS } },
{ "stwe", DEO(58,14), DE_MASK, BOOKE64, { RS, DE, RA0 } },
{ "stwue", DEO(58,15), DE_MASK, BOOKE64, { RS, DE, RAS } },
{ "ld", DSO(58,0), DS_MASK, PPC64, { RT, DS, RA0 } },
{ "ldu", DSO(58,1), DS_MASK, PPC64, { RT, DS, RAL } },
{ "lwa", DSO(58,2), DS_MASK, PPC64, { RT, DS, RA0 } },
{ "dadd", XRC(59,2,0), X_MASK, POWER6, { FRT, FRA, FRB } },
{ "dadd.", XRC(59,2,1), X_MASK, POWER6, { FRT, FRA, FRB } },
{ "dqua", ZRC(59,3,0), Z2_MASK, POWER6, { FRT, FRA, FRB, RMC } },
{ "dqua.", ZRC(59,3,1), Z2_MASK, POWER6, { FRT, FRA, FRB, RMC } },
{ "fdivs", A(59,18,0), AFRC_MASK, PPC, { FRT, FRA, FRB } },
{ "fdivs.", A(59,18,1), AFRC_MASK, PPC, { FRT, FRA, FRB } },
{ "fsubs", A(59,20,0), AFRC_MASK, PPC, { FRT, FRA, FRB } },
{ "fsubs.", A(59,20,1), AFRC_MASK, PPC, { FRT, FRA, FRB } },
{ "fadds", A(59,21,0), AFRC_MASK, PPC, { FRT, FRA, FRB } },
{ "fadds.", A(59,21,1), AFRC_MASK, PPC, { FRT, FRA, FRB } },
{ "fsqrts", A(59,22,0), AFRAFRC_MASK, PPC, { FRT, FRB } },
{ "fsqrts.", A(59,22,1), AFRAFRC_MASK, PPC, { FRT, FRB } },
{ "fres", A(59,24,0), AFRALFRC_MASK, PPC, { FRT, FRB, A_L } },
{ "fres.", A(59,24,1), AFRALFRC_MASK, PPC, { FRT, FRB, A_L } },
{ "fmuls", A(59,25,0), AFRB_MASK, PPC, { FRT, FRA, FRC } },
{ "fmuls.", A(59,25,1), AFRB_MASK, PPC, { FRT, FRA, FRC } },
{ "frsqrtes", A(59,26,0), AFRALFRC_MASK,POWER5, { FRT, FRB, A_L } },
{ "frsqrtes.",A(59,26,1), AFRALFRC_MASK,POWER5, { FRT, FRB, A_L } },
{ "fmsubs", A(59,28,0), A_MASK, PPC, { FRT,FRA,FRC,FRB } },
{ "fmsubs.", A(59,28,1), A_MASK, PPC, { FRT,FRA,FRC,FRB } },
{ "fmadds", A(59,29,0), A_MASK, PPC, { FRT,FRA,FRC,FRB } },
{ "fmadds.", A(59,29,1), A_MASK, PPC, { FRT,FRA,FRC,FRB } },
{ "fnmsubs", A(59,30,0), A_MASK, PPC, { FRT,FRA,FRC,FRB } },
{ "fnmsubs.",A(59,30,1), A_MASK, PPC, { FRT,FRA,FRC,FRB } },
{ "fnmadds", A(59,31,0), A_MASK, PPC, { FRT,FRA,FRC,FRB } },
{ "fnmadds.",A(59,31,1), A_MASK, PPC, { FRT,FRA,FRC,FRB } },
{ "dmul", XRC(59,34,0), X_MASK, POWER6, { FRT, FRA, FRB } },
{ "dmul.", XRC(59,34,1), X_MASK, POWER6, { FRT, FRA, FRB } },
{ "drrnd", ZRC(59,35,0), Z2_MASK, POWER6, { FRT, FRA, FRB, RMC } },
{ "drrnd.", ZRC(59,35,1), Z2_MASK, POWER6, { FRT, FRA, FRB, RMC } },
{ "dscli", ZRC(59,66,0), Z_MASK, POWER6, { FRT, FRA, SH16 } },
{ "dscli.", ZRC(59,66,1), Z_MASK, POWER6, { FRT, FRA, SH16 } },
{ "dquai", ZRC(59,67,0), Z2_MASK, POWER6, { TE, FRT, FRB, RMC } },
{ "dquai.", ZRC(59,67,1), Z2_MASK, POWER6, { TE, FRT, FRB, RMC } },
{ "dscri", ZRC(59,98,0), Z_MASK, POWER6, { FRT, FRA, SH16 } },
{ "dscri.", ZRC(59,98,1), Z_MASK, POWER6, { FRT, FRA, SH16 } },
{ "drintx", ZRC(59,99,0), Z2_MASK, POWER6, { R, FRT, FRB, RMC } },
{ "drintx.", ZRC(59,99,1), Z2_MASK, POWER6, { R, FRT, FRB, RMC } },
{ "dcmpo", X(59,130), X_MASK, POWER6, { BF, FRA, FRB } },
{ "dtstex", X(59,162), X_MASK, POWER6, { BF, FRA, FRB } },
{ "dtstdc", Z(59,194), Z_MASK, POWER6, { BF, FRA, DCM } },
{ "dtstdg", Z(59,226), Z_MASK, POWER6, { BF, FRA, DGM } },
{ "drintn", ZRC(59,227,0), Z2_MASK, POWER6, { R, FRT, FRB, RMC } },
{ "drintn.", ZRC(59,227,1), Z2_MASK, POWER6, { R, FRT, FRB, RMC } },
{ "dctdp", XRC(59,258,0), X_MASK, POWER6, { FRT, FRB } },
{ "dctdp.", XRC(59,258,1), X_MASK, POWER6, { FRT, FRB } },
{ "dctfix", XRC(59,290,0), X_MASK, POWER6, { FRT, FRB } },
{ "dctfix.", XRC(59,290,1), X_MASK, POWER6, { FRT, FRB } },
{ "ddedpd", XRC(59,322,0), X_MASK, POWER6, { SP, FRT, FRB } },
{ "ddedpd.", XRC(59,322,1), X_MASK, POWER6, { SP, FRT, FRB } },
{ "dxex", XRC(59,354,0), X_MASK, POWER6, { FRT, FRB } },
{ "dxex.", XRC(59,354,1), X_MASK, POWER6, { FRT, FRB } },
{ "dsub", XRC(59,514,0), X_MASK, POWER6, { FRT, FRA, FRB } },
{ "dsub.", XRC(59,514,1), X_MASK, POWER6, { FRT, FRA, FRB } },
{ "ddiv", XRC(59,546,0), X_MASK, POWER6, { FRT, FRA, FRB } },
{ "ddiv.", XRC(59,546,1), X_MASK, POWER6, { FRT, FRA, FRB } },
{ "dcmpu", X(59,642), X_MASK, POWER6, { BF, FRA, FRB } },
{ "dtstsf", X(59,674), X_MASK, POWER6, { BF, FRA, FRB } },
{ "drsp", XRC(59,770,0), X_MASK, POWER6, { FRT, FRB } },
{ "drsp.", XRC(59,770,1), X_MASK, POWER6, { FRT, FRB } },
{ "dcffix", XRC(59,802,0), X_MASK, POWER6, { FRT, FRB } },
{ "dcffix.", XRC(59,802,1), X_MASK, POWER6, { FRT, FRB } },
{ "denbcd", XRC(59,834,0), X_MASK, POWER6, { S, FRT, FRB } },
{ "denbcd.", XRC(59,834,1), X_MASK, POWER6, { S, FRT, FRB } },
{ "diex", XRC(59,866,0), X_MASK, POWER6, { FRT, FRA, FRB } },
{ "diex.", XRC(59,866,1), X_MASK, POWER6, { FRT, FRA, FRB } },
{ "stfq", OP(60), OP_MASK, POWER2, { FRS, D, RA } },
{ "stfqu", OP(61), OP_MASK, POWER2, { FRS, D, RA } },
{ "stfdp", OP(61), OP_MASK, POWER6, { FRT, D, RA0 } },
{ "lde", DEO(62,0), DE_MASK, BOOKE64, { RT, DES, RA0 } },
{ "ldue", DEO(62,1), DE_MASK, BOOKE64, { RT, DES, RA0 } },
{ "lfse", DEO(62,4), DE_MASK, BOOKE64, { FRT, DES, RA0 } },
{ "lfsue", DEO(62,5), DE_MASK, BOOKE64, { FRT, DES, RAS } },
{ "lfde", DEO(62,6), DE_MASK, BOOKE64, { FRT, DES, RA0 } },
{ "lfdue", DEO(62,7), DE_MASK, BOOKE64, { FRT, DES, RAS } },
{ "stde", DEO(62,8), DE_MASK, BOOKE64, { RS, DES, RA0 } },
{ "stdue", DEO(62,9), DE_MASK, BOOKE64, { RS, DES, RAS } },
{ "stfse", DEO(62,12), DE_MASK, BOOKE64, { FRS, DES, RA0 } },
{ "stfsue", DEO(62,13), DE_MASK, BOOKE64, { FRS, DES, RAS } },
{ "stfde", DEO(62,14), DE_MASK, BOOKE64, { FRS, DES, RA0 } },
{ "stfdue", DEO(62,15), DE_MASK, BOOKE64, { FRS, DES, RAS } },
{ "std", DSO(62,0), DS_MASK, PPC64, { RS, DS, RA0 } },
{ "stdu", DSO(62,1), DS_MASK, PPC64, { RS, DS, RAS } },
{ "stq", DSO(62,2), DS_MASK, POWER4, { RSQ, DS, RA0 } },
{ "fcmpu", X(63,0), X_MASK|(3<<21), COM, { BF, FRA, FRB } },
{ "daddq", XRC(63,2,0), X_MASK, POWER6, { FRT, FRA, FRB } },
{ "daddq.", XRC(63,2,1), X_MASK, POWER6, { FRT, FRA, FRB } },
{ "dquaq", ZRC(63,3,0), Z2_MASK, POWER6, { FRT, FRA, FRB, RMC } },
{ "dquaq.", ZRC(63,3,1), Z2_MASK, POWER6, { FRT, FRA, FRB, RMC } },
{ "fcpsgn", XRC(63,8,0), X_MASK, POWER6, { FRT, FRA, FRB } },
{ "fcpsgn.", XRC(63,8,1), X_MASK, POWER6, { FRT, FRA, FRB } },
{ "frsp", XRC(63,12,0), XRA_MASK, COM, { FRT, FRB } },
{ "frsp.", XRC(63,12,1), XRA_MASK, COM, { FRT, FRB } },
{ "fctiw", XRC(63,14,0), XRA_MASK, PPCCOM, { FRT, FRB } },
{ "fcir", XRC(63,14,0), XRA_MASK, POWER2, { FRT, FRB } },
{ "fctiw.", XRC(63,14,1), XRA_MASK, PPCCOM, { FRT, FRB } },
{ "fcir.", XRC(63,14,1), XRA_MASK, POWER2, { FRT, FRB } },
{ "fctiwz", XRC(63,15,0), XRA_MASK, PPCCOM, { FRT, FRB } },
{ "fcirz", XRC(63,15,0), XRA_MASK, POWER2, { FRT, FRB } },
{ "fctiwz.", XRC(63,15,1), XRA_MASK, PPCCOM, { FRT, FRB } },
{ "fcirz.", XRC(63,15,1), XRA_MASK, POWER2, { FRT, FRB } },
{ "fdiv", A(63,18,0), AFRC_MASK, PPCCOM, { FRT, FRA, FRB } },
{ "fd", A(63,18,0), AFRC_MASK, PWRCOM, { FRT, FRA, FRB } },
{ "fdiv.", A(63,18,1), AFRC_MASK, PPCCOM, { FRT, FRA, FRB } },
{ "fd.", A(63,18,1), AFRC_MASK, PWRCOM, { FRT, FRA, FRB } },
{ "fsub", A(63,20,0), AFRC_MASK, PPCCOM, { FRT, FRA, FRB } },
{ "fs", A(63,20,0), AFRC_MASK, PWRCOM, { FRT, FRA, FRB } },
{ "fsub.", A(63,20,1), AFRC_MASK, PPCCOM, { FRT, FRA, FRB } },
{ "fs.", A(63,20,1), AFRC_MASK, PWRCOM, { FRT, FRA, FRB } },
{ "fadd", A(63,21,0), AFRC_MASK, PPCCOM, { FRT, FRA, FRB } },
{ "fa", A(63,21,0), AFRC_MASK, PWRCOM, { FRT, FRA, FRB } },
{ "fadd.", A(63,21,1), AFRC_MASK, PPCCOM, { FRT, FRA, FRB } },
{ "fa.", A(63,21,1), AFRC_MASK, PWRCOM, { FRT, FRA, FRB } },
{ "fsqrt", A(63,22,0), AFRAFRC_MASK, PPCPWR2, { FRT, FRB } },
{ "fsqrt.", A(63,22,1), AFRAFRC_MASK, PPCPWR2, { FRT, FRB } },
{ "fsel", A(63,23,0), A_MASK, PPC, { FRT,FRA,FRC,FRB } },
{ "fsel.", A(63,23,1), A_MASK, PPC, { FRT,FRA,FRC,FRB } },
{ "fre", A(63,24,0), AFRALFRC_MASK, POWER5, { FRT, FRB, A_L } },
{ "fre.", A(63,24,1), AFRALFRC_MASK, POWER5, { FRT, FRB, A_L } },
{ "fmul", A(63,25,0), AFRB_MASK, PPCCOM, { FRT, FRA, FRC } },
{ "fm", A(63,25,0), AFRB_MASK, PWRCOM, { FRT, FRA, FRC } },
{ "fmul.", A(63,25,1), AFRB_MASK, PPCCOM, { FRT, FRA, FRC } },
{ "fm.", A(63,25,1), AFRB_MASK, PWRCOM, { FRT, FRA, FRC } },
{ "frsqrte", A(63,26,0), AFRALFRC_MASK, PPC, { FRT, FRB, A_L } },
{ "frsqrte.",A(63,26,1), AFRALFRC_MASK, PPC, { FRT, FRB, A_L } },
{ "fmsub", A(63,28,0), A_MASK, PPCCOM, { FRT,FRA,FRC,FRB } },
{ "fms", A(63,28,0), A_MASK, PWRCOM, { FRT,FRA,FRC,FRB } },
{ "fmsub.", A(63,28,1), A_MASK, PPCCOM, { FRT,FRA,FRC,FRB } },
{ "fms.", A(63,28,1), A_MASK, PWRCOM, { FRT,FRA,FRC,FRB } },
{ "fmadd", A(63,29,0), A_MASK, PPCCOM, { FRT,FRA,FRC,FRB } },
{ "fma", A(63,29,0), A_MASK, PWRCOM, { FRT,FRA,FRC,FRB } },
{ "fmadd.", A(63,29,1), A_MASK, PPCCOM, { FRT,FRA,FRC,FRB } },
{ "fma.", A(63,29,1), A_MASK, PWRCOM, { FRT,FRA,FRC,FRB } },
{ "fnmsub", A(63,30,0), A_MASK, PPCCOM, { FRT,FRA,FRC,FRB } },
{ "fnms", A(63,30,0), A_MASK, PWRCOM, { FRT,FRA,FRC,FRB } },
{ "fnmsub.", A(63,30,1), A_MASK, PPCCOM, { FRT,FRA,FRC,FRB } },
{ "fnms.", A(63,30,1), A_MASK, PWRCOM, { FRT,FRA,FRC,FRB } },
{ "fnmadd", A(63,31,0), A_MASK, PPCCOM, { FRT,FRA,FRC,FRB } },
{ "fnma", A(63,31,0), A_MASK, PWRCOM, { FRT,FRA,FRC,FRB } },
{ "fnmadd.", A(63,31,1), A_MASK, PPCCOM, { FRT,FRA,FRC,FRB } },
{ "fnma.", A(63,31,1), A_MASK, PWRCOM, { FRT,FRA,FRC,FRB } },
{ "fcmpo", X(63,32), X_MASK|(3<<21), COM, { BF, FRA, FRB } },
{ "dmulq", XRC(63,34,0), X_MASK, POWER6, { FRT, FRA, FRB } },
{ "dmulq.", XRC(63,34,1), X_MASK, POWER6, { FRT, FRA, FRB } },
{ "drrndq", ZRC(63,35,0), Z2_MASK, POWER6, { FRT, FRA, FRB, RMC } },
{ "drrndq.", ZRC(63,35,1), Z2_MASK, POWER6, { FRT, FRA, FRB, RMC } },
{ "mtfsb1", XRC(63,38,0), XRARB_MASK, COM, { BT } },
{ "mtfsb1.", XRC(63,38,1), XRARB_MASK, COM, { BT } },
{ "fneg", XRC(63,40,0), XRA_MASK, COM, { FRT, FRB } },
{ "fneg.", XRC(63,40,1), XRA_MASK, COM, { FRT, FRB } },
{ "mcrfs", X(63,64), XRB_MASK|(3<<21)|(3<<16), COM, { BF, BFA } },
{ "dscliq", ZRC(63,66,0), Z_MASK, POWER6, { FRT, FRA, SH16 } },
{ "dscliq.", ZRC(63,66,1), Z_MASK, POWER6, { FRT, FRA, SH16 } },
{ "dquaiq", ZRC(63,67,0), Z2_MASK, POWER6, { TE, FRT, FRB, RMC } },
{ "dquaiq.", ZRC(63,67,1), Z2_MASK, POWER6, { FRT, FRA, FRB, RMC } },
{ "mtfsb0", XRC(63,70,0), XRARB_MASK, COM, { BT } },
{ "mtfsb0.", XRC(63,70,1), XRARB_MASK, COM, { BT } },
{ "fmr", XRC(63,72,0), XRA_MASK, COM, { FRT, FRB } },
{ "fmr.", XRC(63,72,1), XRA_MASK, COM, { FRT, FRB } },
{ "dscriq", ZRC(63,98,0), Z_MASK, POWER6, { FRT, FRA, SH16 } },
{ "dscriq.", ZRC(63,98,1), Z_MASK, POWER6, { FRT, FRA, SH16 } },
{ "drintxq", ZRC(63,99,0), Z2_MASK, POWER6, { R, FRT, FRB, RMC } },
{ "drintxq.",ZRC(63,99,1), Z2_MASK, POWER6, { R, FRT, FRB, RMC } },
{ "dcmpoq", X(63,130), X_MASK, POWER6, { BF, FRA, FRB } },
{ "mtfsfi", XRC(63,134,0), XWRA_MASK|(3<<21)|(1<<11), COM, { BFF, U, W } },
{ "mtfsfi.", XRC(63,134,1), XWRA_MASK|(3<<21)|(1<<11), COM, { BFF, U, W } },
{ "fnabs", XRC(63,136,0), XRA_MASK, COM, { FRT, FRB } },
{ "fnabs.", XRC(63,136,1), XRA_MASK, COM, { FRT, FRB } },
{ "dtstexq", X(63,162), X_MASK, POWER6, { BF, FRA, FRB } },
{ "dtstdcq", Z(63,194), Z_MASK, POWER6, { BF, FRA, DCM } },
{ "dtstdgq", Z(63,226), Z_MASK, POWER6, { BF, FRA, DGM } },
{ "drintnq", ZRC(63,227,0), Z2_MASK, POWER6, { R, FRT, FRB, RMC } },
{ "drintnq.",ZRC(63,227,1), Z2_MASK, POWER6, { R, FRT, FRB, RMC } },
{ "dctqpq", XRC(63,258,0), X_MASK, POWER6, { FRT, FRB } },
{ "dctqpq.", XRC(63,258,1), X_MASK, POWER6, { FRT, FRB } },
{ "fabs", XRC(63,264,0), XRA_MASK, COM, { FRT, FRB } },
{ "fabs.", XRC(63,264,1), XRA_MASK, COM, { FRT, FRB } },
{ "dctfixq", XRC(63,290,0), X_MASK, POWER6, { FRT, FRB } },
{ "dctfixq.",XRC(63,290,1), X_MASK, POWER6, { FRT, FRB } },
{ "ddedpdq", XRC(63,322,0), X_MASK, POWER6, { SP, FRT, FRB } },
{ "ddedpdq.",XRC(63,322,1), X_MASK, POWER6, { SP, FRT, FRB } },
{ "dxexq", XRC(63,354,0), X_MASK, POWER6, { FRT, FRB } },
{ "dxexq.", XRC(63,354,1), X_MASK, POWER6, { FRT, FRB } },
{ "frin", XRC(63,392,0), XRA_MASK, POWER5, { FRT, FRB } },
{ "frin.", XRC(63,392,1), XRA_MASK, POWER5, { FRT, FRB } },
{ "friz", XRC(63,424,0), XRA_MASK, POWER5, { FRT, FRB } },
{ "friz.", XRC(63,424,1), XRA_MASK, POWER5, { FRT, FRB } },
{ "frip", XRC(63,456,0), XRA_MASK, POWER5, { FRT, FRB } },
{ "frip.", XRC(63,456,1), XRA_MASK, POWER5, { FRT, FRB } },
{ "frim", XRC(63,488,0), XRA_MASK, POWER5, { FRT, FRB } },
{ "frim.", XRC(63,488,1), XRA_MASK, POWER5, { FRT, FRB } },
{ "dsubq", XRC(63,514,0), X_MASK, POWER6, { FRT, FRA, FRB } },
{ "dsubq.", XRC(63,514,1), X_MASK, POWER6, { FRT, FRA, FRB } },
{ "ddivq", XRC(63,546,0), X_MASK, POWER6, { FRT, FRA, FRB } },
{ "ddivq.", XRC(63,546,1), X_MASK, POWER6, { FRT, FRA, FRB } },
{ "mffs", XRC(63,583,0), XRARB_MASK, COM, { FRT } },
{ "mffs.", XRC(63,583,1), XRARB_MASK, COM, { FRT } },
{ "dcmpuq", X(63,642), X_MASK, POWER6, { BF, FRA, FRB } },
{ "dtstsfq", X(63,674), X_MASK, POWER6, { BF, FRA, FRB } },
{ "mtfsf", XFL(63,711,0), XFL_MASK, COM, { FLM, FRB, XFL_L, W } },
{ "mtfsf.", XFL(63,711,1), XFL_MASK, COM, { FLM, FRB, XFL_L, W } },
{ "drdpq", XRC(63,770,0), X_MASK, POWER6, { FRT, FRB } },
{ "drdpq.", XRC(63,770,1), X_MASK, POWER6, { FRT, FRB } },
{ "dcffixq", XRC(63,802,0), X_MASK, POWER6, { FRT, FRB } },
{ "dcffixq.",XRC(63,802,1), X_MASK, POWER6, { FRT, FRB } },
{ "fctid", XRC(63,814,0), XRA_MASK, PPC64, { FRT, FRB } },
{ "fctid.", XRC(63,814,1), XRA_MASK, PPC64, { FRT, FRB } },
{ "fctidz", XRC(63,815,0), XRA_MASK, PPC64, { FRT, FRB } },
{ "fctidz.", XRC(63,815,1), XRA_MASK, PPC64, { FRT, FRB } },
{ "denbcdq", XRC(63,834,0), X_MASK, POWER6, { S, FRT, FRB } },
{ "denbcdq.",XRC(63,834,1), X_MASK, POWER6, { S, FRT, FRB } },
{ "fcfid", XRC(63,846,0), XRA_MASK, PPC64, { FRT, FRB } },
{ "fcfid.", XRC(63,846,1), XRA_MASK, PPC64, { FRT, FRB } },
{ "diexq", XRC(63,866,0), X_MASK, POWER6, { FRT, FRA, FRB } },
{ "diexq.", XRC(63,866,1), X_MASK, POWER6, { FRT, FRA, FRB } },
};
const int powerpc_num_opcodes =
sizeof (powerpc_opcodes) / sizeof (powerpc_opcodes[0]);
/* The macro table. This is only used by the assembler. */
/* The expressions of the form (-x ! 31) & (x | 31) have the value 0
when x=0; 32-x when x is between 1 and 31; are negative if x is
negative; and are 32 or more otherwise. This is what you want
when, for instance, you are emulating a right shift by a
rotate-left-and-mask, because the underlying instructions support
shifts of size 0 but not shifts of size 32. By comparison, when
extracting x bits from some word you want to use just 32-x, because
the underlying instructions don't support extracting 0 bits but do
support extracting the whole word (32 bits in this case). */
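/* Illustrative worked example (added, not part of the original comment),
   assuming the assembler's "!" operator denotes bitwise or-not (a | ~b),
   as in GAS: for the "srwi" macro below with a shift of 8, the rotate
   amount (-(8) ! 31) & ((8) | 31) = (-8 | ~31) & 31 = 24, so
   "srwi %0,%1,8" expands to "rlwinm %0,%1,24,8,31". */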
const struct powerpc_macro powerpc_macros[] = {
{ "extldi", 4, PPC64, "rldicr %0,%1,%3,(%2)-1" },
{ "extldi.", 4, PPC64, "rldicr. %0,%1,%3,(%2)-1" },
{ "extrdi", 4, PPC64, "rldicl %0,%1,(%2)+(%3),64-(%2)" },
{ "extrdi.", 4, PPC64, "rldicl. %0,%1,(%2)+(%3),64-(%2)" },
{ "insrdi", 4, PPC64, "rldimi %0,%1,64-((%2)+(%3)),%3" },
{ "insrdi.", 4, PPC64, "rldimi. %0,%1,64-((%2)+(%3)),%3" },
{ "rotrdi", 3, PPC64, "rldicl %0,%1,(-(%2)!63)&((%2)|63),0" },
{ "rotrdi.", 3, PPC64, "rldicl. %0,%1,(-(%2)!63)&((%2)|63),0" },
{ "sldi", 3, PPC64, "rldicr %0,%1,%2,63-(%2)" },
{ "sldi.", 3, PPC64, "rldicr. %0,%1,%2,63-(%2)" },
{ "srdi", 3, PPC64, "rldicl %0,%1,(-(%2)!63)&((%2)|63),%2" },
{ "srdi.", 3, PPC64, "rldicl. %0,%1,(-(%2)!63)&((%2)|63),%2" },
{ "clrrdi", 3, PPC64, "rldicr %0,%1,0,63-(%2)" },
{ "clrrdi.", 3, PPC64, "rldicr. %0,%1,0,63-(%2)" },
{ "clrlsldi",4, PPC64, "rldic %0,%1,%3,(%2)-(%3)" },
{ "clrlsldi.",4, PPC64, "rldic. %0,%1,%3,(%2)-(%3)" },
{ "extlwi", 4, PPCCOM, "rlwinm %0,%1,%3,0,(%2)-1" },
{ "extlwi.", 4, PPCCOM, "rlwinm. %0,%1,%3,0,(%2)-1" },
{ "extrwi", 4, PPCCOM, "rlwinm %0,%1,((%2)+(%3))&((%2)+(%3)<>32),32-(%2),31" },
{ "extrwi.", 4, PPCCOM, "rlwinm. %0,%1,((%2)+(%3))&((%2)+(%3)<>32),32-(%2),31" },
{ "inslwi", 4, PPCCOM, "rlwimi %0,%1,(-(%3)!31)&((%3)|31),%3,(%2)+(%3)-1" },
{ "inslwi.", 4, PPCCOM, "rlwimi. %0,%1,(-(%3)!31)&((%3)|31),%3,(%2)+(%3)-1"},
{ "insrwi", 4, PPCCOM, "rlwimi %0,%1,32-((%2)+(%3)),%3,(%2)+(%3)-1" },
{ "insrwi.", 4, PPCCOM, "rlwimi. %0,%1,32-((%2)+(%3)),%3,(%2)+(%3)-1"},
{ "rotrwi", 3, PPCCOM, "rlwinm %0,%1,(-(%2)!31)&((%2)|31),0,31" },
{ "rotrwi.", 3, PPCCOM, "rlwinm. %0,%1,(-(%2)!31)&((%2)|31),0,31" },
{ "slwi", 3, PPCCOM, "rlwinm %0,%1,%2,0,31-(%2)" },
{ "sli", 3, PWRCOM, "rlinm %0,%1,%2,0,31-(%2)" },
{ "slwi.", 3, PPCCOM, "rlwinm. %0,%1,%2,0,31-(%2)" },
{ "sli.", 3, PWRCOM, "rlinm. %0,%1,%2,0,31-(%2)" },
{ "srwi", 3, PPCCOM, "rlwinm %0,%1,(-(%2)!31)&((%2)|31),%2,31" },
{ "sri", 3, PWRCOM, "rlinm %0,%1,(-(%2)!31)&((%2)|31),%2,31" },
{ "srwi.", 3, PPCCOM, "rlwinm. %0,%1,(-(%2)!31)&((%2)|31),%2,31" },
{ "sri.", 3, PWRCOM, "rlinm. %0,%1,(-(%2)!31)&((%2)|31),%2,31" },
{ "clrrwi", 3, PPCCOM, "rlwinm %0,%1,0,0,31-(%2)" },
{ "clrrwi.", 3, PPCCOM, "rlwinm. %0,%1,0,0,31-(%2)" },
{ "clrlslwi",4, PPCCOM, "rlwinm %0,%1,%3,(%2)-(%3),31-(%3)" },
{ "clrlslwi.",4, PPCCOM, "rlwinm. %0,%1,%3,(%2)-(%3),31-(%3)" },
};
const int powerpc_num_macros =
sizeof (powerpc_macros) / sizeof (powerpc_macros[0]);
/* This file provides several disassembler functions, all of which use
the disassembler interface defined in dis-asm.h. Several functions
are provided because this file handles disassembly for the PowerPC
in both big and little endian mode and also for the POWER (RS/6000)
chip. */
static int print_insn_powerpc (bfd_vma, struct disassemble_info *, int, int);
/* Determine which set of machines to disassemble for. PPC403/601 or
BookE. For convenience, also disassemble instructions supported
by the AltiVec vector unit. */
static int
powerpc_dialect (struct disassemble_info *info)
{
int dialect = PPC_OPCODE_PPC;
if (BFD_DEFAULT_TARGET_SIZE == 64)
dialect |= PPC_OPCODE_64;
if (info->disassembler_options
&& strstr (info->disassembler_options, "booke") != NULL)
dialect |= PPC_OPCODE_BOOKE | PPC_OPCODE_BOOKE64;
else if ((info->mach == bfd_mach_ppc_e500)
|| (info->disassembler_options
&& strstr (info->disassembler_options, "e500") != NULL))
dialect |= (PPC_OPCODE_BOOKE
| PPC_OPCODE_SPE | PPC_OPCODE_ISEL
| PPC_OPCODE_EFS | PPC_OPCODE_BRLOCK
| PPC_OPCODE_PMR | PPC_OPCODE_CACHELCK
| PPC_OPCODE_RFMCI);
else if (info->disassembler_options
&& strstr (info->disassembler_options, "efs") != NULL)
dialect |= PPC_OPCODE_EFS;
else if (info->disassembler_options
&& strstr (info->disassembler_options, "e300") != NULL)
dialect |= PPC_OPCODE_E300 | PPC_OPCODE_CLASSIC | PPC_OPCODE_COMMON;
else if (info->disassembler_options
&& strstr (info->disassembler_options, "440") != NULL)
dialect |= PPC_OPCODE_BOOKE | PPC_OPCODE_32
| PPC_OPCODE_440 | PPC_OPCODE_ISEL | PPC_OPCODE_RFMCI;
else
dialect |= (PPC_OPCODE_403 | PPC_OPCODE_601 | PPC_OPCODE_CLASSIC
| PPC_OPCODE_COMMON | PPC_OPCODE_ALTIVEC);
if (info->disassembler_options
&& strstr (info->disassembler_options, "power4") != NULL)
dialect |= PPC_OPCODE_POWER4;
if (info->disassembler_options
&& strstr (info->disassembler_options, "power5") != NULL)
dialect |= PPC_OPCODE_POWER4 | PPC_OPCODE_POWER5;
if (info->disassembler_options
&& strstr (info->disassembler_options, "cell") != NULL)
dialect |= PPC_OPCODE_POWER4 | PPC_OPCODE_CELL | PPC_OPCODE_ALTIVEC;
if (info->disassembler_options
&& strstr (info->disassembler_options, "power6") != NULL)
dialect |= PPC_OPCODE_POWER4 | PPC_OPCODE_POWER5 | PPC_OPCODE_POWER6 | PPC_OPCODE_ALTIVEC;
if (info->disassembler_options
&& strstr (info->disassembler_options, "any") != NULL)
dialect |= PPC_OPCODE_ANY;
if (info->disassembler_options)
{
if (strstr (info->disassembler_options, "32") != NULL)
dialect &= ~PPC_OPCODE_64;
else if (strstr (info->disassembler_options, "64") != NULL)
dialect |= PPC_OPCODE_64;
}
info->private_data = (char *) 0 + dialect;
return dialect;
}
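/* Illustrative mapping (a sketch, not exhaustive): with
   info->disassembler_options = "power6,32", the checks above add
   PPC_OPCODE_POWER4 | PPC_OPCODE_POWER5 | PPC_OPCODE_POWER6 |
   PPC_OPCODE_ALTIVEC on top of the default classic/403/601 set chosen
   by the else branch, and then clear PPC_OPCODE_64 because of "32". */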
/* QEMU default */
int
print_insn_ppc (bfd_vma memaddr, struct disassemble_info *info)
{
int dialect = (char *) info->private_data - (char *) 0;
return print_insn_powerpc (memaddr, info, info->endian == BFD_ENDIAN_BIG,
dialect);
}
/* Print a big endian PowerPC instruction. */
int
print_insn_big_powerpc (bfd_vma memaddr, struct disassemble_info *info)
{
int dialect = (char *) info->private_data - (char *) 0;
return print_insn_powerpc (memaddr, info, 1, dialect);
}
/* Print a little endian PowerPC instruction. */
int
print_insn_little_powerpc (bfd_vma memaddr, struct disassemble_info *info)
{
int dialect = (char *) info->private_data - (char *) 0;
return print_insn_powerpc (memaddr, info, 0, dialect);
}
/* Print a POWER (RS/6000) instruction. */
int
print_insn_rs6000 (bfd_vma memaddr, struct disassemble_info *info)
{
return print_insn_powerpc (memaddr, info, 1, PPC_OPCODE_POWER);
}
/* Extract the operand value from the PowerPC or POWER instruction. */
static long
operand_value_powerpc (const struct powerpc_operand *operand,
unsigned long insn, int dialect)
{
long value;
int invalid;
/* Extract the value from the instruction. */
if (operand->extract)
value = (*operand->extract) (insn, dialect, &invalid);
else
{
value = (insn >> operand->shift) & operand->bitm;
if ((operand->flags & PPC_OPERAND_SIGNED) != 0)
{
/* BITM is always some number of zeros followed by some
number of ones, followed by some number of zeros. */
unsigned long top = operand->bitm;
/* top & -top gives the rightmost 1 bit, so this
fills in any trailing zeros. */
top |= (top & -top) - 1;
top &= ~(top >> 1);
value = (value ^ top) - top;
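/* Illustrative example (added for clarity): for a plain 16-bit
   signed field, bitm = 0xffff gives top = 0x8000 after the two
   steps above, so a raw field value of 0xfffe becomes
   (0xfffe ^ 0x8000) - 0x8000 = -2, i.e. ordinary sign extension. */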
}
}
return value;
}
/* Determine whether the optional operand(s) should be printed. */
static int
skip_optional_operands (const unsigned char *opindex,
unsigned long insn, int dialect)
{
const struct powerpc_operand *operand;
for (; *opindex != 0; opindex++)
{
operand = &powerpc_operands[*opindex];
if ((operand->flags & PPC_OPERAND_NEXT) != 0
|| ((operand->flags & PPC_OPERAND_OPTIONAL) != 0
&& operand_value_powerpc (operand, insn, dialect) != 0))
return 0;
}
return 1;
}
/* Print a PowerPC or POWER instruction. */
static int
print_insn_powerpc (bfd_vma memaddr,
struct disassemble_info *info,
int bigendian,
int dialect)
{
bfd_byte buffer[4];
int status;
unsigned long insn;
const struct powerpc_opcode *opcode;
const struct powerpc_opcode *opcode_end;
unsigned long op;
if (dialect == 0)
dialect = powerpc_dialect (info);
status = (*info->read_memory_func) (memaddr, buffer, 4, info);
if (status != 0)
{
(*info->memory_error_func) (status, memaddr, info);
return -1;
}
if (bigendian)
insn = bfd_getb32 (buffer);
else
insn = bfd_getl32 (buffer);
/* Get the major opcode of the instruction. */
op = PPC_OP (insn);
/* Find the first match in the opcode table. We could speed this up
a bit by doing a binary search on the major opcode. */
opcode_end = powerpc_opcodes + powerpc_num_opcodes;
again:
for (opcode = powerpc_opcodes; opcode < opcode_end; opcode++)
{
unsigned long table_op;
const unsigned char *opindex;
const struct powerpc_operand *operand;
int invalid;
int need_comma;
int need_paren;
int skip_optional;
table_op = PPC_OP (opcode->opcode);
if (op < table_op)
break;
if (op > table_op)
continue;
if ((insn & opcode->mask) != opcode->opcode
|| (opcode->flags & dialect) == 0)
continue;
/* Make two passes over the operands. First see if any of them
have extraction functions, and, if they do, make sure the
instruction is valid. */
invalid = 0;
for (opindex = opcode->operands; *opindex != 0; opindex++)
{
operand = powerpc_operands + *opindex;
if (operand->extract)
(*operand->extract) (insn, dialect, &invalid);
}
if (invalid)
continue;
/* The instruction is valid. */
if (opcode->operands[0] != 0)
(*info->fprintf_func) (info->stream, "%-7s ", opcode->name);
else
(*info->fprintf_func) (info->stream, "%s", opcode->name);
/* Now extract and print the operands. */
need_comma = 0;
need_paren = 0;
skip_optional = -1;
for (opindex = opcode->operands; *opindex != 0; opindex++)
{
long value;
operand = powerpc_operands + *opindex;
/* Operands that are marked FAKE are simply ignored. We
already made sure that the extract function considered
the instruction to be valid. */
if ((operand->flags & PPC_OPERAND_FAKE) != 0)
continue;
/* If all of the optional operands have the value zero,
then don't print any of them. */
if ((operand->flags & PPC_OPERAND_OPTIONAL) != 0)
{
if (skip_optional < 0)
skip_optional = skip_optional_operands (opindex, insn,
dialect);
if (skip_optional)
continue;
}
value = operand_value_powerpc (operand, insn, dialect);
if (need_comma)
{
(*info->fprintf_func) (info->stream, ",");
need_comma = 0;
}
/* Print the operand as directed by the flags. */
if ((operand->flags & PPC_OPERAND_GPR) != 0
|| ((operand->flags & PPC_OPERAND_GPR_0) != 0 && value != 0))
(*info->fprintf_func) (info->stream, "r%ld", value);
else if ((operand->flags & PPC_OPERAND_FPR) != 0)
(*info->fprintf_func) (info->stream, "f%ld", value);
else if ((operand->flags & PPC_OPERAND_VR) != 0)
(*info->fprintf_func) (info->stream, "v%ld", value);
else if ((operand->flags & PPC_OPERAND_RELATIVE) != 0)
(*info->print_address_func) (memaddr + value, info);
else if ((operand->flags & PPC_OPERAND_ABSOLUTE) != 0)
(*info->print_address_func) ((bfd_vma) value & 0xffffffff, info);
else if ((operand->flags & PPC_OPERAND_CR) == 0
|| (dialect & PPC_OPCODE_PPC) == 0)
(*info->fprintf_func) (info->stream, "%ld", value);
else
{
if (operand->bitm == 7)
(*info->fprintf_func) (info->stream, "cr%ld", value);
else
{
static const char *cbnames[4] = { "lt", "gt", "eq", "so" };
int cr;
int cc;
cr = value >> 2;
if (cr != 0)
(*info->fprintf_func) (info->stream, "4*cr%d+", cr);
cc = value & 3;
(*info->fprintf_func) (info->stream, "%s", cbnames[cc]);
}
}
if (need_paren)
{
(*info->fprintf_func) (info->stream, ")");
need_paren = 0;
}
if ((operand->flags & PPC_OPERAND_PARENS) == 0)
need_comma = 1;
else
{
(*info->fprintf_func) (info->stream, "(");
need_paren = 1;
}
}
/* We have found and printed an instruction; return. */
return 4;
}
if ((dialect & PPC_OPCODE_ANY) != 0)
{
dialect = ~PPC_OPCODE_ANY;
goto again;
}
/* We could not find a match. */
(*info->fprintf_func) (info->stream, ".long 0x%lx", insn);
return 4;
}
| gpl-2.0 |
yiancar/qmk_firmware | keyboards/nightly_boards/n60_s/encoder_action.c | 176 | 1751 | /* Copyright 2020 Neil Brian Ramirez
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include "encoder_action.h"
#ifdef ENCODERS
static uint8_t encoder_state[ENCODERS] = {0};
static keypos_t encoder_cw[ENCODERS] = ENCODERS_CW_KEY;
static keypos_t encoder_ccw[ENCODERS] = ENCODERS_CCW_KEY;
#endif
void encoder_action_unregister(void) {
#ifdef ENCODERS
for (int index = 0; index < ENCODERS; ++index) {
if (encoder_state[index]) {
keyevent_t encoder_event = (keyevent_t) {
.key = encoder_state[index] >> 1 ? encoder_cw[index] : encoder_ccw[index],
.pressed = false,
.time = (timer_read() | 1)
};
encoder_state[index] = 0;
action_exec(encoder_event);
}
}
#endif
}
void encoder_action_register(uint8_t index, bool clockwise) {
#ifdef ENCODERS
keyevent_t encoder_event = (keyevent_t) {
.key = clockwise ? encoder_cw[index] : encoder_ccw[index],
.pressed = true,
.time = (timer_read() | 1)
};
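/*
 * Descriptive note (added): clockwise maps to 0b10 and counterclockwise
 * to 0b01, so encoder_action_unregister() can recover the direction
 * later via "encoder_state[index] >> 1".
 */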
encoder_state[index] = (clockwise ^ 1) | (clockwise << 1);
action_exec(encoder_event);
#endif
}
| gpl-2.0 |
bio4554/android-4.1 | drivers/media/usb/dvb-usb-v2/af9035.c | 432 | 53740 | /*
* Afatech AF9035 DVB USB driver
*
* Copyright (C) 2009 Antti Palosaari <crope@iki.fi>
* Copyright (C) 2012 Antti Palosaari <crope@iki.fi>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*/
#include "af9035.h"
/* Max transfer size done by I2C transfer functions */
#define MAX_XFER_SIZE 64
DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr);
static u16 af9035_checksum(const u8 *buf, size_t len)
{
size_t i;
u16 checksum = 0;
for (i = 1; i < len; i++) {
if (i % 2)
checksum += buf[i] << 8;
else
checksum += buf[i];
}
checksum = ~checksum;
return checksum;
}
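/*
 * Illustrative example (added): for buf = { 0x05, 0x01, 0x02, 0x03 } and
 * len = 4, the loop above skips buf[0] and sums 0x01 << 8 (odd index),
 * 0x02 (even index) and 0x03 << 8 (odd index), giving 0x0402; the
 * returned checksum is the one's complement, 0xfbfd.
 */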
static int af9035_ctrl_msg(struct dvb_usb_device *d, struct usb_req *req)
{
#define REQ_HDR_LEN 4 /* send header size */
#define ACK_HDR_LEN 3 /* receive header size */
#define CHECKSUM_LEN 2
#define USB_TIMEOUT 2000
struct state *state = d_to_priv(d);
int ret, wlen, rlen;
u16 checksum, tmp_checksum;
mutex_lock(&d->usb_mutex);
/* buffer overflow check */
if (req->wlen > (BUF_LEN - REQ_HDR_LEN - CHECKSUM_LEN) ||
req->rlen > (BUF_LEN - ACK_HDR_LEN - CHECKSUM_LEN)) {
dev_err(&d->udev->dev, "%s: too much data wlen=%d rlen=%d\n",
KBUILD_MODNAME, req->wlen, req->rlen);
ret = -EINVAL;
goto exit;
}
state->buf[0] = REQ_HDR_LEN + req->wlen + CHECKSUM_LEN - 1;
state->buf[1] = req->mbox;
state->buf[2] = req->cmd;
state->buf[3] = state->seq++;
memcpy(&state->buf[REQ_HDR_LEN], req->wbuf, req->wlen);
wlen = REQ_HDR_LEN + req->wlen + CHECKSUM_LEN;
rlen = ACK_HDR_LEN + req->rlen + CHECKSUM_LEN;
/* calc and add checksum */
checksum = af9035_checksum(state->buf, state->buf[0] - 1);
state->buf[state->buf[0] - 1] = (checksum >> 8);
state->buf[state->buf[0] - 0] = (checksum & 0xff);
/* no ack for these packets */
if (req->cmd == CMD_FW_DL)
rlen = 0;
ret = dvb_usbv2_generic_rw_locked(d,
state->buf, wlen, state->buf, rlen);
if (ret)
goto exit;
/* no ack for those packets */
if (req->cmd == CMD_FW_DL)
goto exit;
/* verify checksum */
checksum = af9035_checksum(state->buf, rlen - 2);
tmp_checksum = (state->buf[rlen - 2] << 8) | state->buf[rlen - 1];
if (tmp_checksum != checksum) {
dev_err(&d->udev->dev,
"%s: command=%02x checksum mismatch (%04x != %04x)\n",
KBUILD_MODNAME, req->cmd, tmp_checksum,
checksum);
ret = -EIO;
goto exit;
}
/* check status */
if (state->buf[2]) {
/* fw returns status 1 when IR code was not received */
if (req->cmd == CMD_IR_GET || state->buf[2] == 1) {
ret = 1;
goto exit;
}
dev_dbg(&d->udev->dev, "%s: command=%02x failed fw error=%d\n",
__func__, req->cmd, state->buf[2]);
ret = -EIO;
goto exit;
}
/* read request, copy returned data to return buf */
if (req->rlen)
memcpy(req->rbuf, &state->buf[ACK_HDR_LEN], req->rlen);
exit:
mutex_unlock(&d->usb_mutex);
if (ret < 0)
dev_dbg(&d->udev->dev, "%s: failed=%d\n", __func__, ret);
return ret;
}
/* write multiple registers */
static int af9035_wr_regs(struct dvb_usb_device *d, u32 reg, u8 *val, int len)
{
u8 wbuf[MAX_XFER_SIZE];
u8 mbox = (reg >> 16) & 0xff;
struct usb_req req = { CMD_MEM_WR, mbox, 6 + len, wbuf, 0, NULL };
if (6 + len > sizeof(wbuf)) {
dev_warn(&d->udev->dev, "%s: i2c wr: len=%d is too big!\n",
KBUILD_MODNAME, len);
return -EOPNOTSUPP;
}
wbuf[0] = len;
wbuf[1] = 2;
wbuf[2] = 0;
wbuf[3] = 0;
wbuf[4] = (reg >> 8) & 0xff;
wbuf[5] = (reg >> 0) & 0xff;
memcpy(&wbuf[6], val, len);
return af9035_ctrl_msg(d, &req);
}
/* read multiple registers */
static int af9035_rd_regs(struct dvb_usb_device *d, u32 reg, u8 *val, int len)
{
u8 wbuf[] = { len, 2, 0, 0, (reg >> 8) & 0xff, reg & 0xff };
u8 mbox = (reg >> 16) & 0xff;
struct usb_req req = { CMD_MEM_RD, mbox, sizeof(wbuf), wbuf, len, val };
return af9035_ctrl_msg(d, &req);
}
/* write single register */
static int af9035_wr_reg(struct dvb_usb_device *d, u32 reg, u8 val)
{
return af9035_wr_regs(d, reg, &val, 1);
}
/* read single register */
static int af9035_rd_reg(struct dvb_usb_device *d, u32 reg, u8 *val)
{
return af9035_rd_regs(d, reg, val, 1);
}
/* write single register with mask */
static int af9035_wr_reg_mask(struct dvb_usb_device *d, u32 reg, u8 val,
u8 mask)
{
int ret;
u8 tmp;
/* no need for read if whole reg is written */
if (mask != 0xff) {
ret = af9035_rd_regs(d, reg, &tmp, 1);
if (ret)
return ret;
val &= mask;
tmp &= ~mask;
val |= tmp;
}
return af9035_wr_regs(d, reg, &val, 1);
}
static int af9035_add_i2c_dev(struct dvb_usb_device *d, const char *type,
u8 addr, void *platform_data, struct i2c_adapter *adapter)
{
int ret, num;
struct state *state = d_to_priv(d);
struct i2c_client *client;
struct i2c_board_info board_info = {
.addr = addr,
.platform_data = platform_data,
};
strlcpy(board_info.type, type, I2C_NAME_SIZE);
/* find first free client */
for (num = 0; num < AF9035_I2C_CLIENT_MAX; num++) {
if (state->i2c_client[num] == NULL)
break;
}
dev_dbg(&d->udev->dev, "%s: num=%d\n", __func__, num);
if (num == AF9035_I2C_CLIENT_MAX) {
dev_err(&d->udev->dev, "%s: I2C client out of index\n",
KBUILD_MODNAME);
ret = -ENODEV;
goto err;
}
request_module("%s", board_info.type);
/* register I2C device */
client = i2c_new_device(adapter, &board_info);
if (client == NULL || client->dev.driver == NULL) {
ret = -ENODEV;
goto err;
}
/* increase I2C driver usage count */
if (!try_module_get(client->dev.driver->owner)) {
i2c_unregister_device(client);
ret = -ENODEV;
goto err;
}
state->i2c_client[num] = client;
return 0;
err:
dev_dbg(&d->udev->dev, "%s: failed=%d\n", __func__, ret);
return ret;
}
static void af9035_del_i2c_dev(struct dvb_usb_device *d)
{
int num;
struct state *state = d_to_priv(d);
struct i2c_client *client;
/* find last used client */
num = AF9035_I2C_CLIENT_MAX;
while (num--) {
if (state->i2c_client[num] != NULL)
break;
}
dev_dbg(&d->udev->dev, "%s: num=%d\n", __func__, num);
if (num == -1) {
dev_err(&d->udev->dev, "%s: I2C client out of index\n",
KBUILD_MODNAME);
goto err;
}
client = state->i2c_client[num];
/* decrease I2C driver usage count */
module_put(client->dev.driver->owner);
/* unregister I2C device */
i2c_unregister_device(client);
state->i2c_client[num] = NULL;
return;
err:
dev_dbg(&d->udev->dev, "%s: failed\n", __func__);
}
static int af9035_i2c_master_xfer(struct i2c_adapter *adap,
struct i2c_msg msg[], int num)
{
struct dvb_usb_device *d = i2c_get_adapdata(adap);
struct state *state = d_to_priv(d);
int ret;
if (mutex_lock_interruptible(&d->i2c_mutex) < 0)
return -EAGAIN;
/*
* The AF9035 I2C sub header is 5 bytes long. The meaning of those bytes is:
* 0: data len
* 1: I2C addr << 1
* 2: reg addr len
* byte 3 and 4 can be used as reg addr
* 3: reg addr MSB
* used when reg addr len is set to 2
* 4: reg addr LSB
* used when reg addr len is set to 1 or 2
*
* For simplicity we do not use the register address at all.
* NOTE: As the firmware knows the tuner type, there is a small chance it
* applies some tuner I2C hacks of its own, which may lead to problems if
* the firmware expects those bytes to be used.
*
* TODO: There are a few hacks here. The AF9035 chip integrates an AF9033
* demodulator. The IT9135 chip integrates an AF9033 demodulator and an RF
* tuner. For dual tuner devices there is also an external AF9033
* demodulator connected via an external I2C bus. All AF9033 demod I2C
* traffic, in both single and dual tuner configurations, is covered by the
* firmware - the actual USB IO looks just like a memory access.
* In the case of the IT913x chip, there is its own tuner driver. It is
* currently implemented as an I2C driver, even though the tuner IP block
* is likely built directly into the demodulator memory space and has no
* I2C bus of its own. The I2C subsystem does not allow registering
* multiple devices with the same slave address on the same bus. Because of
* that we reuse the demod address, shifted by one bit, in that case.
*
* For IT930x we use a different command and the sub header is
* different as well:
* 0: data len
* 1: I2C bus (0x03 seems to be the only value used)
* 2: I2C addr << 1
*/
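/*
 * Illustrative example (added, hypothetical values): a plain 2-byte write
 * to I2C slave 0x63 is wrapped by the CMD_I2C_WR branch below into the
 * sub header { 0x02, 0xc6, 0x00, 0x00, 0x00 } (data len, addr << 1, and
 * reg addr len/MSB/LSB all zero) followed by the payload bytes.
 */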
#define AF9035_IS_I2C_XFER_WRITE_READ(_msg, _num) \
(_num == 2 && !(_msg[0].flags & I2C_M_RD) && (_msg[1].flags & I2C_M_RD))
#define AF9035_IS_I2C_XFER_WRITE(_msg, _num) \
(_num == 1 && !(_msg[0].flags & I2C_M_RD))
#define AF9035_IS_I2C_XFER_READ(_msg, _num) \
(_num == 1 && (_msg[0].flags & I2C_M_RD))
if (AF9035_IS_I2C_XFER_WRITE_READ(msg, num)) {
if (msg[0].len > 40 || msg[1].len > 40) {
/* TODO: correct limits > 40 */
ret = -EOPNOTSUPP;
} else if ((msg[0].addr == state->af9033_i2c_addr[0]) ||
(msg[0].addr == state->af9033_i2c_addr[1]) ||
(state->chip_type == 0x9135)) {
/* demod access via firmware interface */
u32 reg = msg[0].buf[0] << 16 | msg[0].buf[1] << 8 |
msg[0].buf[2];
if (msg[0].addr == state->af9033_i2c_addr[1] ||
msg[0].addr == (state->af9033_i2c_addr[1] >> 1))
reg |= 0x100000;
ret = af9035_rd_regs(d, reg, &msg[1].buf[0],
msg[1].len);
} else {
/* I2C write + read */
u8 buf[MAX_XFER_SIZE];
struct usb_req req = { CMD_I2C_RD, 0, 5 + msg[0].len,
buf, msg[1].len, msg[1].buf };
if (state->chip_type == 0x9306) {
req.cmd = CMD_GENERIC_I2C_RD;
req.wlen = 3 + msg[0].len;
}
req.mbox |= ((msg[0].addr & 0x80) >> 3);
buf[0] = msg[1].len;
if (state->chip_type == 0x9306) {
buf[1] = 0x03; /* I2C bus */
buf[2] = msg[0].addr << 1;
memcpy(&buf[3], msg[0].buf, msg[0].len);
} else {
buf[1] = msg[0].addr << 1;
buf[2] = 0x00; /* reg addr len */
buf[3] = 0x00; /* reg addr MSB */
buf[4] = 0x00; /* reg addr LSB */
memcpy(&buf[5], msg[0].buf, msg[0].len);
}
ret = af9035_ctrl_msg(d, &req);
}
} else if (AF9035_IS_I2C_XFER_WRITE(msg, num)) {
if (msg[0].len > 40) {
/* TODO: correct limits > 40 */
ret = -EOPNOTSUPP;
} else if ((msg[0].addr == state->af9033_i2c_addr[0]) ||
(msg[0].addr == state->af9033_i2c_addr[1]) ||
(state->chip_type == 0x9135)) {
/* demod access via firmware interface */
u32 reg = msg[0].buf[0] << 16 | msg[0].buf[1] << 8 |
msg[0].buf[2];
if (msg[0].addr == state->af9033_i2c_addr[1] ||
msg[0].addr == (state->af9033_i2c_addr[1] >> 1))
reg |= 0x100000;
ret = af9035_wr_regs(d, reg, &msg[0].buf[3],
msg[0].len - 3);
} else {
/* I2C write */
u8 buf[MAX_XFER_SIZE];
struct usb_req req = { CMD_I2C_WR, 0, 5 + msg[0].len,
buf, 0, NULL };
if (state->chip_type == 0x9306) {
req.cmd = CMD_GENERIC_I2C_WR;
req.wlen = 3 + msg[0].len;
}
req.mbox |= ((msg[0].addr & 0x80) >> 3);
buf[0] = msg[0].len;
if (state->chip_type == 0x9306) {
buf[1] = 0x03; /* I2C bus */
buf[2] = msg[0].addr << 1;
memcpy(&buf[3], msg[0].buf, msg[0].len);
} else {
buf[1] = msg[0].addr << 1;
buf[2] = 0x00; /* reg addr len */
buf[3] = 0x00; /* reg addr MSB */
buf[4] = 0x00; /* reg addr LSB */
memcpy(&buf[5], msg[0].buf, msg[0].len);
}
ret = af9035_ctrl_msg(d, &req);
}
} else if (AF9035_IS_I2C_XFER_READ(msg, num)) {
if (msg[0].len > 40) {
/* TODO: correct limits > 40 */
ret = -EOPNOTSUPP;
} else {
/* I2C read */
u8 buf[5];
struct usb_req req = { CMD_I2C_RD, 0, sizeof(buf),
buf, msg[0].len, msg[0].buf };
if (state->chip_type == 0x9306) {
req.cmd = CMD_GENERIC_I2C_RD;
req.wlen = 3;
}
req.mbox |= ((msg[0].addr & 0x80) >> 3);
buf[0] = msg[0].len;
if (state->chip_type == 0x9306) {
buf[1] = 0x03; /* I2C bus */
buf[2] = msg[0].addr << 1;
} else {
buf[1] = msg[0].addr << 1;
buf[2] = 0x00; /* reg addr len */
buf[3] = 0x00; /* reg addr MSB */
buf[4] = 0x00; /* reg addr LSB */
}
ret = af9035_ctrl_msg(d, &req);
}
} else {
/*
* We support only three kinds of I2C transactions:
* 1) 1 x write + 1 x read (repeated start)
* 2) 1 x write
* 3) 1 x read
*/
ret = -EOPNOTSUPP;
}
mutex_unlock(&d->i2c_mutex);
if (ret < 0)
return ret;
else
return num;
}
static u32 af9035_i2c_functionality(struct i2c_adapter *adapter)
{
return I2C_FUNC_I2C;
}
static struct i2c_algorithm af9035_i2c_algo = {
.master_xfer = af9035_i2c_master_xfer,
.functionality = af9035_i2c_functionality,
};
static int af9035_identify_state(struct dvb_usb_device *d, const char **name)
{
struct state *state = d_to_priv(d);
int ret;
u8 wbuf[1] = { 1 };
u8 rbuf[4];
struct usb_req req = { CMD_FW_QUERYINFO, 0, sizeof(wbuf), wbuf,
sizeof(rbuf), rbuf };
ret = af9035_rd_regs(d, 0x1222, rbuf, 3);
if (ret < 0)
goto err;
state->chip_version = rbuf[0];
state->chip_type = rbuf[2] << 8 | rbuf[1] << 0;
ret = af9035_rd_reg(d, 0x384f, &state->prechip_version);
if (ret < 0)
goto err;
dev_info(&d->udev->dev,
"%s: prechip_version=%02x chip_version=%02x chip_type=%04x\n",
KBUILD_MODNAME, state->prechip_version,
state->chip_version, state->chip_type);
if (state->chip_type == 0x9135) {
if (state->chip_version == 0x02)
*name = AF9035_FIRMWARE_IT9135_V2;
else
*name = AF9035_FIRMWARE_IT9135_V1;
state->eeprom_addr = EEPROM_BASE_IT9135;
} else if (state->chip_type == 0x9306) {
*name = AF9035_FIRMWARE_IT9303;
state->eeprom_addr = EEPROM_BASE_IT9135;
} else {
*name = AF9035_FIRMWARE_AF9035;
state->eeprom_addr = EEPROM_BASE_AF9035;
}
ret = af9035_ctrl_msg(d, &req);
if (ret < 0)
goto err;
dev_dbg(&d->udev->dev, "%s: reply=%*ph\n", __func__, 4, rbuf);
if (rbuf[0] || rbuf[1] || rbuf[2] || rbuf[3])
ret = WARM;
else
ret = COLD;
return ret;
err:
dev_dbg(&d->udev->dev, "%s: failed=%d\n", __func__, ret);
return ret;
}
static int af9035_download_firmware_old(struct dvb_usb_device *d,
const struct firmware *fw)
{
int ret, i, j, len;
u8 wbuf[1];
struct usb_req req = { 0, 0, 0, NULL, 0, NULL };
struct usb_req req_fw_dl = { CMD_FW_DL, 0, 0, wbuf, 0, NULL };
u8 hdr_core;
u16 hdr_addr, hdr_data_len, hdr_checksum;
#define MAX_DATA 58
#define HDR_SIZE 7
/*
* Thanks to Daniel Glöckner <daniel-gl@gmx.net> for that info!
*
* byte 0: MCS 51 core
* There are two inside the AF9035 (1=Link and 2=OFDM) with separate
* address spaces
* byte 1-2: Big endian destination address
* byte 3-4: Big endian number of data bytes following the header
* byte 5-6: Big endian header checksum, apparently ignored by the chip
* Calculated as ~(h[0]*256+h[1]+h[2]*256+h[3]+h[4]*256)
*/
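/*
 * Illustrative example (added, hypothetical bytes): a header of
 * 01 90 00 00 3a xx xx would describe a chunk for core 1 (Link),
 * destination address 0x9000, with 0x3a (58) data bytes following the
 * 7-byte header; the checksum bytes are not verified by this loop.
 */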
for (i = fw->size; i > HDR_SIZE;) {
hdr_core = fw->data[fw->size - i + 0];
hdr_addr = fw->data[fw->size - i + 1] << 8;
hdr_addr |= fw->data[fw->size - i + 2] << 0;
hdr_data_len = fw->data[fw->size - i + 3] << 8;
hdr_data_len |= fw->data[fw->size - i + 4] << 0;
hdr_checksum = fw->data[fw->size - i + 5] << 8;
hdr_checksum |= fw->data[fw->size - i + 6] << 0;
dev_dbg(&d->udev->dev,
"%s: core=%d addr=%04x data_len=%d checksum=%04x\n",
__func__, hdr_core, hdr_addr, hdr_data_len,
hdr_checksum);
if (((hdr_core != 1) && (hdr_core != 2)) ||
(hdr_data_len > i)) {
dev_dbg(&d->udev->dev, "%s: bad firmware\n", __func__);
break;
}
/* download begin packet */
req.cmd = CMD_FW_DL_BEGIN;
ret = af9035_ctrl_msg(d, &req);
if (ret < 0)
goto err;
/* download firmware packet(s) */
for (j = HDR_SIZE + hdr_data_len; j > 0; j -= MAX_DATA) {
len = j;
if (len > MAX_DATA)
len = MAX_DATA;
req_fw_dl.wlen = len;
req_fw_dl.wbuf = (u8 *) &fw->data[fw->size - i +
HDR_SIZE + hdr_data_len - j];
ret = af9035_ctrl_msg(d, &req_fw_dl);
if (ret < 0)
goto err;
}
/* download end packet */
req.cmd = CMD_FW_DL_END;
ret = af9035_ctrl_msg(d, &req);
if (ret < 0)
goto err;
i -= hdr_data_len + HDR_SIZE;
dev_dbg(&d->udev->dev, "%s: data uploaded=%zu\n",
__func__, fw->size - i);
}
/* print warn if firmware is bad, continue and see what happens */
if (i)
dev_warn(&d->udev->dev, "%s: bad firmware\n", KBUILD_MODNAME);
return 0;
err:
dev_dbg(&d->udev->dev, "%s: failed=%d\n", __func__, ret);
return ret;
}
static int af9035_download_firmware_new(struct dvb_usb_device *d,
const struct firmware *fw)
{
int ret, i, i_prev;
struct usb_req req_fw_dl = { CMD_FW_SCATTER_WR, 0, 0, NULL, 0, NULL };
#define HDR_SIZE 7
/*
* There seems to be the following firmware header. The meaning of
* bytes 0-3 is unknown.
*
* 0: 3
* 1: 0, 1
* 2: 0
* 3: 1, 2, 3
* 4: addr MSB
* 5: addr LSB
* 6: count of data bytes ?
*/
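/*
 * Descriptive note (added): the loop below simply scans for the next
 * occurrence of the byte pattern 03 00 00 or 03 01 00 and uploads
 * everything accumulated since the previous boundary as a single
 * CMD_FW_SCATTER_WR transfer.
 */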
for (i = HDR_SIZE, i_prev = 0; i <= fw->size; i++) {
if (i == fw->size ||
(fw->data[i + 0] == 0x03 &&
(fw->data[i + 1] == 0x00 ||
fw->data[i + 1] == 0x01) &&
fw->data[i + 2] == 0x00)) {
req_fw_dl.wlen = i - i_prev;
req_fw_dl.wbuf = (u8 *) &fw->data[i_prev];
i_prev = i;
ret = af9035_ctrl_msg(d, &req_fw_dl);
if (ret < 0)
goto err;
dev_dbg(&d->udev->dev, "%s: data uploaded=%d\n",
__func__, i);
}
}
return 0;
err:
dev_dbg(&d->udev->dev, "%s: failed=%d\n", __func__, ret);
return ret;
}
static int af9035_download_firmware(struct dvb_usb_device *d,
const struct firmware *fw)
{
struct state *state = d_to_priv(d);
int ret;
u8 wbuf[1];
u8 rbuf[4];
u8 tmp;
struct usb_req req = { 0, 0, 0, NULL, 0, NULL };
struct usb_req req_fw_ver = { CMD_FW_QUERYINFO, 0, 1, wbuf, 4, rbuf };
dev_dbg(&d->udev->dev, "%s:\n", __func__);
/*
* In case of a dual tuner configuration we need to do some extra
* initialization in order to download firmware to the slave demod too,
* which is done by the master demod.
* The master also feeds the clock and controls power via GPIO.
*/
ret = af9035_rd_reg(d, state->eeprom_addr + EEPROM_TS_MODE, &tmp);
if (ret < 0)
goto err;
if (tmp == 1 || tmp == 3) {
/* configure gpioh1, reset & power slave demod */
ret = af9035_wr_reg_mask(d, 0x00d8b0, 0x01, 0x01);
if (ret < 0)
goto err;
ret = af9035_wr_reg_mask(d, 0x00d8b1, 0x01, 0x01);
if (ret < 0)
goto err;
ret = af9035_wr_reg_mask(d, 0x00d8af, 0x00, 0x01);
if (ret < 0)
goto err;
usleep_range(10000, 50000);
ret = af9035_wr_reg_mask(d, 0x00d8af, 0x01, 0x01);
if (ret < 0)
goto err;
/* tell the slave I2C address */
ret = af9035_rd_reg(d,
state->eeprom_addr + EEPROM_2ND_DEMOD_ADDR,
&tmp);
if (ret < 0)
goto err;
/* use default I2C address if eeprom has no address set */
if (!tmp)
tmp = 0x3a;
if ((state->chip_type == 0x9135) ||
(state->chip_type == 0x9306)) {
ret = af9035_wr_reg(d, 0x004bfb, tmp);
if (ret < 0)
goto err;
} else {
ret = af9035_wr_reg(d, 0x00417f, tmp);
if (ret < 0)
goto err;
/* enable clock out */
ret = af9035_wr_reg_mask(d, 0x00d81a, 0x01, 0x01);
if (ret < 0)
goto err;
}
}
if (fw->data[0] == 0x01)
ret = af9035_download_firmware_old(d, fw);
else
ret = af9035_download_firmware_new(d, fw);
if (ret < 0)
goto err;
/* firmware loaded, request boot */
req.cmd = CMD_FW_BOOT;
ret = af9035_ctrl_msg(d, &req);
if (ret < 0)
goto err;
/* ensure firmware starts */
wbuf[0] = 1;
ret = af9035_ctrl_msg(d, &req_fw_ver);
if (ret < 0)
goto err;
if (!(rbuf[0] || rbuf[1] || rbuf[2] || rbuf[3])) {
dev_err(&d->udev->dev, "%s: firmware did not run\n",
KBUILD_MODNAME);
ret = -ENODEV;
goto err;
}
dev_info(&d->udev->dev, "%s: firmware version=%d.%d.%d.%d",
KBUILD_MODNAME, rbuf[0], rbuf[1], rbuf[2], rbuf[3]);
return 0;
err:
dev_dbg(&d->udev->dev, "%s: failed=%d\n", __func__, ret);
return ret;
}
static int af9035_read_config(struct dvb_usb_device *d)
{
struct state *state = d_to_priv(d);
int ret, i;
u8 tmp;
u16 tmp16, addr;
/* demod I2C "address" */
state->af9033_i2c_addr[0] = 0x38;
state->af9033_i2c_addr[1] = 0x3a;
state->af9033_config[0].adc_multiplier = AF9033_ADC_MULTIPLIER_2X;
state->af9033_config[1].adc_multiplier = AF9033_ADC_MULTIPLIER_2X;
state->af9033_config[0].ts_mode = AF9033_TS_MODE_USB;
state->af9033_config[1].ts_mode = AF9033_TS_MODE_SERIAL;
if (state->chip_type == 0x9135) {
/* feed clock for integrated RF tuner */
state->af9033_config[0].dyn0_clk = true;
state->af9033_config[1].dyn0_clk = true;
if (state->chip_version == 0x02) {
state->af9033_config[0].tuner = AF9033_TUNER_IT9135_60;
state->af9033_config[1].tuner = AF9033_TUNER_IT9135_60;
tmp16 = 0x00461d; /* eeprom memory mapped location */
} else {
state->af9033_config[0].tuner = AF9033_TUNER_IT9135_38;
state->af9033_config[1].tuner = AF9033_TUNER_IT9135_38;
tmp16 = 0x00461b; /* eeprom memory mapped location */
}
/* check if eeprom exists */
ret = af9035_rd_reg(d, tmp16, &tmp);
if (ret < 0)
goto err;
if (tmp == 0x00) {
dev_dbg(&d->udev->dev, "%s: no eeprom\n", __func__);
goto skip_eeprom;
}
} else if (state->chip_type == 0x9306) {
/*
* IT930x is a USB bridge; only single demod - single tuner
* configurations have been seen so far.
*/
return 0;
}
/* check if there are dual tuners */
ret = af9035_rd_reg(d, state->eeprom_addr + EEPROM_TS_MODE, &tmp);
if (ret < 0)
goto err;
if (tmp == 1 || tmp == 3)
state->dual_mode = true;
dev_dbg(&d->udev->dev, "%s: ts mode=%d dual mode=%d\n", __func__,
tmp, state->dual_mode);
if (state->dual_mode) {
/* read 2nd demodulator I2C address */
ret = af9035_rd_reg(d,
state->eeprom_addr + EEPROM_2ND_DEMOD_ADDR,
&tmp);
if (ret < 0)
goto err;
if (tmp)
state->af9033_i2c_addr[1] = tmp;
dev_dbg(&d->udev->dev, "%s: 2nd demod I2C addr=%02x\n",
__func__, tmp);
}
addr = state->eeprom_addr;
for (i = 0; i < state->dual_mode + 1; i++) {
/* tuner */
ret = af9035_rd_reg(d, addr + EEPROM_1_TUNER_ID, &tmp);
if (ret < 0)
goto err;
dev_dbg(&d->udev->dev, "%s: [%d]tuner=%02x\n",
__func__, i, tmp);
/* tuner sanity check */
if (state->chip_type == 0x9135) {
if (state->chip_version == 0x02) {
/* IT9135 BX (v2) */
switch (tmp) {
case AF9033_TUNER_IT9135_60:
case AF9033_TUNER_IT9135_61:
case AF9033_TUNER_IT9135_62:
state->af9033_config[i].tuner = tmp;
break;
}
} else {
/* IT9135 AX (v1) */
switch (tmp) {
case AF9033_TUNER_IT9135_38:
case AF9033_TUNER_IT9135_51:
case AF9033_TUNER_IT9135_52:
state->af9033_config[i].tuner = tmp;
break;
}
}
} else {
/* AF9035 */
state->af9033_config[i].tuner = tmp;
}
if (state->af9033_config[i].tuner != tmp) {
dev_info(&d->udev->dev,
"%s: [%d] overriding tuner from %02x to %02x\n",
KBUILD_MODNAME, i, tmp,
state->af9033_config[i].tuner);
}
switch (state->af9033_config[i].tuner) {
case AF9033_TUNER_TUA9001:
case AF9033_TUNER_FC0011:
case AF9033_TUNER_MXL5007T:
case AF9033_TUNER_TDA18218:
case AF9033_TUNER_FC2580:
case AF9033_TUNER_FC0012:
state->af9033_config[i].spec_inv = 1;
break;
case AF9033_TUNER_IT9135_38:
case AF9033_TUNER_IT9135_51:
case AF9033_TUNER_IT9135_52:
case AF9033_TUNER_IT9135_60:
case AF9033_TUNER_IT9135_61:
case AF9033_TUNER_IT9135_62:
break;
default:
dev_warn(&d->udev->dev,
"%s: tuner id=%02x not supported, please report!",
KBUILD_MODNAME, tmp);
}
/* disable dual mode if driver does not support it */
if (i == 1)
switch (state->af9033_config[i].tuner) {
case AF9033_TUNER_FC0012:
case AF9033_TUNER_IT9135_38:
case AF9033_TUNER_IT9135_51:
case AF9033_TUNER_IT9135_52:
case AF9033_TUNER_IT9135_60:
case AF9033_TUNER_IT9135_61:
case AF9033_TUNER_IT9135_62:
case AF9033_TUNER_MXL5007T:
break;
default:
state->dual_mode = false;
dev_info(&d->udev->dev,
"%s: driver does not support 2nd tuner and will disable it",
KBUILD_MODNAME);
}
/* tuner IF frequency */
ret = af9035_rd_reg(d, addr + EEPROM_1_IF_L, &tmp);
if (ret < 0)
goto err;
tmp16 = tmp;
ret = af9035_rd_reg(d, addr + EEPROM_1_IF_H, &tmp);
if (ret < 0)
goto err;
tmp16 |= tmp << 8;
dev_dbg(&d->udev->dev, "%s: [%d]IF=%d\n", __func__, i, tmp16);
addr += 0x10; /* shift for the 2nd tuner params */
}
skip_eeprom:
/* get demod clock */
ret = af9035_rd_reg(d, 0x00d800, &tmp);
if (ret < 0)
goto err;
tmp = (tmp >> 0) & 0x0f;
for (i = 0; i < ARRAY_SIZE(state->af9033_config); i++) {
if (state->chip_type == 0x9135)
state->af9033_config[i].clock = clock_lut_it9135[tmp];
else
state->af9033_config[i].clock = clock_lut_af9035[tmp];
}
return 0;
err:
dev_dbg(&d->udev->dev, "%s: failed=%d\n", __func__, ret);
return ret;
}
static int af9035_tua9001_tuner_callback(struct dvb_usb_device *d,
int cmd, int arg)
{
int ret;
u8 val;
dev_dbg(&d->udev->dev, "%s: cmd=%d arg=%d\n", __func__, cmd, arg);
/*
* CEN always enabled by hardware wiring
* RESETN GPIOT3
* RXEN GPIOT2
*/
switch (cmd) {
case TUA9001_CMD_RESETN:
if (arg)
val = 0x00;
else
val = 0x01;
ret = af9035_wr_reg_mask(d, 0x00d8e7, val, 0x01);
if (ret < 0)
goto err;
break;
case TUA9001_CMD_RXEN:
if (arg)
val = 0x01;
else
val = 0x00;
ret = af9035_wr_reg_mask(d, 0x00d8eb, val, 0x01);
if (ret < 0)
goto err;
break;
}
return 0;
err:
dev_dbg(&d->udev->dev, "%s: failed=%d\n", __func__, ret);
return ret;
}
static int af9035_fc0011_tuner_callback(struct dvb_usb_device *d,
int cmd, int arg)
{
int ret;
switch (cmd) {
case FC0011_FE_CALLBACK_POWER:
/* Tuner enable */
ret = af9035_wr_reg_mask(d, 0xd8eb, 1, 1);
if (ret < 0)
goto err;
ret = af9035_wr_reg_mask(d, 0xd8ec, 1, 1);
if (ret < 0)
goto err;
ret = af9035_wr_reg_mask(d, 0xd8ed, 1, 1);
if (ret < 0)
goto err;
/* LED */
ret = af9035_wr_reg_mask(d, 0xd8d0, 1, 1);
if (ret < 0)
goto err;
ret = af9035_wr_reg_mask(d, 0xd8d1, 1, 1);
if (ret < 0)
goto err;
usleep_range(10000, 50000);
break;
case FC0011_FE_CALLBACK_RESET:
ret = af9035_wr_reg(d, 0xd8e9, 1);
if (ret < 0)
goto err;
ret = af9035_wr_reg(d, 0xd8e8, 1);
if (ret < 0)
goto err;
ret = af9035_wr_reg(d, 0xd8e7, 1);
if (ret < 0)
goto err;
usleep_range(10000, 20000);
ret = af9035_wr_reg(d, 0xd8e7, 0);
if (ret < 0)
goto err;
usleep_range(10000, 20000);
break;
default:
ret = -EINVAL;
goto err;
}
return 0;
err:
dev_dbg(&d->udev->dev, "%s: failed=%d\n", __func__, ret);
return ret;
}
static int af9035_tuner_callback(struct dvb_usb_device *d, int cmd, int arg)
{
struct state *state = d_to_priv(d);
switch (state->af9033_config[0].tuner) {
case AF9033_TUNER_FC0011:
return af9035_fc0011_tuner_callback(d, cmd, arg);
case AF9033_TUNER_TUA9001:
return af9035_tua9001_tuner_callback(d, cmd, arg);
default:
break;
}
return 0;
}
static int af9035_frontend_callback(void *adapter_priv, int component,
int cmd, int arg)
{
struct i2c_adapter *adap = adapter_priv;
struct dvb_usb_device *d = i2c_get_adapdata(adap);
dev_dbg(&d->udev->dev, "%s: component=%d cmd=%d arg=%d\n",
__func__, component, cmd, arg);
switch (component) {
case DVB_FRONTEND_COMPONENT_TUNER:
return af9035_tuner_callback(d, cmd, arg);
default:
break;
}
return 0;
}
static int af9035_get_adapter_count(struct dvb_usb_device *d)
{
struct state *state = d_to_priv(d);
return state->dual_mode + 1;
}
static int af9035_frontend_attach(struct dvb_usb_adapter *adap)
{
struct state *state = adap_to_priv(adap);
struct dvb_usb_device *d = adap_to_d(adap);
int ret;
dev_dbg(&d->udev->dev, "%s: adap->id=%d\n", __func__, adap->id);
if (!state->af9033_config[adap->id].tuner) {
/* unsupported tuner */
ret = -ENODEV;
goto err;
}
state->af9033_config[adap->id].fe = &adap->fe[0];
state->af9033_config[adap->id].ops = &state->ops;
ret = af9035_add_i2c_dev(d, "af9033", state->af9033_i2c_addr[adap->id],
&state->af9033_config[adap->id], &d->i2c_adap);
if (ret)
goto err;
if (adap->fe[0] == NULL) {
ret = -ENODEV;
goto err;
}
/* disable I2C-gate */
adap->fe[0]->ops.i2c_gate_ctrl = NULL;
adap->fe[0]->callback = af9035_frontend_callback;
return 0;
err:
dev_dbg(&d->udev->dev, "%s: failed=%d\n", __func__, ret);
return ret;
}
static int it930x_frontend_attach(struct dvb_usb_adapter *adap)
{
struct state *state = adap_to_priv(adap);
struct dvb_usb_device *d = adap_to_d(adap);
int ret;
struct si2168_config si2168_config;
struct i2c_adapter *adapter;
dev_dbg(&d->udev->dev, "adap->id=%d\n", adap->id);
memset(&si2168_config, 0, sizeof(si2168_config));
si2168_config.i2c_adapter = &adapter;
si2168_config.fe = &adap->fe[0];
si2168_config.ts_mode = SI2168_TS_SERIAL;
state->af9033_config[adap->id].fe = &adap->fe[0];
state->af9033_config[adap->id].ops = &state->ops;
ret = af9035_add_i2c_dev(d, "si2168", 0x67, &si2168_config,
&d->i2c_adap);
if (ret)
goto err;
if (adap->fe[0] == NULL) {
ret = -ENODEV;
goto err;
}
state->i2c_adapter_demod = adapter;
return 0;
err:
dev_dbg(&d->udev->dev, "%s: failed=%d\n", __func__, ret);
return ret;
}
static int af9035_frontend_detach(struct dvb_usb_adapter *adap)
{
struct state *state = adap_to_priv(adap);
struct dvb_usb_device *d = adap_to_d(adap);
int demod2;
dev_dbg(&d->udev->dev, "%s: adap->id=%d\n", __func__, adap->id);
/*
* For dual tuner devices we have to resolve the 2nd demod client, as there
* are two different kinds of tuner drivers; one uses I2C binding
* and the other uses DVB attach/detach binding.
*/
switch (state->af9033_config[adap->id].tuner) {
case AF9033_TUNER_IT9135_38:
case AF9033_TUNER_IT9135_51:
case AF9033_TUNER_IT9135_52:
case AF9033_TUNER_IT9135_60:
case AF9033_TUNER_IT9135_61:
case AF9033_TUNER_IT9135_62:
demod2 = 2;
break;
default:
demod2 = 1;
}
if (adap->id == 1) {
if (state->i2c_client[demod2])
af9035_del_i2c_dev(d);
} else if (adap->id == 0) {
if (state->i2c_client[0])
af9035_del_i2c_dev(d);
}
return 0;
}
static struct tua9001_config af9035_tua9001_config = {
.i2c_addr = 0x60,
};
static const struct fc0011_config af9035_fc0011_config = {
.i2c_address = 0x60,
};
static struct mxl5007t_config af9035_mxl5007t_config[] = {
{
.xtal_freq_hz = MxL_XTAL_24_MHZ,
.if_freq_hz = MxL_IF_4_57_MHZ,
.invert_if = 0,
.loop_thru_enable = 0,
.clk_out_enable = 0,
.clk_out_amp = MxL_CLKOUT_AMP_0_94V,
}, {
.xtal_freq_hz = MxL_XTAL_24_MHZ,
.if_freq_hz = MxL_IF_4_57_MHZ,
.invert_if = 0,
.loop_thru_enable = 1,
.clk_out_enable = 1,
.clk_out_amp = MxL_CLKOUT_AMP_0_94V,
}
};
static struct tda18218_config af9035_tda18218_config = {
.i2c_address = 0x60,
.i2c_wr_max = 21,
};
static const struct fc2580_config af9035_fc2580_config = {
.i2c_addr = 0x56,
.clock = 16384000,
};
static const struct fc0012_config af9035_fc0012_config[] = {
{
.i2c_address = 0x63,
.xtal_freq = FC_XTAL_36_MHZ,
.dual_master = true,
.loop_through = true,
.clock_out = true,
}, {
.i2c_address = 0x63 | 0x80, /* I2C bus select hack */
.xtal_freq = FC_XTAL_36_MHZ,
.dual_master = true,
}
};
static int af9035_tuner_attach(struct dvb_usb_adapter *adap)
{
struct state *state = adap_to_priv(adap);
struct dvb_usb_device *d = adap_to_d(adap);
int ret;
struct dvb_frontend *fe;
struct i2c_msg msg[1];
u8 tuner_addr;
dev_dbg(&d->udev->dev, "%s: adap->id=%d\n", __func__, adap->id);
/*
* XXX: Hack used in this function: we abuse the unused I2C address bit [7]
* to carry info about which I2C bus is used in the dual tuner configuration.
*/
switch (state->af9033_config[adap->id].tuner) {
case AF9033_TUNER_TUA9001:
/* AF9035 gpiot3 = TUA9001 RESETN
AF9035 gpiot2 = TUA9001 RXEN */
/* configure gpiot2 and gpiot3 as output */
ret = af9035_wr_reg_mask(d, 0x00d8ec, 0x01, 0x01);
if (ret < 0)
goto err;
ret = af9035_wr_reg_mask(d, 0x00d8ed, 0x01, 0x01);
if (ret < 0)
goto err;
ret = af9035_wr_reg_mask(d, 0x00d8e8, 0x01, 0x01);
if (ret < 0)
goto err;
ret = af9035_wr_reg_mask(d, 0x00d8e9, 0x01, 0x01);
if (ret < 0)
goto err;
/* attach tuner */
fe = dvb_attach(tua9001_attach, adap->fe[0],
&d->i2c_adap, &af9035_tua9001_config);
break;
case AF9033_TUNER_FC0011:
fe = dvb_attach(fc0011_attach, adap->fe[0],
&d->i2c_adap, &af9035_fc0011_config);
break;
case AF9033_TUNER_MXL5007T:
if (adap->id == 0) {
ret = af9035_wr_reg(d, 0x00d8e0, 1);
if (ret < 0)
goto err;
ret = af9035_wr_reg(d, 0x00d8e1, 1);
if (ret < 0)
goto err;
ret = af9035_wr_reg(d, 0x00d8df, 0);
if (ret < 0)
goto err;
msleep(30);
ret = af9035_wr_reg(d, 0x00d8df, 1);
if (ret < 0)
goto err;
msleep(300);
ret = af9035_wr_reg(d, 0x00d8c0, 1);
if (ret < 0)
goto err;
ret = af9035_wr_reg(d, 0x00d8c1, 1);
if (ret < 0)
goto err;
ret = af9035_wr_reg(d, 0x00d8bf, 0);
if (ret < 0)
goto err;
ret = af9035_wr_reg(d, 0x00d8b4, 1);
if (ret < 0)
goto err;
ret = af9035_wr_reg(d, 0x00d8b5, 1);
if (ret < 0)
goto err;
ret = af9035_wr_reg(d, 0x00d8b3, 1);
if (ret < 0)
goto err;
tuner_addr = 0x60;
} else {
tuner_addr = 0x60 | 0x80; /* I2C bus hack */
}
/* attach tuner */
fe = dvb_attach(mxl5007t_attach, adap->fe[0], &d->i2c_adap,
tuner_addr, &af9035_mxl5007t_config[adap->id]);
break;
case AF9033_TUNER_TDA18218:
/* attach tuner */
fe = dvb_attach(tda18218_attach, adap->fe[0],
&d->i2c_adap, &af9035_tda18218_config);
break;
case AF9033_TUNER_FC2580:
/* Tuner enable using gpiot2_o, gpiot2_en and gpiot2_on */
ret = af9035_wr_reg_mask(d, 0xd8eb, 0x01, 0x01);
if (ret < 0)
goto err;
ret = af9035_wr_reg_mask(d, 0xd8ec, 0x01, 0x01);
if (ret < 0)
goto err;
ret = af9035_wr_reg_mask(d, 0xd8ed, 0x01, 0x01);
if (ret < 0)
goto err;
usleep_range(10000, 50000);
/* attach tuner */
fe = dvb_attach(fc2580_attach, adap->fe[0],
&d->i2c_adap, &af9035_fc2580_config);
break;
case AF9033_TUNER_FC0012:
/*
* AF9035 gpiot2 = FC0012 enable
* XXX: there seems to be something on gpioh8 too, but in my
* tests I didn't find any difference.
*/
if (adap->id == 0) {
/* configure gpiot2 as output and high */
ret = af9035_wr_reg_mask(d, 0xd8eb, 0x01, 0x01);
if (ret < 0)
goto err;
ret = af9035_wr_reg_mask(d, 0xd8ec, 0x01, 0x01);
if (ret < 0)
goto err;
ret = af9035_wr_reg_mask(d, 0xd8ed, 0x01, 0x01);
if (ret < 0)
goto err;
} else {
/*
* FIXME: This belongs in the FC0012 driver.
* Write 0x02 to FC0012 master tuner register 0x0d directly
* in order to make the slave tuner work.
*/
msg[0].addr = 0x63;
msg[0].flags = 0;
msg[0].len = 2;
msg[0].buf = "\x0d\x02";
ret = i2c_transfer(&d->i2c_adap, msg, 1);
if (ret < 0)
goto err;
}
usleep_range(10000, 50000);
fe = dvb_attach(fc0012_attach, adap->fe[0], &d->i2c_adap,
&af9035_fc0012_config[adap->id]);
break;
case AF9033_TUNER_IT9135_38:
case AF9033_TUNER_IT9135_51:
case AF9033_TUNER_IT9135_52:
{
struct it913x_config it913x_config = {
.fe = adap->fe[0],
.chip_ver = 1,
};
if (state->dual_mode) {
if (adap->id == 0)
it913x_config.role = IT913X_ROLE_DUAL_MASTER;
else
it913x_config.role = IT913X_ROLE_DUAL_SLAVE;
}
ret = af9035_add_i2c_dev(d, "it913x",
state->af9033_i2c_addr[adap->id] >> 1,
&it913x_config, &d->i2c_adap);
if (ret)
goto err;
fe = adap->fe[0];
break;
}
case AF9033_TUNER_IT9135_60:
case AF9033_TUNER_IT9135_61:
case AF9033_TUNER_IT9135_62:
{
struct it913x_config it913x_config = {
.fe = adap->fe[0],
.chip_ver = 2,
};
if (state->dual_mode) {
if (adap->id == 0)
it913x_config.role = IT913X_ROLE_DUAL_MASTER;
else
it913x_config.role = IT913X_ROLE_DUAL_SLAVE;
}
ret = af9035_add_i2c_dev(d, "it913x",
state->af9033_i2c_addr[adap->id] >> 1,
&it913x_config, &d->i2c_adap);
if (ret)
goto err;
fe = adap->fe[0];
break;
}
default:
fe = NULL;
}
if (fe == NULL) {
ret = -ENODEV;
goto err;
}
return 0;
err:
dev_dbg(&d->udev->dev, "%s: failed=%d\n", __func__, ret);
return ret;
}
static int it930x_tuner_attach(struct dvb_usb_adapter *adap)
{
struct state *state = adap_to_priv(adap);
struct dvb_usb_device *d = adap_to_d(adap);
int ret;
struct si2157_config si2157_config;
dev_dbg(&d->udev->dev, "%s: adap->id=%d\n", __func__, adap->id);
/* I2C master bus 2 clock speed 300k */
ret = af9035_wr_reg(d, 0x00f6a7, 0x07);
if (ret < 0)
goto err;
/* I2C master bus 1,3 clock speed 300k */
ret = af9035_wr_reg(d, 0x00f103, 0x07);
if (ret < 0)
goto err;
/* set gpio11 low */
ret = af9035_wr_reg_mask(d, 0xd8d4, 0x01, 0x01);
if (ret < 0)
goto err;
ret = af9035_wr_reg_mask(d, 0xd8d5, 0x01, 0x01);
if (ret < 0)
goto err;
ret = af9035_wr_reg_mask(d, 0xd8d3, 0x01, 0x01);
if (ret < 0)
goto err;
/* Tuner enable using gpiot2_en, gpiot2_on and gpiot2_o (reset) */
ret = af9035_wr_reg_mask(d, 0xd8b8, 0x01, 0x01);
if (ret < 0)
goto err;
ret = af9035_wr_reg_mask(d, 0xd8b9, 0x01, 0x01);
if (ret < 0)
goto err;
ret = af9035_wr_reg_mask(d, 0xd8b7, 0x00, 0x01);
if (ret < 0)
goto err;
msleep(200);
ret = af9035_wr_reg_mask(d, 0xd8b7, 0x01, 0x01);
if (ret < 0)
goto err;
memset(&si2157_config, 0, sizeof(si2157_config));
si2157_config.fe = adap->fe[0];
ret = af9035_add_i2c_dev(d, "si2157", 0x63,
&si2157_config, state->i2c_adapter_demod);
if (ret)
goto err;
return 0;
err:
dev_dbg(&d->udev->dev, "%s: failed=%d\n", __func__, ret);
return ret;
}
static int it930x_tuner_detach(struct dvb_usb_adapter *adap)
{
struct state *state = adap_to_priv(adap);
struct dvb_usb_device *d = adap_to_d(adap);
dev_dbg(&d->udev->dev, "adap->id=%d\n", adap->id);
if (adap->id == 1) {
if (state->i2c_client[3])
af9035_del_i2c_dev(d);
} else if (adap->id == 0) {
if (state->i2c_client[1])
af9035_del_i2c_dev(d);
}
return 0;
}
static int af9035_tuner_detach(struct dvb_usb_adapter *adap)
{
struct state *state = adap_to_priv(adap);
struct dvb_usb_device *d = adap_to_d(adap);
dev_dbg(&d->udev->dev, "%s: adap->id=%d\n", __func__, adap->id);
switch (state->af9033_config[adap->id].tuner) {
case AF9033_TUNER_IT9135_38:
case AF9033_TUNER_IT9135_51:
case AF9033_TUNER_IT9135_52:
case AF9033_TUNER_IT9135_60:
case AF9033_TUNER_IT9135_61:
case AF9033_TUNER_IT9135_62:
if (adap->id == 1) {
if (state->i2c_client[3])
af9035_del_i2c_dev(d);
} else if (adap->id == 0) {
if (state->i2c_client[1])
af9035_del_i2c_dev(d);
}
}
return 0;
}
static int af9035_init(struct dvb_usb_device *d)
{
struct state *state = d_to_priv(d);
int ret, i;
u16 frame_size = (d->udev->speed == USB_SPEED_FULL ? 5 : 87) * 188 / 4;
u8 packet_size = (d->udev->speed == USB_SPEED_FULL ? 64 : 512) / 4;
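/*
 * Note: frame and packet sizes appear to be programmed in units of four
 * bytes (hence the division by 4); full speed uses a 64 byte packet and a
 * 5 TS-packet frame, high speed 512 bytes and 87 TS packets.
 */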
struct reg_val_mask tab[] = {
{ 0x80f99d, 0x01, 0x01 },
{ 0x80f9a4, 0x01, 0x01 },
{ 0x00dd11, 0x00, 0x20 },
{ 0x00dd11, 0x00, 0x40 },
{ 0x00dd13, 0x00, 0x20 },
{ 0x00dd13, 0x00, 0x40 },
{ 0x00dd11, 0x20, 0x20 },
{ 0x00dd88, (frame_size >> 0) & 0xff, 0xff},
{ 0x00dd89, (frame_size >> 8) & 0xff, 0xff},
{ 0x00dd0c, packet_size, 0xff},
{ 0x00dd11, state->dual_mode << 6, 0x40 },
{ 0x00dd8a, (frame_size >> 0) & 0xff, 0xff},
{ 0x00dd8b, (frame_size >> 8) & 0xff, 0xff},
{ 0x00dd0d, packet_size, 0xff },
{ 0x80f9a3, state->dual_mode, 0x01 },
{ 0x80f9cd, state->dual_mode, 0x01 },
{ 0x80f99d, 0x00, 0x01 },
{ 0x80f9a4, 0x00, 0x01 },
};
dev_dbg(&d->udev->dev,
"%s: USB speed=%d frame_size=%04x packet_size=%02x\n",
__func__, d->udev->speed, frame_size, packet_size);
/* init endpoints */
for (i = 0; i < ARRAY_SIZE(tab); i++) {
ret = af9035_wr_reg_mask(d, tab[i].reg, tab[i].val,
tab[i].mask);
if (ret < 0)
goto err;
}
return 0;
err:
dev_dbg(&d->udev->dev, "%s: failed=%d\n", __func__, ret);
return ret;
}
static int it930x_init(struct dvb_usb_device *d)
{
struct state *state = d_to_priv(d);
int ret, i;
u16 frame_size = (d->udev->speed == USB_SPEED_FULL ? 5 : 816) * 188 / 4;
u8 packet_size = (d->udev->speed == USB_SPEED_FULL ? 64 : 512) / 4;
struct reg_val_mask tab[] = {
{ 0x00da1a, 0x00, 0x01 }, /* ignore_sync_byte */
{ 0x00f41f, 0x04, 0x04 }, /* dvbt_inten */
{ 0x00da10, 0x00, 0x01 }, /* mpeg_full_speed */
{ 0x00f41a, 0x01, 0x01 }, /* dvbt_en */
{ 0x00da1d, 0x01, 0x01 }, /* mp2_sw_rst, reset EP4 */
{ 0x00dd11, 0x00, 0x20 }, /* ep4_tx_en, disable EP4 */
{ 0x00dd13, 0x00, 0x20 }, /* ep4_tx_nak, disable EP4 NAK */
{ 0x00dd11, 0x20, 0x20 }, /* ep4_tx_en, enable EP4 */
{ 0x00dd11, 0x00, 0x40 }, /* ep5_tx_en, disable EP5 */
{ 0x00dd13, 0x00, 0x40 }, /* ep5_tx_nak, disable EP5 NAK */
{ 0x00dd11, state->dual_mode << 6, 0x40 }, /* enable EP5 */
{ 0x00dd88, (frame_size >> 0) & 0xff, 0xff},
{ 0x00dd89, (frame_size >> 8) & 0xff, 0xff},
{ 0x00dd0c, packet_size, 0xff},
{ 0x00dd8a, (frame_size >> 0) & 0xff, 0xff},
{ 0x00dd8b, (frame_size >> 8) & 0xff, 0xff},
{ 0x00dd0d, packet_size, 0xff },
{ 0x00da1d, 0x00, 0x01 }, /* mp2_sw_rst, disable */
{ 0x00d833, 0x01, 0xff }, /* slew rate ctrl: slew rate boosts */
{ 0x00d830, 0x00, 0xff }, /* Bit 0 of output driving control */
{ 0x00d831, 0x01, 0xff }, /* Bit 1 of output driving control */
{ 0x00d832, 0x00, 0xff }, /* Bit 2 of output driving control */
/* suspend gpio1 for TS-C */
{ 0x00d8b0, 0x01, 0xff }, /* gpio1 */
{ 0x00d8b1, 0x01, 0xff }, /* gpio1 */
{ 0x00d8af, 0x00, 0xff }, /* gpio1 */
/* suspend gpio7 for TS-D */
{ 0x00d8c4, 0x01, 0xff }, /* gpio7 */
{ 0x00d8c5, 0x01, 0xff }, /* gpio7 */
{ 0x00d8c3, 0x00, 0xff }, /* gpio7 */
/* suspend gpio13 for TS-B */
{ 0x00d8dc, 0x01, 0xff }, /* gpio13 */
{ 0x00d8dd, 0x01, 0xff }, /* gpio13 */
{ 0x00d8db, 0x00, 0xff }, /* gpio13 */
/* suspend gpio14 for TS-E */
{ 0x00d8e4, 0x01, 0xff }, /* gpio14 */
{ 0x00d8e5, 0x01, 0xff }, /* gpio14 */
{ 0x00d8e3, 0x00, 0xff }, /* gpio14 */
/* suspend gpio15 for TS-A */
{ 0x00d8e8, 0x01, 0xff }, /* gpio15 */
{ 0x00d8e9, 0x01, 0xff }, /* gpio15 */
{ 0x00d8e7, 0x00, 0xff }, /* gpio15 */
{ 0x00da58, 0x00, 0x01 }, /* ts_in_src, serial */
{ 0x00da73, 0x01, 0xff }, /* ts0_aggre_mode */
{ 0x00da78, 0x47, 0xff }, /* ts0_sync_byte */
{ 0x00da4c, 0x01, 0xff }, /* ts0_en */
{ 0x00da5a, 0x1f, 0xff }, /* ts_fail_ignore */
};
dev_dbg(&d->udev->dev,
"%s: USB speed=%d frame_size=%04x packet_size=%02x\n",
__func__, d->udev->speed, frame_size, packet_size);
/* init endpoints */
for (i = 0; i < ARRAY_SIZE(tab); i++) {
ret = af9035_wr_reg_mask(d, tab[i].reg,
tab[i].val, tab[i].mask);
if (ret < 0)
goto err;
}
return 0;
err:
dev_dbg(&d->udev->dev, "%s: failed=%d\n", __func__, ret);
return ret;
}
#if IS_ENABLED(CONFIG_RC_CORE)
static int af9035_rc_query(struct dvb_usb_device *d)
{
int ret;
u32 key;
u8 buf[4];
struct usb_req req = { CMD_IR_GET, 0, 0, NULL, 4, buf };
ret = af9035_ctrl_msg(d, &req);
if (ret == 1)
return 0;
else if (ret < 0)
goto err;
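/*
 * buf[] holds the four NEC frame bytes: address, address complement (or a
 * second address byte for extended NEC), command and command complement.
 * The complement checks below pick the matching scancode format.
 */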
if ((buf[2] + buf[3]) == 0xff) {
if ((buf[0] + buf[1]) == 0xff) {
/* NEC standard 16bit */
key = RC_SCANCODE_NEC(buf[0], buf[2]);
} else {
/* NEC extended 24bit */
key = RC_SCANCODE_NECX(buf[0] << 8 | buf[1], buf[2]);
}
} else {
/* NEC full code 32bit */
key = RC_SCANCODE_NEC32(buf[0] << 24 | buf[1] << 16 |
buf[2] << 8 | buf[3]);
}
dev_dbg(&d->udev->dev, "%s: %*ph\n", __func__, 4, buf);
rc_keydown(d->rc_dev, RC_TYPE_NEC, key, 0);
return 0;
err:
dev_dbg(&d->udev->dev, "%s: failed=%d\n", __func__, ret);
return ret;
}
static int af9035_get_rc_config(struct dvb_usb_device *d, struct dvb_usb_rc *rc)
{
struct state *state = d_to_priv(d);
int ret;
u8 tmp;
ret = af9035_rd_reg(d, state->eeprom_addr + EEPROM_IR_MODE, &tmp);
if (ret < 0)
goto err;
dev_dbg(&d->udev->dev, "%s: ir_mode=%02x\n", __func__, tmp);
/* don't activate rc if in HID mode or if not available */
if (tmp == 5) {
ret = af9035_rd_reg(d, state->eeprom_addr + EEPROM_IR_TYPE,
&tmp);
if (ret < 0)
goto err;
dev_dbg(&d->udev->dev, "%s: ir_type=%02x\n", __func__, tmp);
switch (tmp) {
case 0: /* NEC */
default:
rc->allowed_protos = RC_BIT_NEC;
break;
case 1: /* RC6 */
rc->allowed_protos = RC_BIT_RC6_MCE;
break;
}
rc->query = af9035_rc_query;
rc->interval = 500;
/* load an empty keymap to enable rc */
if (!rc->map_name)
rc->map_name = RC_MAP_EMPTY;
}
return 0;
err:
dev_dbg(&d->udev->dev, "%s: failed=%d\n", __func__, ret);
return ret;
}
#else
#define af9035_get_rc_config NULL
#endif
static int af9035_get_stream_config(struct dvb_frontend *fe, u8 *ts_type,
struct usb_data_stream_properties *stream)
{
struct dvb_usb_device *d = fe_to_d(fe);
dev_dbg(&d->udev->dev, "%s: adap=%d\n", __func__, fe_to_adap(fe)->id);
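/* full speed devices get a smaller bulk buffer: 5 TS packets of 188 bytes */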
if (d->udev->speed == USB_SPEED_FULL)
stream->u.bulk.buffersize = 5 * 188;
return 0;
}
static int af9035_pid_filter_ctrl(struct dvb_usb_adapter *adap, int onoff)
{
struct state *state = adap_to_priv(adap);
return state->ops.pid_filter_ctrl(adap->fe[0], onoff);
}
static int af9035_pid_filter(struct dvb_usb_adapter *adap, int index, u16 pid,
int onoff)
{
struct state *state = adap_to_priv(adap);
return state->ops.pid_filter(adap->fe[0], index, pid, onoff);
}
static int af9035_probe(struct usb_interface *intf,
const struct usb_device_id *id)
{
struct usb_device *udev = interface_to_usbdev(intf);
char manufacturer[sizeof("Afatech")];
memset(manufacturer, 0, sizeof(manufacturer));
usb_string(udev, udev->descriptor.iManufacturer,
manufacturer, sizeof(manufacturer));
/*
* There are two devices having the same ID but a different chipset. One uses
* the AF9015 and the other the IT9135 chipset. The only difference seen in
* lsusb output is the iManufacturer string.
*
* idVendor 0x0ccd TerraTec Electronic GmbH
* idProduct 0x0099
* bcdDevice 2.00
* iManufacturer 1 Afatech
* iProduct 2 DVB-T 2
*
* idVendor 0x0ccd TerraTec Electronic GmbH
* idProduct 0x0099
* bcdDevice 2.00
* iManufacturer 1 ITE Technologies, Inc.
* iProduct 2 DVB-T TV Stick
*/
if ((le16_to_cpu(udev->descriptor.idVendor) == USB_VID_TERRATEC) &&
(le16_to_cpu(udev->descriptor.idProduct) == 0x0099)) {
if (!strcmp("Afatech", manufacturer)) {
dev_dbg(&udev->dev, "%s: rejecting device\n", __func__);
return -ENODEV;
}
}
return dvb_usbv2_probe(intf, id);
}
/* interface 0 is used by the DVB-T receiver and
interface 1 is for the remote controller (HID) */
static const struct dvb_usb_device_properties af9035_props = {
.driver_name = KBUILD_MODNAME,
.owner = THIS_MODULE,
.adapter_nr = adapter_nr,
.size_of_priv = sizeof(struct state),
.generic_bulk_ctrl_endpoint = 0x02,
.generic_bulk_ctrl_endpoint_response = 0x81,
.identify_state = af9035_identify_state,
.download_firmware = af9035_download_firmware,
.i2c_algo = &af9035_i2c_algo,
.read_config = af9035_read_config,
.frontend_attach = af9035_frontend_attach,
.frontend_detach = af9035_frontend_detach,
.tuner_attach = af9035_tuner_attach,
.tuner_detach = af9035_tuner_detach,
.init = af9035_init,
.get_rc_config = af9035_get_rc_config,
.get_stream_config = af9035_get_stream_config,
.get_adapter_count = af9035_get_adapter_count,
.adapter = {
{
.caps = DVB_USB_ADAP_HAS_PID_FILTER |
DVB_USB_ADAP_PID_FILTER_CAN_BE_TURNED_OFF,
.pid_filter_count = 32,
.pid_filter_ctrl = af9035_pid_filter_ctrl,
.pid_filter = af9035_pid_filter,
.stream = DVB_USB_STREAM_BULK(0x84, 6, 87 * 188),
}, {
.caps = DVB_USB_ADAP_HAS_PID_FILTER |
DVB_USB_ADAP_PID_FILTER_CAN_BE_TURNED_OFF,
.pid_filter_count = 32,
.pid_filter_ctrl = af9035_pid_filter_ctrl,
.pid_filter = af9035_pid_filter,
.stream = DVB_USB_STREAM_BULK(0x85, 6, 87 * 188),
},
},
};
static const struct dvb_usb_device_properties it930x_props = {
.driver_name = KBUILD_MODNAME,
.owner = THIS_MODULE,
.adapter_nr = adapter_nr,
.size_of_priv = sizeof(struct state),
.generic_bulk_ctrl_endpoint = 0x02,
.generic_bulk_ctrl_endpoint_response = 0x81,
.identify_state = af9035_identify_state,
.download_firmware = af9035_download_firmware,
.i2c_algo = &af9035_i2c_algo,
.read_config = af9035_read_config,
.frontend_attach = it930x_frontend_attach,
.frontend_detach = af9035_frontend_detach,
.tuner_attach = it930x_tuner_attach,
.tuner_detach = it930x_tuner_detach,
.init = it930x_init,
.get_stream_config = af9035_get_stream_config,
.get_adapter_count = af9035_get_adapter_count,
.adapter = {
{
.stream = DVB_USB_STREAM_BULK(0x84, 4, 816 * 188),
}, {
.stream = DVB_USB_STREAM_BULK(0x85, 4, 816 * 188),
},
},
};
static const struct usb_device_id af9035_id_table[] = {
/* AF9035 devices */
{ DVB_USB_DEVICE(USB_VID_AFATECH, USB_PID_AFATECH_AF9035_9035,
&af9035_props, "Afatech AF9035 reference design", NULL) },
{ DVB_USB_DEVICE(USB_VID_AFATECH, USB_PID_AFATECH_AF9035_1000,
&af9035_props, "Afatech AF9035 reference design", NULL) },
{ DVB_USB_DEVICE(USB_VID_AFATECH, USB_PID_AFATECH_AF9035_1001,
&af9035_props, "Afatech AF9035 reference design", NULL) },
{ DVB_USB_DEVICE(USB_VID_AFATECH, USB_PID_AFATECH_AF9035_1002,
&af9035_props, "Afatech AF9035 reference design", NULL) },
{ DVB_USB_DEVICE(USB_VID_AFATECH, USB_PID_AFATECH_AF9035_1003,
&af9035_props, "Afatech AF9035 reference design", NULL) },
{ DVB_USB_DEVICE(USB_VID_TERRATEC, USB_PID_TERRATEC_CINERGY_T_STICK,
&af9035_props, "TerraTec Cinergy T Stick", NULL) },
{ DVB_USB_DEVICE(USB_VID_AVERMEDIA, USB_PID_AVERMEDIA_A835,
&af9035_props, "AVerMedia AVerTV Volar HD/PRO (A835)", NULL) },
{ DVB_USB_DEVICE(USB_VID_AVERMEDIA, USB_PID_AVERMEDIA_B835,
&af9035_props, "AVerMedia AVerTV Volar HD/PRO (A835)", NULL) },
{ DVB_USB_DEVICE(USB_VID_AVERMEDIA, USB_PID_AVERMEDIA_1867,
&af9035_props, "AVerMedia HD Volar (A867)", NULL) },
{ DVB_USB_DEVICE(USB_VID_AVERMEDIA, USB_PID_AVERMEDIA_A867,
&af9035_props, "AVerMedia HD Volar (A867)", NULL) },
{ DVB_USB_DEVICE(USB_VID_AVERMEDIA, USB_PID_AVERMEDIA_TWINSTAR,
&af9035_props, "AVerMedia Twinstar (A825)", NULL) },
{ DVB_USB_DEVICE(USB_VID_ASUS, USB_PID_ASUS_U3100MINI_PLUS,
&af9035_props, "Asus U3100Mini Plus", NULL) },
{ DVB_USB_DEVICE(USB_VID_TERRATEC, 0x00aa,
&af9035_props, "TerraTec Cinergy T Stick (rev. 2)", NULL) },
/* IT9135 devices */
{ DVB_USB_DEVICE(USB_VID_ITETECH, USB_PID_ITETECH_IT9135,
&af9035_props, "ITE 9135 Generic", RC_MAP_IT913X_V1) },
{ DVB_USB_DEVICE(USB_VID_ITETECH, USB_PID_ITETECH_IT9135_9005,
&af9035_props, "ITE 9135(9005) Generic", RC_MAP_IT913X_V2) },
{ DVB_USB_DEVICE(USB_VID_ITETECH, USB_PID_ITETECH_IT9135_9006,
&af9035_props, "ITE 9135(9006) Generic", RC_MAP_IT913X_V1) },
{ DVB_USB_DEVICE(USB_VID_AVERMEDIA, USB_PID_AVERMEDIA_A835B_1835,
&af9035_props, "Avermedia A835B(1835)", RC_MAP_IT913X_V2) },
{ DVB_USB_DEVICE(USB_VID_AVERMEDIA, USB_PID_AVERMEDIA_A835B_2835,
&af9035_props, "Avermedia A835B(2835)", RC_MAP_IT913X_V2) },
{ DVB_USB_DEVICE(USB_VID_AVERMEDIA, USB_PID_AVERMEDIA_A835B_3835,
&af9035_props, "Avermedia A835B(3835)", RC_MAP_IT913X_V2) },
{ DVB_USB_DEVICE(USB_VID_AVERMEDIA, USB_PID_AVERMEDIA_A835B_4835,
&af9035_props, "Avermedia A835B(4835)", RC_MAP_IT913X_V2) },
{ DVB_USB_DEVICE(USB_VID_AVERMEDIA, USB_PID_AVERMEDIA_H335,
&af9035_props, "Avermedia H335", RC_MAP_IT913X_V2) },
{ DVB_USB_DEVICE(USB_VID_KWORLD_2, USB_PID_KWORLD_UB499_2T_T09,
&af9035_props, "Kworld UB499-2T T09", RC_MAP_IT913X_V1) },
{ DVB_USB_DEVICE(USB_VID_KWORLD_2, USB_PID_SVEON_STV22_IT9137,
&af9035_props, "Sveon STV22 Dual DVB-T HDTV",
RC_MAP_IT913X_V1) },
{ DVB_USB_DEVICE(USB_VID_KWORLD_2, USB_PID_CTVDIGDUAL_V2,
&af9035_props, "Digital Dual TV Receiver CTVDIGDUAL_V2",
RC_MAP_IT913X_V1) },
/* IT930x devices */
{ DVB_USB_DEVICE(USB_VID_ITETECH, USB_PID_ITETECH_IT9303,
&it930x_props, "ITE 9303 Generic", NULL) },
/* XXX: that same ID [0ccd:0099] is used by af9015 driver too */
{ DVB_USB_DEVICE(USB_VID_TERRATEC, 0x0099,
&af9035_props, "TerraTec Cinergy T Stick Dual RC (rev. 2)",
NULL) },
{ DVB_USB_DEVICE(USB_VID_LEADTEK, 0x6a05,
&af9035_props, "Leadtek WinFast DTV Dongle Dual", NULL) },
{ DVB_USB_DEVICE(USB_VID_HAUPPAUGE, 0xf900,
&af9035_props, "Hauppauge WinTV-MiniStick 2", NULL) },
{ DVB_USB_DEVICE(USB_VID_PCTV, USB_PID_PCTV_78E,
&af9035_props, "PCTV AndroiDTV (78e)", RC_MAP_IT913X_V1) },
{ DVB_USB_DEVICE(USB_VID_PCTV, USB_PID_PCTV_79E,
&af9035_props, "PCTV microStick (79e)", RC_MAP_IT913X_V2) },
{ }
};
MODULE_DEVICE_TABLE(usb, af9035_id_table);
static struct usb_driver af9035_usb_driver = {
.name = KBUILD_MODNAME,
.id_table = af9035_id_table,
.probe = af9035_probe,
.disconnect = dvb_usbv2_disconnect,
.suspend = dvb_usbv2_suspend,
.resume = dvb_usbv2_resume,
.reset_resume = dvb_usbv2_reset_resume,
.no_dynamic_id = 1,
.soft_unbind = 1,
};
module_usb_driver(af9035_usb_driver);
MODULE_AUTHOR("Antti Palosaari <crope@iki.fi>");
MODULE_DESCRIPTION("Afatech AF9035 driver");
MODULE_LICENSE("GPL");
MODULE_FIRMWARE(AF9035_FIRMWARE_AF9035);
MODULE_FIRMWARE(AF9035_FIRMWARE_IT9135_V1);
MODULE_FIRMWARE(AF9035_FIRMWARE_IT9135_V2);
MODULE_FIRMWARE(AF9035_FIRMWARE_IT9303);
| gpl-2.0 |
iBluemind/android_kernel_lge_su760 | drivers/staging/comedi/drivers/amplc_pc236.c | 944 | 18478 | /*
comedi/drivers/amplc_pc236.c
Driver for Amplicon PC36AT and PCI236 DIO boards.
Copyright (C) 2002 MEV Ltd. <http://www.mev.co.uk/>
COMEDI - Linux Control and Measurement Device Interface
Copyright (C) 2000 David A. Schleef <ds@schleef.org>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
/*
Driver: amplc_pc236
Description: Amplicon PC36AT, PCI236
Author: Ian Abbott <abbotti@mev.co.uk>
Devices: [Amplicon] PC36AT (pc36at), PCI236 (pci236 or amplc_pc236)
Updated: Wed, 01 Apr 2009 15:41:25 +0100
Status: works
Configuration options - PC36AT:
[0] - I/O port base address
[1] - IRQ (optional)
Configuration options - PCI236:
[0] - PCI bus of device (optional)
[1] - PCI slot of device (optional)
If bus/slot is not specified, the first available PCI device will be
used.
The PC36AT ISA board and PCI236 PCI board have a single 8255 appearing
as subdevice 0.
Subdevice 1 pretends to be a digital input device, but it always returns
0 when read. However, if you run a command with scan_begin_src=TRIG_EXT,
a rising edge on port C bit 3 acts as an external trigger, which can be
used to wake up tasks. This is like the comedi_parport device, but the
only way to physically disable the interrupt on the PC36AT is to remove
the IRQ jumper. If no interrupt is connected, then subdevice 1 is
unused.
*/
#include <linux/interrupt.h>
#include "../comedidev.h"
#include "comedi_pci.h"
#include "8255.h"
#include "plx9052.h"
#define PC236_DRIVER_NAME "amplc_pc236"
/* PCI236 PCI configuration register information */
#define PCI_VENDOR_ID_AMPLICON 0x14dc
#define PCI_DEVICE_ID_AMPLICON_PCI236 0x0009
#define PCI_DEVICE_ID_INVALID 0xffff
/* PC36AT / PCI236 registers */
#define PC236_IO_SIZE 4
#define PC236_LCR_IO_SIZE 128
/*
* INTCSR values for PCI236.
*/
/* Disable interrupt, also clear any interrupt there */
#define PCI236_INTR_DISABLE (PLX9052_INTCSR_LI1ENAB_DISABLED \
| PLX9052_INTCSR_LI1POL_HIGH \
| PLX9052_INTCSR_LI2POL_HIGH \
| PLX9052_INTCSR_PCIENAB_DISABLED \
| PLX9052_INTCSR_LI1SEL_EDGE \
| PLX9052_INTCSR_LI1CLRINT_ASSERTED)
/* Enable interrupt, also clear any interrupt there. */
#define PCI236_INTR_ENABLE (PLX9052_INTCSR_LI1ENAB_ENABLED \
| PLX9052_INTCSR_LI1POL_HIGH \
| PLX9052_INTCSR_LI2POL_HIGH \
| PLX9052_INTCSR_PCIENAB_ENABLED \
| PLX9052_INTCSR_LI1SEL_EDGE \
| PLX9052_INTCSR_LI1CLRINT_ASSERTED)
/*
* Board descriptions for Amplicon PC36AT and PCI236.
*/
enum pc236_bustype { isa_bustype, pci_bustype };
enum pc236_model { pc36at_model, pci236_model, anypci_model };
struct pc236_board {
const char *name;
const char *fancy_name;
unsigned short devid;
enum pc236_bustype bustype;
enum pc236_model model;
};
static const struct pc236_board pc236_boards[] = {
{
.name = "pc36at",
.fancy_name = "PC36AT",
.bustype = isa_bustype,
.model = pc36at_model,
},
#ifdef CONFIG_COMEDI_PCI
{
.name = "pci236",
.fancy_name = "PCI236",
.devid = PCI_DEVICE_ID_AMPLICON_PCI236,
.bustype = pci_bustype,
.model = pci236_model,
},
#endif
#ifdef CONFIG_COMEDI_PCI
{
.name = PC236_DRIVER_NAME,
.fancy_name = PC236_DRIVER_NAME,
.devid = PCI_DEVICE_ID_INVALID,
.bustype = pci_bustype,
.model = anypci_model, /* wildcard */
},
#endif
};
#ifdef CONFIG_COMEDI_PCI
static DEFINE_PCI_DEVICE_TABLE(pc236_pci_table) = {
{
PCI_VENDOR_ID_AMPLICON, PCI_DEVICE_ID_AMPLICON_PCI236,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, {
0}
};
MODULE_DEVICE_TABLE(pci, pc236_pci_table);
#endif /* CONFIG_COMEDI_PCI */
/*
* Useful for shorthand access to the particular board structure
*/
#define thisboard ((const struct pc236_board *)dev->board_ptr)
/* this structure is for data unique to this hardware driver. If
several hardware drivers keep similar information in this structure,
feel free to suggest moving the variable to the struct comedi_device struct.
*/
struct pc236_private {
#ifdef CONFIG_COMEDI_PCI
/* PCI device */
struct pci_dev *pci_dev;
unsigned long lcr_iobase; /* PLX PCI9052 config registers in PCIBAR1 */
#endif
int enable_irq;
};
#define devpriv ((struct pc236_private *)dev->private)
/*
* The struct comedi_driver structure tells the Comedi core module
* which functions to call to configure/deconfigure (attach/detach)
* the board, and also about the kernel module that contains
* the device code.
*/
static int pc236_attach(struct comedi_device *dev, struct comedi_devconfig *it);
static int pc236_detach(struct comedi_device *dev);
static struct comedi_driver driver_amplc_pc236 = {
.driver_name = PC236_DRIVER_NAME,
.module = THIS_MODULE,
.attach = pc236_attach,
.detach = pc236_detach,
.board_name = &pc236_boards[0].name,
.offset = sizeof(struct pc236_board),
.num_names = ARRAY_SIZE(pc236_boards),
};
#ifdef CONFIG_COMEDI_PCI
static int __devinit driver_amplc_pc236_pci_probe(struct pci_dev *dev,
const struct pci_device_id
*ent)
{
return comedi_pci_auto_config(dev, driver_amplc_pc236.driver_name);
}
static void __devexit driver_amplc_pc236_pci_remove(struct pci_dev *dev)
{
comedi_pci_auto_unconfig(dev);
}
static struct pci_driver driver_amplc_pc236_pci_driver = {
.id_table = pc236_pci_table,
.probe = &driver_amplc_pc236_pci_probe,
.remove = __devexit_p(&driver_amplc_pc236_pci_remove)
};
static int __init driver_amplc_pc236_init_module(void)
{
int retval;
retval = comedi_driver_register(&driver_amplc_pc236);
if (retval < 0)
return retval;
driver_amplc_pc236_pci_driver.name =
(char *)driver_amplc_pc236.driver_name;
return pci_register_driver(&driver_amplc_pc236_pci_driver);
}
static void __exit driver_amplc_pc236_cleanup_module(void)
{
pci_unregister_driver(&driver_amplc_pc236_pci_driver);
comedi_driver_unregister(&driver_amplc_pc236);
}
module_init(driver_amplc_pc236_init_module);
module_exit(driver_amplc_pc236_cleanup_module);
#else
static int __init driver_amplc_pc236_init_module(void)
{
return comedi_driver_register(&driver_amplc_pc236);
}
static void __exit driver_amplc_pc236_cleanup_module(void)
{
comedi_driver_unregister(&driver_amplc_pc236);
}
module_init(driver_amplc_pc236_init_module);
module_exit(driver_amplc_pc236_cleanup_module);
#endif
static int pc236_request_region(unsigned minor, unsigned long from,
unsigned long extent);
static void pc236_intr_disable(struct comedi_device *dev);
static void pc236_intr_enable(struct comedi_device *dev);
static int pc236_intr_check(struct comedi_device *dev);
static int pc236_intr_insn(struct comedi_device *dev,
struct comedi_subdevice *s, struct comedi_insn *insn,
unsigned int *data);
static int pc236_intr_cmdtest(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_cmd *cmd);
static int pc236_intr_cmd(struct comedi_device *dev,
struct comedi_subdevice *s);
static int pc236_intr_cancel(struct comedi_device *dev,
struct comedi_subdevice *s);
static irqreturn_t pc236_interrupt(int irq, void *d);
/*
* This function looks for a PCI device matching the requested board name,
* bus and slot.
*/
#ifdef CONFIG_COMEDI_PCI
static int
pc236_find_pci(struct comedi_device *dev, int bus, int slot,
struct pci_dev **pci_dev_p)
{
struct pci_dev *pci_dev = NULL;
*pci_dev_p = NULL;
/* Look for matching PCI device. */
for (pci_dev = pci_get_device(PCI_VENDOR_ID_AMPLICON, PCI_ANY_ID, NULL);
pci_dev != NULL;
pci_dev = pci_get_device(PCI_VENDOR_ID_AMPLICON,
PCI_ANY_ID, pci_dev)) {
/* If bus/slot specified, check them. */
if (bus || slot) {
if (bus != pci_dev->bus->number
|| slot != PCI_SLOT(pci_dev->devfn))
continue;
}
if (thisboard->model == anypci_model) {
/* Match any supported model. */
int i;
for (i = 0; i < ARRAY_SIZE(pc236_boards); i++) {
if (pc236_boards[i].bustype != pci_bustype)
continue;
if (pci_dev->device == pc236_boards[i].devid) {
/* Change board_ptr to matched board. */
dev->board_ptr = &pc236_boards[i];
break;
}
}
if (i == ARRAY_SIZE(pc236_boards))
continue;
} else {
/* Match specific model name. */
if (pci_dev->device != thisboard->devid)
continue;
}
/* Found a match. */
*pci_dev_p = pci_dev;
return 0;
}
/* No match found. */
if (bus || slot) {
printk(KERN_ERR
"comedi%d: error! no %s found at pci %02x:%02x!\n",
dev->minor, thisboard->name, bus, slot);
} else {
printk(KERN_ERR "comedi%d: error! no %s found!\n",
dev->minor, thisboard->name);
}
return -EIO;
}
#endif
/*
* Attach is called by the Comedi core to configure the driver
* for a particular board. If you specified a board_name array
* in the driver structure, dev->board_ptr contains that
* address.
*/
static int pc236_attach(struct comedi_device *dev, struct comedi_devconfig *it)
{
struct comedi_subdevice *s;
unsigned long iobase = 0;
unsigned int irq = 0;
#ifdef CONFIG_COMEDI_PCI
struct pci_dev *pci_dev = NULL;
int bus = 0, slot = 0;
#endif
int share_irq = 0;
int ret;
printk(KERN_DEBUG "comedi%d: %s: attach\n", dev->minor,
PC236_DRIVER_NAME);
/*
* Allocate the private structure area. alloc_private() is a
* convenient macro defined in comedidev.h.
*/
ret = alloc_private(dev, sizeof(struct pc236_private));
if (ret < 0) {
printk(KERN_ERR "comedi%d: error! out of memory!\n",
dev->minor);
return ret;
}
/* Process options. */
switch (thisboard->bustype) {
case isa_bustype:
iobase = it->options[0];
irq = it->options[1];
share_irq = 0;
break;
#ifdef CONFIG_COMEDI_PCI
case pci_bustype:
bus = it->options[0];
slot = it->options[1];
share_irq = 1;
ret = pc236_find_pci(dev, bus, slot, &pci_dev);
if (ret < 0)
return ret;
devpriv->pci_dev = pci_dev;
break;
#endif /* CONFIG_COMEDI_PCI */
default:
printk(KERN_ERR
"comedi%d: %s: BUG! cannot determine board type!\n",
dev->minor, PC236_DRIVER_NAME);
return -EINVAL;
break;
}
/*
* Initialize dev->board_name.
*/
dev->board_name = thisboard->name;
/* Enable device and reserve I/O spaces. */
#ifdef CONFIG_COMEDI_PCI
if (pci_dev) {
ret = comedi_pci_enable(pci_dev, PC236_DRIVER_NAME);
if (ret < 0) {
printk(KERN_ERR
"comedi%d: error! cannot enable PCI device and request regions!\n",
dev->minor);
return ret;
}
devpriv->lcr_iobase = pci_resource_start(pci_dev, 1);
iobase = pci_resource_start(pci_dev, 2);
irq = pci_dev->irq;
} else
#endif
{
ret = pc236_request_region(dev->minor, iobase, PC236_IO_SIZE);
if (ret < 0)
return ret;
}
dev->iobase = iobase;
/*
* Allocate the subdevice structures. alloc_subdevice() is a
* convenient macro defined in comedidev.h.
*/
ret = alloc_subdevices(dev, 2);
if (ret < 0) {
printk(KERN_ERR "comedi%d: error! out of memory!\n",
dev->minor);
return ret;
}
s = dev->subdevices + 0;
/* digital i/o subdevice (8255) */
ret = subdev_8255_init(dev, s, NULL, iobase);
if (ret < 0) {
printk(KERN_ERR "comedi%d: error! out of memory!\n",
dev->minor);
return ret;
}
s = dev->subdevices + 1;
dev->read_subdev = s;
s->type = COMEDI_SUBD_UNUSED;
pc236_intr_disable(dev);
if (irq) {
unsigned long flags = share_irq ? IRQF_SHARED : 0;
if (request_irq(irq, pc236_interrupt, flags,
PC236_DRIVER_NAME, dev) >= 0) {
dev->irq = irq;
s->type = COMEDI_SUBD_DI;
s->subdev_flags = SDF_READABLE | SDF_CMD_READ;
s->n_chan = 1;
s->maxdata = 1;
s->range_table = &range_digital;
s->insn_bits = pc236_intr_insn;
s->do_cmdtest = pc236_intr_cmdtest;
s->do_cmd = pc236_intr_cmd;
s->cancel = pc236_intr_cancel;
}
}
printk(KERN_INFO "comedi%d: %s ", dev->minor, dev->board_name);
if (thisboard->bustype == isa_bustype) {
printk("(base %#lx) ", iobase);
} else {
#ifdef CONFIG_COMEDI_PCI
printk("(pci %s) ", pci_name(pci_dev));
#endif
}
if (irq)
printk("(irq %u%s) ", irq, (dev->irq ? "" : " UNAVAILABLE"));
else
printk("(no irq) ");
printk("attached\n");
return 1;
}
/*
* _detach is called to deconfigure a device. It should deallocate
* resources.
* This function is also called when _attach() fails, so it should be
* careful not to release resources that were not necessarily
* allocated by _attach(). dev->private and dev->subdevices are
* deallocated automatically by the core.
*/
static int pc236_detach(struct comedi_device *dev)
{
printk(KERN_DEBUG "comedi%d: %s: detach\n", dev->minor,
PC236_DRIVER_NAME);
if (dev->iobase)
pc236_intr_disable(dev);
if (dev->irq)
free_irq(dev->irq, dev);
if (dev->subdevices)
subdev_8255_cleanup(dev, dev->subdevices + 0);
if (devpriv) {
#ifdef CONFIG_COMEDI_PCI
if (devpriv->pci_dev) {
if (dev->iobase)
comedi_pci_disable(devpriv->pci_dev);
pci_dev_put(devpriv->pci_dev);
} else
#endif
{
if (dev->iobase)
release_region(dev->iobase, PC236_IO_SIZE);
}
}
if (dev->board_name) {
printk(KERN_INFO "comedi%d: %s removed\n",
dev->minor, dev->board_name);
}
return 0;
}
/*
* This function checks and requests an I/O region, reporting an error
* if there is a conflict.
*/
static int pc236_request_region(unsigned minor, unsigned long from,
unsigned long extent)
{
if (!from || !request_region(from, extent, PC236_DRIVER_NAME)) {
printk(KERN_ERR "comedi%d: I/O port conflict (%#lx,%lu)!\n",
minor, from, extent);
return -EIO;
}
return 0;
}
/*
* This function is called to mark the interrupt as disabled (no command
* configured on subdevice 1) and to physically disable the interrupt
* (not possible on the PC36AT, except by removing the IRQ jumper!).
*/
static void pc236_intr_disable(struct comedi_device *dev)
{
unsigned long flags;
spin_lock_irqsave(&dev->spinlock, flags);
devpriv->enable_irq = 0;
#ifdef CONFIG_COMEDI_PCI
if (devpriv->lcr_iobase)
outl(PCI236_INTR_DISABLE, devpriv->lcr_iobase + PLX9052_INTCSR);
#endif
spin_unlock_irqrestore(&dev->spinlock, flags);
}
/*
* This function is called to mark the interrupt as enabled (a command
* configured on subdevice 1) and to physically enable the interrupt
* (not possible on the PC36AT, except by (re)connecting the IRQ jumper!).
*/
static void pc236_intr_enable(struct comedi_device *dev)
{
unsigned long flags;
spin_lock_irqsave(&dev->spinlock, flags);
devpriv->enable_irq = 1;
#ifdef CONFIG_COMEDI_PCI
if (devpriv->lcr_iobase)
outl(PCI236_INTR_ENABLE, devpriv->lcr_iobase + PLX9052_INTCSR);
#endif
spin_unlock_irqrestore(&dev->spinlock, flags);
}
/*
* This function is called when an interrupt occurs to check whether
* the interrupt has been marked as enabled and was generated by the
* board. If so, the function prepares the hardware for the next
* interrupt.
* Returns 0 if the interrupt should be ignored.
*/
static int pc236_intr_check(struct comedi_device *dev)
{
int retval = 0;
unsigned long flags;
spin_lock_irqsave(&dev->spinlock, flags);
if (devpriv->enable_irq) {
retval = 1;
#ifdef CONFIG_COMEDI_PCI
if (devpriv->lcr_iobase) {
if ((inl(devpriv->lcr_iobase + PLX9052_INTCSR)
& PLX9052_INTCSR_LI1STAT_MASK)
== PLX9052_INTCSR_LI1STAT_INACTIVE) {
retval = 0;
} else {
/* Clear interrupt and keep it enabled. */
outl(PCI236_INTR_ENABLE,
devpriv->lcr_iobase + PLX9052_INTCSR);
}
}
#endif
}
spin_unlock_irqrestore(&dev->spinlock, flags);
return retval;
}
/*
* Input from subdevice 1.
* Copied from the comedi_parport driver.
*/
static int pc236_intr_insn(struct comedi_device *dev,
struct comedi_subdevice *s, struct comedi_insn *insn,
unsigned int *data)
{
data[1] = 0;
return 2;
}
/*
* Subdevice 1 command test.
* Copied from the comedi_parport driver.
*/
static int pc236_intr_cmdtest(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_cmd *cmd)
{
int err = 0;
int tmp;
/* step 1 */
tmp = cmd->start_src;
cmd->start_src &= TRIG_NOW;
if (!cmd->start_src || tmp != cmd->start_src)
err++;
tmp = cmd->scan_begin_src;
cmd->scan_begin_src &= TRIG_EXT;
if (!cmd->scan_begin_src || tmp != cmd->scan_begin_src)
err++;
tmp = cmd->convert_src;
cmd->convert_src &= TRIG_FOLLOW;
if (!cmd->convert_src || tmp != cmd->convert_src)
err++;
tmp = cmd->scan_end_src;
cmd->scan_end_src &= TRIG_COUNT;
if (!cmd->scan_end_src || tmp != cmd->scan_end_src)
err++;
tmp = cmd->stop_src;
cmd->stop_src &= TRIG_NONE;
if (!cmd->stop_src || tmp != cmd->stop_src)
err++;
if (err)
return 1;
/* step 2: ignored */
if (err)
return 2;
/* step 3: */
if (cmd->start_arg != 0) {
cmd->start_arg = 0;
err++;
}
if (cmd->scan_begin_arg != 0) {
cmd->scan_begin_arg = 0;
err++;
}
if (cmd->convert_arg != 0) {
cmd->convert_arg = 0;
err++;
}
if (cmd->scan_end_arg != 1) {
cmd->scan_end_arg = 1;
err++;
}
if (cmd->stop_arg != 0) {
cmd->stop_arg = 0;
err++;
}
if (err)
return 3;
/* step 4: ignored */
if (err)
return 4;
return 0;
}
/*
* Subdevice 1 command.
*/
static int pc236_intr_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
{
pc236_intr_enable(dev);
return 0;
}
/*
* Subdevice 1 cancel command.
*/
static int pc236_intr_cancel(struct comedi_device *dev,
struct comedi_subdevice *s)
{
pc236_intr_disable(dev);
return 0;
}
/*
* Interrupt service routine.
* Based on the comedi_parport driver.
*/
static irqreturn_t pc236_interrupt(int irq, void *d)
{
struct comedi_device *dev = d;
struct comedi_subdevice *s = dev->subdevices + 1;
int handled;
handled = pc236_intr_check(dev);
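/* push a dummy sample and signal end-of-scan so a blocked reader wakes up */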
if (dev->attached && handled) {
comedi_buf_put(s->async, 0);
s->async->events |= COMEDI_CB_BLOCK | COMEDI_CB_EOS;
comedi_event(dev, s);
}
return IRQ_RETVAL(handled);
}
MODULE_AUTHOR("Comedi http://www.comedi.org");
MODULE_DESCRIPTION("Comedi low-level driver");
MODULE_LICENSE("GPL");
| gpl-2.0 |
cometzero/cometzero_e210s | arch/x86/mm/srat.c | 1200 | 4330 | /*
* ACPI 3.0 based NUMA setup
* Copyright 2004 Andi Kleen, SuSE Labs.
*
* Reads the ACPI SRAT table to figure out what memory belongs to which CPUs.
*
* Called from acpi_numa_init while reading the SRAT and SLIT tables.
* Assumes all memory regions belonging to a single proximity domain
* are in one chunk. Holes between them will be included in the node.
*/
#include <linux/kernel.h>
#include <linux/acpi.h>
#include <linux/mmzone.h>
#include <linux/bitmap.h>
#include <linux/module.h>
#include <linux/topology.h>
#include <linux/bootmem.h>
#include <linux/memblock.h>
#include <linux/mm.h>
#include <asm/proto.h>
#include <asm/numa.h>
#include <asm/e820.h>
#include <asm/apic.h>
#include <asm/uv/uv.h>
int acpi_numa __initdata;
static __init int setup_node(int pxm)
{
return acpi_map_pxm_to_node(pxm);
}
static __init void bad_srat(void)
{
printk(KERN_ERR "SRAT: SRAT not used.\n");
acpi_numa = -1;
}
static __init inline int srat_disabled(void)
{
return acpi_numa < 0;
}
/* Callback for SLIT parsing */
void __init acpi_numa_slit_init(struct acpi_table_slit *slit)
{
int i, j;
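/*
 * The SLIT is a locality_count x locality_count matrix of relative
 * distances stored row-major; copy every entry into the kernel's NUMA
 * distance table.
 */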
for (i = 0; i < slit->locality_count; i++)
for (j = 0; j < slit->locality_count; j++)
numa_set_distance(pxm_to_node(i), pxm_to_node(j),
slit->entry[slit->locality_count * i + j]);
}
/* Callback for Proximity Domain -> x2APIC mapping */
void __init
acpi_numa_x2apic_affinity_init(struct acpi_srat_x2apic_cpu_affinity *pa)
{
int pxm, node;
int apic_id;
if (srat_disabled())
return;
if (pa->header.length < sizeof(struct acpi_srat_x2apic_cpu_affinity)) {
bad_srat();
return;
}
if ((pa->flags & ACPI_SRAT_CPU_ENABLED) == 0)
return;
pxm = pa->proximity_domain;
node = setup_node(pxm);
if (node < 0) {
printk(KERN_ERR "SRAT: Too many proximity domains %x\n", pxm);
bad_srat();
return;
}
apic_id = pa->apic_id;
if (apic_id >= MAX_LOCAL_APIC) {
printk(KERN_INFO "SRAT: PXM %u -> APIC 0x%04x -> Node %u skipped apicid that is too big\n", pxm, apic_id, node);
return;
}
set_apicid_to_node(apic_id, node);
node_set(node, numa_nodes_parsed);
acpi_numa = 1;
printk(KERN_INFO "SRAT: PXM %u -> APIC 0x%04x -> Node %u\n",
pxm, apic_id, node);
}
/* Callback for Proximity Domain -> LAPIC mapping */
void __init
acpi_numa_processor_affinity_init(struct acpi_srat_cpu_affinity *pa)
{
int pxm, node;
int apic_id;
if (srat_disabled())
return;
if (pa->header.length != sizeof(struct acpi_srat_cpu_affinity)) {
bad_srat();
return;
}
if ((pa->flags & ACPI_SRAT_CPU_ENABLED) == 0)
return;
pxm = pa->proximity_domain_lo;
node = setup_node(pxm);
if (node < 0) {
printk(KERN_ERR "SRAT: Too many proximity domains %x\n", pxm);
bad_srat();
return;
}
if (get_uv_system_type() >= UV_X2APIC)
apic_id = (pa->apic_id << 8) | pa->local_sapic_eid;
else
apic_id = pa->apic_id;
if (apic_id >= MAX_LOCAL_APIC) {
printk(KERN_INFO "SRAT: PXM %u -> APIC 0x%02x -> Node %u skipped apicid that is too big\n", pxm, apic_id, node);
return;
}
set_apicid_to_node(apic_id, node);
node_set(node, numa_nodes_parsed);
acpi_numa = 1;
printk(KERN_INFO "SRAT: PXM %u -> APIC 0x%02x -> Node %u\n",
pxm, apic_id, node);
}
#ifdef CONFIG_MEMORY_HOTPLUG
static inline int save_add_info(void) {return 1;}
#else
static inline int save_add_info(void) {return 0;}
#endif
/* Callback for parsing of the Proximity Domain <-> Memory Area mappings */
void __init
acpi_numa_memory_affinity_init(struct acpi_srat_mem_affinity *ma)
{
u64 start, end;
int node, pxm;
if (srat_disabled())
return;
if (ma->header.length != sizeof(struct acpi_srat_mem_affinity)) {
bad_srat();
return;
}
if ((ma->flags & ACPI_SRAT_MEM_ENABLED) == 0)
return;
if ((ma->flags & ACPI_SRAT_MEM_HOT_PLUGGABLE) && !save_add_info())
return;
start = ma->base_address;
end = start + ma->length;
pxm = ma->proximity_domain;
node = setup_node(pxm);
if (node < 0) {
printk(KERN_ERR "SRAT: Too many proximity domains.\n");
bad_srat();
return;
}
if (numa_add_memblk(node, start, end) < 0) {
bad_srat();
return;
}
printk(KERN_INFO "SRAT: Node %u PXM %u %Lx-%Lx\n", node, pxm,
start, end);
}
void __init acpi_numa_arch_fixup(void) {}
int __init x86_acpi_numa_init(void)
{
int ret;
ret = acpi_numa_init();
if (ret < 0)
return ret;
return srat_disabled() ? -EINVAL : 0;
}
| gpl-2.0 |
HydraCompany/HydraKernel | drivers/usb/usbip/stub_rx.c | 1456 | 15026 | /*
* Copyright (C) 2003-2008 Takahiro Hirofuchi
*
* This is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307,
* USA.
*/
#include <asm/byteorder.h>
#include <linux/kthread.h>
#include <linux/usb.h>
#include <linux/usb/hcd.h>
#include "usbip_common.h"
#include "stub.h"
static int is_clear_halt_cmd(struct urb *urb)
{
struct usb_ctrlrequest *req;
req = (struct usb_ctrlrequest *) urb->setup_packet;
return (req->bRequest == USB_REQ_CLEAR_FEATURE) &&
(req->bRequestType == USB_RECIP_ENDPOINT) &&
(req->wValue == USB_ENDPOINT_HALT);
}
static int is_set_interface_cmd(struct urb *urb)
{
struct usb_ctrlrequest *req;
req = (struct usb_ctrlrequest *) urb->setup_packet;
return (req->bRequest == USB_REQ_SET_INTERFACE) &&
(req->bRequestType == USB_RECIP_INTERFACE);
}
static int is_set_configuration_cmd(struct urb *urb)
{
struct usb_ctrlrequest *req;
req = (struct usb_ctrlrequest *) urb->setup_packet;
return (req->bRequest == USB_REQ_SET_CONFIGURATION) &&
(req->bRequestType == USB_RECIP_DEVICE);
}
static int is_reset_device_cmd(struct urb *urb)
{
struct usb_ctrlrequest *req;
__u16 value;
__u16 index;
req = (struct usb_ctrlrequest *) urb->setup_packet;
value = le16_to_cpu(req->wValue);
index = le16_to_cpu(req->wIndex);
if ((req->bRequest == USB_REQ_SET_FEATURE) &&
(req->bRequestType == USB_RT_PORT) &&
(value == USB_PORT_FEAT_RESET)) {
usbip_dbg_stub_rx("reset_device_cmd, port %u\n", index);
return 1;
} else
return 0;
}
static int tweak_clear_halt_cmd(struct urb *urb)
{
struct usb_ctrlrequest *req;
int target_endp;
int target_dir;
int target_pipe;
int ret;
req = (struct usb_ctrlrequest *) urb->setup_packet;
/*
* The stalled endpoint is specified in the wIndex value. The endpoint
* of the urb is the target of this clear_halt request (i.e., control
* endpoint).
*/
target_endp = le16_to_cpu(req->wIndex) & 0x000f;
/* is the stalled endpoint direction IN or OUT? USB_DIR_IN is 0x80. */
target_dir = le16_to_cpu(req->wIndex) & 0x0080;
if (target_dir)
target_pipe = usb_rcvctrlpipe(urb->dev, target_endp);
else
target_pipe = usb_sndctrlpipe(urb->dev, target_endp);
ret = usb_clear_halt(urb->dev, target_pipe);
if (ret < 0)
dev_err(&urb->dev->dev,
"usb_clear_halt error: devnum %d endp %d ret %d\n",
urb->dev->devnum, target_endp, ret);
else
dev_info(&urb->dev->dev,
"usb_clear_halt done: devnum %d endp %d\n",
urb->dev->devnum, target_endp);
return ret;
}
static int tweak_set_interface_cmd(struct urb *urb)
{
struct usb_ctrlrequest *req;
__u16 alternate;
__u16 interface;
int ret;
req = (struct usb_ctrlrequest *) urb->setup_packet;
alternate = le16_to_cpu(req->wValue);
interface = le16_to_cpu(req->wIndex);
usbip_dbg_stub_rx("set_interface: inf %u alt %u\n",
interface, alternate);
ret = usb_set_interface(urb->dev, interface, alternate);
if (ret < 0)
dev_err(&urb->dev->dev,
"usb_set_interface error: inf %u alt %u ret %d\n",
interface, alternate, ret);
else
dev_info(&urb->dev->dev,
"usb_set_interface done: inf %u alt %u\n",
interface, alternate);
return ret;
}
static int tweak_set_configuration_cmd(struct urb *urb)
{
struct stub_priv *priv = (struct stub_priv *) urb->context;
struct stub_device *sdev = priv->sdev;
struct usb_ctrlrequest *req;
__u16 config;
int err;
req = (struct usb_ctrlrequest *) urb->setup_packet;
config = le16_to_cpu(req->wValue);
err = usb_set_configuration(sdev->udev, config);
if (err && err != -ENODEV)
dev_err(&sdev->udev->dev, "can't set config #%d, error %d\n",
config, err);
return 0;
}
static int tweak_reset_device_cmd(struct urb *urb)
{
struct stub_priv *priv = (struct stub_priv *) urb->context;
struct stub_device *sdev = priv->sdev;
dev_info(&urb->dev->dev, "usb_queue_reset_device\n");
/*
* With the implementation of pre_reset and post_reset the driver no
* longer unbinds. This allows the use of synchronous reset.
*/
if (usb_lock_device_for_reset(sdev->udev, sdev->interface) < 0) {
dev_err(&urb->dev->dev, "could not obtain lock to reset device\n");
return 0;
}
usb_reset_device(sdev->udev);
usb_unlock_device(sdev->udev);
return 0;
}
/*
* clear_halt, set_interface, and set_configuration require special tricks.
*/
static void tweak_special_requests(struct urb *urb)
{
if (!urb || !urb->setup_packet)
return;
if (usb_pipetype(urb->pipe) != PIPE_CONTROL)
return;
if (is_clear_halt_cmd(urb))
/* tweak clear_halt */
tweak_clear_halt_cmd(urb);
else if (is_set_interface_cmd(urb))
/* tweak set_interface */
tweak_set_interface_cmd(urb);
else if (is_set_configuration_cmd(urb))
/* tweak set_configuration */
tweak_set_configuration_cmd(urb);
else if (is_reset_device_cmd(urb))
tweak_reset_device_cmd(urb);
else
usbip_dbg_stub_rx("no need to tweak\n");
}
/*
* stub_recv_cmd_unlink() unlinks the URB by a call to usb_unlink_urb().
* By unlinking the urb asynchronously, stub_rx can continuously
* process incoming urbs. Even if the urb is unlinked, its completion
* handler will be called and stub_tx will send a return pdu.
*
* See also comments about unlinking strategy in vhci_hcd.c.
*/
static int stub_recv_cmd_unlink(struct stub_device *sdev,
struct usbip_header *pdu)
{
int ret;
unsigned long flags;
struct stub_priv *priv;
spin_lock_irqsave(&sdev->priv_lock, flags);
list_for_each_entry(priv, &sdev->priv_init, list) {
if (priv->seqnum != pdu->u.cmd_unlink.seqnum)
continue;
dev_info(&priv->urb->dev->dev, "unlink urb %p\n",
priv->urb);
/*
* This matched urb is not completed yet (i.e., it is still in
* flight in the usb hcd hardware/driver). Now we are
* cancelling it. The unlinking flag means that we are
* now not going to return the normal result pdu of a
* submission request, but going to return a result pdu
* of the unlink request.
*/
priv->unlinking = 1;
/*
* In the case that the unlinking flag is on, priv->seqnum
* is changed from the seqnum of the cancelled urb to
* the seqnum of the unlink request. This will be used
* to make the result pdu of the unlink request.
*/
priv->seqnum = pdu->base.seqnum;
spin_unlock_irqrestore(&sdev->priv_lock, flags);
/*
* usb_unlink_urb() is now out of spinlocking to avoid
* spinlock recursion since stub_complete() is
* sometimes called in this context but not in the
* interrupt context. If stub_complete() is executed
* before we call usb_unlink_urb(), usb_unlink_urb()
* will return an error value. In this case, stub_tx
* will still return the result pdu of this unlink request,
* even though the submission completed and no actual
* unlinking was performed. Is that OK?
*/
/* In the above case, urb->status is not -ECONNRESET,
* so will a driver on the client host recognize the
* failure of the unlink request?
*/
ret = usb_unlink_urb(priv->urb);
if (ret != -EINPROGRESS)
dev_err(&priv->urb->dev->dev,
"failed to unlink a urb %p, ret %d\n",
priv->urb, ret);
return 0;
}
usbip_dbg_stub_rx("seqnum %d is not pending\n",
pdu->u.cmd_unlink.seqnum);
/*
* The urb of the unlink target is not found in the priv_init queue. It has
* already completed and its result was (or is being) sent by a
* CMD_RET pdu. In this case, usb_unlink_urb() is not needed. We only
* report the completion of this unlink request to vhci_hcd.
*/
stub_enqueue_ret_unlink(sdev, pdu->base.seqnum, 0);
spin_unlock_irqrestore(&sdev->priv_lock, flags);
return 0;
}
static int valid_request(struct stub_device *sdev, struct usbip_header *pdu)
{
struct usbip_device *ud = &sdev->ud;
int valid = 0;
if (pdu->base.devid == sdev->devid) {
spin_lock_irq(&ud->lock);
if (ud->status == SDEV_ST_USED) {
/* A request is valid. */
valid = 1;
}
spin_unlock_irq(&ud->lock);
}
return valid;
}
static struct stub_priv *stub_priv_alloc(struct stub_device *sdev,
struct usbip_header *pdu)
{
struct stub_priv *priv;
struct usbip_device *ud = &sdev->ud;
unsigned long flags;
spin_lock_irqsave(&sdev->priv_lock, flags);
priv = kmem_cache_zalloc(stub_priv_cache, GFP_ATOMIC);
if (!priv) {
dev_err(&sdev->interface->dev, "alloc stub_priv\n");
spin_unlock_irqrestore(&sdev->priv_lock, flags);
usbip_event_add(ud, SDEV_EVENT_ERROR_MALLOC);
return NULL;
}
priv->seqnum = pdu->base.seqnum;
priv->sdev = sdev;
/*
* After a stub_priv is linked to a list_head,
* our error handler can free allocated data.
*/
list_add_tail(&priv->list, &sdev->priv_init);
spin_unlock_irqrestore(&sdev->priv_lock, flags);
return priv;
}
static int get_pipe(struct stub_device *sdev, int epnum, int dir)
{
struct usb_device *udev = sdev->udev;
struct usb_host_endpoint *ep;
struct usb_endpoint_descriptor *epd = NULL;
if (dir == USBIP_DIR_IN)
ep = udev->ep_in[epnum & 0x7f];
else
ep = udev->ep_out[epnum & 0x7f];
if (!ep) {
dev_err(&sdev->interface->dev, "no such endpoint?, %d\n",
epnum);
BUG();
}
epd = &ep->desc;
if (usb_endpoint_xfer_control(epd)) {
if (dir == USBIP_DIR_OUT)
return usb_sndctrlpipe(udev, epnum);
else
return usb_rcvctrlpipe(udev, epnum);
}
if (usb_endpoint_xfer_bulk(epd)) {
if (dir == USBIP_DIR_OUT)
return usb_sndbulkpipe(udev, epnum);
else
return usb_rcvbulkpipe(udev, epnum);
}
if (usb_endpoint_xfer_int(epd)) {
if (dir == USBIP_DIR_OUT)
return usb_sndintpipe(udev, epnum);
else
return usb_rcvintpipe(udev, epnum);
}
if (usb_endpoint_xfer_isoc(epd)) {
if (dir == USBIP_DIR_OUT)
return usb_sndisocpipe(udev, epnum);
else
return usb_rcvisocpipe(udev, epnum);
}
/* NOT REACHED */
dev_err(&sdev->interface->dev, "get pipe, epnum %d\n", epnum);
return 0;
}
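/*
 * Sanitize the transfer_flags of an URB built from data received over the
 * network: keep only the flags that are valid for the endpoint type,
 * mirroring the checks done by usb_submit_urb(), so that bogus flags from
 * the peer never reach the host controller driver.
 */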
static void masking_bogus_flags(struct urb *urb)
{
int xfertype;
struct usb_device *dev;
struct usb_host_endpoint *ep;
int is_out;
unsigned int allowed;
if (!urb || urb->hcpriv || !urb->complete)
return;
dev = urb->dev;
if ((!dev) || (dev->state < USB_STATE_UNAUTHENTICATED))
return;
ep = (usb_pipein(urb->pipe) ? dev->ep_in : dev->ep_out)
[usb_pipeendpoint(urb->pipe)];
if (!ep)
return;
xfertype = usb_endpoint_type(&ep->desc);
if (xfertype == USB_ENDPOINT_XFER_CONTROL) {
struct usb_ctrlrequest *setup =
(struct usb_ctrlrequest *) urb->setup_packet;
if (!setup)
return;
is_out = !(setup->bRequestType & USB_DIR_IN) ||
!setup->wLength;
} else {
is_out = usb_endpoint_dir_out(&ep->desc);
}
/* enforce simple/standard policy */
allowed = (URB_NO_TRANSFER_DMA_MAP | URB_NO_INTERRUPT |
URB_DIR_MASK | URB_FREE_BUFFER);
switch (xfertype) {
case USB_ENDPOINT_XFER_BULK:
if (is_out)
allowed |= URB_ZERO_PACKET;
/* FALLTHROUGH */
case USB_ENDPOINT_XFER_CONTROL:
allowed |= URB_NO_FSBR; /* only affects UHCI */
/* FALLTHROUGH */
default: /* all non-iso endpoints */
if (!is_out)
allowed |= URB_SHORT_NOT_OK;
break;
case USB_ENDPOINT_XFER_ISOC:
allowed |= URB_ISO_ASAP;
break;
}
urb->transfer_flags &= allowed;
}
static void stub_recv_cmd_submit(struct stub_device *sdev,
struct usbip_header *pdu)
{
int ret;
struct stub_priv *priv;
struct usbip_device *ud = &sdev->ud;
struct usb_device *udev = sdev->udev;
int pipe = get_pipe(sdev, pdu->base.ep, pdu->base.direction);
priv = stub_priv_alloc(sdev, pdu);
if (!priv)
return;
/* setup a urb */
if (usb_pipeisoc(pipe))
priv->urb = usb_alloc_urb(pdu->u.cmd_submit.number_of_packets,
GFP_KERNEL);
else
priv->urb = usb_alloc_urb(0, GFP_KERNEL);
if (!priv->urb) {
dev_err(&sdev->interface->dev, "malloc urb\n");
usbip_event_add(ud, SDEV_EVENT_ERROR_MALLOC);
return;
}
/* allocate urb transfer buffer, if needed */
if (pdu->u.cmd_submit.transfer_buffer_length > 0) {
priv->urb->transfer_buffer =
kzalloc(pdu->u.cmd_submit.transfer_buffer_length,
GFP_KERNEL);
if (!priv->urb->transfer_buffer) {
usbip_event_add(ud, SDEV_EVENT_ERROR_MALLOC);
return;
}
}
/* copy urb setup packet */
priv->urb->setup_packet = kmemdup(&pdu->u.cmd_submit.setup, 8,
GFP_KERNEL);
if (!priv->urb->setup_packet) {
dev_err(&sdev->interface->dev, "allocate setup_packet\n");
usbip_event_add(ud, SDEV_EVENT_ERROR_MALLOC);
return;
}
/* set other members from the base header of pdu */
priv->urb->context = (void *) priv;
priv->urb->dev = udev;
priv->urb->pipe = pipe;
priv->urb->complete = stub_complete;
usbip_pack_pdu(pdu, priv->urb, USBIP_CMD_SUBMIT, 0);
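/* receive the URB transfer buffer (for OUT transfers) and any ISO packet
 * descriptors that follow the PDU header on the socket */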
if (usbip_recv_xbuff(ud, priv->urb) < 0)
return;
if (usbip_recv_iso(ud, priv->urb) < 0)
return;
/* no need to submit an intercepted request, but harmless? */
tweak_special_requests(priv->urb);
masking_bogus_flags(priv->urb);
/* urb is now ready to submit */
ret = usb_submit_urb(priv->urb, GFP_KERNEL);
if (ret == 0)
usbip_dbg_stub_rx("submit urb ok, seqnum %u\n",
pdu->base.seqnum);
else {
dev_err(&sdev->interface->dev, "submit_urb error, %d\n", ret);
usbip_dump_header(pdu);
usbip_dump_urb(priv->urb);
/*
* Pessimistic.
* This connection will be discarded.
*/
usbip_event_add(ud, SDEV_EVENT_ERROR_SUBMIT);
}
usbip_dbg_stub_rx("Leave\n");
}
/* recv a pdu */
static void stub_rx_pdu(struct usbip_device *ud)
{
int ret;
struct usbip_header pdu;
struct stub_device *sdev = container_of(ud, struct stub_device, ud);
struct device *dev = &sdev->udev->dev;
usbip_dbg_stub_rx("Enter\n");
memset(&pdu, 0, sizeof(pdu));
/* receive a pdu header */
ret = usbip_recv(ud->tcp_socket, &pdu, sizeof(pdu));
if (ret != sizeof(pdu)) {
dev_err(dev, "recv a header, %d\n", ret);
usbip_event_add(ud, SDEV_EVENT_ERROR_TCP);
return;
}
usbip_header_correct_endian(&pdu, 0);
if (usbip_dbg_flag_stub_rx)
usbip_dump_header(&pdu);
if (!valid_request(sdev, &pdu)) {
dev_err(dev, "recv invalid request\n");
usbip_event_add(ud, SDEV_EVENT_ERROR_TCP);
return;
}
switch (pdu.base.command) {
case USBIP_CMD_UNLINK:
stub_recv_cmd_unlink(sdev, &pdu);
break;
case USBIP_CMD_SUBMIT:
stub_recv_cmd_submit(sdev, &pdu);
break;
default:
/* NOTREACHED */
dev_err(dev, "unknown pdu\n");
usbip_event_add(ud, SDEV_EVENT_ERROR_TCP);
break;
}
}
int stub_rx_loop(void *data)
{
struct usbip_device *ud = data;
while (!kthread_should_stop()) {
if (usbip_event_happened(ud))
break;
stub_rx_pdu(ud);
}
return 0;
}
| gpl-2.0 |
HRTKernel/Hacker_Kernel_SM-G920P | drivers/usb/phy/phy-omap-usb2.c | 2224 | 6486 | /*
* omap-usb2.c - USB PHY, talking to musb controller in OMAP.
*
* Copyright (C) 2012 Texas Instruments Incorporated - http://www.ti.com
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* Author: Kishon Vijay Abraham I <kishon@ti.com>
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/of.h>
#include <linux/io.h>
#include <linux/usb/omap_usb.h>
#include <linux/usb/phy_companion.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/pm_runtime.h>
#include <linux/delay.h>
#include <linux/usb/omap_control_usb.h>
/**
* omap_usb2_set_comparator - links the comparator present in the system with
* this phy
* @comparator - the companion phy (comparator) for this phy
*
* The phy companion driver should call this API passing the phy_companion
* filled with set_vbus and start_srp to be used by usb phy.
*
* For use by phy companion driver
*/
int omap_usb2_set_comparator(struct phy_companion *comparator)
{
struct omap_usb *phy;
struct usb_phy *x = usb_get_phy(USB_PHY_TYPE_USB2);
if (IS_ERR(x))
return -ENODEV;
phy = phy_to_omapusb(x);
phy->comparator = comparator;
return 0;
}
EXPORT_SYMBOL_GPL(omap_usb2_set_comparator);
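/*
 * Illustrative sketch (the callback names below are hypothetical, not
 * from this driver): a companion/comparator driver fills a
 * struct phy_companion with its set_vbus and start_srp handlers and
 * registers it here:
 *
 *	static struct phy_companion my_comparator = {
 *		.set_vbus	= my_set_vbus,
 *		.start_srp	= my_start_srp,
 *	};
 *
 *	ret = omap_usb2_set_comparator(&my_comparator);
 *	(ret is -ENODEV when no USB2 PHY has been registered yet)
 */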
static int omap_usb_set_vbus(struct usb_otg *otg, bool enabled)
{
struct omap_usb *phy = phy_to_omapusb(otg->phy);
if (!phy->comparator)
return -ENODEV;
return phy->comparator->set_vbus(phy->comparator, enabled);
}
static int omap_usb_start_srp(struct usb_otg *otg)
{
struct omap_usb *phy = phy_to_omapusb(otg->phy);
if (!phy->comparator)
return -ENODEV;
return phy->comparator->start_srp(phy->comparator);
}
static int omap_usb_set_host(struct usb_otg *otg, struct usb_bus *host)
{
struct usb_phy *phy = otg->phy;
otg->host = host;
if (!host)
phy->state = OTG_STATE_UNDEFINED;
return 0;
}
static int omap_usb_set_peripheral(struct usb_otg *otg,
struct usb_gadget *gadget)
{
struct usb_phy *phy = otg->phy;
otg->gadget = gadget;
if (!gadget)
phy->state = OTG_STATE_UNDEFINED;
return 0;
}
static int omap_usb2_suspend(struct usb_phy *x, int suspend)
{
int ret;
struct omap_usb *phy = phy_to_omapusb(x);
if (suspend && !phy->is_suspended) {
omap_control_usb_phy_power(phy->control_dev, 0);
pm_runtime_put_sync(phy->dev);
phy->is_suspended = 1;
} else if (!suspend && phy->is_suspended) {
ret = pm_runtime_get_sync(phy->dev);
if (ret < 0) {
dev_err(phy->dev, "get_sync failed with err %d\n",
ret);
return ret;
}
omap_control_usb_phy_power(phy->control_dev, 1);
phy->is_suspended = 0;
}
return 0;
}
static int omap_usb2_probe(struct platform_device *pdev)
{
struct omap_usb *phy;
struct usb_otg *otg;
phy = devm_kzalloc(&pdev->dev, sizeof(*phy), GFP_KERNEL);
if (!phy) {
dev_err(&pdev->dev, "unable to allocate memory for USB2 PHY\n");
return -ENOMEM;
}
otg = devm_kzalloc(&pdev->dev, sizeof(*otg), GFP_KERNEL);
if (!otg) {
dev_err(&pdev->dev, "unable to allocate memory for USB OTG\n");
return -ENOMEM;
}
phy->dev = &pdev->dev;
phy->phy.dev = phy->dev;
phy->phy.label = "omap-usb2";
phy->phy.set_suspend = omap_usb2_suspend;
phy->phy.otg = otg;
phy->phy.type = USB_PHY_TYPE_USB2;
phy->control_dev = omap_get_control_dev();
if (IS_ERR(phy->control_dev)) {
dev_dbg(&pdev->dev, "Failed to get control device\n");
return -ENODEV;
}
phy->is_suspended = 1;
omap_control_usb_phy_power(phy->control_dev, 0);
otg->set_host = omap_usb_set_host;
otg->set_peripheral = omap_usb_set_peripheral;
otg->set_vbus = omap_usb_set_vbus;
otg->start_srp = omap_usb_start_srp;
otg->phy = &phy->phy;
phy->wkupclk = devm_clk_get(phy->dev, "usb_phy_cm_clk32k");
if (IS_ERR(phy->wkupclk)) {
dev_err(&pdev->dev, "unable to get usb_phy_cm_clk32k\n");
return PTR_ERR(phy->wkupclk);
}
clk_prepare(phy->wkupclk);
phy->optclk = devm_clk_get(phy->dev, "usb_otg_ss_refclk960m");
if (IS_ERR(phy->optclk))
dev_vdbg(&pdev->dev, "unable to get refclk960m\n");
else
clk_prepare(phy->optclk);
usb_add_phy_dev(&phy->phy);
platform_set_drvdata(pdev, phy);
pm_runtime_enable(phy->dev);
return 0;
}
static int omap_usb2_remove(struct platform_device *pdev)
{
struct omap_usb *phy = platform_get_drvdata(pdev);
clk_unprepare(phy->wkupclk);
if (!IS_ERR(phy->optclk))
clk_unprepare(phy->optclk);
usb_remove_phy(&phy->phy);
return 0;
}
#ifdef CONFIG_PM_RUNTIME
static int omap_usb2_runtime_suspend(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
struct omap_usb *phy = platform_get_drvdata(pdev);
clk_disable(phy->wkupclk);
if (!IS_ERR(phy->optclk))
clk_disable(phy->optclk);
return 0;
}
static int omap_usb2_runtime_resume(struct device *dev)
{
int ret = 0;
struct platform_device *pdev = to_platform_device(dev);
struct omap_usb *phy = platform_get_drvdata(pdev);
ret = clk_enable(phy->wkupclk);
if (ret < 0) {
dev_err(phy->dev, "Failed to enable wkupclk %d\n", ret);
goto err0;
}
if (!IS_ERR(phy->optclk)) {
ret = clk_enable(phy->optclk);
if (ret < 0) {
dev_err(phy->dev, "Failed to enable optclk %d\n", ret);
goto err1;
}
}
return 0;
err1:
clk_disable(phy->wkupclk);
err0:
return ret;
}
static const struct dev_pm_ops omap_usb2_pm_ops = {
SET_RUNTIME_PM_OPS(omap_usb2_runtime_suspend, omap_usb2_runtime_resume,
NULL)
};
#define DEV_PM_OPS (&omap_usb2_pm_ops)
#else
#define DEV_PM_OPS NULL
#endif
#ifdef CONFIG_OF
static const struct of_device_id omap_usb2_id_table[] = {
{ .compatible = "ti,omap-usb2" },
{}
};
MODULE_DEVICE_TABLE(of, omap_usb2_id_table);
#endif
static struct platform_driver omap_usb2_driver = {
.probe = omap_usb2_probe,
.remove = omap_usb2_remove,
.driver = {
.name = "omap-usb2",
.owner = THIS_MODULE,
.pm = DEV_PM_OPS,
.of_match_table = of_match_ptr(omap_usb2_id_table),
},
};
module_platform_driver(omap_usb2_driver);
MODULE_ALIAS("platform: omap_usb2");
MODULE_AUTHOR("Texas Instruments Inc.");
MODULE_DESCRIPTION("OMAP USB2 phy driver");
MODULE_LICENSE("GPL v2");
| gpl-2.0 |
qwerty97/rk3066 | drivers/bluetooth/btuart_cs.c | 2480 | 15159 | /*
*
* Driver for Bluetooth PCMCIA cards with HCI UART interface
*
* Copyright (C) 2001-2002 Marcel Holtmann <marcel@holtmann.org>
*
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation;
*
* Software distributed under the License is distributed on an "AS
* IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or
* implied. See the License for the specific language governing
* rights and limitations under the License.
*
* The initial developer of the original code is David A. Hinds
* <dahinds@users.sourceforge.net>. Portions created by David A. Hinds
* are Copyright (C) 1999 David A. Hinds. All Rights Reserved.
*
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/ioport.h>
#include <linux/spinlock.h>
#include <linux/moduleparam.h>
#include <linux/skbuff.h>
#include <linux/string.h>
#include <linux/serial.h>
#include <linux/serial_reg.h>
#include <linux/bitops.h>
#include <asm/system.h>
#include <asm/io.h>
#include <pcmcia/cistpl.h>
#include <pcmcia/ciscode.h>
#include <pcmcia/ds.h>
#include <pcmcia/cisreg.h>
#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
/* ======================== Module parameters ======================== */
MODULE_AUTHOR("Marcel Holtmann <marcel@holtmann.org>");
MODULE_DESCRIPTION("Bluetooth driver for Bluetooth PCMCIA cards with HCI UART interface");
MODULE_LICENSE("GPL");
/* ======================== Local structures ======================== */
typedef struct btuart_info_t {
struct pcmcia_device *p_dev;
struct hci_dev *hdev;
spinlock_t lock; /* For serializing operations */
struct sk_buff_head txq;
unsigned long tx_state;
unsigned long rx_state;
unsigned long rx_count;
struct sk_buff *rx_skb;
} btuart_info_t;
static int btuart_config(struct pcmcia_device *link);
static void btuart_release(struct pcmcia_device *link);
static void btuart_detach(struct pcmcia_device *p_dev);
/* Maximum baud rate */
#define SPEED_MAX 115200
/* Default baud rate: 57600, 115200, 230400 or 460800 */
#define DEFAULT_BAUD_RATE 115200
/* Transmit states */
#define XMIT_SENDING 1
#define XMIT_WAKEUP 2
#define XMIT_WAITING 8
/* Receiver states */
#define RECV_WAIT_PACKET_TYPE 0
#define RECV_WAIT_EVENT_HEADER 1
#define RECV_WAIT_ACL_HEADER 2
#define RECV_WAIT_SCO_HEADER 3
#define RECV_WAIT_DATA 4
/* ======================== Interrupt handling ======================== */
static int btuart_write(unsigned int iobase, int fifo_size, __u8 *buf, int len)
{
int actual = 0;
/* Tx FIFO should be empty */
if (!(inb(iobase + UART_LSR) & UART_LSR_THRE))
return 0;
/* Fill FIFO with current frame */
while ((fifo_size-- > 0) && (actual < len)) {
/* Transmit next byte */
outb(buf[actual], iobase + UART_TX);
actual++;
}
return actual;
}
static void btuart_write_wakeup(btuart_info_t *info)
{
if (!info) {
BT_ERR("Unknown device");
return;
}
if (test_and_set_bit(XMIT_SENDING, &(info->tx_state))) {
set_bit(XMIT_WAKEUP, &(info->tx_state));
return;
}
do {
register unsigned int iobase = info->p_dev->resource[0]->start;
register struct sk_buff *skb;
register int len;
clear_bit(XMIT_WAKEUP, &(info->tx_state));
if (!pcmcia_dev_present(info->p_dev))
return;
if (!(skb = skb_dequeue(&(info->txq))))
break;
/* Send frame */
len = btuart_write(iobase, 16, skb->data, skb->len);
set_bit(XMIT_WAKEUP, &(info->tx_state));
if (len == skb->len) {
kfree_skb(skb);
} else {
skb_pull(skb, len);
skb_queue_head(&(info->txq), skb);
}
info->hdev->stat.byte_tx += len;
} while (test_bit(XMIT_WAKEUP, &(info->tx_state)));
clear_bit(XMIT_SENDING, &(info->tx_state));
}
static void btuart_receive(btuart_info_t *info)
{
unsigned int iobase;
int boguscount = 0;
if (!info) {
BT_ERR("Unknown device");
return;
}
iobase = info->p_dev->resource[0]->start;
do {
info->hdev->stat.byte_rx++;
/* Allocate packet */
if (info->rx_skb == NULL) {
info->rx_state = RECV_WAIT_PACKET_TYPE;
info->rx_count = 0;
if (!(info->rx_skb = bt_skb_alloc(HCI_MAX_FRAME_SIZE, GFP_ATOMIC))) {
BT_ERR("Can't allocate mem for new packet");
return;
}
}
if (info->rx_state == RECV_WAIT_PACKET_TYPE) {
info->rx_skb->dev = (void *) info->hdev;
bt_cb(info->rx_skb)->pkt_type = inb(iobase + UART_RX);
switch (bt_cb(info->rx_skb)->pkt_type) {
case HCI_EVENT_PKT:
info->rx_state = RECV_WAIT_EVENT_HEADER;
info->rx_count = HCI_EVENT_HDR_SIZE;
break;
case HCI_ACLDATA_PKT:
info->rx_state = RECV_WAIT_ACL_HEADER;
info->rx_count = HCI_ACL_HDR_SIZE;
break;
case HCI_SCODATA_PKT:
info->rx_state = RECV_WAIT_SCO_HEADER;
info->rx_count = HCI_SCO_HDR_SIZE;
break;
default:
/* Unknown packet */
BT_ERR("Unknown HCI packet with type 0x%02x received", bt_cb(info->rx_skb)->pkt_type);
info->hdev->stat.err_rx++;
clear_bit(HCI_RUNNING, &(info->hdev->flags));
kfree_skb(info->rx_skb);
info->rx_skb = NULL;
break;
}
} else {
*skb_put(info->rx_skb, 1) = inb(iobase + UART_RX);
info->rx_count--;
if (info->rx_count == 0) {
int dlen;
struct hci_event_hdr *eh;
struct hci_acl_hdr *ah;
struct hci_sco_hdr *sh;
switch (info->rx_state) {
case RECV_WAIT_EVENT_HEADER:
eh = hci_event_hdr(info->rx_skb);
info->rx_state = RECV_WAIT_DATA;
info->rx_count = eh->plen;
break;
case RECV_WAIT_ACL_HEADER:
ah = hci_acl_hdr(info->rx_skb);
dlen = __le16_to_cpu(ah->dlen);
info->rx_state = RECV_WAIT_DATA;
info->rx_count = dlen;
break;
case RECV_WAIT_SCO_HEADER:
sh = hci_sco_hdr(info->rx_skb);
info->rx_state = RECV_WAIT_DATA;
info->rx_count = sh->dlen;
break;
case RECV_WAIT_DATA:
hci_recv_frame(info->rx_skb);
info->rx_skb = NULL;
break;
}
}
}
/* Make sure we don't stay here too long */
if (boguscount++ > 16)
break;
} while (inb(iobase + UART_LSR) & UART_LSR_DR);
}
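/*
 * Descriptive note: btuart_receive() is a byte-at-a-time state machine.
 * The first byte of a frame selects the HCI packet type and therefore
 * the header length to collect (RECV_WAIT_EVENT_HEADER,
 * RECV_WAIT_ACL_HEADER or RECV_WAIT_SCO_HEADER); once the header is
 * complete the payload length is taken from it, the state moves to
 * RECV_WAIT_DATA, and the finished skb is handed to hci_recv_frame().
 */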
static irqreturn_t btuart_interrupt(int irq, void *dev_inst)
{
btuart_info_t *info = dev_inst;
unsigned int iobase;
int boguscount = 0;
int iir, lsr;
irqreturn_t r = IRQ_NONE;
if (!info || !info->hdev)
/* our irq handler is shared */
return IRQ_NONE;
iobase = info->p_dev->resource[0]->start;
spin_lock(&(info->lock));
iir = inb(iobase + UART_IIR) & UART_IIR_ID;
while (iir) {
r = IRQ_HANDLED;
/* Clear interrupt */
lsr = inb(iobase + UART_LSR);
switch (iir) {
case UART_IIR_RLSI:
BT_ERR("RLSI");
break;
case UART_IIR_RDI:
/* Receive interrupt */
btuart_receive(info);
break;
case UART_IIR_THRI:
if (lsr & UART_LSR_THRE) {
/* Transmitter ready for data */
btuart_write_wakeup(info);
}
break;
default:
BT_ERR("Unhandled IIR=%#x", iir);
break;
}
/* Make sure we don't stay here too long */
if (boguscount++ > 100)
break;
iir = inb(iobase + UART_IIR) & UART_IIR_ID;
}
spin_unlock(&(info->lock));
return r;
}
static void btuart_change_speed(btuart_info_t *info, unsigned int speed)
{
unsigned long flags;
unsigned int iobase;
int fcr; /* FIFO control reg */
int lcr; /* Line control reg */
int divisor;
if (!info) {
BT_ERR("Unknown device");
return;
}
iobase = info->p_dev->resource[0]->start;
spin_lock_irqsave(&(info->lock), flags);
/* Turn off interrupts */
outb(0, iobase + UART_IER);
divisor = SPEED_MAX / speed;
fcr = UART_FCR_ENABLE_FIFO | UART_FCR_CLEAR_RCVR | UART_FCR_CLEAR_XMIT;
/*
* Use trigger level 1 to avoid a 3 ms timeout delay at 9600 bps, and
* almost 1.7 ms at 19200 bps. At speeds above that we can just forget
* about this timeout since it will always be fast enough.
*/
if (speed < 38400)
fcr |= UART_FCR_TRIGGER_1;
else
fcr |= UART_FCR_TRIGGER_14;
/* Bluetooth cards use 8N1 */
lcr = UART_LCR_WLEN8;
outb(UART_LCR_DLAB | lcr, iobase + UART_LCR); /* Set DLAB */
outb(divisor & 0xff, iobase + UART_DLL); /* Set speed */
outb(divisor >> 8, iobase + UART_DLM);
outb(lcr, iobase + UART_LCR); /* Set 8N1 */
outb(fcr, iobase + UART_FCR); /* Enable FIFO's */
/* Turn on interrupts */
outb(UART_IER_RLSI | UART_IER_RDI | UART_IER_THRI, iobase + UART_IER);
spin_unlock_irqrestore(&(info->lock), flags);
}
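/*
 * Worked example (added note): divisor = SPEED_MAX / speed with
 * SPEED_MAX = 115200, so
 *
 *	speed 115200 -> divisor  1 (DLL = 0x01, DLM = 0x00)
 *	speed  57600 -> divisor  2 (DLL = 0x02, DLM = 0x00)
 *	speed   9600 -> divisor 12 (DLL = 0x0c, DLM = 0x00)
 *
 * Speeds below 38400 use UART_FCR_TRIGGER_1, everything else
 * UART_FCR_TRIGGER_14, as set up above.
 */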
/* ======================== HCI interface ======================== */
static int btuart_hci_flush(struct hci_dev *hdev)
{
btuart_info_t *info = (btuart_info_t *)(hdev->driver_data);
/* Drop TX queue */
skb_queue_purge(&(info->txq));
return 0;
}
static int btuart_hci_open(struct hci_dev *hdev)
{
set_bit(HCI_RUNNING, &(hdev->flags));
return 0;
}
static int btuart_hci_close(struct hci_dev *hdev)
{
if (!test_and_clear_bit(HCI_RUNNING, &(hdev->flags)))
return 0;
btuart_hci_flush(hdev);
return 0;
}
static int btuart_hci_send_frame(struct sk_buff *skb)
{
btuart_info_t *info;
struct hci_dev *hdev = (struct hci_dev *)(skb->dev);
if (!hdev) {
BT_ERR("Frame for unknown HCI device (hdev=NULL)");
return -ENODEV;
}
info = (btuart_info_t *)(hdev->driver_data);
switch (bt_cb(skb)->pkt_type) {
case HCI_COMMAND_PKT:
hdev->stat.cmd_tx++;
break;
case HCI_ACLDATA_PKT:
hdev->stat.acl_tx++;
break;
case HCI_SCODATA_PKT:
hdev->stat.sco_tx++;
break;
}
/* Prepend skb with frame type */
memcpy(skb_push(skb, 1), &bt_cb(skb)->pkt_type, 1);
skb_queue_tail(&(info->txq), skb);
btuart_write_wakeup(info);
return 0;
}
static void btuart_hci_destruct(struct hci_dev *hdev)
{
}
static int btuart_hci_ioctl(struct hci_dev *hdev, unsigned int cmd, unsigned long arg)
{
return -ENOIOCTLCMD;
}
/* ======================== Card services HCI interaction ======================== */
static int btuart_open(btuart_info_t *info)
{
unsigned long flags;
unsigned int iobase = info->p_dev->resource[0]->start;
struct hci_dev *hdev;
spin_lock_init(&(info->lock));
skb_queue_head_init(&(info->txq));
info->rx_state = RECV_WAIT_PACKET_TYPE;
info->rx_count = 0;
info->rx_skb = NULL;
/* Initialize HCI device */
hdev = hci_alloc_dev();
if (!hdev) {
BT_ERR("Can't allocate HCI device");
return -ENOMEM;
}
info->hdev = hdev;
hdev->bus = HCI_PCCARD;
hdev->driver_data = info;
SET_HCIDEV_DEV(hdev, &info->p_dev->dev);
hdev->open = btuart_hci_open;
hdev->close = btuart_hci_close;
hdev->flush = btuart_hci_flush;
hdev->send = btuart_hci_send_frame;
hdev->destruct = btuart_hci_destruct;
hdev->ioctl = btuart_hci_ioctl;
hdev->owner = THIS_MODULE;
spin_lock_irqsave(&(info->lock), flags);
/* Reset UART */
outb(0, iobase + UART_MCR);
/* Turn off interrupts */
outb(0, iobase + UART_IER);
/* Initialize UART */
outb(UART_LCR_WLEN8, iobase + UART_LCR); /* Reset DLAB */
outb((UART_MCR_DTR | UART_MCR_RTS | UART_MCR_OUT2), iobase + UART_MCR);
/* Turn on interrupts */
// outb(UART_IER_RLSI | UART_IER_RDI | UART_IER_THRI, iobase + UART_IER);
spin_unlock_irqrestore(&(info->lock), flags);
btuart_change_speed(info, DEFAULT_BAUD_RATE);
/* Timeout before it is safe to send the first HCI packet */
msleep(1000);
/* Register HCI device */
if (hci_register_dev(hdev) < 0) {
BT_ERR("Can't register HCI device");
info->hdev = NULL;
hci_free_dev(hdev);
return -ENODEV;
}
return 0;
}
static int btuart_close(btuart_info_t *info)
{
unsigned long flags;
unsigned int iobase = info->p_dev->resource[0]->start;
struct hci_dev *hdev = info->hdev;
if (!hdev)
return -ENODEV;
btuart_hci_close(hdev);
spin_lock_irqsave(&(info->lock), flags);
/* Reset UART */
outb(0, iobase + UART_MCR);
/* Turn off interrupts */
outb(0, iobase + UART_IER);
spin_unlock_irqrestore(&(info->lock), flags);
if (hci_unregister_dev(hdev) < 0)
BT_ERR("Can't unregister HCI device %s", hdev->name);
hci_free_dev(hdev);
return 0;
}
static int btuart_probe(struct pcmcia_device *link)
{
btuart_info_t *info;
/* Create new info device */
info = kzalloc(sizeof(*info), GFP_KERNEL);
if (!info)
return -ENOMEM;
info->p_dev = link;
link->priv = info;
link->config_flags |= CONF_ENABLE_IRQ | CONF_AUTO_SET_VPP |
CONF_AUTO_SET_IO;
return btuart_config(link);
}
static void btuart_detach(struct pcmcia_device *link)
{
btuart_info_t *info = link->priv;
btuart_release(link);
kfree(info);
}
static int btuart_check_config(struct pcmcia_device *p_dev, void *priv_data)
{
int *try = priv_data;
if (*try == 0)
p_dev->io_lines = 16;
if ((p_dev->resource[0]->end != 8) || (p_dev->resource[0]->start == 0))
return -EINVAL;
p_dev->resource[0]->end = 8;
p_dev->resource[0]->flags &= ~IO_DATA_PATH_WIDTH;
p_dev->resource[0]->flags |= IO_DATA_PATH_WIDTH_8;
return pcmcia_request_io(p_dev);
}
static int btuart_check_config_notpicky(struct pcmcia_device *p_dev,
void *priv_data)
{
static unsigned int base[5] = { 0x3f8, 0x2f8, 0x3e8, 0x2e8, 0x0 };
int j;
if (p_dev->io_lines > 3)
return -ENODEV;
p_dev->resource[0]->flags &= ~IO_DATA_PATH_WIDTH;
p_dev->resource[0]->flags |= IO_DATA_PATH_WIDTH_8;
p_dev->resource[0]->end = 8;
for (j = 0; j < 5; j++) {
p_dev->resource[0]->start = base[j];
p_dev->io_lines = base[j] ? 16 : 3;
if (!pcmcia_request_io(p_dev))
return 0;
}
return -ENODEV;
}
static int btuart_config(struct pcmcia_device *link)
{
btuart_info_t *info = link->priv;
int i;
int try;
/* First pass: look for a config entry that looks normal.
Two tries: without IO aliases, then with aliases */
for (try = 0; try < 2; try++)
if (!pcmcia_loop_config(link, btuart_check_config, &try))
goto found_port;
/* Second pass: try to find an entry that isn't picky about
its base address, then try to grab any standard serial port
address, and finally try to get any free port. */
if (!pcmcia_loop_config(link, btuart_check_config_notpicky, NULL))
goto found_port;
BT_ERR("No usable port range found");
goto failed;
found_port:
i = pcmcia_request_irq(link, btuart_interrupt);
if (i != 0)
goto failed;
i = pcmcia_enable_device(link);
if (i != 0)
goto failed;
if (btuart_open(info) != 0)
goto failed;
return 0;
failed:
btuart_release(link);
return -ENODEV;
}
static void btuart_release(struct pcmcia_device *link)
{
btuart_info_t *info = link->priv;
btuart_close(info);
pcmcia_disable_device(link);
}
static const struct pcmcia_device_id btuart_ids[] = {
/* don't use this driver. Use serial_cs + hci_uart instead */
PCMCIA_DEVICE_NULL
};
MODULE_DEVICE_TABLE(pcmcia, btuart_ids);
static struct pcmcia_driver btuart_driver = {
.owner = THIS_MODULE,
.name = "btuart_cs",
.probe = btuart_probe,
.remove = btuart_detach,
.id_table = btuart_ids,
};
static int __init init_btuart_cs(void)
{
return pcmcia_register_driver(&btuart_driver);
}
static void __exit exit_btuart_cs(void)
{
pcmcia_unregister_driver(&btuart_driver);
}
module_init(init_btuart_cs);
module_exit(exit_btuart_cs);
| gpl-2.0 |
c0d3x42/P8000-Kernel | arch/x86/kernel/dumpstack_32.c | 2480 | 2889 | /*
* Copyright (C) 1991, 1992 Linus Torvalds
* Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
*/
#include <linux/kallsyms.h>
#include <linux/kprobes.h>
#include <linux/uaccess.h>
#include <linux/hardirq.h>
#include <linux/kdebug.h>
#include <linux/module.h>
#include <linux/ptrace.h>
#include <linux/kexec.h>
#include <linux/sysfs.h>
#include <linux/bug.h>
#include <linux/nmi.h>
#include <asm/stacktrace.h>
void dump_trace(struct task_struct *task, struct pt_regs *regs,
unsigned long *stack, unsigned long bp,
const struct stacktrace_ops *ops, void *data)
{
int graph = 0;
if (!task)
task = current;
if (!stack) {
unsigned long dummy;
stack = &dummy;
if (task && task != current)
stack = (unsigned long *)task->thread.sp;
}
if (!bp)
bp = stack_frame(task, regs);
for (;;) {
struct thread_info *context;
context = (struct thread_info *)
((unsigned long)stack & (~(THREAD_SIZE - 1)));
bp = ops->walk_stack(context, stack, bp, ops, data, NULL, &graph);
stack = (unsigned long *)context->previous_esp;
if (!stack)
break;
if (ops->stack(data, "IRQ") < 0)
break;
touch_nmi_watchdog();
}
}
EXPORT_SYMBOL(dump_trace);
void
show_stack_log_lvl(struct task_struct *task, struct pt_regs *regs,
unsigned long *sp, unsigned long bp, char *log_lvl)
{
unsigned long *stack;
int i;
if (sp == NULL) {
if (task)
sp = (unsigned long *)task->thread.sp;
else
sp = (unsigned long *)&sp;
}
stack = sp;
for (i = 0; i < kstack_depth_to_print; i++) {
if (kstack_end(stack))
break;
if (i && ((i % STACKSLOTS_PER_LINE) == 0))
pr_cont("\n");
pr_cont(" %08lx", *stack++);
touch_nmi_watchdog();
}
pr_cont("\n");
show_trace_log_lvl(task, regs, sp, bp, log_lvl);
}
void show_regs(struct pt_regs *regs)
{
int i;
show_regs_print_info(KERN_EMERG);
__show_regs(regs, !user_mode_vm(regs));
/*
* When in-kernel, we also print out the stack and code at the
* time of the fault..
*/
if (!user_mode_vm(regs)) {
unsigned int code_prologue = code_bytes * 43 / 64;
unsigned int code_len = code_bytes;
unsigned char c;
u8 *ip;
pr_emerg("Stack:\n");
show_stack_log_lvl(NULL, regs, &regs->sp, 0, KERN_EMERG);
pr_emerg("Code:");
ip = (u8 *)regs->ip - code_prologue;
if (ip < (u8 *)PAGE_OFFSET || probe_kernel_address(ip, c)) {
/* try starting at IP */
ip = (u8 *)regs->ip;
code_len = code_len - code_prologue + 1;
}
for (i = 0; i < code_len; i++, ip++) {
if (ip < (u8 *)PAGE_OFFSET ||
probe_kernel_address(ip, c)) {
pr_cont(" Bad EIP value.");
break;
}
if (ip == (u8 *)regs->ip)
pr_cont(" <%02x>", c);
else
pr_cont(" %02x", c);
}
}
pr_cont("\n");
}
int is_valid_bugaddr(unsigned long ip)
{
unsigned short ud2;
if (ip < PAGE_OFFSET)
return 0;
if (probe_kernel_address((unsigned short *)ip, ud2))
return 0;
return ud2 == 0x0b0f;
}
| gpl-2.0 |
aospo/platform_kernel_oneplus_msm8974 | arch/arm/mach-msm/qdsp6v2/q6audio_v1.c | 3248 | 2832 | /*
* Copyright (c) 2012, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/miscdevice.h>
#include <linux/uaccess.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <asm/atomic.h>
#include <asm/ioctls.h>
#include "audio_utils.h"
void q6asm_in_cb(uint32_t opcode, uint32_t token,
uint32_t *payload, void *priv)
{
struct q6audio_in * audio = (struct q6audio_in *)priv;
unsigned long flags;
pr_debug("%s:session id %d: opcode[0x%x]\n", __func__,
audio->ac->session, opcode);
spin_lock_irqsave(&audio->dsp_lock, flags);
switch (opcode) {
case ASM_DATA_EVENT_READ_DONE:
audio_in_get_dsp_frames(audio, token, payload);
break;
case ASM_DATA_EVENT_WRITE_DONE:
atomic_inc(&audio->in_count);
wake_up(&audio->write_wait);
break;
case ASM_DATA_CMDRSP_EOS:
audio->eos_rsp = 1;
wake_up(&audio->read_wait);
break;
case ASM_STREAM_CMDRSP_GET_ENCDEC_PARAM:
break;
case ASM_STREAM_CMDRSP_GET_PP_PARAMS:
break;
case ASM_SESSION_EVENT_TX_OVERFLOW:
pr_err("%s:session id %d: ASM_SESSION_EVENT_TX_OVERFLOW\n",
__func__, audio->ac->session);
break;
default:
pr_debug("%s:session id %d: Ignore opcode[0x%x]\n", __func__,
audio->ac->session, opcode);
break;
}
spin_unlock_irqrestore(&audio->dsp_lock, flags);
}
void audio_in_get_dsp_frames(/*struct q6audio_in *audio,*/void *aud,
uint32_t token, uint32_t *payload)
{
struct q6audio_in *audio = (struct q6audio_in *)aud;
uint32_t index;
index = token;
pr_debug("%s:session id %d: index=%d nr frames=%d offset[%d]\n",
__func__, audio->ac->session, token, payload[7],
payload[3]);
pr_debug("%s:session id %d: timemsw=%d lsw=%d\n", __func__,
audio->ac->session, payload[4], payload[5]);
pr_debug("%s:session id %d: uflags=0x%8x uid=0x%8x\n", __func__,
audio->ac->session, payload[6], payload[8]);
pr_debug("%s:session id %d: enc frame size=0x%8x\n", __func__,
audio->ac->session, payload[2]);
audio->out_frame_info[index][0] = payload[7];
audio->out_frame_info[index][1] = payload[3];
/* statistics of read */
atomic_add(payload[2], &audio->in_bytes);
atomic_add(payload[7], &audio->in_samples);
if (atomic_read(&audio->out_count) <= audio->str_cfg.buffer_count) {
atomic_inc(&audio->out_count);
wake_up(&audio->read_wait);
}
}
| gpl-2.0 |
rogersb11/android_kernel_samsung_smdk4412 | drivers/gpu/drm/drm_scatter.c | 6064 | 5255 | /**
* \file drm_scatter.c
* IOCTLs to manage scatter/gather memory
*
* \author Gareth Hughes <gareth@valinux.com>
*/
/*
* Created: Mon Dec 18 23:20:54 2000 by gareth@valinux.com
*
* Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include "drmP.h"
#define DEBUG_SCATTER 0
static inline void *drm_vmalloc_dma(unsigned long size)
{
#if defined(__powerpc__) && defined(CONFIG_NOT_COHERENT_CACHE)
return __vmalloc(size, GFP_KERNEL, PAGE_KERNEL | _PAGE_NO_CACHE);
#else
return vmalloc_32(size);
#endif
}
void drm_sg_cleanup(struct drm_sg_mem * entry)
{
struct page *page;
int i;
for (i = 0; i < entry->pages; i++) {
page = entry->pagelist[i];
if (page)
ClearPageReserved(page);
}
vfree(entry->virtual);
kfree(entry->busaddr);
kfree(entry->pagelist);
kfree(entry);
}
#ifdef _LP64
# define ScatterHandle(x) (unsigned int)((x >> 32) + (x & ((1L << 32) - 1)))
#else
# define ScatterHandle(x) (unsigned int)(x)
#endif
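/*
 * Added note: on 64-bit kernels ScatterHandle() folds the vmalloc
 * address into a 32-bit handle by adding its upper and lower halves,
 * e.g. for a hypothetical address 0xffffc90000001000 the handle is
 * 0xffffc900 + 0x00001000 = 0xffffd900.  On 32-bit kernels the address
 * is used directly.
 */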
int drm_sg_alloc(struct drm_device *dev, struct drm_scatter_gather * request)
{
struct drm_sg_mem *entry;
unsigned long pages, i, j;
DRM_DEBUG("\n");
if (!drm_core_check_feature(dev, DRIVER_SG))
return -EINVAL;
if (dev->sg)
return -EINVAL;
entry = kzalloc(sizeof(*entry), GFP_KERNEL);
if (!entry)
return -ENOMEM;
pages = (request->size + PAGE_SIZE - 1) / PAGE_SIZE;
DRM_DEBUG("size=%ld pages=%ld\n", request->size, pages);
entry->pages = pages;
entry->pagelist = kcalloc(pages, sizeof(*entry->pagelist), GFP_KERNEL);
if (!entry->pagelist) {
kfree(entry);
return -ENOMEM;
}
entry->busaddr = kcalloc(pages, sizeof(*entry->busaddr), GFP_KERNEL);
if (!entry->busaddr) {
kfree(entry->pagelist);
kfree(entry);
return -ENOMEM;
}
entry->virtual = drm_vmalloc_dma(pages << PAGE_SHIFT);
if (!entry->virtual) {
kfree(entry->busaddr);
kfree(entry->pagelist);
kfree(entry);
return -ENOMEM;
}
/* This also forces the mapping of COW pages, so our page list
* will be valid. Please don't remove it...
*/
memset(entry->virtual, 0, pages << PAGE_SHIFT);
entry->handle = ScatterHandle((unsigned long)entry->virtual);
DRM_DEBUG("handle = %08lx\n", entry->handle);
DRM_DEBUG("virtual = %p\n", entry->virtual);
for (i = (unsigned long)entry->virtual, j = 0; j < pages;
i += PAGE_SIZE, j++) {
entry->pagelist[j] = vmalloc_to_page((void *)i);
if (!entry->pagelist[j])
goto failed;
SetPageReserved(entry->pagelist[j]);
}
request->handle = entry->handle;
dev->sg = entry;
#if DEBUG_SCATTER
/* Verify that each page points to its virtual address, and vice
* versa.
*/
{
int error = 0;
for (i = 0; i < pages; i++) {
unsigned long *tmp;
tmp = page_address(entry->pagelist[i]);
for (j = 0;
j < PAGE_SIZE / sizeof(unsigned long);
j++, tmp++) {
*tmp = 0xcafebabe;
}
tmp = (unsigned long *)((u8 *) entry->virtual +
(PAGE_SIZE * i));
for (j = 0;
j < PAGE_SIZE / sizeof(unsigned long);
j++, tmp++) {
if (*tmp != 0xcafebabe && error == 0) {
error = 1;
DRM_ERROR("Scatter allocation error, "
"pagelist does not match "
"virtual mapping\n");
}
}
tmp = page_address(entry->pagelist[i]);
for (j = 0;
j < PAGE_SIZE / sizeof(unsigned long);
j++, tmp++) {
*tmp = 0;
}
}
if (error == 0)
DRM_ERROR("Scatter allocation matches pagelist\n");
}
#endif
return 0;
failed:
drm_sg_cleanup(entry);
return -ENOMEM;
}
int drm_sg_alloc_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_scatter_gather *request = data;
return drm_sg_alloc(dev, request);
}
int drm_sg_free(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_scatter_gather *request = data;
struct drm_sg_mem *entry;
if (!drm_core_check_feature(dev, DRIVER_SG))
return -EINVAL;
entry = dev->sg;
dev->sg = NULL;
if (!entry || entry->handle != request->handle)
return -EINVAL;
DRM_DEBUG("virtual = %p\n", entry->virtual);
drm_sg_cleanup(entry);
return 0;
}
| gpl-2.0 |
gundal/zerofltetmo | arch/frv/kernel/irq-mb93091.c | 7344 | 3687 | /* irq-mb93091.c: MB93091 FPGA interrupt handling
*
* Copyright (C) 2003 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include <linux/ptrace.h>
#include <linux/errno.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/ioport.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/irq.h>
#include <linux/bitops.h>
#include <asm/io.h>
#include <asm/delay.h>
#include <asm/irq.h>
#include <asm/irc-regs.h>
#define __reg16(ADDR) (*(volatile unsigned short *)(ADDR))
#define __get_IMR() ({ __reg16(0xffc00004); })
#define __set_IMR(M) do { __reg16(0xffc00004) = (M); wmb(); } while(0)
#define __get_IFR() ({ __reg16(0xffc0000c); })
#define __clr_IFR(M) do { __reg16(0xffc0000c) = ~(M); wmb(); } while(0)
/*
* on-motherboard FPGA PIC operations
*/
static void frv_fpga_mask(struct irq_data *d)
{
uint16_t imr = __get_IMR();
imr |= 1 << (d->irq - IRQ_BASE_FPGA);
__set_IMR(imr);
}
static void frv_fpga_ack(struct irq_data *d)
{
__clr_IFR(1 << (d->irq - IRQ_BASE_FPGA));
}
static void frv_fpga_mask_ack(struct irq_data *d)
{
uint16_t imr = __get_IMR();
imr |= 1 << (d->irq - IRQ_BASE_FPGA);
__set_IMR(imr);
__clr_IFR(1 << (d->irq - IRQ_BASE_FPGA));
}
static void frv_fpga_unmask(struct irq_data *d)
{
uint16_t imr = __get_IMR();
imr &= ~(1 << (d->irq - IRQ_BASE_FPGA));
__set_IMR(imr);
}
static struct irq_chip frv_fpga_pic = {
.name = "mb93091",
.irq_ack = frv_fpga_ack,
.irq_mask = frv_fpga_mask,
.irq_mask_ack = frv_fpga_mask_ack,
.irq_unmask = frv_fpga_unmask,
};
/*
* FPGA PIC interrupt handler
*/
static irqreturn_t fpga_interrupt(int irq, void *_mask)
{
uint16_t imr, mask = (unsigned long) _mask;
imr = __get_IMR();
mask = mask & ~imr & __get_IFR();
/* poll all the triggered IRQs */
while (mask) {
int irq;
asm("scan %1,gr0,%0" : "=r"(irq) : "r"(mask));
irq = 31 - irq;
mask &= ~(1 << irq);
generic_handle_irq(IRQ_BASE_FPGA + irq);
}
return IRQ_HANDLED;
}
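/*
 * Added note (not part of the original driver): the FRV "scan"
 * instruction above yields the index of the most significant pending
 * bit.  A portable sketch of the same dispatch loop using generic bit
 * ops would be:
 *
 *	while (mask) {
 *		int irq = fls(mask) - 1;
 *		mask &= ~(1 << irq);
 *		generic_handle_irq(IRQ_BASE_FPGA + irq);
 *	}
 */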
/*
* define an interrupt action for each FPGA PIC output
* - use dev_id to indicate the FPGA PIC input to output mappings
*/
static struct irqaction fpga_irq[4] = {
[0] = {
.handler = fpga_interrupt,
.flags = IRQF_DISABLED | IRQF_SHARED,
.name = "fpga.0",
.dev_id = (void *) 0x0028UL,
},
[1] = {
.handler = fpga_interrupt,
.flags = IRQF_DISABLED | IRQF_SHARED,
.name = "fpga.1",
.dev_id = (void *) 0x0050UL,
},
[2] = {
.handler = fpga_interrupt,
.flags = IRQF_DISABLED | IRQF_SHARED,
.name = "fpga.2",
.dev_id = (void *) 0x1c00UL,
},
[3] = {
.handler = fpga_interrupt,
.flags = IRQF_DISABLED | IRQF_SHARED,
.name = "fpga.3",
.dev_id = (void *) 0x6386UL,
}
};
/*
* initialise the motherboard FPGA's PIC
*/
void __init fpga_init(void)
{
int irq;
/* all PIC inputs are set to be low-level driven, apart from the
* NMI button (15) which is fixed at falling-edge
*/
__set_IMR(0x7ffe);
__clr_IFR(0x0000);
for (irq = IRQ_BASE_FPGA + 1; irq <= IRQ_BASE_FPGA + 14; irq++)
irq_set_chip_and_handler(irq, &frv_fpga_pic, handle_level_irq);
irq_set_chip_and_handler(IRQ_FPGA_NMI, &frv_fpga_pic, handle_edge_irq);
/* the FPGA drives the first four external IRQ inputs on the CPU PIC */
setup_irq(IRQ_CPU_EXTERNAL0, &fpga_irq[0]);
setup_irq(IRQ_CPU_EXTERNAL1, &fpga_irq[1]);
setup_irq(IRQ_CPU_EXTERNAL2, &fpga_irq[2]);
setup_irq(IRQ_CPU_EXTERNAL3, &fpga_irq[3]);
}
| gpl-2.0 |
warped-rudi/linux-linaro-stable-mx6 | arch/um/kernel/physmem.c | 7600 | 5479 | /*
* Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
* Licensed under the GPL
*/
#include <linux/module.h>
#include <linux/bootmem.h>
#include <linux/mm.h>
#include <linux/pfn.h>
#include <asm/page.h>
#include <as-layout.h>
#include <init.h>
#include <kern.h>
#include <mem_user.h>
#include <os.h>
static int physmem_fd = -1;
/* Changed during early boot */
unsigned long high_physmem;
EXPORT_SYMBOL(high_physmem);
extern unsigned long long physmem_size;
int __init init_maps(unsigned long physmem, unsigned long iomem,
unsigned long highmem)
{
struct page *p, *map;
unsigned long phys_len, phys_pages, highmem_len, highmem_pages;
unsigned long iomem_len, iomem_pages, total_len, total_pages;
int i;
phys_pages = physmem >> PAGE_SHIFT;
phys_len = phys_pages * sizeof(struct page);
iomem_pages = iomem >> PAGE_SHIFT;
iomem_len = iomem_pages * sizeof(struct page);
highmem_pages = highmem >> PAGE_SHIFT;
highmem_len = highmem_pages * sizeof(struct page);
total_pages = phys_pages + iomem_pages + highmem_pages;
total_len = phys_len + iomem_len + highmem_len;
map = alloc_bootmem_low_pages(total_len);
if (map == NULL)
return -ENOMEM;
for (i = 0; i < total_pages; i++) {
p = &map[i];
memset(p, 0, sizeof(struct page));
SetPageReserved(p);
INIT_LIST_HEAD(&p->lru);
}
max_mapnr = total_pages;
return 0;
}
void map_memory(unsigned long virt, unsigned long phys, unsigned long len,
int r, int w, int x)
{
__u64 offset;
int fd, err;
fd = phys_mapping(phys, &offset);
err = os_map_memory((void *) virt, fd, offset, len, r, w, x);
if (err) {
if (err == -ENOMEM)
printk(KERN_ERR "try increasing the host's "
"/proc/sys/vm/max_map_count to <physical "
"memory size>/4096\n");
panic("map_memory(0x%lx, %d, 0x%llx, %ld, %d, %d, %d) failed, "
"err = %d\n", virt, fd, offset, len, r, w, x, err);
}
}
extern int __syscall_stub_start;
void __init setup_physmem(unsigned long start, unsigned long reserve_end,
unsigned long len, unsigned long long highmem)
{
unsigned long reserve = reserve_end - start;
int pfn = PFN_UP(__pa(reserve_end));
int delta = (len - reserve) >> PAGE_SHIFT;
int err, offset, bootmap_size;
physmem_fd = create_mem_file(len + highmem);
offset = uml_reserved - uml_physmem;
err = os_map_memory((void *) uml_reserved, physmem_fd, offset,
len - offset, 1, 1, 1);
if (err < 0) {
printf("setup_physmem - mapping %ld bytes of memory at 0x%p "
"failed - errno = %d\n", len - offset,
(void *) uml_reserved, err);
exit(1);
}
/*
* Special kludge - This page will be mapped into userspace processes
* from physmem_fd, so it needs to be written out there.
*/
os_seek_file(physmem_fd, __pa(&__syscall_stub_start));
os_write_file(physmem_fd, &__syscall_stub_start, PAGE_SIZE);
bootmap_size = init_bootmem(pfn, pfn + delta);
free_bootmem(__pa(reserve_end) + bootmap_size,
len - bootmap_size - reserve);
}
int phys_mapping(unsigned long phys, unsigned long long *offset_out)
{
int fd = -1;
if (phys < physmem_size) {
fd = physmem_fd;
*offset_out = phys;
}
else if (phys < __pa(end_iomem)) {
struct iomem_region *region = iomem_regions;
while (region != NULL) {
if ((phys >= region->phys) &&
(phys < region->phys + region->size)) {
fd = region->fd;
*offset_out = phys - region->phys;
break;
}
region = region->next;
}
}
else if (phys < __pa(end_iomem) + highmem) {
fd = physmem_fd;
*offset_out = phys - iomem_size;
}
return fd;
}
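/*
 * Descriptive note: phys_mapping() resolves a "physical" address to a
 * host file descriptor plus offset in one of three ranges: the main
 * physmem file, a registered iomem region, or the highmem tail of the
 * physmem file (offset by iomem_size).  A return value of -1 means the
 * address falls outside all of them.
 */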
static int __init uml_mem_setup(char *line, int *add)
{
char *retptr;
physmem_size = memparse(line,&retptr);
return 0;
}
__uml_setup("mem=", uml_mem_setup,
"mem=<Amount of desired ram>\n"
" This controls how much \"physical\" memory the kernel allocates\n"
" for the system. The size is specified as a number followed by\n"
" one of 'k', 'K', 'm', 'M', which have the obvious meanings.\n"
" This is not related to the amount of memory in the host. It can\n"
" be more, and the excess, if it's ever used, will just be swapped out.\n"
" Example: mem=64M\n\n"
);
extern int __init parse_iomem(char *str, int *add);
__uml_setup("iomem=", parse_iomem,
"iomem=<name>,<file>\n"
" Configure <file> as an IO memory region named <name>.\n\n"
);
/*
* This list is constructed in parse_iomem and addresses filled in in
* setup_iomem, both of which run during early boot. Afterwards, it's
* unchanged.
*/
struct iomem_region *iomem_regions;
/* Initialized in parse_iomem and unchanged thereafter */
int iomem_size;
unsigned long find_iomem(char *driver, unsigned long *len_out)
{
struct iomem_region *region = iomem_regions;
while (region != NULL) {
if (!strcmp(region->driver, driver)) {
*len_out = region->size;
return region->virt;
}
region = region->next;
}
return 0;
}
EXPORT_SYMBOL(find_iomem);
static int setup_iomem(void)
{
struct iomem_region *region = iomem_regions;
unsigned long iomem_start = high_physmem + PAGE_SIZE;
int err;
while (region != NULL) {
err = os_map_memory((void *) iomem_start, region->fd, 0,
region->size, 1, 1, 0);
if (err)
printk(KERN_ERR "Mapping iomem region for driver '%s' "
"failed, errno = %d\n", region->driver, -err);
else {
region->virt = iomem_start;
region->phys = __pa(region->virt);
}
iomem_start += region->size + PAGE_SIZE;
region = region->next;
}
return 0;
}
__initcall(setup_iomem);
| gpl-2.0 |
sudosurootdev/kernel_lge_g3-1 | drivers/acpi/custom_method.c | 8112 | 2074 | /*
* debugfs.c - ACPI debugfs interface to userspace.
*/
#include <linux/init.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/uaccess.h>
#include <linux/debugfs.h>
#include <acpi/acpi_drivers.h>
#include "internal.h"
#define _COMPONENT ACPI_SYSTEM_COMPONENT
ACPI_MODULE_NAME("custom_method");
MODULE_LICENSE("GPL");
static struct dentry *cm_dentry;
/* /sys/kernel/debug/acpi/custom_method */
static ssize_t cm_write(struct file *file, const char __user * user_buf,
size_t count, loff_t *ppos)
{
static char *buf;
static u32 max_size;
static u32 uncopied_bytes;
struct acpi_table_header table;
acpi_status status;
if (!(*ppos)) {
/* parse the table header to get the table length */
if (count <= sizeof(struct acpi_table_header))
return -EINVAL;
if (copy_from_user(&table, user_buf,
sizeof(struct acpi_table_header)))
return -EFAULT;
uncopied_bytes = max_size = table.length;
buf = kzalloc(max_size, GFP_KERNEL);
if (!buf)
return -ENOMEM;
}
if (buf == NULL)
return -EINVAL;
if ((*ppos > max_size) ||
(*ppos + count > max_size) ||
(*ppos + count < count) ||
(count > uncopied_bytes))
return -EINVAL;
if (copy_from_user(buf + (*ppos), user_buf, count)) {
kfree(buf);
buf = NULL;
return -EFAULT;
}
uncopied_bytes -= count;
*ppos += count;
if (!uncopied_bytes) {
status = acpi_install_method(buf);
kfree(buf);
buf = NULL;
if (ACPI_FAILURE(status))
return -EINVAL;
add_taint(TAINT_OVERRIDDEN_ACPI_TABLE);
}
return count;
}
static const struct file_operations cm_fops = {
.write = cm_write,
.llseek = default_llseek,
};
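/*
 * Usage sketch (added note): with debugfs and this module available, a
 * compiled AML method can be injected from userspace with something like
 *
 *	cat my_method.aml > /sys/kernel/debug/acpi/custom_method
 *
 * cm_write() reassembles the table from possibly partial writes, hands
 * it to acpi_install_method() and taints the kernel.
 */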
static int __init acpi_custom_method_init(void)
{
if (acpi_debugfs_dir == NULL)
return -ENOENT;
cm_dentry = debugfs_create_file("custom_method", S_IWUSR,
acpi_debugfs_dir, NULL, &cm_fops);
if (cm_dentry == NULL)
return -ENODEV;
return 0;
}
static void __exit acpi_custom_method_exit(void)
{
if (cm_dentry)
debugfs_remove(cm_dentry);
}
module_init(acpi_custom_method_init);
module_exit(acpi_custom_method_exit);
| gpl-2.0 |
cmotc/android_kernel_samsung_schS738c | drivers/leds/ledtrig-gpio.c | 8112 | 6209 | /*
* ledtrig-gpio.c - LED Trigger Based on GPIO events
*
* Copyright 2009 Felipe Balbi <me@felipebalbi.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/gpio.h>
#include <linux/interrupt.h>
#include <linux/workqueue.h>
#include <linux/leds.h>
#include <linux/slab.h>
#include "leds.h"
struct gpio_trig_data {
struct led_classdev *led;
struct work_struct work;
unsigned desired_brightness; /* desired brightness when led is on */
unsigned inverted; /* true when gpio is inverted */
unsigned gpio; /* gpio that triggers the leds */
};
static irqreturn_t gpio_trig_irq(int irq, void *_led)
{
struct led_classdev *led = _led;
struct gpio_trig_data *gpio_data = led->trigger_data;
/* just schedule_work since gpio_get_value can sleep */
schedule_work(&gpio_data->work);
return IRQ_HANDLED;
}
static void gpio_trig_work(struct work_struct *work)
{
struct gpio_trig_data *gpio_data = container_of(work,
struct gpio_trig_data, work);
int tmp;
if (!gpio_data->gpio)
return;
tmp = gpio_get_value(gpio_data->gpio);
if (gpio_data->inverted)
tmp = !tmp;
if (tmp) {
if (gpio_data->desired_brightness)
led_set_brightness(gpio_data->led,
gpio_data->desired_brightness);
else
led_set_brightness(gpio_data->led, LED_FULL);
} else {
led_set_brightness(gpio_data->led, LED_OFF);
}
}
static ssize_t gpio_trig_brightness_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct led_classdev *led = dev_get_drvdata(dev);
struct gpio_trig_data *gpio_data = led->trigger_data;
return sprintf(buf, "%u\n", gpio_data->desired_brightness);
}
static ssize_t gpio_trig_brightness_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t n)
{
struct led_classdev *led = dev_get_drvdata(dev);
struct gpio_trig_data *gpio_data = led->trigger_data;
unsigned desired_brightness;
int ret;
ret = sscanf(buf, "%u", &desired_brightness);
if (ret < 1 || desired_brightness > 255) {
dev_err(dev, "invalid value\n");
return -EINVAL;
}
gpio_data->desired_brightness = desired_brightness;
return n;
}
static DEVICE_ATTR(desired_brightness, 0644, gpio_trig_brightness_show,
gpio_trig_brightness_store);
static ssize_t gpio_trig_inverted_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct led_classdev *led = dev_get_drvdata(dev);
struct gpio_trig_data *gpio_data = led->trigger_data;
return sprintf(buf, "%u\n", gpio_data->inverted);
}
static ssize_t gpio_trig_inverted_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t n)
{
struct led_classdev *led = dev_get_drvdata(dev);
struct gpio_trig_data *gpio_data = led->trigger_data;
unsigned long inverted;
int ret;
ret = strict_strtoul(buf, 10, &inverted);
if (ret < 0)
return ret;
if (inverted > 1)
return -EINVAL;
gpio_data->inverted = inverted;
/* After inverting, we need to update the LED. */
schedule_work(&gpio_data->work);
return n;
}
static DEVICE_ATTR(inverted, 0644, gpio_trig_inverted_show,
gpio_trig_inverted_store);
static ssize_t gpio_trig_gpio_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct led_classdev *led = dev_get_drvdata(dev);
struct gpio_trig_data *gpio_data = led->trigger_data;
return sprintf(buf, "%u\n", gpio_data->gpio);
}
static ssize_t gpio_trig_gpio_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t n)
{
struct led_classdev *led = dev_get_drvdata(dev);
struct gpio_trig_data *gpio_data = led->trigger_data;
unsigned gpio;
int ret;
ret = sscanf(buf, "%u", &gpio);
if (ret < 1) {
dev_err(dev, "couldn't read gpio number\n");
flush_work(&gpio_data->work);
return -EINVAL;
}
if (gpio_data->gpio == gpio)
return n;
if (!gpio) {
if (gpio_data->gpio != 0)
free_irq(gpio_to_irq(gpio_data->gpio), led);
gpio_data->gpio = 0;
return n;
}
ret = request_irq(gpio_to_irq(gpio), gpio_trig_irq,
IRQF_SHARED | IRQF_TRIGGER_RISING
| IRQF_TRIGGER_FALLING, "ledtrig-gpio", led);
if (ret) {
dev_err(dev, "request_irq failed with error %d\n", ret);
} else {
if (gpio_data->gpio != 0)
free_irq(gpio_to_irq(gpio_data->gpio), led);
gpio_data->gpio = gpio;
}
return ret ? ret : n;
}
static DEVICE_ATTR(gpio, 0644, gpio_trig_gpio_show, gpio_trig_gpio_store);
static void gpio_trig_activate(struct led_classdev *led)
{
struct gpio_trig_data *gpio_data;
int ret;
gpio_data = kzalloc(sizeof(*gpio_data), GFP_KERNEL);
if (!gpio_data)
return;
ret = device_create_file(led->dev, &dev_attr_gpio);
if (ret)
goto err_gpio;
ret = device_create_file(led->dev, &dev_attr_inverted);
if (ret)
goto err_inverted;
ret = device_create_file(led->dev, &dev_attr_desired_brightness);
if (ret)
goto err_brightness;
gpio_data->led = led;
led->trigger_data = gpio_data;
INIT_WORK(&gpio_data->work, gpio_trig_work);
return;
err_brightness:
device_remove_file(led->dev, &dev_attr_inverted);
err_inverted:
device_remove_file(led->dev, &dev_attr_gpio);
err_gpio:
kfree(gpio_data);
}
static void gpio_trig_deactivate(struct led_classdev *led)
{
struct gpio_trig_data *gpio_data = led->trigger_data;
if (gpio_data) {
device_remove_file(led->dev, &dev_attr_gpio);
device_remove_file(led->dev, &dev_attr_inverted);
device_remove_file(led->dev, &dev_attr_desired_brightness);
flush_work(&gpio_data->work);
if (gpio_data->gpio != 0)
free_irq(gpio_to_irq(gpio_data->gpio), led);
kfree(gpio_data);
}
}
static struct led_trigger gpio_led_trigger = {
.name = "gpio",
.activate = gpio_trig_activate,
.deactivate = gpio_trig_deactivate,
};
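/*
 * Usage sketch (added note; the GPIO number and brightness values are
 * only examples, and paths assume the usual /sys/class/leds layout):
 *
 *	echo gpio > /sys/class/leds/<led>/trigger
 *	echo 17   > /sys/class/leds/<led>/gpio
 *	echo 1    > /sys/class/leds/<led>/inverted
 *	echo 64   > /sys/class/leds/<led>/desired_brightness
 */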
static int __init gpio_trig_init(void)
{
return led_trigger_register(&gpio_led_trigger);
}
module_init(gpio_trig_init);
static void __exit gpio_trig_exit(void)
{
led_trigger_unregister(&gpio_led_trigger);
}
module_exit(gpio_trig_exit);
MODULE_AUTHOR("Felipe Balbi <me@felipebalbi.com>");
MODULE_DESCRIPTION("GPIO LED trigger");
MODULE_LICENSE("GPL");
| gpl-2.0 |
Kurre/kernel_exynos_OLD | arch/mips/mti-malta/malta-cmdline.c | 9648 | 1571 | /*
* Carsten Langgaard, carstenl@mips.com
* Copyright (C) 1999,2000 MIPS Technologies, Inc. All rights reserved.
*
* This program is free software; you can distribute it and/or modify it
* under the terms of the GNU General Public License (Version 2) as
* published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
*
* Kernel command line creation using the prom monitor (YAMON) argc/argv.
*/
#include <linux/init.h>
#include <linux/string.h>
#include <asm/bootinfo.h>
extern int prom_argc;
extern int *_prom_argv;
/*
* YAMON (32-bit PROM) passes arguments and environment as 32-bit pointers.
* This macro takes care of sign extension.
*/
#define prom_argv(index) ((char *)(long)_prom_argv[(index)])
char * __init prom_getcmdline(void)
{
return &(arcs_cmdline[0]);
}
void __init prom_init_cmdline(void)
{
char *cp;
int actr;
actr = 1; /* Always ignore argv[0] */
cp = &(arcs_cmdline[0]);
while(actr < prom_argc) {
strcpy(cp, prom_argv(actr));
cp += strlen(prom_argv(actr));
*cp++ = ' ';
actr++;
}
if (cp != &(arcs_cmdline[0])) {
/* get rid of trailing space */
--cp;
*cp = '\0';
}
}
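/*
 * Worked example (added note, hypothetical YAMON arguments): with
 * _prom_argv[] = { "go", "console=ttyS0,38400", "mem=64M" } the loop
 * above skips argv[0] and leaves arcs_cmdline[] containing
 * "console=ttyS0,38400 mem=64M", with the trailing space stripped.
 */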
| gpl-2.0 |
aranb/linux | drivers/infiniband/hw/qib/qib_verbs_mcast.c | 12208 | 8560 | /*
* Copyright (c) 2006, 2007, 2008, 2009 QLogic Corporation. All rights reserved.
* Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include <linux/rculist.h>
#include "qib.h"
/**
* qib_mcast_qp_alloc - alloc a struct to link a QP to mcast GID struct
* @qp: the QP to link
*/
static struct qib_mcast_qp *qib_mcast_qp_alloc(struct qib_qp *qp)
{
struct qib_mcast_qp *mqp;
mqp = kmalloc(sizeof *mqp, GFP_KERNEL);
if (!mqp)
goto bail;
mqp->qp = qp;
atomic_inc(&qp->refcount);
bail:
return mqp;
}
static void qib_mcast_qp_free(struct qib_mcast_qp *mqp)
{
struct qib_qp *qp = mqp->qp;
/* Notify qib_destroy_qp() if it is waiting. */
if (atomic_dec_and_test(&qp->refcount))
wake_up(&qp->wait);
kfree(mqp);
}
/**
* qib_mcast_alloc - allocate the multicast GID structure
* @mgid: the multicast GID
*
* A list of QPs will be attached to this structure.
*/
static struct qib_mcast *qib_mcast_alloc(union ib_gid *mgid)
{
struct qib_mcast *mcast;
mcast = kmalloc(sizeof *mcast, GFP_KERNEL);
if (!mcast)
goto bail;
mcast->mgid = *mgid;
INIT_LIST_HEAD(&mcast->qp_list);
init_waitqueue_head(&mcast->wait);
atomic_set(&mcast->refcount, 0);
mcast->n_attached = 0;
bail:
return mcast;
}
static void qib_mcast_free(struct qib_mcast *mcast)
{
struct qib_mcast_qp *p, *tmp;
list_for_each_entry_safe(p, tmp, &mcast->qp_list, list)
qib_mcast_qp_free(p);
kfree(mcast);
}
/**
* qib_mcast_find - search the global table for the given multicast GID
* @ibp: the IB port structure
* @mgid: the multicast GID to search for
*
* Returns NULL if not found.
*
* The caller is responsible for decrementing the reference count if found.
*/
struct qib_mcast *qib_mcast_find(struct qib_ibport *ibp, union ib_gid *mgid)
{
struct rb_node *n;
unsigned long flags;
struct qib_mcast *mcast;
spin_lock_irqsave(&ibp->lock, flags);
n = ibp->mcast_tree.rb_node;
while (n) {
int ret;
mcast = rb_entry(n, struct qib_mcast, rb_node);
ret = memcmp(mgid->raw, mcast->mgid.raw,
sizeof(union ib_gid));
if (ret < 0)
n = n->rb_left;
else if (ret > 0)
n = n->rb_right;
else {
atomic_inc(&mcast->refcount);
spin_unlock_irqrestore(&ibp->lock, flags);
goto bail;
}
}
spin_unlock_irqrestore(&ibp->lock, flags);
mcast = NULL;
bail:
return mcast;
}
/**
* qib_mcast_add - insert mcast GID into table and attach QP struct
* @mcast: the mcast GID table
* @mqp: the QP to attach
*
* Return zero if both were added. Return EEXIST if the GID was already in
* the table but the QP was added. Return ESRCH if the QP was already
* attached and neither structure was added.
*/
static int qib_mcast_add(struct qib_ibdev *dev, struct qib_ibport *ibp,
struct qib_mcast *mcast, struct qib_mcast_qp *mqp)
{
struct rb_node **n = &ibp->mcast_tree.rb_node;
struct rb_node *pn = NULL;
int ret;
spin_lock_irq(&ibp->lock);
while (*n) {
struct qib_mcast *tmcast;
struct qib_mcast_qp *p;
pn = *n;
tmcast = rb_entry(pn, struct qib_mcast, rb_node);
ret = memcmp(mcast->mgid.raw, tmcast->mgid.raw,
sizeof(union ib_gid));
if (ret < 0) {
n = &pn->rb_left;
continue;
}
if (ret > 0) {
n = &pn->rb_right;
continue;
}
/* Search the QP list to see if this is already there. */
list_for_each_entry_rcu(p, &tmcast->qp_list, list) {
if (p->qp == mqp->qp) {
ret = ESRCH;
goto bail;
}
}
if (tmcast->n_attached == ib_qib_max_mcast_qp_attached) {
ret = ENOMEM;
goto bail;
}
tmcast->n_attached++;
list_add_tail_rcu(&mqp->list, &tmcast->qp_list);
ret = EEXIST;
goto bail;
}
spin_lock(&dev->n_mcast_grps_lock);
if (dev->n_mcast_grps_allocated == ib_qib_max_mcast_grps) {
spin_unlock(&dev->n_mcast_grps_lock);
ret = ENOMEM;
goto bail;
}
dev->n_mcast_grps_allocated++;
spin_unlock(&dev->n_mcast_grps_lock);
mcast->n_attached++;
list_add_tail_rcu(&mqp->list, &mcast->qp_list);
atomic_inc(&mcast->refcount);
rb_link_node(&mcast->rb_node, pn, n);
rb_insert_color(&mcast->rb_node, &ibp->mcast_tree);
ret = 0;
bail:
spin_unlock_irq(&ibp->lock);
return ret;
}
int qib_multicast_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
{
struct qib_qp *qp = to_iqp(ibqp);
struct qib_ibdev *dev = to_idev(ibqp->device);
struct qib_ibport *ibp;
struct qib_mcast *mcast;
struct qib_mcast_qp *mqp;
int ret;
if (ibqp->qp_num <= 1 || qp->state == IB_QPS_RESET) {
ret = -EINVAL;
goto bail;
}
/*
* Allocate data structures since it's better to do this outside of
* spin locks and it will most likely be needed.
*/
mcast = qib_mcast_alloc(gid);
if (mcast == NULL) {
ret = -ENOMEM;
goto bail;
}
mqp = qib_mcast_qp_alloc(qp);
if (mqp == NULL) {
qib_mcast_free(mcast);
ret = -ENOMEM;
goto bail;
}
ibp = to_iport(ibqp->device, qp->port_num);
switch (qib_mcast_add(dev, ibp, mcast, mqp)) {
case ESRCH:
/* Neither was used: OK to attach the same QP twice. */
qib_mcast_qp_free(mqp);
qib_mcast_free(mcast);
break;
case EEXIST: /* The mcast wasn't used */
qib_mcast_free(mcast);
break;
case ENOMEM:
/* Exceeded the maximum number of mcast groups. */
qib_mcast_qp_free(mqp);
qib_mcast_free(mcast);
ret = -ENOMEM;
goto bail;
default:
break;
}
ret = 0;
bail:
return ret;
}
int qib_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
{
struct qib_qp *qp = to_iqp(ibqp);
struct qib_ibdev *dev = to_idev(ibqp->device);
struct qib_ibport *ibp = to_iport(ibqp->device, qp->port_num);
struct qib_mcast *mcast = NULL;
struct qib_mcast_qp *p, *tmp;
struct rb_node *n;
int last = 0;
int ret;
if (ibqp->qp_num <= 1 || qp->state == IB_QPS_RESET) {
ret = -EINVAL;
goto bail;
}
spin_lock_irq(&ibp->lock);
/* Find the GID in the mcast table. */
n = ibp->mcast_tree.rb_node;
while (1) {
if (n == NULL) {
spin_unlock_irq(&ibp->lock);
ret = -EINVAL;
goto bail;
}
mcast = rb_entry(n, struct qib_mcast, rb_node);
ret = memcmp(gid->raw, mcast->mgid.raw,
sizeof(union ib_gid));
if (ret < 0)
n = n->rb_left;
else if (ret > 0)
n = n->rb_right;
else
break;
}
/* Search the QP list. */
list_for_each_entry_safe(p, tmp, &mcast->qp_list, list) {
if (p->qp != qp)
continue;
/*
* We found it, so remove it, but don't poison the forward
* link until we are sure there are no list walkers.
*/
list_del_rcu(&p->list);
mcast->n_attached--;
/* If this was the last attached QP, remove the GID too. */
if (list_empty(&mcast->qp_list)) {
rb_erase(&mcast->rb_node, &ibp->mcast_tree);
last = 1;
}
break;
}
spin_unlock_irq(&ibp->lock);
if (p) {
/*
* Wait for any list walkers to finish before freeing the
* list element.
*/
wait_event(mcast->wait, atomic_read(&mcast->refcount) <= 1);
qib_mcast_qp_free(p);
}
if (last) {
atomic_dec(&mcast->refcount);
wait_event(mcast->wait, !atomic_read(&mcast->refcount));
qib_mcast_free(mcast);
spin_lock_irq(&dev->n_mcast_grps_lock);
dev->n_mcast_grps_allocated--;
spin_unlock_irq(&dev->n_mcast_grps_lock);
}
ret = 0;
bail:
return ret;
}
int qib_mcast_tree_empty(struct qib_ibport *ibp)
{
return ibp->mcast_tree.rb_node == NULL;
}
| gpl-2.0 |
sonicxml/furry-octo-lana | drivers/infiniband/hw/qib/qib_verbs_mcast.c | 12208 | 8560 | /*
* Copyright (c) 2006, 2007, 2008, 2009 QLogic Corporation. All rights reserved.
* Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include <linux/rculist.h>
#include "qib.h"
/**
* qib_mcast_qp_alloc - alloc a struct to link a QP to mcast GID struct
* @qp: the QP to link
*/
static struct qib_mcast_qp *qib_mcast_qp_alloc(struct qib_qp *qp)
{
struct qib_mcast_qp *mqp;
mqp = kmalloc(sizeof *mqp, GFP_KERNEL);
if (!mqp)
goto bail;
mqp->qp = qp;
atomic_inc(&qp->refcount);
bail:
return mqp;
}
static void qib_mcast_qp_free(struct qib_mcast_qp *mqp)
{
struct qib_qp *qp = mqp->qp;
/* Notify qib_destroy_qp() if it is waiting. */
if (atomic_dec_and_test(&qp->refcount))
wake_up(&qp->wait);
kfree(mqp);
}
/**
* qib_mcast_alloc - allocate the multicast GID structure
* @mgid: the multicast GID
*
* A list of QPs will be attached to this structure.
*/
static struct qib_mcast *qib_mcast_alloc(union ib_gid *mgid)
{
struct qib_mcast *mcast;
mcast = kmalloc(sizeof *mcast, GFP_KERNEL);
if (!mcast)
goto bail;
mcast->mgid = *mgid;
INIT_LIST_HEAD(&mcast->qp_list);
init_waitqueue_head(&mcast->wait);
atomic_set(&mcast->refcount, 0);
mcast->n_attached = 0;
bail:
return mcast;
}
static void qib_mcast_free(struct qib_mcast *mcast)
{
struct qib_mcast_qp *p, *tmp;
list_for_each_entry_safe(p, tmp, &mcast->qp_list, list)
qib_mcast_qp_free(p);
kfree(mcast);
}
/**
* qib_mcast_find - search the global table for the given multicast GID
* @ibp: the IB port structure
* @mgid: the multicast GID to search for
*
* Returns NULL if not found.
*
* The caller is responsible for decrementing the reference count if found.
*/
struct qib_mcast *qib_mcast_find(struct qib_ibport *ibp, union ib_gid *mgid)
{
struct rb_node *n;
unsigned long flags;
struct qib_mcast *mcast;
spin_lock_irqsave(&ibp->lock, flags);
n = ibp->mcast_tree.rb_node;
while (n) {
int ret;
mcast = rb_entry(n, struct qib_mcast, rb_node);
ret = memcmp(mgid->raw, mcast->mgid.raw,
sizeof(union ib_gid));
if (ret < 0)
n = n->rb_left;
else if (ret > 0)
n = n->rb_right;
else {
atomic_inc(&mcast->refcount);
spin_unlock_irqrestore(&ibp->lock, flags);
goto bail;
}
}
spin_unlock_irqrestore(&ibp->lock, flags);
mcast = NULL;
bail:
return mcast;
}
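/*
 * Illustrative caller sketch (not taken from this file; the helper names on
 * the receive path are assumptions): the kernel-doc above requires the
 * caller of qib_mcast_find() to drop the reference it took, e.g.
 *
 *	mcast = qib_mcast_find(ibp, &grh->dgid);
 *	if (!mcast)
 *		return;			// unknown multicast group
 *	// ...deliver the packet to every QP on mcast->qp_list...
 *	if (atomic_dec_return(&mcast->refcount) <= 1)
 *		wake_up(&mcast->wait);
 *
 * The wake_up() pairing is inferred from the wait_event() calls in
 * qib_multicast_detach() below, which block until the refcount drains
 * before the group is freed.
 */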
/**
* qib_mcast_add - insert mcast GID into table and attach QP struct
 * @mcast: the mcast GID entry to insert
 * @mqp: the QP to attach
 *
 * Return zero if both were added.  Return EEXIST if the GID was already in
 * the table but the QP was added.  Return ESRCH if the QP was already
 * attached and neither structure was added.  Return ENOMEM if the per-group
 * QP limit or the multicast group limit was exceeded and nothing was added.
*/
static int qib_mcast_add(struct qib_ibdev *dev, struct qib_ibport *ibp,
struct qib_mcast *mcast, struct qib_mcast_qp *mqp)
{
struct rb_node **n = &ibp->mcast_tree.rb_node;
struct rb_node *pn = NULL;
int ret;
spin_lock_irq(&ibp->lock);
while (*n) {
struct qib_mcast *tmcast;
struct qib_mcast_qp *p;
pn = *n;
tmcast = rb_entry(pn, struct qib_mcast, rb_node);
ret = memcmp(mcast->mgid.raw, tmcast->mgid.raw,
sizeof(union ib_gid));
if (ret < 0) {
n = &pn->rb_left;
continue;
}
if (ret > 0) {
n = &pn->rb_right;
continue;
}
/* Search the QP list to see if this is already there. */
list_for_each_entry_rcu(p, &tmcast->qp_list, list) {
if (p->qp == mqp->qp) {
ret = ESRCH;
goto bail;
}
}
if (tmcast->n_attached == ib_qib_max_mcast_qp_attached) {
ret = ENOMEM;
goto bail;
}
tmcast->n_attached++;
list_add_tail_rcu(&mqp->list, &tmcast->qp_list);
ret = EEXIST;
goto bail;
}
spin_lock(&dev->n_mcast_grps_lock);
if (dev->n_mcast_grps_allocated == ib_qib_max_mcast_grps) {
spin_unlock(&dev->n_mcast_grps_lock);
ret = ENOMEM;
goto bail;
}
dev->n_mcast_grps_allocated++;
spin_unlock(&dev->n_mcast_grps_lock);
mcast->n_attached++;
list_add_tail_rcu(&mqp->list, &mcast->qp_list);
atomic_inc(&mcast->refcount);
rb_link_node(&mcast->rb_node, pn, n);
rb_insert_color(&mcast->rb_node, &ibp->mcast_tree);
ret = 0;
bail:
spin_unlock_irq(&ibp->lock);
return ret;
}
int qib_multicast_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
{
struct qib_qp *qp = to_iqp(ibqp);
struct qib_ibdev *dev = to_idev(ibqp->device);
struct qib_ibport *ibp;
struct qib_mcast *mcast;
struct qib_mcast_qp *mqp;
int ret;
if (ibqp->qp_num <= 1 || qp->state == IB_QPS_RESET) {
ret = -EINVAL;
goto bail;
}
/*
	 * Allocate data structures since it is better to do this outside of
	 * spin locks and they will most likely be needed.
*/
mcast = qib_mcast_alloc(gid);
if (mcast == NULL) {
ret = -ENOMEM;
goto bail;
}
mqp = qib_mcast_qp_alloc(qp);
if (mqp == NULL) {
qib_mcast_free(mcast);
ret = -ENOMEM;
goto bail;
}
ibp = to_iport(ibqp->device, qp->port_num);
switch (qib_mcast_add(dev, ibp, mcast, mqp)) {
case ESRCH:
/* Neither was used: OK to attach the same QP twice. */
qib_mcast_qp_free(mqp);
qib_mcast_free(mcast);
break;
case EEXIST: /* The mcast wasn't used */
qib_mcast_free(mcast);
break;
case ENOMEM:
/* Exceeded the maximum number of mcast groups. */
qib_mcast_qp_free(mqp);
qib_mcast_free(mcast);
ret = -ENOMEM;
goto bail;
default:
break;
}
ret = 0;
bail:
return ret;
}
int qib_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
{
struct qib_qp *qp = to_iqp(ibqp);
struct qib_ibdev *dev = to_idev(ibqp->device);
struct qib_ibport *ibp = to_iport(ibqp->device, qp->port_num);
struct qib_mcast *mcast = NULL;
struct qib_mcast_qp *p, *tmp;
struct rb_node *n;
int last = 0;
int ret;
if (ibqp->qp_num <= 1 || qp->state == IB_QPS_RESET) {
ret = -EINVAL;
goto bail;
}
spin_lock_irq(&ibp->lock);
/* Find the GID in the mcast table. */
n = ibp->mcast_tree.rb_node;
while (1) {
if (n == NULL) {
spin_unlock_irq(&ibp->lock);
ret = -EINVAL;
goto bail;
}
mcast = rb_entry(n, struct qib_mcast, rb_node);
ret = memcmp(gid->raw, mcast->mgid.raw,
sizeof(union ib_gid));
if (ret < 0)
n = n->rb_left;
else if (ret > 0)
n = n->rb_right;
else
break;
}
/* Search the QP list. */
list_for_each_entry_safe(p, tmp, &mcast->qp_list, list) {
if (p->qp != qp)
continue;
/*
* We found it, so remove it, but don't poison the forward
* link until we are sure there are no list walkers.
*/
list_del_rcu(&p->list);
mcast->n_attached--;
/* If this was the last attached QP, remove the GID too. */
if (list_empty(&mcast->qp_list)) {
rb_erase(&mcast->rb_node, &ibp->mcast_tree);
last = 1;
}
break;
}
spin_unlock_irq(&ibp->lock);
if (p) {
/*
* Wait for any list walkers to finish before freeing the
* list element.
*/
wait_event(mcast->wait, atomic_read(&mcast->refcount) <= 1);
qib_mcast_qp_free(p);
}
if (last) {
atomic_dec(&mcast->refcount);
wait_event(mcast->wait, !atomic_read(&mcast->refcount));
qib_mcast_free(mcast);
spin_lock_irq(&dev->n_mcast_grps_lock);
dev->n_mcast_grps_allocated--;
spin_unlock_irq(&dev->n_mcast_grps_lock);
}
ret = 0;
bail:
return ret;
}
int qib_mcast_tree_empty(struct qib_ibport *ibp)
{
return ibp->mcast_tree.rb_node == NULL;
}
| gpl-2.0 |
JijonHyuni/HyperKernel-JB | drivers/parport/probe.c | 13744 | 7589 | /*
* Parallel port device probing code
*
* Authors: Carsten Gross, carsten@sol.wohnheim.uni-ulm.de
* Philip Blundell <philb@gnu.org>
*/
#include <linux/module.h>
#include <linux/parport.h>
#include <linux/ctype.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <asm/uaccess.h>
static const struct {
const char *token;
const char *descr;
} classes[] = {
{ "", "Legacy device" },
{ "PRINTER", "Printer" },
{ "MODEM", "Modem" },
{ "NET", "Network device" },
{ "HDC", "Hard disk" },
{ "PCMCIA", "PCMCIA" },
{ "MEDIA", "Multimedia device" },
{ "FDC", "Floppy disk" },
{ "PORTS", "Ports" },
{ "SCANNER", "Scanner" },
{ "DIGICAM", "Digital camera" },
{ "", "Unknown device" },
{ "", "Unspecified" },
{ "SCSIADAPTER", "SCSI adapter" },
{ NULL, NULL }
};
static void pretty_print(struct parport *port, int device)
{
struct parport_device_info *info = &port->probe_info[device + 1];
printk(KERN_INFO "%s", port->name);
if (device >= 0)
printk (" (addr %d)", device);
printk (": %s", classes[info->class].descr);
if (info->class)
printk(", %s %s", info->mfr, info->model);
printk("\n");
}
static void parse_data(struct parport *port, int device, char *str)
{
char *txt = kmalloc(strlen(str)+1, GFP_KERNEL);
char *p = txt, *q;
int guessed_class = PARPORT_CLASS_UNSPEC;
struct parport_device_info *info = &port->probe_info[device + 1];
if (!txt) {
printk(KERN_WARNING "%s probe: memory squeeze\n", port->name);
return;
}
strcpy(txt, str);
while (p) {
char *sep;
q = strchr(p, ';');
if (q) *q = 0;
sep = strchr(p, ':');
if (sep) {
char *u;
*(sep++) = 0;
/* Get rid of trailing blanks */
u = sep + strlen (sep) - 1;
while (u >= p && *u == ' ')
*u-- = '\0';
u = p;
while (*u) {
*u = toupper(*u);
u++;
}
if (!strcmp(p, "MFG") || !strcmp(p, "MANUFACTURER")) {
kfree(info->mfr);
info->mfr = kstrdup(sep, GFP_KERNEL);
} else if (!strcmp(p, "MDL") || !strcmp(p, "MODEL")) {
kfree(info->model);
info->model = kstrdup(sep, GFP_KERNEL);
} else if (!strcmp(p, "CLS") || !strcmp(p, "CLASS")) {
int i;
kfree(info->class_name);
info->class_name = kstrdup(sep, GFP_KERNEL);
for (u = sep; *u; u++)
*u = toupper(*u);
for (i = 0; classes[i].token; i++) {
if (!strcmp(classes[i].token, sep)) {
info->class = i;
goto rock_on;
}
}
printk(KERN_WARNING "%s probe: warning, class '%s' not understood.\n", port->name, sep);
info->class = PARPORT_CLASS_OTHER;
} else if (!strcmp(p, "CMD") ||
!strcmp(p, "COMMAND SET")) {
kfree(info->cmdset);
info->cmdset = kstrdup(sep, GFP_KERNEL);
/* if it speaks printer language, it's
probably a printer */
if (strstr(sep, "PJL") || strstr(sep, "PCL"))
guessed_class = PARPORT_CLASS_PRINTER;
} else if (!strcmp(p, "DES") || !strcmp(p, "DESCRIPTION")) {
kfree(info->description);
info->description = kstrdup(sep, GFP_KERNEL);
}
}
rock_on:
if (q)
p = q + 1;
else
p = NULL;
}
/* If the device didn't tell us its class, maybe we have managed to
guess one from the things it did say. */
if (info->class == PARPORT_CLASS_UNSPEC)
info->class = guessed_class;
pretty_print (port, device);
kfree(txt);
}
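/*
 * Illustrative example (not in the original source; the device name is made
 * up): given the parsing above, a typical IEEE 1284 Device ID string such as
 *
 *	"MFG:ACME;MDL:LaserWriter 9000;CLS:PRINTER;CMD:PCL,PJL;"
 *
 * is split on ';' and ':' so that info->mfr = "ACME",
 * info->model = "LaserWriter 9000", info->class is matched against the
 * classes[] table above (token "PRINTER"), and info->cmdset = "PCL,PJL";
 * the "PCL" substring would also set guessed_class to PARPORT_CLASS_PRINTER
 * for devices that omit the CLS field.
 */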
/* Read up to count-1 bytes of device id. Terminate buffer with
* '\0'. Buffer begins with two Device ID length bytes as given by
* device. */
static ssize_t parport_read_device_id (struct parport *port, char *buffer,
size_t count)
{
unsigned char length[2];
unsigned lelen, belen;
size_t idlens[4];
unsigned numidlens;
unsigned current_idlen;
ssize_t retval;
size_t len;
/* First two bytes are MSB,LSB of inclusive length. */
retval = parport_read (port, length, 2);
if (retval < 0)
return retval;
if (retval != 2)
return -EIO;
if (count < 2)
return 0;
memcpy(buffer, length, 2);
len = 2;
/* Some devices wrongly send LE length, and some send it two
* bytes short. Construct a sorted array of lengths to try. */
belen = (length[0] << 8) + length[1];
lelen = (length[1] << 8) + length[0];
idlens[0] = min(belen, lelen);
idlens[1] = idlens[0]+2;
if (belen != lelen) {
int off = 2;
/* Don't try lengths of 0x100 and 0x200 as 1 and 2 */
if (idlens[0] <= 2)
off = 0;
idlens[off] = max(belen, lelen);
idlens[off+1] = idlens[off]+2;
numidlens = off+2;
}
else {
		/* Some devices don't truly implement Device ID, but
		 * just return a constant nibble forever.  This also
		 * catches those cases. */
if (idlens[0] == 0 || idlens[0] > 0xFFF) {
printk (KERN_DEBUG "%s: reported broken Device ID"
" length of %#zX bytes\n",
port->name, idlens[0]);
return -EIO;
}
numidlens = 2;
}
/* Try to respect the given ID length despite all the bugs in
* the ID length. Read according to shortest possible ID
* first. */
for (current_idlen = 0; current_idlen < numidlens; ++current_idlen) {
size_t idlen = idlens[current_idlen];
if (idlen+1 >= count)
break;
retval = parport_read (port, buffer+len, idlen-len);
if (retval < 0)
return retval;
len += retval;
if (port->physport->ieee1284.phase != IEEE1284_PH_HBUSY_DAVAIL) {
if (belen != len) {
printk (KERN_DEBUG "%s: Device ID was %zd bytes"
" while device told it would be %d"
" bytes\n",
port->name, len, belen);
}
goto done;
}
/* This might end reading the Device ID too
* soon. Hopefully the needed fields were already in
* the first 256 bytes or so that we must have read so
* far. */
if (buffer[len-1] == ';') {
printk (KERN_DEBUG "%s: Device ID reading stopped"
" before device told data not available. "
"Current idlen %u of %u, len bytes %02X %02X\n",
port->name, current_idlen, numidlens,
length[0], length[1]);
goto done;
}
}
if (current_idlen < numidlens) {
/* Buffer not large enough, read to end of buffer. */
size_t idlen, len2;
if (len+1 < count) {
retval = parport_read (port, buffer+len, count-len-1);
if (retval < 0)
return retval;
len += retval;
}
		/* Read the whole ID since some devices would not
		 * otherwise give back the Device ID from the beginning
		 * the next time they are asked. */
idlen = idlens[current_idlen];
len2 = len;
while(len2 < idlen && retval > 0) {
char tmp[4];
retval = parport_read (port, tmp,
min(sizeof tmp, idlen-len2));
if (retval < 0)
return retval;
len2 += retval;
}
}
/* In addition, there are broken devices out there that don't
even finish off with a semi-colon. We do not need to care
about those at this time. */
done:
buffer[len] = '\0';
return len;
}
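/*
 * Worked example (not in the original source): with the two header bytes
 * 0x00 0x2A the inclusive big-endian length is belen = 0x002A = 42 and the
 * (bogus) little-endian reading is lelen = 0x2A00 = 10752, so the candidate
 * lengths become idlens[] = { 42, 44, 10752, 10754 } and the loop above
 * tries the shortest plausible length first before falling back to the
 * larger ones.
 */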
/* Get Std 1284 Device ID. */
ssize_t parport_device_id (int devnum, char *buffer, size_t count)
{
ssize_t retval = -ENXIO;
struct pardevice *dev = parport_open (devnum, "Device ID probe");
if (!dev)
return -ENXIO;
parport_claim_or_block (dev);
/* Negotiate to compatibility mode, and then to device ID
	 * mode. (This is so that we start from the beginning of the device ID if
* already in device ID mode.) */
parport_negotiate (dev->port, IEEE1284_MODE_COMPAT);
retval = parport_negotiate (dev->port,
IEEE1284_MODE_NIBBLE | IEEE1284_DEVICEID);
if (!retval) {
retval = parport_read_device_id (dev->port, buffer, count);
parport_negotiate (dev->port, IEEE1284_MODE_COMPAT);
if (retval > 2)
parse_data (dev->port, dev->daisy, buffer+2);
}
parport_release (dev);
parport_close (dev);
return retval;
}
| gpl-2.0 |
kzlin129/tt-gpl | go12/linux-2.6.28.10/arch/s390/kvm/intercept.c | 177 | 5390 | /*
* intercept.c - in-kernel handling for sie intercepts
*
* Copyright IBM Corp. 2008
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License (version 2 only)
* as published by the Free Software Foundation.
*
* Author(s): Carsten Otte <cotte@de.ibm.com>
* Christian Borntraeger <borntraeger@de.ibm.com>
*/
#include <linux/kvm_host.h>
#include <linux/errno.h>
#include <linux/pagemap.h>
#include <asm/kvm_host.h>
#include "kvm-s390.h"
#include "gaccess.h"
static int handle_lctlg(struct kvm_vcpu *vcpu)
{
int reg1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4;
int reg3 = vcpu->arch.sie_block->ipa & 0x000f;
int base2 = vcpu->arch.sie_block->ipb >> 28;
int disp2 = ((vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16) +
((vcpu->arch.sie_block->ipb & 0xff00) << 4);
u64 useraddr;
int reg, rc;
vcpu->stat.instruction_lctlg++;
if ((vcpu->arch.sie_block->ipb & 0xff) != 0x2f)
return -ENOTSUPP;
useraddr = disp2;
if (base2)
useraddr += vcpu->arch.guest_gprs[base2];
if (useraddr & 7)
return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
reg = reg1;
VCPU_EVENT(vcpu, 5, "lctlg r1:%x, r3:%x,b2:%x,d2:%x", reg1, reg3, base2,
disp2);
do {
rc = get_guest_u64(vcpu, useraddr,
&vcpu->arch.sie_block->gcr[reg]);
if (rc == -EFAULT) {
kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
break;
}
useraddr += 8;
if (reg == reg3)
break;
reg = (reg + 1) % 16;
} while (1);
return 0;
}
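/*
 * Worked decode example (not in the original source, derived only from the
 * field extraction above): for an LCTLG that loads control registers 0 and 1
 * from offset 0x20 off guest register 5, the SIE block carries
 * ipa = 0xeb01 and ipb = 0x5020002f, which decodes to reg1 = 0, reg3 = 1,
 * base2 = 5 and disp2 = 0x20, so the loop above fetches gcr[0] and gcr[1]
 * from guest address guest_gprs[5] + 0x20.
 */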
static int handle_lctl(struct kvm_vcpu *vcpu)
{
int reg1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4;
int reg3 = vcpu->arch.sie_block->ipa & 0x000f;
int base2 = vcpu->arch.sie_block->ipb >> 28;
int disp2 = ((vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16);
u64 useraddr;
u32 val = 0;
int reg, rc;
vcpu->stat.instruction_lctl++;
useraddr = disp2;
if (base2)
useraddr += vcpu->arch.guest_gprs[base2];
if (useraddr & 3)
return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
VCPU_EVENT(vcpu, 5, "lctl r1:%x, r3:%x,b2:%x,d2:%x", reg1, reg3, base2,
disp2);
reg = reg1;
do {
rc = get_guest_u32(vcpu, useraddr, &val);
if (rc == -EFAULT) {
kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
break;
}
vcpu->arch.sie_block->gcr[reg] &= 0xffffffff00000000ul;
vcpu->arch.sie_block->gcr[reg] |= val;
useraddr += 4;
if (reg == reg3)
break;
reg = (reg + 1) % 16;
} while (1);
return 0;
}
static intercept_handler_t instruction_handlers[256] = {
[0x83] = kvm_s390_handle_diag,
[0xae] = kvm_s390_handle_sigp,
[0xb2] = kvm_s390_handle_priv,
[0xb7] = handle_lctl,
[0xeb] = handle_lctlg,
};
static int handle_noop(struct kvm_vcpu *vcpu)
{
switch (vcpu->arch.sie_block->icptcode) {
case 0x0:
vcpu->stat.exit_null++;
break;
case 0x10:
vcpu->stat.exit_external_request++;
break;
case 0x14:
vcpu->stat.exit_external_interrupt++;
break;
default:
break; /* nothing */
}
return 0;
}
static int handle_stop(struct kvm_vcpu *vcpu)
{
int rc;
vcpu->stat.exit_stop_request++;
atomic_clear_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
spin_lock_bh(&vcpu->arch.local_int.lock);
if (vcpu->arch.local_int.action_bits & ACTION_STORE_ON_STOP) {
vcpu->arch.local_int.action_bits &= ~ACTION_STORE_ON_STOP;
rc = __kvm_s390_vcpu_store_status(vcpu,
KVM_S390_STORE_STATUS_NOADDR);
if (rc >= 0)
rc = -ENOTSUPP;
}
if (vcpu->arch.local_int.action_bits & ACTION_STOP_ON_STOP) {
vcpu->arch.local_int.action_bits &= ~ACTION_STOP_ON_STOP;
VCPU_EVENT(vcpu, 3, "%s", "cpu stopped");
rc = -ENOTSUPP;
} else
rc = 0;
spin_unlock_bh(&vcpu->arch.local_int.lock);
return rc;
}
static int handle_validity(struct kvm_vcpu *vcpu)
{
int viwhy = vcpu->arch.sie_block->ipb >> 16;
vcpu->stat.exit_validity++;
if (viwhy == 0x37) {
fault_in_pages_writeable((char __user *)
vcpu->kvm->arch.guest_origin +
vcpu->arch.sie_block->prefix,
PAGE_SIZE);
return 0;
}
VCPU_EVENT(vcpu, 2, "unhandled validity intercept code %d",
viwhy);
return -ENOTSUPP;
}
static int handle_instruction(struct kvm_vcpu *vcpu)
{
intercept_handler_t handler;
vcpu->stat.exit_instruction++;
handler = instruction_handlers[vcpu->arch.sie_block->ipa >> 8];
if (handler)
return handler(vcpu);
return -ENOTSUPP;
}
static int handle_prog(struct kvm_vcpu *vcpu)
{
vcpu->stat.exit_program_interruption++;
return kvm_s390_inject_program_int(vcpu, vcpu->arch.sie_block->iprcc);
}
static int handle_instruction_and_prog(struct kvm_vcpu *vcpu)
{
int rc, rc2;
vcpu->stat.exit_instr_and_program++;
rc = handle_instruction(vcpu);
rc2 = handle_prog(vcpu);
if (rc == -ENOTSUPP)
vcpu->arch.sie_block->icptcode = 0x04;
if (rc)
return rc;
return rc2;
}
static const intercept_handler_t intercept_funcs[0x48 >> 2] = {
[0x00 >> 2] = handle_noop,
[0x04 >> 2] = handle_instruction,
[0x08 >> 2] = handle_prog,
[0x0C >> 2] = handle_instruction_and_prog,
[0x10 >> 2] = handle_noop,
[0x14 >> 2] = handle_noop,
[0x1C >> 2] = kvm_s390_handle_wait,
[0x20 >> 2] = handle_validity,
[0x28 >> 2] = handle_stop,
};
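/*
 * Illustrative note (not in the original source): intercept codes are
 * multiples of 4, so an instruction intercept (icptcode 0x04) selects
 * intercept_funcs[0x04 >> 2] = handle_instruction, while codes that are not
 * a multiple of 4 or lie above 0x48 are rejected by the
 * "code & 3 || code > 0x48" check in kvm_handle_sie_intercept() below.
 */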
int kvm_handle_sie_intercept(struct kvm_vcpu *vcpu)
{
intercept_handler_t func;
u8 code = vcpu->arch.sie_block->icptcode;
if (code & 3 || code > 0x48)
return -ENOTSUPP;
func = intercept_funcs[code >> 2];
if (func)
return func(vcpu);
return -ENOTSUPP;
}
| gpl-2.0 |
f-andrew-beal/whoi-3.2-kernel | arch/arm/mach-mx5/clock-mx51-mx53.c | 177 | 44569 | /*
* Copyright 2008-2010 Freescale Semiconductor, Inc. All Rights Reserved.
* Copyright (C) 2009-2010 Amit Kucheria <amit.kucheria@canonical.com>
*
* The code contained herein is licensed under the GNU General Public
* License. You may obtain a copy of the GNU General Public License
* Version 2 or later at the following locations:
*
* http://www.opensource.org/licenses/gpl-license.html
* http://www.gnu.org/copyleft/gpl.html
*/
#include <linux/mm.h>
#include <linux/delay.h>
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/clkdev.h>
#include <linux/of.h>
#include <asm/div64.h>
#include <mach/hardware.h>
#include <mach/common.h>
#include <mach/clock.h>
#include "crm_regs.h"
/* External clock values passed-in by the board code */
static unsigned long external_high_reference, external_low_reference;
static unsigned long oscillator_reference, ckih2_reference;
static struct clk osc_clk;
static struct clk pll1_main_clk;
static struct clk pll1_sw_clk;
static struct clk pll2_sw_clk;
static struct clk pll3_sw_clk;
static struct clk mx53_pll4_sw_clk;
static struct clk lp_apm_clk;
static struct clk periph_apm_clk;
static struct clk ahb_clk;
static struct clk ipg_clk;
static struct clk usboh3_clk;
static struct clk emi_fast_clk;
static struct clk ipu_clk;
static struct clk mipi_hsc1_clk;
static struct clk esdhc1_clk;
static struct clk esdhc2_clk;
static struct clk esdhc3_mx53_clk;
#define MAX_DPLL_WAIT_TRIES 1000 /* 1000 * udelay(1) = 1ms */
/* calculate best pre and post dividers to get the required divider */
static void __calc_pre_post_dividers(u32 div, u32 *pre, u32 *post,
u32 max_pre, u32 max_post)
{
if (div >= max_pre * max_post) {
*pre = max_pre;
*post = max_post;
} else if (div >= max_pre) {
u32 min_pre, temp_pre, old_err, err;
min_pre = DIV_ROUND_UP(div, max_post);
old_err = max_pre;
for (temp_pre = max_pre; temp_pre >= min_pre; temp_pre--) {
err = div % temp_pre;
if (err == 0) {
*pre = temp_pre;
break;
}
err = temp_pre - err;
if (err < old_err) {
old_err = err;
*pre = temp_pre;
}
}
*post = DIV_ROUND_UP(div, *pre);
} else {
*pre = div;
*post = 1;
}
}
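/*
 * Worked example (not in the original source): asked for a combined divider
 * of 12 with max_pre = 8 and max_post = 8, the search above walks temp_pre
 * down from 8 and stops at the first value that divides 12 evenly, giving
 * *pre = 6, *post = 2.  A prime divider such as 13 cannot be factored
 * exactly and instead yields the closest achievable split pre = 7,
 * post = 2, i.e. an overall divide by 14.
 */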
static void _clk_ccgr_setclk(struct clk *clk, unsigned mode)
{
u32 reg = __raw_readl(clk->enable_reg);
reg &= ~(MXC_CCM_CCGRx_CG_MASK << clk->enable_shift);
reg |= mode << clk->enable_shift;
__raw_writel(reg, clk->enable_reg);
}
static int _clk_ccgr_enable(struct clk *clk)
{
_clk_ccgr_setclk(clk, MXC_CCM_CCGRx_MOD_ON);
return 0;
}
static void _clk_ccgr_disable(struct clk *clk)
{
_clk_ccgr_setclk(clk, MXC_CCM_CCGRx_MOD_OFF);
}
static int _clk_ccgr_enable_inrun(struct clk *clk)
{
_clk_ccgr_setclk(clk, MXC_CCM_CCGRx_MOD_IDLE);
return 0;
}
static void _clk_ccgr_disable_inwait(struct clk *clk)
{
_clk_ccgr_setclk(clk, MXC_CCM_CCGRx_MOD_IDLE);
}
/*
* For the 4-to-1 muxed input clock
*/
static inline u32 _get_mux(struct clk *parent, struct clk *m0,
struct clk *m1, struct clk *m2, struct clk *m3)
{
if (parent == m0)
return 0;
else if (parent == m1)
return 1;
else if (parent == m2)
return 2;
else if (parent == m3)
return 3;
else
BUG();
return -EINVAL;
}
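/*
 * Illustrative note (not in the original source): the returned value is the
 * position of the parent in the caller-supplied ordering, e.g.
 * _clk_periph_apm_set_parent() below calls
 * _get_mux(parent, &pll1_sw_clk, &pll3_sw_clk, &lp_apm_clk, NULL), so a
 * parent of &pll3_sw_clk yields 1, which is then shifted into the CBCMR
 * peripheral clock select field.
 */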
static inline void __iomem *_mx51_get_pll_base(struct clk *pll)
{
if (pll == &pll1_main_clk)
return MX51_DPLL1_BASE;
else if (pll == &pll2_sw_clk)
return MX51_DPLL2_BASE;
else if (pll == &pll3_sw_clk)
return MX51_DPLL3_BASE;
else
BUG();
return NULL;
}
static inline void __iomem *_mx53_get_pll_base(struct clk *pll)
{
if (pll == &pll1_main_clk)
return MX53_DPLL1_BASE;
else if (pll == &pll2_sw_clk)
return MX53_DPLL2_BASE;
else if (pll == &pll3_sw_clk)
return MX53_DPLL3_BASE;
else if (pll == &mx53_pll4_sw_clk)
return MX53_DPLL4_BASE;
else
BUG();
return NULL;
}
static inline void __iomem *_get_pll_base(struct clk *pll)
{
if (cpu_is_mx51())
return _mx51_get_pll_base(pll);
else
return _mx53_get_pll_base(pll);
}
static unsigned long clk_pll_get_rate(struct clk *clk)
{
long mfi, mfn, mfd, pdf, ref_clk, mfn_abs;
unsigned long dp_op, dp_mfd, dp_mfn, dp_ctl, pll_hfsm, dbl;
void __iomem *pllbase;
s64 temp;
unsigned long parent_rate;
parent_rate = clk_get_rate(clk->parent);
pllbase = _get_pll_base(clk);
dp_ctl = __raw_readl(pllbase + MXC_PLL_DP_CTL);
pll_hfsm = dp_ctl & MXC_PLL_DP_CTL_HFSM;
dbl = dp_ctl & MXC_PLL_DP_CTL_DPDCK0_2_EN;
if (pll_hfsm == 0) {
dp_op = __raw_readl(pllbase + MXC_PLL_DP_OP);
dp_mfd = __raw_readl(pllbase + MXC_PLL_DP_MFD);
dp_mfn = __raw_readl(pllbase + MXC_PLL_DP_MFN);
} else {
dp_op = __raw_readl(pllbase + MXC_PLL_DP_HFS_OP);
dp_mfd = __raw_readl(pllbase + MXC_PLL_DP_HFS_MFD);
dp_mfn = __raw_readl(pllbase + MXC_PLL_DP_HFS_MFN);
}
pdf = dp_op & MXC_PLL_DP_OP_PDF_MASK;
mfi = (dp_op & MXC_PLL_DP_OP_MFI_MASK) >> MXC_PLL_DP_OP_MFI_OFFSET;
mfi = (mfi <= 5) ? 5 : mfi;
mfd = dp_mfd & MXC_PLL_DP_MFD_MASK;
mfn = mfn_abs = dp_mfn & MXC_PLL_DP_MFN_MASK;
/* Sign extend to 32-bits */
if (mfn >= 0x04000000) {
mfn |= 0xFC000000;
mfn_abs = -mfn;
}
ref_clk = 2 * parent_rate;
if (dbl != 0)
ref_clk *= 2;
ref_clk /= (pdf + 1);
temp = (u64) ref_clk * mfn_abs;
do_div(temp, mfd + 1);
if (mfn < 0)
temp = -temp;
temp = (ref_clk * mfi) + temp;
return temp;
}
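/*
 * Worked example (not in the original source; the register values are an
 * assumption for illustration): with a 24 MHz oscillator parent, pdf = 0,
 * the dpdck0_2 doubler enabled, mfi = 8, mfn = 333333 and mfd = 999999, the
 * reference becomes ref_clk = 2 * 24 MHz * 2 / (0 + 1) = 96 MHz and the
 * routine returns 96 MHz * 8 + 96 MHz * 333333 / 1000000, i.e. roughly
 * 768 MHz + 32 MHz = 800 MHz, the usual i.MX51 PLL1 CPU setting.
 */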
static int _clk_pll_set_rate(struct clk *clk, unsigned long rate)
{
u32 reg;
void __iomem *pllbase;
long mfi, pdf, mfn, mfd = 999999;
s64 temp64;
unsigned long quad_parent_rate;
unsigned long pll_hfsm, dp_ctl;
unsigned long parent_rate;
parent_rate = clk_get_rate(clk->parent);
pllbase = _get_pll_base(clk);
quad_parent_rate = 4 * parent_rate;
pdf = mfi = -1;
while (++pdf < 16 && mfi < 5)
mfi = rate * (pdf+1) / quad_parent_rate;
if (mfi > 15)
return -EINVAL;
pdf--;
temp64 = rate * (pdf+1) - quad_parent_rate * mfi;
do_div(temp64, quad_parent_rate/1000000);
mfn = (long)temp64;
dp_ctl = __raw_readl(pllbase + MXC_PLL_DP_CTL);
/* use dpdck0_2 */
__raw_writel(dp_ctl | 0x1000L, pllbase + MXC_PLL_DP_CTL);
pll_hfsm = dp_ctl & MXC_PLL_DP_CTL_HFSM;
if (pll_hfsm == 0) {
reg = mfi << 4 | pdf;
__raw_writel(reg, pllbase + MXC_PLL_DP_OP);
__raw_writel(mfd, pllbase + MXC_PLL_DP_MFD);
__raw_writel(mfn, pllbase + MXC_PLL_DP_MFN);
} else {
reg = mfi << 4 | pdf;
__raw_writel(reg, pllbase + MXC_PLL_DP_HFS_OP);
__raw_writel(mfd, pllbase + MXC_PLL_DP_HFS_MFD);
__raw_writel(mfn, pllbase + MXC_PLL_DP_HFS_MFN);
}
return 0;
}
static int _clk_pll_enable(struct clk *clk)
{
u32 reg;
void __iomem *pllbase;
int i = 0;
pllbase = _get_pll_base(clk);
reg = __raw_readl(pllbase + MXC_PLL_DP_CTL);
if (reg & MXC_PLL_DP_CTL_UPEN)
return 0;
reg |= MXC_PLL_DP_CTL_UPEN;
__raw_writel(reg, pllbase + MXC_PLL_DP_CTL);
/* Wait for lock */
do {
reg = __raw_readl(pllbase + MXC_PLL_DP_CTL);
if (reg & MXC_PLL_DP_CTL_LRF)
break;
udelay(1);
} while (++i < MAX_DPLL_WAIT_TRIES);
if (i == MAX_DPLL_WAIT_TRIES) {
pr_err("MX5: pll locking failed\n");
return -EINVAL;
}
return 0;
}
static void _clk_pll_disable(struct clk *clk)
{
u32 reg;
void __iomem *pllbase;
pllbase = _get_pll_base(clk);
reg = __raw_readl(pllbase + MXC_PLL_DP_CTL) & ~MXC_PLL_DP_CTL_UPEN;
__raw_writel(reg, pllbase + MXC_PLL_DP_CTL);
}
static int _clk_pll1_sw_set_parent(struct clk *clk, struct clk *parent)
{
u32 reg, step;
reg = __raw_readl(MXC_CCM_CCSR);
/* When switching from pll_main_clk to a bypass clock, first select a
* multiplexed clock in 'step_sel', then shift the glitchless mux
* 'pll1_sw_clk_sel'.
*
* When switching back, do it in reverse order
*/
if (parent == &pll1_main_clk) {
/* Switch to pll1_main_clk */
reg &= ~MXC_CCM_CCSR_PLL1_SW_CLK_SEL;
__raw_writel(reg, MXC_CCM_CCSR);
/* step_clk mux switched to lp_apm, to save power. */
reg = __raw_readl(MXC_CCM_CCSR);
reg &= ~MXC_CCM_CCSR_STEP_SEL_MASK;
reg |= (MXC_CCM_CCSR_STEP_SEL_LP_APM <<
MXC_CCM_CCSR_STEP_SEL_OFFSET);
} else {
if (parent == &lp_apm_clk) {
step = MXC_CCM_CCSR_STEP_SEL_LP_APM;
} else if (parent == &pll2_sw_clk) {
step = MXC_CCM_CCSR_STEP_SEL_PLL2_DIVIDED;
} else if (parent == &pll3_sw_clk) {
step = MXC_CCM_CCSR_STEP_SEL_PLL3_DIVIDED;
} else
return -EINVAL;
reg &= ~MXC_CCM_CCSR_STEP_SEL_MASK;
reg |= (step << MXC_CCM_CCSR_STEP_SEL_OFFSET);
__raw_writel(reg, MXC_CCM_CCSR);
/* Switch to step_clk */
reg = __raw_readl(MXC_CCM_CCSR);
reg |= MXC_CCM_CCSR_PLL1_SW_CLK_SEL;
}
__raw_writel(reg, MXC_CCM_CCSR);
return 0;
}
static unsigned long clk_pll1_sw_get_rate(struct clk *clk)
{
u32 reg, div;
unsigned long parent_rate;
parent_rate = clk_get_rate(clk->parent);
reg = __raw_readl(MXC_CCM_CCSR);
if (clk->parent == &pll2_sw_clk) {
div = ((reg & MXC_CCM_CCSR_PLL2_PODF_MASK) >>
MXC_CCM_CCSR_PLL2_PODF_OFFSET) + 1;
} else if (clk->parent == &pll3_sw_clk) {
div = ((reg & MXC_CCM_CCSR_PLL3_PODF_MASK) >>
MXC_CCM_CCSR_PLL3_PODF_OFFSET) + 1;
} else
div = 1;
return parent_rate / div;
}
static int _clk_pll2_sw_set_parent(struct clk *clk, struct clk *parent)
{
u32 reg;
reg = __raw_readl(MXC_CCM_CCSR);
if (parent == &pll2_sw_clk)
reg &= ~MXC_CCM_CCSR_PLL2_SW_CLK_SEL;
else
reg |= MXC_CCM_CCSR_PLL2_SW_CLK_SEL;
__raw_writel(reg, MXC_CCM_CCSR);
return 0;
}
static int _clk_lp_apm_set_parent(struct clk *clk, struct clk *parent)
{
u32 reg;
if (parent == &osc_clk)
reg = __raw_readl(MXC_CCM_CCSR) & ~MXC_CCM_CCSR_LP_APM_SEL;
else
return -EINVAL;
__raw_writel(reg, MXC_CCM_CCSR);
return 0;
}
static unsigned long clk_cpu_get_rate(struct clk *clk)
{
u32 cacrr, div;
unsigned long parent_rate;
parent_rate = clk_get_rate(clk->parent);
cacrr = __raw_readl(MXC_CCM_CACRR);
div = (cacrr & MXC_CCM_CACRR_ARM_PODF_MASK) + 1;
return parent_rate / div;
}
static int clk_cpu_set_rate(struct clk *clk, unsigned long rate)
{
u32 reg, cpu_podf;
unsigned long parent_rate;
parent_rate = clk_get_rate(clk->parent);
cpu_podf = parent_rate / rate - 1;
/* use post divider to change freq */
reg = __raw_readl(MXC_CCM_CACRR);
reg &= ~MXC_CCM_CACRR_ARM_PODF_MASK;
reg |= cpu_podf << MXC_CCM_CACRR_ARM_PODF_OFFSET;
__raw_writel(reg, MXC_CCM_CACRR);
return 0;
}
static int _clk_periph_apm_set_parent(struct clk *clk, struct clk *parent)
{
u32 reg, mux;
int i = 0;
mux = _get_mux(parent, &pll1_sw_clk, &pll3_sw_clk, &lp_apm_clk, NULL);
reg = __raw_readl(MXC_CCM_CBCMR) & ~MXC_CCM_CBCMR_PERIPH_CLK_SEL_MASK;
reg |= mux << MXC_CCM_CBCMR_PERIPH_CLK_SEL_OFFSET;
__raw_writel(reg, MXC_CCM_CBCMR);
/* Wait for lock */
do {
reg = __raw_readl(MXC_CCM_CDHIPR);
if (!(reg & MXC_CCM_CDHIPR_PERIPH_CLK_SEL_BUSY))
break;
udelay(1);
} while (++i < MAX_DPLL_WAIT_TRIES);
if (i == MAX_DPLL_WAIT_TRIES) {
pr_err("MX5: Set parent for periph_apm clock failed\n");
return -EINVAL;
}
return 0;
}
static int _clk_main_bus_set_parent(struct clk *clk, struct clk *parent)
{
u32 reg;
reg = __raw_readl(MXC_CCM_CBCDR);
if (parent == &pll2_sw_clk)
reg &= ~MXC_CCM_CBCDR_PERIPH_CLK_SEL;
else if (parent == &periph_apm_clk)
reg |= MXC_CCM_CBCDR_PERIPH_CLK_SEL;
else
return -EINVAL;
__raw_writel(reg, MXC_CCM_CBCDR);
return 0;
}
static struct clk main_bus_clk = {
.parent = &pll2_sw_clk,
.set_parent = _clk_main_bus_set_parent,
};
static unsigned long clk_ahb_get_rate(struct clk *clk)
{
u32 reg, div;
unsigned long parent_rate;
parent_rate = clk_get_rate(clk->parent);
reg = __raw_readl(MXC_CCM_CBCDR);
div = ((reg & MXC_CCM_CBCDR_AHB_PODF_MASK) >>
MXC_CCM_CBCDR_AHB_PODF_OFFSET) + 1;
return parent_rate / div;
}
static int _clk_ahb_set_rate(struct clk *clk, unsigned long rate)
{
u32 reg, div;
unsigned long parent_rate;
int i = 0;
parent_rate = clk_get_rate(clk->parent);
div = parent_rate / rate;
if (div > 8 || div < 1 || ((parent_rate / div) != rate))
return -EINVAL;
reg = __raw_readl(MXC_CCM_CBCDR);
reg &= ~MXC_CCM_CBCDR_AHB_PODF_MASK;
reg |= (div - 1) << MXC_CCM_CBCDR_AHB_PODF_OFFSET;
__raw_writel(reg, MXC_CCM_CBCDR);
/* Wait for lock */
do {
reg = __raw_readl(MXC_CCM_CDHIPR);
if (!(reg & MXC_CCM_CDHIPR_AHB_PODF_BUSY))
break;
udelay(1);
} while (++i < MAX_DPLL_WAIT_TRIES);
if (i == MAX_DPLL_WAIT_TRIES) {
pr_err("MX5: clk_ahb_set_rate failed\n");
return -EINVAL;
}
return 0;
}
static unsigned long _clk_ahb_round_rate(struct clk *clk,
unsigned long rate)
{
u32 div;
unsigned long parent_rate;
parent_rate = clk_get_rate(clk->parent);
div = parent_rate / rate;
if (div > 8)
div = 8;
else if (div == 0)
div++;
return parent_rate / div;
}
static int _clk_max_enable(struct clk *clk)
{
u32 reg;
_clk_ccgr_enable(clk);
/* Handshake with MAX when LPM is entered. */
reg = __raw_readl(MXC_CCM_CLPCR);
if (cpu_is_mx51())
reg &= ~MX51_CCM_CLPCR_BYPASS_MAX_LPM_HS;
else if (cpu_is_mx53())
reg &= ~MX53_CCM_CLPCR_BYPASS_MAX_LPM_HS;
__raw_writel(reg, MXC_CCM_CLPCR);
return 0;
}
static void _clk_max_disable(struct clk *clk)
{
u32 reg;
_clk_ccgr_disable_inwait(clk);
	/* No handshake with MAX when LPM is entered as it is disabled. */
reg = __raw_readl(MXC_CCM_CLPCR);
if (cpu_is_mx51())
reg |= MX51_CCM_CLPCR_BYPASS_MAX_LPM_HS;
else if (cpu_is_mx53())
reg &= ~MX53_CCM_CLPCR_BYPASS_MAX_LPM_HS;
__raw_writel(reg, MXC_CCM_CLPCR);
}
static unsigned long clk_ipg_get_rate(struct clk *clk)
{
u32 reg, div;
unsigned long parent_rate;
parent_rate = clk_get_rate(clk->parent);
reg = __raw_readl(MXC_CCM_CBCDR);
div = ((reg & MXC_CCM_CBCDR_IPG_PODF_MASK) >>
MXC_CCM_CBCDR_IPG_PODF_OFFSET) + 1;
return parent_rate / div;
}
static unsigned long clk_ipg_per_get_rate(struct clk *clk)
{
u32 reg, prediv1, prediv2, podf;
unsigned long parent_rate;
parent_rate = clk_get_rate(clk->parent);
if (clk->parent == &main_bus_clk || clk->parent == &lp_apm_clk) {
/* the main_bus_clk is the one before the DVFS engine */
reg = __raw_readl(MXC_CCM_CBCDR);
prediv1 = ((reg & MXC_CCM_CBCDR_PERCLK_PRED1_MASK) >>
MXC_CCM_CBCDR_PERCLK_PRED1_OFFSET) + 1;
prediv2 = ((reg & MXC_CCM_CBCDR_PERCLK_PRED2_MASK) >>
MXC_CCM_CBCDR_PERCLK_PRED2_OFFSET) + 1;
podf = ((reg & MXC_CCM_CBCDR_PERCLK_PODF_MASK) >>
MXC_CCM_CBCDR_PERCLK_PODF_OFFSET) + 1;
return parent_rate / (prediv1 * prediv2 * podf);
} else if (clk->parent == &ipg_clk)
return parent_rate;
else
BUG();
}
static int _clk_ipg_per_set_parent(struct clk *clk, struct clk *parent)
{
u32 reg;
reg = __raw_readl(MXC_CCM_CBCMR);
reg &= ~MXC_CCM_CBCMR_PERCLK_LP_APM_CLK_SEL;
reg &= ~MXC_CCM_CBCMR_PERCLK_IPG_CLK_SEL;
if (parent == &ipg_clk)
reg |= MXC_CCM_CBCMR_PERCLK_IPG_CLK_SEL;
else if (parent == &lp_apm_clk)
reg |= MXC_CCM_CBCMR_PERCLK_LP_APM_CLK_SEL;
else if (parent != &main_bus_clk)
return -EINVAL;
__raw_writel(reg, MXC_CCM_CBCMR);
return 0;
}
#define clk_nfc_set_parent NULL
static unsigned long clk_nfc_get_rate(struct clk *clk)
{
unsigned long rate;
u32 reg, div;
reg = __raw_readl(MXC_CCM_CBCDR);
div = ((reg & MXC_CCM_CBCDR_NFC_PODF_MASK) >>
MXC_CCM_CBCDR_NFC_PODF_OFFSET) + 1;
rate = clk_get_rate(clk->parent) / div;
WARN_ON(rate == 0);
return rate;
}
static unsigned long clk_nfc_round_rate(struct clk *clk,
unsigned long rate)
{
u32 div;
unsigned long parent_rate = clk_get_rate(clk->parent);
if (!rate)
return -EINVAL;
div = parent_rate / rate;
if (parent_rate % rate)
div++;
if (div > 8)
return -EINVAL;
return parent_rate / div;
}
static int clk_nfc_set_rate(struct clk *clk, unsigned long rate)
{
u32 reg, div;
div = clk_get_rate(clk->parent) / rate;
if (div == 0)
div++;
if (((clk_get_rate(clk->parent) / div) != rate) || (div > 8))
return -EINVAL;
reg = __raw_readl(MXC_CCM_CBCDR);
reg &= ~MXC_CCM_CBCDR_NFC_PODF_MASK;
reg |= (div - 1) << MXC_CCM_CBCDR_NFC_PODF_OFFSET;
__raw_writel(reg, MXC_CCM_CBCDR);
while (__raw_readl(MXC_CCM_CDHIPR) &
MXC_CCM_CDHIPR_NFC_IPG_INT_MEM_PODF_BUSY){
}
return 0;
}
static unsigned long get_high_reference_clock_rate(struct clk *clk)
{
return external_high_reference;
}
static unsigned long get_low_reference_clock_rate(struct clk *clk)
{
return external_low_reference;
}
static unsigned long get_oscillator_reference_clock_rate(struct clk *clk)
{
return oscillator_reference;
}
static unsigned long get_ckih2_reference_clock_rate(struct clk *clk)
{
return ckih2_reference;
}
static unsigned long clk_emi_slow_get_rate(struct clk *clk)
{
u32 reg, div;
reg = __raw_readl(MXC_CCM_CBCDR);
div = ((reg & MXC_CCM_CBCDR_EMI_PODF_MASK) >>
MXC_CCM_CBCDR_EMI_PODF_OFFSET) + 1;
return clk_get_rate(clk->parent) / div;
}
static unsigned long _clk_ddr_hf_get_rate(struct clk *clk)
{
unsigned long rate;
u32 reg, div;
reg = __raw_readl(MXC_CCM_CBCDR);
div = ((reg & MXC_CCM_CBCDR_DDR_PODF_MASK) >>
MXC_CCM_CBCDR_DDR_PODF_OFFSET) + 1;
rate = clk_get_rate(clk->parent) / div;
return rate;
}
/* External high frequency clock */
static struct clk ckih_clk = {
.get_rate = get_high_reference_clock_rate,
};
static struct clk ckih2_clk = {
.get_rate = get_ckih2_reference_clock_rate,
};
static struct clk osc_clk = {
.get_rate = get_oscillator_reference_clock_rate,
};
/* External low frequency (32kHz) clock */
static struct clk ckil_clk = {
.get_rate = get_low_reference_clock_rate,
};
static struct clk pll1_main_clk = {
.parent = &osc_clk,
.get_rate = clk_pll_get_rate,
.enable = _clk_pll_enable,
.disable = _clk_pll_disable,
};
/* Clock tree block diagram (WIP):
* CCM: Clock Controller Module
*
* PLL output -> |
* | CCM Switcher -> CCM_CLK_ROOT_GEN ->
* PLL bypass -> |
*
*/
/* PLL1 SW supplies to ARM core */
static struct clk pll1_sw_clk = {
.parent = &pll1_main_clk,
.set_parent = _clk_pll1_sw_set_parent,
.get_rate = clk_pll1_sw_get_rate,
};
/* PLL2 SW supplies to AXI/AHB/IP buses */
static struct clk pll2_sw_clk = {
.parent = &osc_clk,
.get_rate = clk_pll_get_rate,
.set_rate = _clk_pll_set_rate,
.set_parent = _clk_pll2_sw_set_parent,
.enable = _clk_pll_enable,
.disable = _clk_pll_disable,
};
/* PLL3 SW supplies to serial clocks like USB, SSI, etc. */
static struct clk pll3_sw_clk = {
.parent = &osc_clk,
.set_rate = _clk_pll_set_rate,
.get_rate = clk_pll_get_rate,
.enable = _clk_pll_enable,
.disable = _clk_pll_disable,
};
/* PLL4 SW supplies to LVDS Display Bridge(LDB) */
static struct clk mx53_pll4_sw_clk = {
.parent = &osc_clk,
.set_rate = _clk_pll_set_rate,
.enable = _clk_pll_enable,
.disable = _clk_pll_disable,
};
/* Low-power Audio Playback Mode clock */
static struct clk lp_apm_clk = {
.parent = &osc_clk,
.set_parent = _clk_lp_apm_set_parent,
};
static struct clk periph_apm_clk = {
.parent = &pll1_sw_clk,
.set_parent = _clk_periph_apm_set_parent,
};
static struct clk cpu_clk = {
.parent = &pll1_sw_clk,
.get_rate = clk_cpu_get_rate,
.set_rate = clk_cpu_set_rate,
};
static struct clk ahb_clk = {
.parent = &main_bus_clk,
.get_rate = clk_ahb_get_rate,
.set_rate = _clk_ahb_set_rate,
.round_rate = _clk_ahb_round_rate,
};
static struct clk iim_clk = {
.parent = &ipg_clk,
.enable_reg = MXC_CCM_CCGR0,
.enable_shift = MXC_CCM_CCGRx_CG15_OFFSET,
};
/* Main IP interface clock for access to registers */
static struct clk ipg_clk = {
.parent = &ahb_clk,
.get_rate = clk_ipg_get_rate,
};
static struct clk ipg_perclk = {
.parent = &lp_apm_clk,
.get_rate = clk_ipg_per_get_rate,
.set_parent = _clk_ipg_per_set_parent,
};
static struct clk ahb_max_clk = {
.parent = &ahb_clk,
.enable_reg = MXC_CCM_CCGR0,
.enable_shift = MXC_CCM_CCGRx_CG14_OFFSET,
.enable = _clk_max_enable,
.disable = _clk_max_disable,
};
static struct clk aips_tz1_clk = {
.parent = &ahb_clk,
.secondary = &ahb_max_clk,
.enable_reg = MXC_CCM_CCGR0,
.enable_shift = MXC_CCM_CCGRx_CG12_OFFSET,
.enable = _clk_ccgr_enable,
.disable = _clk_ccgr_disable_inwait,
};
static struct clk aips_tz2_clk = {
.parent = &ahb_clk,
.secondary = &ahb_max_clk,
.enable_reg = MXC_CCM_CCGR0,
.enable_shift = MXC_CCM_CCGRx_CG13_OFFSET,
.enable = _clk_ccgr_enable,
.disable = _clk_ccgr_disable_inwait,
};
static struct clk gpc_dvfs_clk = {
.enable_reg = MXC_CCM_CCGR5,
.enable_shift = MXC_CCM_CCGRx_CG12_OFFSET,
.enable = _clk_ccgr_enable,
.disable = _clk_ccgr_disable,
};
static struct clk gpt_32k_clk = {
.id = 0,
.parent = &ckil_clk,
};
static struct clk dummy_clk = {
.id = 0,
};
static struct clk emi_slow_clk = {
.parent = &pll2_sw_clk,
.enable_reg = MXC_CCM_CCGR5,
.enable_shift = MXC_CCM_CCGRx_CG8_OFFSET,
.enable = _clk_ccgr_enable,
.disable = _clk_ccgr_disable_inwait,
.get_rate = clk_emi_slow_get_rate,
};
static int clk_ipu_enable(struct clk *clk)
{
u32 reg;
_clk_ccgr_enable(clk);
/* Enable handshake with IPU when certain clock rates are changed */
reg = __raw_readl(MXC_CCM_CCDR);
reg &= ~MXC_CCM_CCDR_IPU_HS_MASK;
__raw_writel(reg, MXC_CCM_CCDR);
/* Enable handshake with IPU when LPM is entered */
reg = __raw_readl(MXC_CCM_CLPCR);
reg &= ~MXC_CCM_CLPCR_BYPASS_IPU_LPM_HS;
__raw_writel(reg, MXC_CCM_CLPCR);
return 0;
}
static void clk_ipu_disable(struct clk *clk)
{
u32 reg;
_clk_ccgr_disable(clk);
	/* Disable handshake with IPU when dividers are changed */
reg = __raw_readl(MXC_CCM_CCDR);
reg |= MXC_CCM_CCDR_IPU_HS_MASK;
__raw_writel(reg, MXC_CCM_CCDR);
/* Disable handshake with IPU when LPM is entered */
reg = __raw_readl(MXC_CCM_CLPCR);
reg |= MXC_CCM_CLPCR_BYPASS_IPU_LPM_HS;
__raw_writel(reg, MXC_CCM_CLPCR);
}
static struct clk ahbmux1_clk = {
.parent = &ahb_clk,
.secondary = &ahb_max_clk,
.enable_reg = MXC_CCM_CCGR0,
.enable_shift = MXC_CCM_CCGRx_CG8_OFFSET,
.enable = _clk_ccgr_enable,
.disable = _clk_ccgr_disable_inwait,
};
static struct clk ipu_sec_clk = {
.parent = &emi_fast_clk,
.secondary = &ahbmux1_clk,
};
static struct clk ddr_hf_clk = {
.parent = &pll1_sw_clk,
.get_rate = _clk_ddr_hf_get_rate,
};
static struct clk ddr_clk = {
.parent = &ddr_hf_clk,
};
/* clock definitions for MIPI HSC unit which has been removed
* from documentation, but not from hardware
*/
static int _clk_hsc_enable(struct clk *clk)
{
u32 reg;
_clk_ccgr_enable(clk);
/* Handshake with IPU when certain clock rates are changed. */
reg = __raw_readl(MXC_CCM_CCDR);
reg &= ~MXC_CCM_CCDR_HSC_HS_MASK;
__raw_writel(reg, MXC_CCM_CCDR);
reg = __raw_readl(MXC_CCM_CLPCR);
reg &= ~MXC_CCM_CLPCR_BYPASS_HSC_LPM_HS;
__raw_writel(reg, MXC_CCM_CLPCR);
return 0;
}
static void _clk_hsc_disable(struct clk *clk)
{
u32 reg;
_clk_ccgr_disable(clk);
	/* No handshake with HSC as it is not enabled. */
reg = __raw_readl(MXC_CCM_CCDR);
reg |= MXC_CCM_CCDR_HSC_HS_MASK;
__raw_writel(reg, MXC_CCM_CCDR);
reg = __raw_readl(MXC_CCM_CLPCR);
reg |= MXC_CCM_CLPCR_BYPASS_HSC_LPM_HS;
__raw_writel(reg, MXC_CCM_CLPCR);
}
static struct clk mipi_hsp_clk = {
.parent = &ipu_clk,
.enable_reg = MXC_CCM_CCGR4,
.enable_shift = MXC_CCM_CCGRx_CG6_OFFSET,
.enable = _clk_hsc_enable,
.disable = _clk_hsc_disable,
.secondary = &mipi_hsc1_clk,
};
#define DEFINE_CLOCK_CCGR(name, i, er, es, pfx, p, s) \
static struct clk name = { \
.id = i, \
.enable_reg = er, \
.enable_shift = es, \
.get_rate = pfx##_get_rate, \
.set_rate = pfx##_set_rate, \
.round_rate = pfx##_round_rate, \
.set_parent = pfx##_set_parent, \
.enable = _clk_ccgr_enable, \
.disable = _clk_ccgr_disable, \
.parent = p, \
.secondary = s, \
}
#define DEFINE_CLOCK_MAX(name, i, er, es, pfx, p, s) \
static struct clk name = { \
.id = i, \
.enable_reg = er, \
.enable_shift = es, \
.get_rate = pfx##_get_rate, \
.set_rate = pfx##_set_rate, \
.set_parent = pfx##_set_parent, \
.enable = _clk_max_enable, \
.disable = _clk_max_disable, \
.parent = p, \
.secondary = s, \
}
#define CLK_GET_RATE(name, nr, bitsname) \
static unsigned long clk_##name##_get_rate(struct clk *clk) \
{ \
u32 reg, pred, podf; \
\
reg = __raw_readl(MXC_CCM_CSCDR##nr); \
pred = (reg & MXC_CCM_CSCDR##nr##_##bitsname##_CLK_PRED_MASK) \
>> MXC_CCM_CSCDR##nr##_##bitsname##_CLK_PRED_OFFSET; \
podf = (reg & MXC_CCM_CSCDR##nr##_##bitsname##_CLK_PODF_MASK) \
>> MXC_CCM_CSCDR##nr##_##bitsname##_CLK_PODF_OFFSET; \
\
return DIV_ROUND_CLOSEST(clk_get_rate(clk->parent), \
(pred + 1) * (podf + 1)); \
}
#define CLK_SET_PARENT(name, nr, bitsname) \
static int clk_##name##_set_parent(struct clk *clk, struct clk *parent) \
{ \
u32 reg, mux; \
\
mux = _get_mux(parent, &pll1_sw_clk, &pll2_sw_clk, \
&pll3_sw_clk, &lp_apm_clk); \
reg = __raw_readl(MXC_CCM_CSCMR##nr) & \
~MXC_CCM_CSCMR##nr##_##bitsname##_CLK_SEL_MASK; \
reg |= mux << MXC_CCM_CSCMR##nr##_##bitsname##_CLK_SEL_OFFSET; \
__raw_writel(reg, MXC_CCM_CSCMR##nr); \
\
return 0; \
}
#define CLK_SET_RATE(name, nr, bitsname) \
static int clk_##name##_set_rate(struct clk *clk, unsigned long rate) \
{ \
u32 reg, div, parent_rate; \
u32 pre = 0, post = 0; \
\
parent_rate = clk_get_rate(clk->parent); \
div = parent_rate / rate; \
\
if ((parent_rate / div) != rate) \
return -EINVAL; \
\
__calc_pre_post_dividers(div, &pre, &post, \
(MXC_CCM_CSCDR##nr##_##bitsname##_CLK_PRED_MASK >> \
MXC_CCM_CSCDR##nr##_##bitsname##_CLK_PRED_OFFSET) + 1, \
(MXC_CCM_CSCDR##nr##_##bitsname##_CLK_PODF_MASK >> \
MXC_CCM_CSCDR##nr##_##bitsname##_CLK_PODF_OFFSET) + 1);\
\
	/* Set the pre and post dividers for this clock */		\
reg = __raw_readl(MXC_CCM_CSCDR##nr) & \
~(MXC_CCM_CSCDR##nr##_##bitsname##_CLK_PRED_MASK \
| MXC_CCM_CSCDR##nr##_##bitsname##_CLK_PODF_MASK); \
reg |= (post - 1) << \
MXC_CCM_CSCDR##nr##_##bitsname##_CLK_PODF_OFFSET; \
reg |= (pre - 1) << \
MXC_CCM_CSCDR##nr##_##bitsname##_CLK_PRED_OFFSET; \
__raw_writel(reg, MXC_CCM_CSCDR##nr); \
\
return 0; \
}
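/*
 * Illustrative note (not in the original source): the three helper macros
 * above stamp out per-peripheral rate helpers from the CCM serial clock
 * divider/mux registers.  For example the invocation
 * CLK_GET_RATE(uart, 1, UART) below expands to a clk_uart_get_rate() that
 * reads MXC_CCM_CSCDR1, extracts the UART_CLK_PRED and UART_CLK_PODF fields
 * and divides the parent rate by (pred + 1) * (podf + 1).
 */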
/* UART */
CLK_GET_RATE(uart, 1, UART)
CLK_SET_PARENT(uart, 1, UART)
static struct clk uart_root_clk = {
.parent = &pll2_sw_clk,
.get_rate = clk_uart_get_rate,
.set_parent = clk_uart_set_parent,
};
/* USBOH3 */
CLK_GET_RATE(usboh3, 1, USBOH3)
CLK_SET_PARENT(usboh3, 1, USBOH3)
static struct clk usboh3_clk = {
.parent = &pll2_sw_clk,
.get_rate = clk_usboh3_get_rate,
.set_parent = clk_usboh3_set_parent,
.enable = _clk_ccgr_enable,
.disable = _clk_ccgr_disable,
.enable_reg = MXC_CCM_CCGR2,
.enable_shift = MXC_CCM_CCGRx_CG14_OFFSET,
};
static struct clk usb_ahb_clk = {
.parent = &ipg_clk,
.enable = _clk_ccgr_enable,
.disable = _clk_ccgr_disable,
.enable_reg = MXC_CCM_CCGR2,
.enable_shift = MXC_CCM_CCGRx_CG13_OFFSET,
};
static int clk_usb_phy1_set_parent(struct clk *clk, struct clk *parent)
{
u32 reg;
reg = __raw_readl(MXC_CCM_CSCMR1) & ~MXC_CCM_CSCMR1_USB_PHY_CLK_SEL;
if (parent == &pll3_sw_clk)
reg |= 1 << MXC_CCM_CSCMR1_USB_PHY_CLK_SEL_OFFSET;
__raw_writel(reg, MXC_CCM_CSCMR1);
return 0;
}
static struct clk usb_phy1_clk = {
.parent = &pll3_sw_clk,
.set_parent = clk_usb_phy1_set_parent,
.enable = _clk_ccgr_enable,
.enable_reg = MXC_CCM_CCGR2,
.enable_shift = MXC_CCM_CCGRx_CG0_OFFSET,
.disable = _clk_ccgr_disable,
};
/* eCSPI */
CLK_GET_RATE(ecspi, 2, CSPI)
CLK_SET_PARENT(ecspi, 1, CSPI)
static struct clk ecspi_main_clk = {
.parent = &pll3_sw_clk,
.get_rate = clk_ecspi_get_rate,
.set_parent = clk_ecspi_set_parent,
};
/* eSDHC */
CLK_GET_RATE(esdhc1, 1, ESDHC1_MSHC1)
CLK_SET_PARENT(esdhc1, 1, ESDHC1_MSHC1)
CLK_SET_RATE(esdhc1, 1, ESDHC1_MSHC1)
/* mx51 specific */
CLK_GET_RATE(esdhc2, 1, ESDHC2_MSHC2)
CLK_SET_PARENT(esdhc2, 1, ESDHC2_MSHC2)
CLK_SET_RATE(esdhc2, 1, ESDHC2_MSHC2)
static int clk_esdhc3_set_parent(struct clk *clk, struct clk *parent)
{
u32 reg;
reg = __raw_readl(MXC_CCM_CSCMR1);
if (parent == &esdhc1_clk)
reg &= ~MXC_CCM_CSCMR1_ESDHC3_CLK_SEL;
else if (parent == &esdhc2_clk)
reg |= MXC_CCM_CSCMR1_ESDHC3_CLK_SEL;
else
return -EINVAL;
__raw_writel(reg, MXC_CCM_CSCMR1);
return 0;
}
static int clk_esdhc4_set_parent(struct clk *clk, struct clk *parent)
{
u32 reg;
reg = __raw_readl(MXC_CCM_CSCMR1);
if (parent == &esdhc1_clk)
reg &= ~MXC_CCM_CSCMR1_ESDHC4_CLK_SEL;
else if (parent == &esdhc2_clk)
reg |= MXC_CCM_CSCMR1_ESDHC4_CLK_SEL;
else
return -EINVAL;
__raw_writel(reg, MXC_CCM_CSCMR1);
return 0;
}
/* mx53 specific */
static int clk_esdhc2_mx53_set_parent(struct clk *clk, struct clk *parent)
{
u32 reg;
reg = __raw_readl(MXC_CCM_CSCMR1);
if (parent == &esdhc1_clk)
reg &= ~MXC_CCM_CSCMR1_ESDHC2_MSHC2_MX53_CLK_SEL;
else if (parent == &esdhc3_mx53_clk)
reg |= MXC_CCM_CSCMR1_ESDHC2_MSHC2_MX53_CLK_SEL;
else
return -EINVAL;
__raw_writel(reg, MXC_CCM_CSCMR1);
return 0;
}
CLK_GET_RATE(esdhc3_mx53, 1, ESDHC3_MX53)
CLK_SET_PARENT(esdhc3_mx53, 1, ESDHC3_MX53)
CLK_SET_RATE(esdhc3_mx53, 1, ESDHC3_MX53)
static int clk_esdhc4_mx53_set_parent(struct clk *clk, struct clk *parent)
{
u32 reg;
reg = __raw_readl(MXC_CCM_CSCMR1);
if (parent == &esdhc1_clk)
reg &= ~MXC_CCM_CSCMR1_ESDHC4_CLK_SEL;
else if (parent == &esdhc3_mx53_clk)
reg |= MXC_CCM_CSCMR1_ESDHC4_CLK_SEL;
else
return -EINVAL;
__raw_writel(reg, MXC_CCM_CSCMR1);
return 0;
}
#define DEFINE_CLOCK_FULL(name, i, er, es, gr, sr, e, d, p, s) \
static struct clk name = { \
.id = i, \
.enable_reg = er, \
.enable_shift = es, \
.get_rate = gr, \
.set_rate = sr, \
.enable = e, \
.disable = d, \
.parent = p, \
.secondary = s, \
}
#define DEFINE_CLOCK(name, i, er, es, gr, sr, p, s) \
DEFINE_CLOCK_FULL(name, i, er, es, gr, sr, _clk_ccgr_enable, _clk_ccgr_disable, p, s)
/* Shared peripheral bus arbiter */
DEFINE_CLOCK(spba_clk, 0, MXC_CCM_CCGR5, MXC_CCM_CCGRx_CG0_OFFSET,
NULL, NULL, &ipg_clk, NULL);
/* UART */
DEFINE_CLOCK(uart1_ipg_clk, 0, MXC_CCM_CCGR1, MXC_CCM_CCGRx_CG3_OFFSET,
NULL, NULL, &ipg_clk, &aips_tz1_clk);
DEFINE_CLOCK(uart2_ipg_clk, 1, MXC_CCM_CCGR1, MXC_CCM_CCGRx_CG5_OFFSET,
NULL, NULL, &ipg_clk, &aips_tz1_clk);
DEFINE_CLOCK(uart3_ipg_clk, 2, MXC_CCM_CCGR1, MXC_CCM_CCGRx_CG7_OFFSET,
NULL, NULL, &ipg_clk, &spba_clk);
DEFINE_CLOCK(uart4_ipg_clk, 3, MXC_CCM_CCGR7, MXC_CCM_CCGRx_CG4_OFFSET,
NULL, NULL, &ipg_clk, &spba_clk);
DEFINE_CLOCK(uart5_ipg_clk, 4, MXC_CCM_CCGR7, MXC_CCM_CCGRx_CG6_OFFSET,
NULL, NULL, &ipg_clk, &spba_clk);
DEFINE_CLOCK(uart1_clk, 0, MXC_CCM_CCGR1, MXC_CCM_CCGRx_CG4_OFFSET,
NULL, NULL, &uart_root_clk, &uart1_ipg_clk);
DEFINE_CLOCK(uart2_clk, 1, MXC_CCM_CCGR1, MXC_CCM_CCGRx_CG6_OFFSET,
NULL, NULL, &uart_root_clk, &uart2_ipg_clk);
DEFINE_CLOCK(uart3_clk, 2, MXC_CCM_CCGR1, MXC_CCM_CCGRx_CG8_OFFSET,
NULL, NULL, &uart_root_clk, &uart3_ipg_clk);
DEFINE_CLOCK(uart4_clk, 3, MXC_CCM_CCGR7, MXC_CCM_CCGRx_CG5_OFFSET,
NULL, NULL, &uart_root_clk, &uart4_ipg_clk);
DEFINE_CLOCK(uart5_clk, 4, MXC_CCM_CCGR7, MXC_CCM_CCGRx_CG7_OFFSET,
NULL, NULL, &uart_root_clk, &uart5_ipg_clk);
/* GPT */
DEFINE_CLOCK(gpt_ipg_clk, 0, MXC_CCM_CCGR2, MXC_CCM_CCGRx_CG10_OFFSET,
NULL, NULL, &ipg_clk, NULL);
DEFINE_CLOCK(gpt_clk, 0, MXC_CCM_CCGR2, MXC_CCM_CCGRx_CG9_OFFSET,
NULL, NULL, &ipg_clk, &gpt_ipg_clk);
DEFINE_CLOCK(pwm1_clk, 0, MXC_CCM_CCGR2, MXC_CCM_CCGRx_CG6_OFFSET,
NULL, NULL, &ipg_perclk, NULL);
DEFINE_CLOCK(pwm2_clk, 0, MXC_CCM_CCGR2, MXC_CCM_CCGRx_CG8_OFFSET,
NULL, NULL, &ipg_perclk, NULL);
/* I2C */
DEFINE_CLOCK(i2c1_clk, 0, MXC_CCM_CCGR1, MXC_CCM_CCGRx_CG9_OFFSET,
NULL, NULL, &ipg_perclk, NULL);
DEFINE_CLOCK(i2c2_clk, 1, MXC_CCM_CCGR1, MXC_CCM_CCGRx_CG10_OFFSET,
NULL, NULL, &ipg_perclk, NULL);
DEFINE_CLOCK(hsi2c_clk, 0, MXC_CCM_CCGR1, MXC_CCM_CCGRx_CG11_OFFSET,
NULL, NULL, &ipg_clk, NULL);
DEFINE_CLOCK(i2c3_mx53_clk, 0, MXC_CCM_CCGR1, MXC_CCM_CCGRx_CG11_OFFSET,
NULL, NULL, &ipg_perclk, NULL);
/* FEC */
DEFINE_CLOCK(fec_clk, 0, MXC_CCM_CCGR2, MXC_CCM_CCGRx_CG12_OFFSET,
NULL, NULL, &ipg_clk, NULL);
/* NFC */
DEFINE_CLOCK_CCGR(nfc_clk, 0, MXC_CCM_CCGR5, MXC_CCM_CCGRx_CG10_OFFSET,
clk_nfc, &emi_slow_clk, NULL);
/* SSI */
DEFINE_CLOCK(ssi1_ipg_clk, 0, MXC_CCM_CCGR3, MXC_CCM_CCGRx_CG8_OFFSET,
NULL, NULL, &ipg_clk, NULL);
DEFINE_CLOCK(ssi1_clk, 0, MXC_CCM_CCGR3, MXC_CCM_CCGRx_CG9_OFFSET,
NULL, NULL, &pll3_sw_clk, &ssi1_ipg_clk);
DEFINE_CLOCK(ssi2_ipg_clk, 1, MXC_CCM_CCGR3, MXC_CCM_CCGRx_CG10_OFFSET,
NULL, NULL, &ipg_clk, NULL);
DEFINE_CLOCK(ssi2_clk, 1, MXC_CCM_CCGR3, MXC_CCM_CCGRx_CG11_OFFSET,
NULL, NULL, &pll3_sw_clk, &ssi2_ipg_clk);
DEFINE_CLOCK(ssi3_ipg_clk, 2, MXC_CCM_CCGR3, MXC_CCM_CCGRx_CG12_OFFSET,
NULL, NULL, &ipg_clk, NULL);
DEFINE_CLOCK(ssi3_clk, 2, MXC_CCM_CCGR3, MXC_CCM_CCGRx_CG13_OFFSET,
NULL, NULL, &pll3_sw_clk, &ssi3_ipg_clk);
/* eCSPI */
DEFINE_CLOCK_FULL(ecspi1_ipg_clk, 0, MXC_CCM_CCGR4, MXC_CCM_CCGRx_CG9_OFFSET,
NULL, NULL, _clk_ccgr_enable_inrun, _clk_ccgr_disable,
&ipg_clk, &spba_clk);
DEFINE_CLOCK(ecspi1_clk, 0, MXC_CCM_CCGR4, MXC_CCM_CCGRx_CG10_OFFSET,
NULL, NULL, &ecspi_main_clk, &ecspi1_ipg_clk);
DEFINE_CLOCK_FULL(ecspi2_ipg_clk, 0, MXC_CCM_CCGR4, MXC_CCM_CCGRx_CG11_OFFSET,
NULL, NULL, _clk_ccgr_enable_inrun, _clk_ccgr_disable,
&ipg_clk, &aips_tz2_clk);
DEFINE_CLOCK(ecspi2_clk, 0, MXC_CCM_CCGR4, MXC_CCM_CCGRx_CG12_OFFSET,
NULL, NULL, &ecspi_main_clk, &ecspi2_ipg_clk);
/* CSPI */
DEFINE_CLOCK(cspi_ipg_clk, 0, MXC_CCM_CCGR4, MXC_CCM_CCGRx_CG9_OFFSET,
NULL, NULL, &ipg_clk, &aips_tz2_clk);
DEFINE_CLOCK(cspi_clk, 0, MXC_CCM_CCGR4, MXC_CCM_CCGRx_CG13_OFFSET,
NULL, NULL, &ipg_clk, &cspi_ipg_clk);
/* SDMA */
DEFINE_CLOCK(sdma_clk, 1, MXC_CCM_CCGR4, MXC_CCM_CCGRx_CG15_OFFSET,
NULL, NULL, &ahb_clk, NULL);
/* eSDHC */
DEFINE_CLOCK_FULL(esdhc1_ipg_clk, 0, MXC_CCM_CCGR3, MXC_CCM_CCGRx_CG0_OFFSET,
NULL, NULL, _clk_max_enable, _clk_max_disable, &ipg_clk, NULL);
DEFINE_CLOCK_MAX(esdhc1_clk, 0, MXC_CCM_CCGR3, MXC_CCM_CCGRx_CG1_OFFSET,
clk_esdhc1, &pll2_sw_clk, &esdhc1_ipg_clk);
DEFINE_CLOCK_FULL(esdhc2_ipg_clk, 1, MXC_CCM_CCGR3, MXC_CCM_CCGRx_CG2_OFFSET,
NULL, NULL, _clk_max_enable, _clk_max_disable, &ipg_clk, NULL);
DEFINE_CLOCK_FULL(esdhc3_ipg_clk, 2, MXC_CCM_CCGR3, MXC_CCM_CCGRx_CG4_OFFSET,
NULL, NULL, _clk_max_enable, _clk_max_disable, &ipg_clk, NULL);
DEFINE_CLOCK_FULL(esdhc4_ipg_clk, 3, MXC_CCM_CCGR3, MXC_CCM_CCGRx_CG6_OFFSET,
NULL, NULL, _clk_max_enable, _clk_max_disable, &ipg_clk, NULL);
/* mx51 specific */
DEFINE_CLOCK_MAX(esdhc2_clk, 1, MXC_CCM_CCGR3, MXC_CCM_CCGRx_CG3_OFFSET,
clk_esdhc2, &pll2_sw_clk, &esdhc2_ipg_clk);
static struct clk esdhc3_clk = {
.id = 2,
.parent = &esdhc1_clk,
.set_parent = clk_esdhc3_set_parent,
.enable_reg = MXC_CCM_CCGR3,
.enable_shift = MXC_CCM_CCGRx_CG5_OFFSET,
.enable = _clk_max_enable,
.disable = _clk_max_disable,
.secondary = &esdhc3_ipg_clk,
};
static struct clk esdhc4_clk = {
.id = 3,
.parent = &esdhc1_clk,
.set_parent = clk_esdhc4_set_parent,
.enable_reg = MXC_CCM_CCGR3,
.enable_shift = MXC_CCM_CCGRx_CG7_OFFSET,
.enable = _clk_max_enable,
.disable = _clk_max_disable,
.secondary = &esdhc4_ipg_clk,
};
/* mx53 specific */
static struct clk esdhc2_mx53_clk = {
.id = 2,
.parent = &esdhc1_clk,
.set_parent = clk_esdhc2_mx53_set_parent,
.enable_reg = MXC_CCM_CCGR3,
.enable_shift = MXC_CCM_CCGRx_CG3_OFFSET,
.enable = _clk_max_enable,
.disable = _clk_max_disable,
.secondary = &esdhc3_ipg_clk,
};
DEFINE_CLOCK_MAX(esdhc3_mx53_clk, 2, MXC_CCM_CCGR3, MXC_CCM_CCGRx_CG5_OFFSET,
clk_esdhc3_mx53, &pll2_sw_clk, &esdhc2_ipg_clk);
static struct clk esdhc4_mx53_clk = {
.id = 3,
.parent = &esdhc1_clk,
.set_parent = clk_esdhc4_mx53_set_parent,
.enable_reg = MXC_CCM_CCGR3,
.enable_shift = MXC_CCM_CCGRx_CG7_OFFSET,
.enable = _clk_max_enable,
.disable = _clk_max_disable,
.secondary = &esdhc4_ipg_clk,
};
static struct clk sata_clk = {
.parent = &ipg_clk,
.enable = _clk_max_enable,
.enable_reg = MXC_CCM_CCGR4,
.enable_shift = MXC_CCM_CCGRx_CG1_OFFSET,
.disable = _clk_max_disable,
};
static struct clk ahci_phy_clk = {
.parent = &usb_phy1_clk,
};
static struct clk ahci_dma_clk = {
.parent = &ahb_clk,
};
DEFINE_CLOCK(mipi_esc_clk, 0, MXC_CCM_CCGR4, MXC_CCM_CCGRx_CG5_OFFSET, NULL, NULL, NULL, &pll2_sw_clk);
DEFINE_CLOCK(mipi_hsc2_clk, 0, MXC_CCM_CCGR4, MXC_CCM_CCGRx_CG4_OFFSET, NULL, NULL, &mipi_esc_clk, &pll2_sw_clk);
DEFINE_CLOCK(mipi_hsc1_clk, 0, MXC_CCM_CCGR4, MXC_CCM_CCGRx_CG3_OFFSET, NULL, NULL, &mipi_hsc2_clk, &pll2_sw_clk);
/* IPU */
DEFINE_CLOCK_FULL(ipu_clk, 0, MXC_CCM_CCGR5, MXC_CCM_CCGRx_CG5_OFFSET,
NULL, NULL, clk_ipu_enable, clk_ipu_disable, &ahb_clk, &ipu_sec_clk);
DEFINE_CLOCK_FULL(emi_fast_clk, 0, MXC_CCM_CCGR5, MXC_CCM_CCGRx_CG7_OFFSET,
NULL, NULL, _clk_ccgr_enable, _clk_ccgr_disable_inwait,
&ddr_clk, NULL);
DEFINE_CLOCK(ipu_di0_clk, 0, MXC_CCM_CCGR6, MXC_CCM_CCGRx_CG5_OFFSET,
NULL, NULL, &pll3_sw_clk, NULL);
DEFINE_CLOCK(ipu_di1_clk, 0, MXC_CCM_CCGR6, MXC_CCM_CCGRx_CG6_OFFSET,
NULL, NULL, &pll3_sw_clk, NULL);
/* PATA */
DEFINE_CLOCK(pata_clk, 0, MXC_CCM_CCGR4, MXC_CCM_CCGRx_CG0_OFFSET,
NULL, NULL, &ipg_clk, &spba_clk);
#define _REGISTER_CLOCK(d, n, c) \
{ \
.dev_id = d, \
.con_id = n, \
.clk = &c, \
},
static struct clk_lookup mx51_lookups[] = {
/* i.mx51 has the i.mx21 type uart */
_REGISTER_CLOCK("imx21-uart.0", NULL, uart1_clk)
_REGISTER_CLOCK("imx21-uart.1", NULL, uart2_clk)
_REGISTER_CLOCK("imx21-uart.2", NULL, uart3_clk)
_REGISTER_CLOCK(NULL, "gpt", gpt_clk)
/* i.mx51 has the i.mx27 type fec */
_REGISTER_CLOCK("imx27-fec.0", NULL, fec_clk)
_REGISTER_CLOCK("mxc_pwm.0", "pwm", pwm1_clk)
_REGISTER_CLOCK("mxc_pwm.1", "pwm", pwm2_clk)
_REGISTER_CLOCK("imx-i2c.0", NULL, i2c1_clk)
_REGISTER_CLOCK("imx-i2c.1", NULL, i2c2_clk)
_REGISTER_CLOCK("imx-i2c.2", NULL, hsi2c_clk)
_REGISTER_CLOCK("mxc-ehci.0", "usb", usboh3_clk)
_REGISTER_CLOCK("mxc-ehci.0", "usb_ahb", usb_ahb_clk)
_REGISTER_CLOCK("mxc-ehci.0", "usb_phy1", usb_phy1_clk)
_REGISTER_CLOCK("mxc-ehci.1", "usb", usboh3_clk)
_REGISTER_CLOCK("mxc-ehci.1", "usb_ahb", usb_ahb_clk)
_REGISTER_CLOCK("mxc-ehci.2", "usb", usboh3_clk)
_REGISTER_CLOCK("mxc-ehci.2", "usb_ahb", usb_ahb_clk)
_REGISTER_CLOCK("fsl-usb2-udc", "usb", usboh3_clk)
_REGISTER_CLOCK("fsl-usb2-udc", "usb_ahb", ahb_clk)
_REGISTER_CLOCK("imx-keypad", NULL, dummy_clk)
_REGISTER_CLOCK("mxc_nand", NULL, nfc_clk)
_REGISTER_CLOCK("imx-ssi.0", NULL, ssi1_clk)
_REGISTER_CLOCK("imx-ssi.1", NULL, ssi2_clk)
_REGISTER_CLOCK("imx-ssi.2", NULL, ssi3_clk)
/* i.mx51 has the i.mx35 type sdma */
_REGISTER_CLOCK("imx35-sdma", NULL, sdma_clk)
_REGISTER_CLOCK(NULL, "ckih", ckih_clk)
_REGISTER_CLOCK(NULL, "ckih2", ckih2_clk)
_REGISTER_CLOCK(NULL, "gpt_32k", gpt_32k_clk)
_REGISTER_CLOCK("imx51-ecspi.0", NULL, ecspi1_clk)
_REGISTER_CLOCK("imx51-ecspi.1", NULL, ecspi2_clk)
/* i.mx51 has the i.mx35 type cspi */
_REGISTER_CLOCK("imx35-cspi.0", NULL, cspi_clk)
_REGISTER_CLOCK("sdhci-esdhc-imx51.0", NULL, esdhc1_clk)
_REGISTER_CLOCK("sdhci-esdhc-imx51.1", NULL, esdhc2_clk)
_REGISTER_CLOCK("sdhci-esdhc-imx51.2", NULL, esdhc3_clk)
_REGISTER_CLOCK("sdhci-esdhc-imx51.3", NULL, esdhc4_clk)
_REGISTER_CLOCK(NULL, "cpu_clk", cpu_clk)
_REGISTER_CLOCK(NULL, "iim_clk", iim_clk)
_REGISTER_CLOCK("imx2-wdt.0", NULL, dummy_clk)
_REGISTER_CLOCK("imx2-wdt.1", NULL, dummy_clk)
_REGISTER_CLOCK(NULL, "mipi_hsp", mipi_hsp_clk)
_REGISTER_CLOCK("imx-ipuv3", NULL, ipu_clk)
_REGISTER_CLOCK("imx-ipuv3", "di0", ipu_di0_clk)
_REGISTER_CLOCK("imx-ipuv3", "di1", ipu_di1_clk)
_REGISTER_CLOCK(NULL, "gpc_dvfs", gpc_dvfs_clk)
_REGISTER_CLOCK("pata_imx", NULL, pata_clk)
};
static struct clk_lookup mx53_lookups[] = {
/* i.mx53 has the i.mx21 type uart */
_REGISTER_CLOCK("imx21-uart.0", NULL, uart1_clk)
_REGISTER_CLOCK("imx21-uart.1", NULL, uart2_clk)
_REGISTER_CLOCK("imx21-uart.2", NULL, uart3_clk)
_REGISTER_CLOCK("imx21-uart.3", NULL, uart4_clk)
_REGISTER_CLOCK("imx21-uart.4", NULL, uart5_clk)
_REGISTER_CLOCK(NULL, "gpt", gpt_clk)
/* i.mx53 has the i.mx25 type fec */
_REGISTER_CLOCK("imx25-fec.0", NULL, fec_clk)
_REGISTER_CLOCK(NULL, "iim_clk", iim_clk)
_REGISTER_CLOCK("imx-i2c.0", NULL, i2c1_clk)
_REGISTER_CLOCK("imx-i2c.1", NULL, i2c2_clk)
_REGISTER_CLOCK("imx-i2c.2", NULL, i2c3_mx53_clk)
/* i.mx53 has the i.mx51 type ecspi */
_REGISTER_CLOCK("imx51-ecspi.0", NULL, ecspi1_clk)
_REGISTER_CLOCK("imx51-ecspi.1", NULL, ecspi2_clk)
/* i.mx53 has the i.mx35 type cspi */
_REGISTER_CLOCK("imx35-cspi.0", NULL, cspi_clk)
_REGISTER_CLOCK("sdhci-esdhc-imx53.0", NULL, esdhc1_clk)
_REGISTER_CLOCK("sdhci-esdhc-imx53.1", NULL, esdhc2_mx53_clk)
_REGISTER_CLOCK("sdhci-esdhc-imx53.2", NULL, esdhc3_mx53_clk)
_REGISTER_CLOCK("sdhci-esdhc-imx53.3", NULL, esdhc4_mx53_clk)
_REGISTER_CLOCK("imx2-wdt.0", NULL, dummy_clk)
_REGISTER_CLOCK("imx2-wdt.1", NULL, dummy_clk)
/* i.mx53 has the i.mx35 type sdma */
_REGISTER_CLOCK("imx35-sdma", NULL, sdma_clk)
_REGISTER_CLOCK("imx-ssi.0", NULL, ssi1_clk)
_REGISTER_CLOCK("imx-ssi.1", NULL, ssi2_clk)
_REGISTER_CLOCK("imx-ssi.2", NULL, ssi3_clk)
_REGISTER_CLOCK("imx-keypad", NULL, dummy_clk)
_REGISTER_CLOCK("pata_imx", NULL, pata_clk)
_REGISTER_CLOCK("imx53-ahci.0", "ahci", sata_clk)
_REGISTER_CLOCK("imx53-ahci.0", "ahci_phy", ahci_phy_clk)
_REGISTER_CLOCK("imx53-ahci.0", "ahci_dma", ahci_dma_clk)
};
static void clk_tree_init(void)
{
u32 reg;
ipg_perclk.set_parent(&ipg_perclk, &lp_apm_clk);
/*
* Initialise the IPG PER CLK dividers to 3. IPG_PER_CLK should be at
8MHz; it is derived from lp_apm.
*
* FIXME: Verify if true for all boards
*/
reg = __raw_readl(MXC_CCM_CBCDR);
reg &= ~MXC_CCM_CBCDR_PERCLK_PRED1_MASK;
reg &= ~MXC_CCM_CBCDR_PERCLK_PRED2_MASK;
reg &= ~MXC_CCM_CBCDR_PERCLK_PODF_MASK;
reg |= (2 << MXC_CCM_CBCDR_PERCLK_PRED1_OFFSET);
__raw_writel(reg, MXC_CCM_CBCDR);
}
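/*
 * Worked example of the divider setup above (illustrative only): with
 * lp_apm running from the usual 24MHz oscillator, clearing PERCLK_PRED2
 * and PERCLK_PODF selects divide-by-1 for those stages, and writing 2
 * into PERCLK_PRED1 selects divide-by-3 (assuming the field encodes
 * divider - 1, as is typical for i.MX CCM dividers), giving
 * ipg_perclk = 24MHz / 3 = 8MHz.
 */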
int __init mx51_clocks_init(unsigned long ckil, unsigned long osc,
unsigned long ckih1, unsigned long ckih2)
{
int i;
external_low_reference = ckil;
external_high_reference = ckih1;
ckih2_reference = ckih2;
oscillator_reference = osc;
for (i = 0; i < ARRAY_SIZE(mx51_lookups); i++)
clkdev_add(&mx51_lookups[i]);
clk_tree_init();
clk_enable(&cpu_clk);
clk_enable(&main_bus_clk);
clk_enable(&iim_clk);
imx_print_silicon_rev("i.MX51", mx51_revision());
clk_disable(&iim_clk);
/* move usb_phy_clk to 24MHz */
clk_set_parent(&usb_phy1_clk, &osc_clk);
/* set the usboh3_clk parent to pll2_sw_clk */
clk_set_parent(&usboh3_clk, &pll2_sw_clk);
/* Set SDHC parents to be PLL2 */
clk_set_parent(&esdhc1_clk, &pll2_sw_clk);
clk_set_parent(&esdhc2_clk, &pll2_sw_clk);
/* set SDHC root clock to 166.25 MHz */
clk_set_rate(&esdhc1_clk, 166250000);
clk_set_rate(&esdhc2_clk, 166250000);
/* System timer */
mxc_timer_init(&gpt_clk, MX51_IO_ADDRESS(MX51_GPT1_BASE_ADDR),
MX51_INT_GPT);
return 0;
}
int __init mx53_clocks_init(unsigned long ckil, unsigned long osc,
unsigned long ckih1, unsigned long ckih2)
{
int i;
external_low_reference = ckil;
external_high_reference = ckih1;
ckih2_reference = ckih2;
oscillator_reference = osc;
for (i = 0; i < ARRAY_SIZE(mx53_lookups); i++)
clkdev_add(&mx53_lookups[i]);
clk_tree_init();
clk_set_parent(&uart_root_clk, &pll3_sw_clk);
clk_enable(&cpu_clk);
clk_enable(&main_bus_clk);
clk_enable(&iim_clk);
imx_print_silicon_rev("i.MX53", mx53_revision());
clk_disable(&iim_clk);
/* Set SDHC parents to be PLL2 */
clk_set_parent(&esdhc1_clk, &pll2_sw_clk);
clk_set_parent(&esdhc3_mx53_clk, &pll2_sw_clk);
/* set SDHC root clock to 200 MHz */
clk_set_rate(&esdhc1_clk, 200000000);
clk_set_rate(&esdhc3_mx53_clk, 200000000);
/* System timer */
mxc_timer_init(&gpt_clk, MX53_IO_ADDRESS(MX53_GPT1_BASE_ADDR),
MX53_INT_GPT);
return 0;
}
#ifdef CONFIG_OF
static void __init clk_get_freq_dt(unsigned long *ckil, unsigned long *osc,
unsigned long *ckih1, unsigned long *ckih2)
{
struct device_node *np;
/* retrieve the frequency of fixed clocks from the device tree */
for_each_compatible_node(np, NULL, "fixed-clock") {
u32 rate;
if (of_property_read_u32(np, "clock-frequency", &rate))
continue;
if (of_device_is_compatible(np, "fsl,imx-ckil"))
*ckil = rate;
else if (of_device_is_compatible(np, "fsl,imx-osc"))
*osc = rate;
else if (of_device_is_compatible(np, "fsl,imx-ckih1"))
*ckih1 = rate;
else if (of_device_is_compatible(np, "fsl,imx-ckih2"))
*ckih2 = rate;
}
}
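/*
 * Illustrative device tree fragment that clk_get_freq_dt() would parse
 * (hypothetical node names and typical rates, shown only to document the
 * binding this loop expects):
 *
 *   ckil {
 *           compatible = "fsl,imx-ckil", "fixed-clock";
 *           clock-frequency = <32768>;
 *   };
 *   osc {
 *           compatible = "fsl,imx-osc", "fixed-clock";
 *           clock-frequency = <24000000>;
 *   };
 */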
int __init mx51_clocks_init_dt(void)
{
unsigned long ckil, osc, ckih1, ckih2;
clk_get_freq_dt(&ckil, &osc, &ckih1, &ckih2);
return mx51_clocks_init(ckil, osc, ckih1, ckih2);
}
int __init mx53_clocks_init_dt(void)
{
unsigned long ckil, osc, ckih1, ckih2;
clk_get_freq_dt(&ckil, &osc, &ckih1, &ckih2);
return mx53_clocks_init(ckil, osc, ckih1, ckih2);
}
#endif
| gpl-2.0 |
vakkov/android-n900-nitdroid_kernel | drivers/scsi/scsi_netlink.c | 945 | 16127 | /*
* scsi_netlink.c - SCSI Transport Netlink Interface
*
* Copyright (C) 2006 James Smart, Emulex Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
*/
#include <linux/time.h>
#include <linux/jiffies.h>
#include <linux/security.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <net/sock.h>
#include <net/netlink.h>
#include <scsi/scsi_netlink.h>
#include "scsi_priv.h"
struct sock *scsi_nl_sock = NULL;
EXPORT_SYMBOL_GPL(scsi_nl_sock);
static DEFINE_SPINLOCK(scsi_nl_lock);
static struct list_head scsi_nl_drivers;
static u32 scsi_nl_state;
#define STATE_EHANDLER_BSY 0x00000001
struct scsi_nl_transport {
int (*msg_handler)(struct sk_buff *);
void (*event_handler)(struct notifier_block *, unsigned long, void *);
unsigned int refcnt;
int flags;
};
/* flags values (bit flags) */
#define HANDLER_DELETING 0x1
static struct scsi_nl_transport transports[SCSI_NL_MAX_TRANSPORTS] =
{ {NULL, }, };
struct scsi_nl_drvr {
struct list_head next;
int (*dmsg_handler)(struct Scsi_Host *shost, void *payload,
u32 len, u32 pid);
void (*devt_handler)(struct notifier_block *nb,
unsigned long event, void *notify_ptr);
struct scsi_host_template *hostt;
u64 vendor_id;
unsigned int refcnt;
int flags;
};
/**
* scsi_nl_rcv_msg - Receive message handler.
* @skb: socket receive buffer
*
* Description: Extracts message from a receive buffer.
* Validates the message header and calls the appropriate transport message handler.
*
*
**/
static void
scsi_nl_rcv_msg(struct sk_buff *skb)
{
struct nlmsghdr *nlh;
struct scsi_nl_hdr *hdr;
unsigned long flags;
u32 rlen;
int err, tport;
while (skb->len >= NLMSG_SPACE(0)) {
err = 0;
nlh = nlmsg_hdr(skb);
if ((nlh->nlmsg_len < (sizeof(*nlh) + sizeof(*hdr))) ||
(skb->len < nlh->nlmsg_len)) {
printk(KERN_WARNING "%s: discarding partial skb\n",
__func__);
return;
}
rlen = NLMSG_ALIGN(nlh->nlmsg_len);
if (rlen > skb->len)
rlen = skb->len;
if (nlh->nlmsg_type != SCSI_TRANSPORT_MSG) {
err = -EBADMSG;
goto next_msg;
}
hdr = NLMSG_DATA(nlh);
if ((hdr->version != SCSI_NL_VERSION) ||
(hdr->magic != SCSI_NL_MAGIC)) {
err = -EPROTOTYPE;
goto next_msg;
}
if (security_netlink_recv(skb, CAP_SYS_ADMIN)) {
err = -EPERM;
goto next_msg;
}
if (nlh->nlmsg_len < (sizeof(*nlh) + hdr->msglen)) {
printk(KERN_WARNING "%s: discarding partial message\n",
__func__);
goto next_msg;
}
/*
* Deliver message to the appropriate transport
*/
spin_lock_irqsave(&scsi_nl_lock, flags);
tport = hdr->transport;
if ((tport < SCSI_NL_MAX_TRANSPORTS) &&
!(transports[tport].flags & HANDLER_DELETING) &&
(transports[tport].msg_handler)) {
transports[tport].refcnt++;
spin_unlock_irqrestore(&scsi_nl_lock, flags);
err = transports[tport].msg_handler(skb);
spin_lock_irqsave(&scsi_nl_lock, flags);
transports[tport].refcnt--;
} else
err = -ENOENT;
spin_unlock_irqrestore(&scsi_nl_lock, flags);
next_msg:
if ((err) || (nlh->nlmsg_flags & NLM_F_ACK))
netlink_ack(skb, nlh, err);
skb_pull(skb, rlen);
}
}
/**
* scsi_nl_rcv_event - Event handler for a netlink socket.
* @this: event notifier block
* @event: event type
* @ptr: event payload
*
**/
static int
scsi_nl_rcv_event(struct notifier_block *this, unsigned long event, void *ptr)
{
struct netlink_notify *n = ptr;
struct scsi_nl_drvr *driver;
unsigned long flags;
int tport;
if (n->protocol != NETLINK_SCSITRANSPORT)
return NOTIFY_DONE;
spin_lock_irqsave(&scsi_nl_lock, flags);
scsi_nl_state |= STATE_EHANDLER_BSY;
/*
* Pass event on to any transports that may be listening
*/
for (tport = 0; tport < SCSI_NL_MAX_TRANSPORTS; tport++) {
if (!(transports[tport].flags & HANDLER_DELETING) &&
(transports[tport].event_handler)) {
spin_unlock_irqrestore(&scsi_nl_lock, flags);
transports[tport].event_handler(this, event, ptr);
spin_lock_irqsave(&scsi_nl_lock, flags);
}
}
/*
* Pass event on to any drivers that may be listening
*/
list_for_each_entry(driver, &scsi_nl_drivers, next) {
if (!(driver->flags & HANDLER_DELETING) &&
(driver->devt_handler)) {
spin_unlock_irqrestore(&scsi_nl_lock, flags);
driver->devt_handler(this, event, ptr);
spin_lock_irqsave(&scsi_nl_lock, flags);
}
}
scsi_nl_state &= ~STATE_EHANDLER_BSY;
spin_unlock_irqrestore(&scsi_nl_lock, flags);
return NOTIFY_DONE;
}
static struct notifier_block scsi_netlink_notifier = {
.notifier_call = scsi_nl_rcv_event,
};
/*
* GENERIC SCSI transport receive and event handlers
*/
/**
* scsi_generic_msg_handler - receive message handler for GENERIC transport messages
* @skb: socket receive buffer
**/
static int
scsi_generic_msg_handler(struct sk_buff *skb)
{
struct nlmsghdr *nlh = nlmsg_hdr(skb);
struct scsi_nl_hdr *snlh = NLMSG_DATA(nlh);
struct scsi_nl_drvr *driver;
struct Scsi_Host *shost;
unsigned long flags;
int err = 0, match, pid;
pid = NETLINK_CREDS(skb)->pid;
switch (snlh->msgtype) {
case SCSI_NL_SHOST_VENDOR:
{
struct scsi_nl_host_vendor_msg *msg = NLMSG_DATA(nlh);
/* Locate the driver that corresponds to the message */
spin_lock_irqsave(&scsi_nl_lock, flags);
match = 0;
list_for_each_entry(driver, &scsi_nl_drivers, next) {
if (driver->vendor_id == msg->vendor_id) {
match = 1;
break;
}
}
if ((!match) || (!driver->dmsg_handler)) {
spin_unlock_irqrestore(&scsi_nl_lock, flags);
err = -ESRCH;
goto rcv_exit;
}
if (driver->flags & HANDLER_DELETING) {
spin_unlock_irqrestore(&scsi_nl_lock, flags);
err = -ESHUTDOWN;
goto rcv_exit;
}
driver->refcnt++;
spin_unlock_irqrestore(&scsi_nl_lock, flags);
/* if successful, scsi_host_lookup takes a shost reference */
shost = scsi_host_lookup(msg->host_no);
if (!shost) {
err = -ENODEV;
goto driver_exit;
}
/* is this host owned by the vendor ? */
if (shost->hostt != driver->hostt) {
err = -EINVAL;
goto vendormsg_put;
}
/* pass message on to the driver */
err = driver->dmsg_handler(shost, (void *)&msg[1],
msg->vmsg_datalen, pid);
vendormsg_put:
/* release reference by scsi_host_lookup */
scsi_host_put(shost);
driver_exit:
/* release our own reference on the registration object */
spin_lock_irqsave(&scsi_nl_lock, flags);
driver->refcnt--;
spin_unlock_irqrestore(&scsi_nl_lock, flags);
break;
}
default:
err = -EBADR;
break;
}
rcv_exit:
if (err)
printk(KERN_WARNING "%s: Msgtype %d failed - err %d\n",
__func__, snlh->msgtype, err);
return err;
}
/**
* scsi_nl_add_transport -
* Registers message and event handlers for a transport, enabling
* receipt of netlink messages and events by that transport.
*
* @tport: transport registering handlers
* @msg_handler: receive message handler callback
* @event_handler: receive event handler callback
**/
int
scsi_nl_add_transport(u8 tport,
int (*msg_handler)(struct sk_buff *),
void (*event_handler)(struct notifier_block *, unsigned long, void *))
{
unsigned long flags;
int err = 0;
if (tport >= SCSI_NL_MAX_TRANSPORTS)
return -EINVAL;
spin_lock_irqsave(&scsi_nl_lock, flags);
if (scsi_nl_state & STATE_EHANDLER_BSY) {
spin_unlock_irqrestore(&scsi_nl_lock, flags);
msleep(1);
spin_lock_irqsave(&scsi_nl_lock, flags);
}
if (transports[tport].msg_handler || transports[tport].event_handler) {
err = -EALREADY;
goto register_out;
}
transports[tport].msg_handler = msg_handler;
transports[tport].event_handler = event_handler;
transports[tport].flags = 0;
transports[tport].refcnt = 0;
register_out:
spin_unlock_irqrestore(&scsi_nl_lock, flags);
return err;
}
EXPORT_SYMBOL_GPL(scsi_nl_add_transport);
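/*
 * Minimal usage sketch for the registration pair (illustrative only; the
 * handler name is hypothetical and the transport index is assumed to be
 * one of the SCSI_NL_TRANSPORT_* values from <scsi/scsi_netlink.h>):
 *
 *   static int foo_nl_msg_handler(struct sk_buff *skb)
 *   {
 *           return 0;
 *   }
 *
 *   err = scsi_nl_add_transport(SCSI_NL_TRANSPORT_FC,
 *                               foo_nl_msg_handler, NULL);
 *   ...
 *   scsi_nl_remove_transport(SCSI_NL_TRANSPORT_FC);
 */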
/**
* scsi_nl_remove_transport -
* Disable a transport's reception of messages and events
*
* @tport: transport deregistering handlers
*
**/
void
scsi_nl_remove_transport(u8 tport)
{
unsigned long flags;
spin_lock_irqsave(&scsi_nl_lock, flags);
if (scsi_nl_state & STATE_EHANDLER_BSY) {
spin_unlock_irqrestore(&scsi_nl_lock, flags);
msleep(1);
spin_lock_irqsave(&scsi_nl_lock, flags);
}
if (tport < SCSI_NL_MAX_TRANSPORTS) {
transports[tport].flags |= HANDLER_DELETING;
while (transports[tport].refcnt != 0) {
spin_unlock_irqrestore(&scsi_nl_lock, flags);
schedule_timeout_uninterruptible(HZ/4);
spin_lock_irqsave(&scsi_nl_lock, flags);
}
transports[tport].msg_handler = NULL;
transports[tport].event_handler = NULL;
transports[tport].flags = 0;
}
spin_unlock_irqrestore(&scsi_nl_lock, flags);
return;
}
EXPORT_SYMBOL_GPL(scsi_nl_remove_transport);
/**
* scsi_nl_add_driver -
* A driver is registering its interfaces for SCSI netlink messages
*
* @vendor_id: A unique identification value for the driver.
* @hostt: address of the driver's host template. Used
* to verify an shost is bound to the driver
* @nlmsg_handler: receive message handler callback
* @nlevt_handler: receive event handler callback
*
* Returns:
* 0 on Success
* error result otherwise
**/
int
scsi_nl_add_driver(u64 vendor_id, struct scsi_host_template *hostt,
int (*nlmsg_handler)(struct Scsi_Host *shost, void *payload,
u32 len, u32 pid),
void (*nlevt_handler)(struct notifier_block *nb,
unsigned long event, void *notify_ptr))
{
struct scsi_nl_drvr *driver;
unsigned long flags;
driver = kzalloc(sizeof(*driver), GFP_KERNEL);
if (unlikely(!driver)) {
printk(KERN_ERR "%s: allocation failure\n", __func__);
return -ENOMEM;
}
driver->dmsg_handler = nlmsg_handler;
driver->devt_handler = nlevt_handler;
driver->hostt = hostt;
driver->vendor_id = vendor_id;
spin_lock_irqsave(&scsi_nl_lock, flags);
if (scsi_nl_state & STATE_EHANDLER_BSY) {
spin_unlock_irqrestore(&scsi_nl_lock, flags);
msleep(1);
spin_lock_irqsave(&scsi_nl_lock, flags);
}
list_add_tail(&driver->next, &scsi_nl_drivers);
spin_unlock_irqrestore(&scsi_nl_lock, flags);
return 0;
}
EXPORT_SYMBOL_GPL(scsi_nl_add_driver);
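/*
 * Usage sketch for the driver-level registration above (illustrative
 * only; FOO_VENDOR_ID, foo_template and foo_nlmsg_handler are
 * hypothetical names, not part of this file):
 *
 *   err = scsi_nl_add_driver(FOO_VENDOR_ID, &foo_template,
 *                            foo_nlmsg_handler, NULL);
 *   ...
 *   scsi_nl_remove_driver(FOO_VENDOR_ID);
 */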
/**
* scsi_nl_remove_driver -
* A driver is unregistering its SCSI netlink message interfaces
*
* @vendor_id: The unique identification value for the driver.
**/
void
scsi_nl_remove_driver(u64 vendor_id)
{
struct scsi_nl_drvr *driver;
unsigned long flags;
spin_lock_irqsave(&scsi_nl_lock, flags);
if (scsi_nl_state & STATE_EHANDLER_BSY) {
spin_unlock_irqrestore(&scsi_nl_lock, flags);
msleep(1);
spin_lock_irqsave(&scsi_nl_lock, flags);
}
list_for_each_entry(driver, &scsi_nl_drivers, next) {
if (driver->vendor_id == vendor_id) {
driver->flags |= HANDLER_DELETING;
while (driver->refcnt != 0) {
spin_unlock_irqrestore(&scsi_nl_lock, flags);
schedule_timeout_uninterruptible(HZ/4);
spin_lock_irqsave(&scsi_nl_lock, flags);
}
list_del(&driver->next);
kfree(driver);
spin_unlock_irqrestore(&scsi_nl_lock, flags);
return;
}
}
spin_unlock_irqrestore(&scsi_nl_lock, flags);
printk(KERN_ERR "%s: removal of driver failed - vendor_id 0x%llx\n",
__func__, (unsigned long long)vendor_id);
return;
}
EXPORT_SYMBOL_GPL(scsi_nl_remove_driver);
/**
* scsi_netlink_init - Called by SCSI subsystem to initialize
* the SCSI transport netlink interface
*
**/
void
scsi_netlink_init(void)
{
int error;
INIT_LIST_HEAD(&scsi_nl_drivers);
error = netlink_register_notifier(&scsi_netlink_notifier);
if (error) {
printk(KERN_ERR "%s: register of event handler failed - %d\n",
__func__, error);
return;
}
scsi_nl_sock = netlink_kernel_create(&init_net, NETLINK_SCSITRANSPORT,
SCSI_NL_GRP_CNT, scsi_nl_rcv_msg, NULL,
THIS_MODULE);
if (!scsi_nl_sock) {
printk(KERN_ERR "%s: register of recieve handler failed\n",
__func__);
netlink_unregister_notifier(&scsi_netlink_notifier);
return;
}
/* Register the entry points for the generic SCSI transport */
error = scsi_nl_add_transport(SCSI_NL_TRANSPORT,
scsi_generic_msg_handler, NULL);
if (error)
printk(KERN_ERR "%s: register of GENERIC transport handler"
" failed - %d\n", __func__, error);
return;
}
/**
* scsi_netlink_exit - Called by SCSI subsystem to disable the SCSI transport netlink interface
*
**/
void
scsi_netlink_exit(void)
{
scsi_nl_remove_transport(SCSI_NL_TRANSPORT);
if (scsi_nl_sock) {
netlink_kernel_release(scsi_nl_sock);
netlink_unregister_notifier(&scsi_netlink_notifier);
}
return;
}
/*
* Exported Interfaces
*/
/**
* scsi_nl_send_transport_msg -
* Generic function to send a single message from a SCSI transport to
* a single process
*
* @pid: receiving pid
* @hdr: message payload
*
**/
void
scsi_nl_send_transport_msg(u32 pid, struct scsi_nl_hdr *hdr)
{
struct sk_buff *skb;
struct nlmsghdr *nlh;
const char *fn;
char *datab;
u32 len, skblen;
int err;
if (!scsi_nl_sock) {
err = -ENOENT;
fn = "netlink socket";
goto msg_fail;
}
len = NLMSG_SPACE(hdr->msglen);
skblen = NLMSG_SPACE(len);
skb = alloc_skb(skblen, GFP_KERNEL);
if (!skb) {
err = -ENOBUFS;
fn = "alloc_skb";
goto msg_fail;
}
nlh = nlmsg_put(skb, pid, 0, SCSI_TRANSPORT_MSG, len - sizeof(*nlh), 0);
if (!nlh) {
err = -ENOBUFS;
fn = "nlmsg_put";
goto msg_fail_skb;
}
datab = NLMSG_DATA(nlh);
memcpy(datab, hdr, hdr->msglen);
err = nlmsg_unicast(scsi_nl_sock, skb, pid);
if (err < 0) {
fn = "nlmsg_unicast";
/* nlmsg_unicast already kfree_skb'd */
goto msg_fail;
}
return;
msg_fail_skb:
kfree_skb(skb);
msg_fail:
printk(KERN_WARNING
"%s: Dropped Message : pid %d Transport %d, msgtype x%x, "
"msglen %d: %s : err %d\n",
__func__, pid, hdr->transport, hdr->msgtype, hdr->msglen,
fn, err);
return;
}
EXPORT_SYMBOL_GPL(scsi_nl_send_transport_msg);
/**
* scsi_nl_send_vendor_msg - called to send a shost vendor unique message
* to a specific process id.
*
* @pid: process id of the receiver
* @host_no: host # sending the message
* @vendor_id: unique identifier for the driver's vendor
* @data_len: amount, in bytes, of vendor unique payload data
* @data_buf: pointer to vendor unique data buffer
*
* Returns:
* 0 on successful return
* otherwise, failing error code
*
* Notes:
* This routine assumes no locks are held on entry.
*/
int
scsi_nl_send_vendor_msg(u32 pid, unsigned short host_no, u64 vendor_id,
char *data_buf, u32 data_len)
{
struct sk_buff *skb;
struct nlmsghdr *nlh;
struct scsi_nl_host_vendor_msg *msg;
u32 len, skblen;
int err;
if (!scsi_nl_sock) {
err = -ENOENT;
goto send_vendor_fail;
}
len = SCSI_NL_MSGALIGN(sizeof(*msg) + data_len);
skblen = NLMSG_SPACE(len);
skb = alloc_skb(skblen, GFP_KERNEL);
if (!skb) {
err = -ENOBUFS;
goto send_vendor_fail;
}
nlh = nlmsg_put(skb, 0, 0, SCSI_TRANSPORT_MSG,
skblen - sizeof(*nlh), 0);
if (!nlh) {
err = -ENOBUFS;
goto send_vendor_fail_skb;
}
msg = NLMSG_DATA(nlh);
INIT_SCSI_NL_HDR(&msg->snlh, SCSI_NL_TRANSPORT,
SCSI_NL_SHOST_VENDOR, len);
msg->vendor_id = vendor_id;
msg->host_no = host_no;
msg->vmsg_datalen = data_len; /* bytes */
memcpy(&msg[1], data_buf, data_len);
err = nlmsg_unicast(scsi_nl_sock, skb, pid);
if (err)
/* nlmsg_unicast already kfree_skb'd */
goto send_vendor_fail;
return 0;
send_vendor_fail_skb:
kfree_skb(skb);
send_vendor_fail:
printk(KERN_WARNING
"%s: Dropped SCSI Msg : host %d vendor_unique - err %d\n",
__func__, host_no, err);
return err;
}
EXPORT_SYMBOL(scsi_nl_send_vendor_msg);
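/*
 * Illustrative call (hypothetical payload buffer and pid, shown only to
 * document the argument order, with the data pointer before its length):
 *
 *   err = scsi_nl_send_vendor_msg(pid, shost->host_no, FOO_VENDOR_ID,
 *                                 payload, payload_len);
 */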
| gpl-2.0 |
cowithgun/samsung-kernel-ancora | net/sched/sch_ingress.c | 945 | 3308 | /* net/sched/sch_ingress.c - Ingress qdisc
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*
* Authors: Jamal Hadi Salim 1999
*/
#include <linux/module.h>
#include <linux/types.h>
#include <linux/list.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
struct ingress_qdisc_data {
struct tcf_proto *filter_list;
};
/* ------------------------- Class/flow operations ------------------------- */
static struct Qdisc *ingress_leaf(struct Qdisc *sch, unsigned long arg)
{
return NULL;
}
static unsigned long ingress_get(struct Qdisc *sch, u32 classid)
{
return TC_H_MIN(classid) + 1;
}
static unsigned long ingress_bind_filter(struct Qdisc *sch,
unsigned long parent, u32 classid)
{
return ingress_get(sch, classid);
}
static void ingress_put(struct Qdisc *sch, unsigned long cl)
{
}
static void ingress_walk(struct Qdisc *sch, struct qdisc_walker *walker)
{
}
static struct tcf_proto **ingress_find_tcf(struct Qdisc *sch, unsigned long cl)
{
struct ingress_qdisc_data *p = qdisc_priv(sch);
return &p->filter_list;
}
/* --------------------------- Qdisc operations ---------------------------- */
static int ingress_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
struct ingress_qdisc_data *p = qdisc_priv(sch);
struct tcf_result res;
int result;
result = tc_classify(skb, p->filter_list, &res);
sch->bstats.packets++;
sch->bstats.bytes += qdisc_pkt_len(skb);
switch (result) {
case TC_ACT_SHOT:
result = TC_ACT_SHOT;
sch->qstats.drops++;
break;
case TC_ACT_STOLEN:
case TC_ACT_QUEUED:
result = TC_ACT_STOLEN;
break;
case TC_ACT_RECLASSIFY:
case TC_ACT_OK:
skb->tc_index = TC_H_MIN(res.classid);
default:
result = TC_ACT_OK;
break;
}
return result;
}
/* ------------------------------------------------------------- */
static void ingress_destroy(struct Qdisc *sch)
{
struct ingress_qdisc_data *p = qdisc_priv(sch);
tcf_destroy_chain(&p->filter_list);
}
static int ingress_dump(struct Qdisc *sch, struct sk_buff *skb)
{
struct nlattr *nest;
nest = nla_nest_start(skb, TCA_OPTIONS);
if (nest == NULL)
goto nla_put_failure;
nla_nest_end(skb, nest);
return skb->len;
nla_put_failure:
nla_nest_cancel(skb, nest);
return -1;
}
static const struct Qdisc_class_ops ingress_class_ops = {
.leaf = ingress_leaf,
.get = ingress_get,
.put = ingress_put,
.walk = ingress_walk,
.tcf_chain = ingress_find_tcf,
.bind_tcf = ingress_bind_filter,
.unbind_tcf = ingress_put,
};
static struct Qdisc_ops ingress_qdisc_ops __read_mostly = {
.cl_ops = &ingress_class_ops,
.id = "ingress",
.priv_size = sizeof(struct ingress_qdisc_data),
.enqueue = ingress_enqueue,
.destroy = ingress_destroy,
.dump = ingress_dump,
.owner = THIS_MODULE,
};
static int __init ingress_module_init(void)
{
return register_qdisc(&ingress_qdisc_ops);
}
static void __exit ingress_module_exit(void)
{
unregister_qdisc(&ingress_qdisc_ops);
}
module_init(ingress_module_init)
module_exit(ingress_module_exit)
MODULE_LICENSE("GPL");
| gpl-2.0 |
MojieBuddhist/linux-1 | fs/nilfs2/segbuf.c | 1201 | 14085 | /*
* segbuf.c - NILFS segment buffer
*
* Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*
* Written by Ryusuke Konishi <ryusuke@osrg.net>
*
*/
#include <linux/buffer_head.h>
#include <linux/writeback.h>
#include <linux/crc32.h>
#include <linux/backing-dev.h>
#include <linux/slab.h>
#include "page.h"
#include "segbuf.h"
struct nilfs_write_info {
struct the_nilfs *nilfs;
struct bio *bio;
int start, end; /* The region to be submitted */
int rest_blocks;
int max_pages;
int nr_vecs;
sector_t blocknr;
};
static int nilfs_segbuf_write(struct nilfs_segment_buffer *segbuf,
struct the_nilfs *nilfs);
static int nilfs_segbuf_wait(struct nilfs_segment_buffer *segbuf);
struct nilfs_segment_buffer *nilfs_segbuf_new(struct super_block *sb)
{
struct nilfs_segment_buffer *segbuf;
segbuf = kmem_cache_alloc(nilfs_segbuf_cachep, GFP_NOFS);
if (unlikely(!segbuf))
return NULL;
segbuf->sb_super = sb;
INIT_LIST_HEAD(&segbuf->sb_list);
INIT_LIST_HEAD(&segbuf->sb_segsum_buffers);
INIT_LIST_HEAD(&segbuf->sb_payload_buffers);
segbuf->sb_super_root = NULL;
init_completion(&segbuf->sb_bio_event);
atomic_set(&segbuf->sb_err, 0);
segbuf->sb_nbio = 0;
return segbuf;
}
void nilfs_segbuf_free(struct nilfs_segment_buffer *segbuf)
{
kmem_cache_free(nilfs_segbuf_cachep, segbuf);
}
void nilfs_segbuf_map(struct nilfs_segment_buffer *segbuf, __u64 segnum,
unsigned long offset, struct the_nilfs *nilfs)
{
segbuf->sb_segnum = segnum;
nilfs_get_segment_range(nilfs, segnum, &segbuf->sb_fseg_start,
&segbuf->sb_fseg_end);
segbuf->sb_pseg_start = segbuf->sb_fseg_start + offset;
segbuf->sb_rest_blocks =
segbuf->sb_fseg_end - segbuf->sb_pseg_start + 1;
}
/**
* nilfs_segbuf_map_cont - map a new log behind a given log
* @segbuf: new segment buffer
* @prev: segment buffer containing a log to be continued
*/
void nilfs_segbuf_map_cont(struct nilfs_segment_buffer *segbuf,
struct nilfs_segment_buffer *prev)
{
segbuf->sb_segnum = prev->sb_segnum;
segbuf->sb_fseg_start = prev->sb_fseg_start;
segbuf->sb_fseg_end = prev->sb_fseg_end;
segbuf->sb_pseg_start = prev->sb_pseg_start + prev->sb_sum.nblocks;
segbuf->sb_rest_blocks =
segbuf->sb_fseg_end - segbuf->sb_pseg_start + 1;
}
void nilfs_segbuf_set_next_segnum(struct nilfs_segment_buffer *segbuf,
__u64 nextnum, struct the_nilfs *nilfs)
{
segbuf->sb_nextnum = nextnum;
segbuf->sb_sum.next = nilfs_get_segment_start_blocknr(nilfs, nextnum);
}
int nilfs_segbuf_extend_segsum(struct nilfs_segment_buffer *segbuf)
{
struct buffer_head *bh;
bh = sb_getblk(segbuf->sb_super,
segbuf->sb_pseg_start + segbuf->sb_sum.nsumblk);
if (unlikely(!bh))
return -ENOMEM;
nilfs_segbuf_add_segsum_buffer(segbuf, bh);
return 0;
}
int nilfs_segbuf_extend_payload(struct nilfs_segment_buffer *segbuf,
struct buffer_head **bhp)
{
struct buffer_head *bh;
bh = sb_getblk(segbuf->sb_super,
segbuf->sb_pseg_start + segbuf->sb_sum.nblocks);
if (unlikely(!bh))
return -ENOMEM;
nilfs_segbuf_add_payload_buffer(segbuf, bh);
*bhp = bh;
return 0;
}
int nilfs_segbuf_reset(struct nilfs_segment_buffer *segbuf, unsigned flags,
time_t ctime, __u64 cno)
{
int err;
segbuf->sb_sum.nblocks = segbuf->sb_sum.nsumblk = 0;
err = nilfs_segbuf_extend_segsum(segbuf);
if (unlikely(err))
return err;
segbuf->sb_sum.flags = flags;
segbuf->sb_sum.sumbytes = sizeof(struct nilfs_segment_summary);
segbuf->sb_sum.nfinfo = segbuf->sb_sum.nfileblk = 0;
segbuf->sb_sum.ctime = ctime;
segbuf->sb_sum.cno = cno;
return 0;
}
/*
* Setup segment summary
*/
void nilfs_segbuf_fill_in_segsum(struct nilfs_segment_buffer *segbuf)
{
struct nilfs_segment_summary *raw_sum;
struct buffer_head *bh_sum;
bh_sum = list_entry(segbuf->sb_segsum_buffers.next,
struct buffer_head, b_assoc_buffers);
raw_sum = (struct nilfs_segment_summary *)bh_sum->b_data;
raw_sum->ss_magic = cpu_to_le32(NILFS_SEGSUM_MAGIC);
raw_sum->ss_bytes = cpu_to_le16(sizeof(*raw_sum));
raw_sum->ss_flags = cpu_to_le16(segbuf->sb_sum.flags);
raw_sum->ss_seq = cpu_to_le64(segbuf->sb_sum.seg_seq);
raw_sum->ss_create = cpu_to_le64(segbuf->sb_sum.ctime);
raw_sum->ss_next = cpu_to_le64(segbuf->sb_sum.next);
raw_sum->ss_nblocks = cpu_to_le32(segbuf->sb_sum.nblocks);
raw_sum->ss_nfinfo = cpu_to_le32(segbuf->sb_sum.nfinfo);
raw_sum->ss_sumbytes = cpu_to_le32(segbuf->sb_sum.sumbytes);
raw_sum->ss_pad = 0;
raw_sum->ss_cno = cpu_to_le64(segbuf->sb_sum.cno);
}
/*
* CRC calculation routines
*/
static void
nilfs_segbuf_fill_in_segsum_crc(struct nilfs_segment_buffer *segbuf, u32 seed)
{
struct buffer_head *bh;
struct nilfs_segment_summary *raw_sum;
unsigned long size, bytes = segbuf->sb_sum.sumbytes;
u32 crc;
bh = list_entry(segbuf->sb_segsum_buffers.next, struct buffer_head,
b_assoc_buffers);
raw_sum = (struct nilfs_segment_summary *)bh->b_data;
size = min_t(unsigned long, bytes, bh->b_size);
crc = crc32_le(seed,
(unsigned char *)raw_sum +
sizeof(raw_sum->ss_datasum) + sizeof(raw_sum->ss_sumsum),
size - (sizeof(raw_sum->ss_datasum) +
sizeof(raw_sum->ss_sumsum)));
list_for_each_entry_continue(bh, &segbuf->sb_segsum_buffers,
b_assoc_buffers) {
bytes -= size;
size = min_t(unsigned long, bytes, bh->b_size);
crc = crc32_le(crc, bh->b_data, size);
}
raw_sum->ss_sumsum = cpu_to_le32(crc);
}
static void nilfs_segbuf_fill_in_data_crc(struct nilfs_segment_buffer *segbuf,
u32 seed)
{
struct buffer_head *bh;
struct nilfs_segment_summary *raw_sum;
void *kaddr;
u32 crc;
bh = list_entry(segbuf->sb_segsum_buffers.next, struct buffer_head,
b_assoc_buffers);
raw_sum = (struct nilfs_segment_summary *)bh->b_data;
crc = crc32_le(seed,
(unsigned char *)raw_sum + sizeof(raw_sum->ss_datasum),
bh->b_size - sizeof(raw_sum->ss_datasum));
list_for_each_entry_continue(bh, &segbuf->sb_segsum_buffers,
b_assoc_buffers) {
crc = crc32_le(crc, bh->b_data, bh->b_size);
}
list_for_each_entry(bh, &segbuf->sb_payload_buffers, b_assoc_buffers) {
kaddr = kmap_atomic(bh->b_page);
crc = crc32_le(crc, kaddr + bh_offset(bh), bh->b_size);
kunmap_atomic(kaddr);
}
raw_sum->ss_datasum = cpu_to_le32(crc);
}
static void
nilfs_segbuf_fill_in_super_root_crc(struct nilfs_segment_buffer *segbuf,
u32 seed)
{
struct nilfs_super_root *raw_sr;
struct the_nilfs *nilfs = segbuf->sb_super->s_fs_info;
unsigned srsize;
u32 crc;
raw_sr = (struct nilfs_super_root *)segbuf->sb_super_root->b_data;
srsize = NILFS_SR_BYTES(nilfs->ns_inode_size);
crc = crc32_le(seed,
(unsigned char *)raw_sr + sizeof(raw_sr->sr_sum),
srsize - sizeof(raw_sr->sr_sum));
raw_sr->sr_sum = cpu_to_le32(crc);
}
static void nilfs_release_buffers(struct list_head *list)
{
struct buffer_head *bh, *n;
list_for_each_entry_safe(bh, n, list, b_assoc_buffers) {
list_del_init(&bh->b_assoc_buffers);
brelse(bh);
}
}
static void nilfs_segbuf_clear(struct nilfs_segment_buffer *segbuf)
{
nilfs_release_buffers(&segbuf->sb_segsum_buffers);
nilfs_release_buffers(&segbuf->sb_payload_buffers);
segbuf->sb_super_root = NULL;
}
/*
* Iterators for segment buffers
*/
void nilfs_clear_logs(struct list_head *logs)
{
struct nilfs_segment_buffer *segbuf;
list_for_each_entry(segbuf, logs, sb_list)
nilfs_segbuf_clear(segbuf);
}
void nilfs_truncate_logs(struct list_head *logs,
struct nilfs_segment_buffer *last)
{
struct nilfs_segment_buffer *n, *segbuf;
segbuf = list_prepare_entry(last, logs, sb_list);
list_for_each_entry_safe_continue(segbuf, n, logs, sb_list) {
list_del_init(&segbuf->sb_list);
nilfs_segbuf_clear(segbuf);
nilfs_segbuf_free(segbuf);
}
}
int nilfs_write_logs(struct list_head *logs, struct the_nilfs *nilfs)
{
struct nilfs_segment_buffer *segbuf;
int ret = 0;
list_for_each_entry(segbuf, logs, sb_list) {
ret = nilfs_segbuf_write(segbuf, nilfs);
if (ret)
break;
}
return ret;
}
int nilfs_wait_on_logs(struct list_head *logs)
{
struct nilfs_segment_buffer *segbuf;
int err, ret = 0;
list_for_each_entry(segbuf, logs, sb_list) {
err = nilfs_segbuf_wait(segbuf);
if (err && !ret)
ret = err;
}
return ret;
}
/**
* nilfs_add_checksums_on_logs - add checksums on the logs
* @logs: list of segment buffers storing target logs
* @seed: checksum seed value
*/
void nilfs_add_checksums_on_logs(struct list_head *logs, u32 seed)
{
struct nilfs_segment_buffer *segbuf;
list_for_each_entry(segbuf, logs, sb_list) {
if (segbuf->sb_super_root)
nilfs_segbuf_fill_in_super_root_crc(segbuf, seed);
nilfs_segbuf_fill_in_segsum_crc(segbuf, seed);
nilfs_segbuf_fill_in_data_crc(segbuf, seed);
}
}
/*
* BIO operations
*/
static void nilfs_end_bio_write(struct bio *bio, int err)
{
const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
struct nilfs_segment_buffer *segbuf = bio->bi_private;
if (err == -EOPNOTSUPP) {
set_bit(BIO_EOPNOTSUPP, &bio->bi_flags);
/* to be detected by nilfs_segbuf_submit_bio() */
}
if (!uptodate)
atomic_inc(&segbuf->sb_err);
bio_put(bio);
complete(&segbuf->sb_bio_event);
}
static int nilfs_segbuf_submit_bio(struct nilfs_segment_buffer *segbuf,
struct nilfs_write_info *wi, int mode)
{
struct bio *bio = wi->bio;
int err;
if (segbuf->sb_nbio > 0 &&
bdi_write_congested(segbuf->sb_super->s_bdi)) {
wait_for_completion(&segbuf->sb_bio_event);
segbuf->sb_nbio--;
if (unlikely(atomic_read(&segbuf->sb_err))) {
bio_put(bio);
err = -EIO;
goto failed;
}
}
bio->bi_end_io = nilfs_end_bio_write;
bio->bi_private = segbuf;
bio_get(bio);
submit_bio(mode, bio);
segbuf->sb_nbio++;
if (bio_flagged(bio, BIO_EOPNOTSUPP)) {
bio_put(bio);
err = -EOPNOTSUPP;
goto failed;
}
bio_put(bio);
wi->bio = NULL;
wi->rest_blocks -= wi->end - wi->start;
wi->nr_vecs = min(wi->max_pages, wi->rest_blocks);
wi->start = wi->end;
return 0;
failed:
wi->bio = NULL;
return err;
}
/**
* nilfs_alloc_seg_bio - allocate a new bio for writing log
* @nilfs: nilfs object
* @start: start block number of the bio
* @nr_vecs: request size of page vector.
*
* Return Value: On success, pointer to the struct bio is returned.
* On error, NULL is returned.
*/
static struct bio *nilfs_alloc_seg_bio(struct the_nilfs *nilfs, sector_t start,
int nr_vecs)
{
struct bio *bio;
bio = bio_alloc(GFP_NOIO, nr_vecs);
if (bio == NULL) {
while (!bio && (nr_vecs >>= 1))
bio = bio_alloc(GFP_NOIO, nr_vecs);
}
if (likely(bio)) {
bio->bi_bdev = nilfs->ns_bdev;
bio->bi_iter.bi_sector =
start << (nilfs->ns_blocksize_bits - 9);
}
return bio;
}
static void nilfs_segbuf_prepare_write(struct nilfs_segment_buffer *segbuf,
struct nilfs_write_info *wi)
{
wi->bio = NULL;
wi->rest_blocks = segbuf->sb_sum.nblocks;
wi->max_pages = bio_get_nr_vecs(wi->nilfs->ns_bdev);
wi->nr_vecs = min(wi->max_pages, wi->rest_blocks);
wi->start = wi->end = 0;
wi->blocknr = segbuf->sb_pseg_start;
}
static int nilfs_segbuf_submit_bh(struct nilfs_segment_buffer *segbuf,
struct nilfs_write_info *wi,
struct buffer_head *bh, int mode)
{
int len, err;
BUG_ON(wi->nr_vecs <= 0);
repeat:
if (!wi->bio) {
wi->bio = nilfs_alloc_seg_bio(wi->nilfs, wi->blocknr + wi->end,
wi->nr_vecs);
if (unlikely(!wi->bio))
return -ENOMEM;
}
len = bio_add_page(wi->bio, bh->b_page, bh->b_size, bh_offset(bh));
if (len == bh->b_size) {
wi->end++;
return 0;
}
/* bio is FULL */
err = nilfs_segbuf_submit_bio(segbuf, wi, mode);
/* never submit current bh */
if (likely(!err))
goto repeat;
return err;
}
/**
* nilfs_segbuf_write - submit write requests of a log
* @segbuf: buffer storing a log to be written
* @nilfs: nilfs object
*
* Return Value: On Success, 0 is returned. On Error, one of the following
* negative error code is returned.
*
* %-EIO - I/O error
*
* %-ENOMEM - Insufficient memory available.
*/
static int nilfs_segbuf_write(struct nilfs_segment_buffer *segbuf,
struct the_nilfs *nilfs)
{
struct nilfs_write_info wi;
struct buffer_head *bh;
int res = 0, rw = WRITE;
wi.nilfs = nilfs;
nilfs_segbuf_prepare_write(segbuf, &wi);
list_for_each_entry(bh, &segbuf->sb_segsum_buffers, b_assoc_buffers) {
res = nilfs_segbuf_submit_bh(segbuf, &wi, bh, rw);
if (unlikely(res))
goto failed_bio;
}
list_for_each_entry(bh, &segbuf->sb_payload_buffers, b_assoc_buffers) {
res = nilfs_segbuf_submit_bh(segbuf, &wi, bh, rw);
if (unlikely(res))
goto failed_bio;
}
if (wi.bio) {
/*
* Last BIO is always sent through the following
* submission.
*/
rw |= REQ_SYNC;
res = nilfs_segbuf_submit_bio(segbuf, &wi, rw);
}
failed_bio:
return res;
}
/**
* nilfs_segbuf_wait - wait for completion of requested BIOs
* @segbuf: segment buffer
*
* Return Value: On Success, 0 is returned. On Error, one of the following
* negative error code is returned.
*
* %-EIO - I/O error
*/
static int nilfs_segbuf_wait(struct nilfs_segment_buffer *segbuf)
{
int err = 0;
if (!segbuf->sb_nbio)
return 0;
do {
wait_for_completion(&segbuf->sb_bio_event);
} while (--segbuf->sb_nbio > 0);
if (unlikely(atomic_read(&segbuf->sb_err) > 0)) {
printk(KERN_ERR "NILFS: IO error writing segment\n");
err = -EIO;
}
return err;
}
| gpl-2.0 |
SM-G920P/S6-MM | drivers/idle/intel_idle.c | 1713 | 19355 | /*
* intel_idle.c - native hardware idle loop for modern Intel processors
*
* Copyright (c) 2010, Intel Corporation.
* Len Brown <len.brown@intel.com>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
*/
/*
* intel_idle is a cpuidle driver that loads on specific Intel processors
* in lieu of the legacy ACPI processor_idle driver. The intent is to
* make Linux more efficient on these processors, as intel_idle knows
* more than ACPI, as well as make Linux more immune to ACPI BIOS bugs.
*/
/*
* Design Assumptions
*
* All CPUs have same idle states as boot CPU
*
* Chipset BM_STS (bus master status) bit is a NOP
* for preventing entry into deep C-states
*/
/*
* Known limitations
*
* The driver currently initializes for_each_online_cpu() upon modprobe.
* It is unaware of subsequent processors hot-added to the system.
* This means that if you boot with maxcpus=n and later online
* processors above n, those processors will use C1 only.
*
* ACPI has a .suspend hack to turn off deep c-states during suspend
* to avoid complications with the lapic timer workaround.
* Have not seen issues with suspend, but may need same workaround here.
*
* There is currently no kernel-based automatic probing/loading mechanism
* if the driver is built as a module.
*/
/* un-comment DEBUG to enable pr_debug() statements */
#define DEBUG
#include <linux/kernel.h>
#include <linux/cpuidle.h>
#include <linux/clockchips.h>
#include <trace/events/power.h>
#include <linux/sched.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/module.h>
#include <asm/cpu_device_id.h>
#include <asm/mwait.h>
#include <asm/msr.h>
#define INTEL_IDLE_VERSION "0.4"
#define PREFIX "intel_idle: "
static struct cpuidle_driver intel_idle_driver = {
.name = "intel_idle",
.owner = THIS_MODULE,
};
/* intel_idle.max_cstate=0 disables driver */
static int max_cstate = CPUIDLE_STATE_MAX - 1;
static unsigned int mwait_substates;
#define LAPIC_TIMER_ALWAYS_RELIABLE 0xFFFFFFFF
/* Reliable LAPIC Timer States, bit 1 for C1 etc. */
static unsigned int lapic_timer_reliable_states = (1 << 1); /* Default to only C1 */
struct idle_cpu {
struct cpuidle_state *state_table;
/*
* Hardware C-state auto-demotion may not always be optimal.
* Indicate which enable bits to clear here.
*/
unsigned long auto_demotion_disable_flags;
bool disable_promotion_to_c1e;
};
static const struct idle_cpu *icpu;
static struct cpuidle_device __percpu *intel_idle_cpuidle_devices;
static int intel_idle(struct cpuidle_device *dev,
struct cpuidle_driver *drv, int index);
static int intel_idle_cpu_init(int cpu);
static struct cpuidle_state *cpuidle_state_table;
/*
* Set this flag for states where the HW flushes the TLB for us
* and so we don't need cross-calls to keep it consistent.
* If this flag is set, SW flushes the TLB, so even if the
* HW doesn't do the flushing, this flag is safe to use.
*/
#define CPUIDLE_FLAG_TLB_FLUSHED 0x10000
/*
* MWAIT takes an 8-bit "hint" in EAX "suggesting"
* the C-state (top nibble) and sub-state (bottom nibble)
* 0x00 means "MWAIT(C1)", 0x10 means "MWAIT(C2)" etc.
*
* We store the hint at the top of our "flags" for each state.
*/
#define flg2MWAIT(flags) (((flags) >> 24) & 0xFF)
#define MWAIT2flg(eax) ((eax & 0xFF) << 24)
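/*
 * Worked example of the encoding above: the Haswell "C7s" entry below uses
 * MWAIT hint 0x32, i.e. C-state nibble 0x3 and sub-state nibble 0x2, and
 * the two macros round-trip it unchanged:
 * flg2MWAIT(MWAIT2flg(0x32)) == 0x32.
 */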
/*
* States are indexed by the cstate number,
* which is also the index into the MWAIT hint array.
* Thus C0 is a dummy.
*/
static struct cpuidle_state nehalem_cstates[CPUIDLE_STATE_MAX] = {
{
.name = "C1-NHM",
.desc = "MWAIT 0x00",
.flags = MWAIT2flg(0x00) | CPUIDLE_FLAG_TIME_VALID,
.exit_latency = 3,
.target_residency = 6,
.enter = &intel_idle },
{
.name = "C1E-NHM",
.desc = "MWAIT 0x01",
.flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_TIME_VALID,
.exit_latency = 10,
.target_residency = 20,
.enter = &intel_idle },
{
.name = "C3-NHM",
.desc = "MWAIT 0x10",
.flags = MWAIT2flg(0x10) | CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
.exit_latency = 20,
.target_residency = 80,
.enter = &intel_idle },
{
.name = "C6-NHM",
.desc = "MWAIT 0x20",
.flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
.exit_latency = 200,
.target_residency = 800,
.enter = &intel_idle },
{
.enter = NULL }
};
static struct cpuidle_state snb_cstates[CPUIDLE_STATE_MAX] = {
{
.name = "C1-SNB",
.desc = "MWAIT 0x00",
.flags = MWAIT2flg(0x00) | CPUIDLE_FLAG_TIME_VALID,
.exit_latency = 2,
.target_residency = 2,
.enter = &intel_idle },
{
.name = "C1E-SNB",
.desc = "MWAIT 0x01",
.flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_TIME_VALID,
.exit_latency = 10,
.target_residency = 20,
.enter = &intel_idle },
{
.name = "C3-SNB",
.desc = "MWAIT 0x10",
.flags = MWAIT2flg(0x10) | CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
.exit_latency = 80,
.target_residency = 211,
.enter = &intel_idle },
{
.name = "C6-SNB",
.desc = "MWAIT 0x20",
.flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
.exit_latency = 104,
.target_residency = 345,
.enter = &intel_idle },
{
.name = "C7-SNB",
.desc = "MWAIT 0x30",
.flags = MWAIT2flg(0x30) | CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
.exit_latency = 109,
.target_residency = 345,
.enter = &intel_idle },
{
.enter = NULL }
};
static struct cpuidle_state ivb_cstates[CPUIDLE_STATE_MAX] = {
{
.name = "C1-IVB",
.desc = "MWAIT 0x00",
.flags = MWAIT2flg(0x00) | CPUIDLE_FLAG_TIME_VALID,
.exit_latency = 1,
.target_residency = 1,
.enter = &intel_idle },
{
.name = "C1E-IVB",
.desc = "MWAIT 0x01",
.flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_TIME_VALID,
.exit_latency = 10,
.target_residency = 20,
.enter = &intel_idle },
{
.name = "C3-IVB",
.desc = "MWAIT 0x10",
.flags = MWAIT2flg(0x10) | CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
.exit_latency = 59,
.target_residency = 156,
.enter = &intel_idle },
{
.name = "C6-IVB",
.desc = "MWAIT 0x20",
.flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
.exit_latency = 80,
.target_residency = 300,
.enter = &intel_idle },
{
.name = "C7-IVB",
.desc = "MWAIT 0x30",
.flags = MWAIT2flg(0x30) | CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
.exit_latency = 87,
.target_residency = 300,
.enter = &intel_idle },
{
.enter = NULL }
};
static struct cpuidle_state hsw_cstates[CPUIDLE_STATE_MAX] = {
{
.name = "C1-HSW",
.desc = "MWAIT 0x00",
.flags = MWAIT2flg(0x00) | CPUIDLE_FLAG_TIME_VALID,
.exit_latency = 2,
.target_residency = 2,
.enter = &intel_idle },
{
.name = "C1E-HSW",
.desc = "MWAIT 0x01",
.flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_TIME_VALID,
.exit_latency = 10,
.target_residency = 20,
.enter = &intel_idle },
{
.name = "C3-HSW",
.desc = "MWAIT 0x10",
.flags = MWAIT2flg(0x10) | CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
.exit_latency = 33,
.target_residency = 100,
.enter = &intel_idle },
{
.name = "C6-HSW",
.desc = "MWAIT 0x20",
.flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
.exit_latency = 133,
.target_residency = 400,
.enter = &intel_idle },
{
.name = "C7s-HSW",
.desc = "MWAIT 0x32",
.flags = MWAIT2flg(0x32) | CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
.exit_latency = 166,
.target_residency = 500,
.enter = &intel_idle },
{
.name = "C8-HSW",
.desc = "MWAIT 0x40",
.flags = MWAIT2flg(0x40) | CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
.exit_latency = 300,
.target_residency = 900,
.enter = &intel_idle },
{
.name = "C9-HSW",
.desc = "MWAIT 0x50",
.flags = MWAIT2flg(0x50) | CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
.exit_latency = 600,
.target_residency = 1800,
.enter = &intel_idle },
{
.name = "C10-HSW",
.desc = "MWAIT 0x60",
.flags = MWAIT2flg(0x60) | CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
.exit_latency = 2600,
.target_residency = 7700,
.enter = &intel_idle },
{
.enter = NULL }
};
static struct cpuidle_state atom_cstates[CPUIDLE_STATE_MAX] = {
{
.name = "C1E-ATM",
.desc = "MWAIT 0x00",
.flags = MWAIT2flg(0x00) | CPUIDLE_FLAG_TIME_VALID,
.exit_latency = 10,
.target_residency = 20,
.enter = &intel_idle },
{
.name = "C2-ATM",
.desc = "MWAIT 0x10",
.flags = MWAIT2flg(0x10) | CPUIDLE_FLAG_TIME_VALID,
.exit_latency = 20,
.target_residency = 80,
.enter = &intel_idle },
{
.name = "C4-ATM",
.desc = "MWAIT 0x30",
.flags = MWAIT2flg(0x30) | CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
.exit_latency = 100,
.target_residency = 400,
.enter = &intel_idle },
{
.name = "C6-ATM",
.desc = "MWAIT 0x52",
.flags = MWAIT2flg(0x52) | CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
.exit_latency = 140,
.target_residency = 560,
.enter = &intel_idle },
{
.enter = NULL }
};
/**
* intel_idle - enter an idle state using MWAIT
* @dev: cpuidle_device
* @drv: cpuidle driver
* @index: index of cpuidle state
*
* Must be called under local_irq_disable().
*/
static int intel_idle(struct cpuidle_device *dev,
struct cpuidle_driver *drv, int index)
{
unsigned long ecx = 1; /* break on interrupt flag */
struct cpuidle_state *state = &drv->states[index];
unsigned long eax = flg2MWAIT(state->flags);
unsigned int cstate;
int cpu = smp_processor_id();
cstate = (((eax) >> MWAIT_SUBSTATE_SIZE) & MWAIT_CSTATE_MASK) + 1;
/*
* leave_mm() to avoid costly and often unnecessary wakeups
* for flushing the user TLB's associated with the active mm.
*/
if (state->flags & CPUIDLE_FLAG_TLB_FLUSHED)
leave_mm(cpu);
if (!(lapic_timer_reliable_states & (1 << (cstate))))
clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &cpu);
if (!current_set_polling_and_test()) {
if (this_cpu_has(X86_FEATURE_CLFLUSH_MONITOR))
clflush((void *)¤t_thread_info()->flags);
__monitor((void *)¤t_thread_info()->flags, 0, 0);
smp_mb();
if (!need_resched())
__mwait(eax, ecx);
}
if (!(lapic_timer_reliable_states & (1 << (cstate))))
clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT, &cpu);
return index;
}
static void __setup_broadcast_timer(void *arg)
{
unsigned long reason = (unsigned long)arg;
int cpu = smp_processor_id();
reason = reason ?
CLOCK_EVT_NOTIFY_BROADCAST_ON : CLOCK_EVT_NOTIFY_BROADCAST_OFF;
clockevents_notify(reason, &cpu);
}
static int cpu_hotplug_notify(struct notifier_block *n,
unsigned long action, void *hcpu)
{
int hotcpu = (unsigned long)hcpu;
struct cpuidle_device *dev;
switch (action & 0xf) {
case CPU_ONLINE:
if (lapic_timer_reliable_states != LAPIC_TIMER_ALWAYS_RELIABLE)
smp_call_function_single(hotcpu, __setup_broadcast_timer,
(void *)true, 1);
/*
* Some systems can hotplug a cpu at runtime after
* the kernel has booted; in that case we have to
* initialize the driver for that cpu here
*/
dev = per_cpu_ptr(intel_idle_cpuidle_devices, hotcpu);
if (!dev->registered)
intel_idle_cpu_init(hotcpu);
break;
}
return NOTIFY_OK;
}
static struct notifier_block cpu_hotplug_notifier = {
.notifier_call = cpu_hotplug_notify,
};
static void auto_demotion_disable(void *dummy)
{
unsigned long long msr_bits;
rdmsrl(MSR_NHM_SNB_PKG_CST_CFG_CTL, msr_bits);
msr_bits &= ~(icpu->auto_demotion_disable_flags);
wrmsrl(MSR_NHM_SNB_PKG_CST_CFG_CTL, msr_bits);
}
static void c1e_promotion_disable(void *dummy)
{
unsigned long long msr_bits;
rdmsrl(MSR_IA32_POWER_CTL, msr_bits);
msr_bits &= ~0x2;
wrmsrl(MSR_IA32_POWER_CTL, msr_bits);
}
static const struct idle_cpu idle_cpu_nehalem = {
.state_table = nehalem_cstates,
.auto_demotion_disable_flags = NHM_C1_AUTO_DEMOTE | NHM_C3_AUTO_DEMOTE,
.disable_promotion_to_c1e = true,
};
static const struct idle_cpu idle_cpu_atom = {
.state_table = atom_cstates,
};
static const struct idle_cpu idle_cpu_lincroft = {
.state_table = atom_cstates,
.auto_demotion_disable_flags = ATM_LNC_C6_AUTO_DEMOTE,
};
static const struct idle_cpu idle_cpu_snb = {
.state_table = snb_cstates,
.disable_promotion_to_c1e = true,
};
static const struct idle_cpu idle_cpu_ivb = {
.state_table = ivb_cstates,
.disable_promotion_to_c1e = true,
};
static const struct idle_cpu idle_cpu_hsw = {
.state_table = hsw_cstates,
.disable_promotion_to_c1e = true,
};
#define ICPU(model, cpu) \
{ X86_VENDOR_INTEL, 6, model, X86_FEATURE_MWAIT, (unsigned long)&cpu }
static const struct x86_cpu_id intel_idle_ids[] = {
ICPU(0x1a, idle_cpu_nehalem),
ICPU(0x1e, idle_cpu_nehalem),
ICPU(0x1f, idle_cpu_nehalem),
ICPU(0x25, idle_cpu_nehalem),
ICPU(0x2c, idle_cpu_nehalem),
ICPU(0x2e, idle_cpu_nehalem),
ICPU(0x1c, idle_cpu_atom),
ICPU(0x26, idle_cpu_lincroft),
ICPU(0x2f, idle_cpu_nehalem),
ICPU(0x2a, idle_cpu_snb),
ICPU(0x2d, idle_cpu_snb),
ICPU(0x3a, idle_cpu_ivb),
ICPU(0x3e, idle_cpu_ivb),
ICPU(0x3c, idle_cpu_hsw),
ICPU(0x3f, idle_cpu_hsw),
ICPU(0x45, idle_cpu_hsw),
ICPU(0x46, idle_cpu_hsw),
{}
};
MODULE_DEVICE_TABLE(x86cpu, intel_idle_ids);
/*
* intel_idle_probe()
*/
static int intel_idle_probe(void)
{
unsigned int eax, ebx, ecx;
const struct x86_cpu_id *id;
if (max_cstate == 0) {
pr_debug(PREFIX "disabled\n");
return -EPERM;
}
id = x86_match_cpu(intel_idle_ids);
if (!id) {
if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
boot_cpu_data.x86 == 6)
pr_debug(PREFIX "does not run on family %d model %d\n",
boot_cpu_data.x86, boot_cpu_data.x86_model);
return -ENODEV;
}
if (boot_cpu_data.cpuid_level < CPUID_MWAIT_LEAF)
return -ENODEV;
cpuid(CPUID_MWAIT_LEAF, &eax, &ebx, &ecx, &mwait_substates);
if (!(ecx & CPUID5_ECX_EXTENSIONS_SUPPORTED) ||
!(ecx & CPUID5_ECX_INTERRUPT_BREAK) ||
!mwait_substates)
return -ENODEV;
pr_debug(PREFIX "MWAIT substates: 0x%x\n", mwait_substates);
icpu = (const struct idle_cpu *)id->driver_data;
cpuidle_state_table = icpu->state_table;
if (boot_cpu_has(X86_FEATURE_ARAT)) /* Always Reliable APIC Timer */
lapic_timer_reliable_states = LAPIC_TIMER_ALWAYS_RELIABLE;
else
on_each_cpu(__setup_broadcast_timer, (void *)true, 1);
pr_debug(PREFIX "v" INTEL_IDLE_VERSION
" model 0x%X\n", boot_cpu_data.x86_model);
pr_debug(PREFIX "lapic_timer_reliable_states 0x%x\n",
lapic_timer_reliable_states);
return 0;
}
/*
* intel_idle_cpuidle_devices_uninit()
* unregister, free cpuidle_devices
*/
static void intel_idle_cpuidle_devices_uninit(void)
{
int i;
struct cpuidle_device *dev;
for_each_online_cpu(i) {
dev = per_cpu_ptr(intel_idle_cpuidle_devices, i);
cpuidle_unregister_device(dev);
}
free_percpu(intel_idle_cpuidle_devices);
return;
}
/*
* intel_idle_cpuidle_driver_init()
* allocate, initialize cpuidle_states
*/
static int intel_idle_cpuidle_driver_init(void)
{
int cstate;
struct cpuidle_driver *drv = &intel_idle_driver;
drv->state_count = 1;
for (cstate = 0; cstate < CPUIDLE_STATE_MAX; ++cstate) {
int num_substates, mwait_hint, mwait_cstate, mwait_substate;
if (cpuidle_state_table[cstate].enter == NULL)
break;
if (cstate + 1 > max_cstate) {
printk(PREFIX "max_cstate %d reached\n",
max_cstate);
break;
}
mwait_hint = flg2MWAIT(cpuidle_state_table[cstate].flags);
mwait_cstate = MWAIT_HINT2CSTATE(mwait_hint);
mwait_substate = MWAIT_HINT2SUBSTATE(mwait_hint);
/* does the state exist in CPUID.MWAIT? */
num_substates = (mwait_substates >> ((mwait_cstate + 1) * 4))
& MWAIT_SUBSTATE_MASK;
/* if sub-state in table is not enumerated by CPUID */
if ((mwait_substate + 1) > num_substates)
continue;
if (((mwait_cstate + 1) > 2) &&
!boot_cpu_has(X86_FEATURE_NONSTOP_TSC))
mark_tsc_unstable("TSC halts in idle"
" states deeper than C2");
drv->states[drv->state_count] = /* structure copy */
cpuidle_state_table[cstate];
drv->state_count += 1;
}
if (icpu->auto_demotion_disable_flags)
on_each_cpu(auto_demotion_disable, NULL, 1);
if (icpu->disable_promotion_to_c1e) /* each-cpu is redundant */
on_each_cpu(c1e_promotion_disable, NULL, 1);
return 0;
}
/*
* intel_idle_cpu_init()
* allocate, initialize, register cpuidle_devices
* @cpu: cpu/core to initialize
*/
static int intel_idle_cpu_init(int cpu)
{
int cstate;
struct cpuidle_device *dev;
dev = per_cpu_ptr(intel_idle_cpuidle_devices, cpu);
dev->state_count = 1;
for (cstate = 0; cstate < CPUIDLE_STATE_MAX; ++cstate) {
int num_substates, mwait_hint, mwait_cstate, mwait_substate;
if (cpuidle_state_table[cstate].enter == NULL)
continue;
if (cstate + 1 > max_cstate) {
printk(PREFIX "max_cstate %d reached\n", max_cstate);
break;
}
mwait_hint = flg2MWAIT(cpuidle_state_table[cstate].flags);
mwait_cstate = MWAIT_HINT2CSTATE(mwait_hint);
mwait_substate = MWAIT_HINT2SUBSTATE(mwait_hint);
/* does the state exist in CPUID.MWAIT? */
num_substates = (mwait_substates >> ((mwait_cstate + 1) * 4))
& MWAIT_SUBSTATE_MASK;
/* if sub-state in table is not enumerated by CPUID */
if ((mwait_substate + 1) > num_substates)
continue;
dev->state_count += 1;
}
dev->cpu = cpu;
if (cpuidle_register_device(dev)) {
pr_debug(PREFIX "cpuidle_register_device %d failed!\n", cpu);
intel_idle_cpuidle_devices_uninit();
return -EIO;
}
if (icpu->auto_demotion_disable_flags)
smp_call_function_single(cpu, auto_demotion_disable, NULL, 1);
return 0;
}
static int __init intel_idle_init(void)
{
int retval, i;
/* Do not load intel_idle at all for now if idle= is passed */
if (boot_option_idle_override != IDLE_NO_OVERRIDE)
return -ENODEV;
retval = intel_idle_probe();
if (retval)
return retval;
intel_idle_cpuidle_driver_init();
retval = cpuidle_register_driver(&intel_idle_driver);
if (retval) {
struct cpuidle_driver *drv = cpuidle_get_driver();
printk(KERN_DEBUG PREFIX "intel_idle yielding to %s",
drv ? drv->name : "none");
return retval;
}
intel_idle_cpuidle_devices = alloc_percpu(struct cpuidle_device);
if (intel_idle_cpuidle_devices == NULL)
return -ENOMEM;
for_each_online_cpu(i) {
retval = intel_idle_cpu_init(i);
if (retval) {
cpuidle_unregister_driver(&intel_idle_driver);
return retval;
}
}
register_cpu_notifier(&cpu_hotplug_notifier);
return 0;
}
static void __exit intel_idle_exit(void)
{
intel_idle_cpuidle_devices_uninit();
cpuidle_unregister_driver(&intel_idle_driver);
if (lapic_timer_reliable_states != LAPIC_TIMER_ALWAYS_RELIABLE)
on_each_cpu(__setup_broadcast_timer, (void *)false, 1);
unregister_cpu_notifier(&cpu_hotplug_notifier);
return;
}
module_init(intel_idle_init);
module_exit(intel_idle_exit);
module_param(max_cstate, int, 0444);
MODULE_AUTHOR("Len Brown <len.brown@intel.com>");
MODULE_DESCRIPTION("Cpuidle driver for Intel Hardware v" INTEL_IDLE_VERSION);
MODULE_LICENSE("GPL");
| gpl-2.0 |
Chairshot215/starship_kernel_moto_shamu | arch/arm/mach-omap2/omap_hwmod_2xxx_ipblock_data.c | 1713 | 22908 | /*
* omap_hwmod_2xxx_ipblock_data.c - common IP block data for OMAP2xxx
*
* Copyright (C) 2011 Nokia Corporation
* Paul Walmsley
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/platform_data/gpio-omap.h>
#include <linux/omap-dma.h>
#include <plat/dmtimer.h>
#include <linux/platform_data/spi-omap2-mcspi.h>
#include "omap_hwmod.h"
#include "omap_hwmod_common_data.h"
#include "cm-regbits-24xx.h"
#include "prm-regbits-24xx.h"
#include "wd_timer.h"
struct omap_hwmod_irq_info omap2xxx_timer12_mpu_irqs[] = {
{ .irq = 48 + OMAP_INTC_START, },
{ .irq = -1 },
};
struct omap_hwmod_dma_info omap2xxx_dss_sdma_chs[] = {
{ .name = "dispc", .dma_req = 5 },
{ .dma_req = -1 }
};
/*
* 'dispc' class
* display controller
*/
static struct omap_hwmod_class_sysconfig omap2_dispc_sysc = {
.rev_offs = 0x0000,
.sysc_offs = 0x0010,
.syss_offs = 0x0014,
.sysc_flags = (SYSC_HAS_SIDLEMODE | SYSC_HAS_MIDLEMODE |
SYSC_HAS_SOFTRESET | SYSC_HAS_AUTOIDLE),
.idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART |
MSTANDBY_FORCE | MSTANDBY_NO | MSTANDBY_SMART),
.sysc_fields = &omap_hwmod_sysc_type1,
};
struct omap_hwmod_class omap2_dispc_hwmod_class = {
.name = "dispc",
.sysc = &omap2_dispc_sysc,
};
/* OMAP2xxx Timer Common */
static struct omap_hwmod_class_sysconfig omap2xxx_timer_sysc = {
.rev_offs = 0x0000,
.sysc_offs = 0x0010,
.syss_offs = 0x0014,
.sysc_flags = (SYSC_HAS_SIDLEMODE | SYSC_HAS_CLOCKACTIVITY |
SYSC_HAS_ENAWAKEUP | SYSC_HAS_SOFTRESET |
SYSC_HAS_AUTOIDLE | SYSS_HAS_RESET_STATUS),
.idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART),
.clockact = CLOCKACT_TEST_ICLK,
.sysc_fields = &omap_hwmod_sysc_type1,
};
struct omap_hwmod_class omap2xxx_timer_hwmod_class = {
.name = "timer",
.sysc = &omap2xxx_timer_sysc,
};
/*
* 'wd_timer' class
* 32-bit watchdog upward counter that generates a pulse on the reset pin on
* overflow condition
*/
static struct omap_hwmod_class_sysconfig omap2xxx_wd_timer_sysc = {
.rev_offs = 0x0000,
.sysc_offs = 0x0010,
.syss_offs = 0x0014,
.sysc_flags = (SYSC_HAS_EMUFREE | SYSC_HAS_SOFTRESET |
SYSC_HAS_AUTOIDLE | SYSS_HAS_RESET_STATUS),
.sysc_fields = &omap_hwmod_sysc_type1,
};
struct omap_hwmod_class omap2xxx_wd_timer_hwmod_class = {
.name = "wd_timer",
.sysc = &omap2xxx_wd_timer_sysc,
.pre_shutdown = &omap2_wd_timer_disable,
.reset = &omap2_wd_timer_reset,
};
/*
* 'gpio' class
* general purpose io module
*/
static struct omap_hwmod_class_sysconfig omap2xxx_gpio_sysc = {
.rev_offs = 0x0000,
.sysc_offs = 0x0010,
.syss_offs = 0x0014,
.sysc_flags = (SYSC_HAS_ENAWAKEUP | SYSC_HAS_SIDLEMODE |
SYSC_HAS_SOFTRESET | SYSC_HAS_AUTOIDLE |
SYSS_HAS_RESET_STATUS),
.idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART),
.sysc_fields = &omap_hwmod_sysc_type1,
};
struct omap_hwmod_class omap2xxx_gpio_hwmod_class = {
.name = "gpio",
.sysc = &omap2xxx_gpio_sysc,
.rev = 0,
};
/* system dma */
static struct omap_hwmod_class_sysconfig omap2xxx_dma_sysc = {
.rev_offs = 0x0000,
.sysc_offs = 0x002c,
.syss_offs = 0x0028,
.sysc_flags = (SYSC_HAS_SOFTRESET | SYSC_HAS_MIDLEMODE |
SYSC_HAS_CLOCKACTIVITY | SYSC_HAS_EMUFREE |
SYSC_HAS_AUTOIDLE | SYSS_HAS_RESET_STATUS),
.idlemodes = (MSTANDBY_FORCE | MSTANDBY_NO | MSTANDBY_SMART),
.sysc_fields = &omap_hwmod_sysc_type1,
};
struct omap_hwmod_class omap2xxx_dma_hwmod_class = {
.name = "dma",
.sysc = &omap2xxx_dma_sysc,
};
/*
* 'mailbox' class
* mailbox module allowing communication between the on-chip processors
* using a queued mailbox-interrupt mechanism.
*/
static struct omap_hwmod_class_sysconfig omap2xxx_mailbox_sysc = {
.rev_offs = 0x000,
.sysc_offs = 0x010,
.syss_offs = 0x014,
.sysc_flags = (SYSC_HAS_CLOCKACTIVITY | SYSC_HAS_SIDLEMODE |
SYSC_HAS_SOFTRESET | SYSC_HAS_AUTOIDLE),
.idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART),
.sysc_fields = &omap_hwmod_sysc_type1,
};
struct omap_hwmod_class omap2xxx_mailbox_hwmod_class = {
.name = "mailbox",
.sysc = &omap2xxx_mailbox_sysc,
};
/*
* 'mcspi' class
* multichannel serial port interface (mcspi) / master/slave synchronous serial
* bus
*/
static struct omap_hwmod_class_sysconfig omap2xxx_mcspi_sysc = {
.rev_offs = 0x0000,
.sysc_offs = 0x0010,
.syss_offs = 0x0014,
.sysc_flags = (SYSC_HAS_CLOCKACTIVITY | SYSC_HAS_SIDLEMODE |
SYSC_HAS_ENAWAKEUP | SYSC_HAS_SOFTRESET |
SYSC_HAS_AUTOIDLE | SYSS_HAS_RESET_STATUS),
.idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART),
.sysc_fields = &omap_hwmod_sysc_type1,
};
struct omap_hwmod_class omap2xxx_mcspi_class = {
.name = "mcspi",
.sysc = &omap2xxx_mcspi_sysc,
.rev = OMAP2_MCSPI_REV,
};
/*
* 'gpmc' class
* general purpose memory controller
*/
static struct omap_hwmod_class_sysconfig omap2xxx_gpmc_sysc = {
.rev_offs = 0x0000,
.sysc_offs = 0x0010,
.syss_offs = 0x0014,
.sysc_flags = (SYSC_HAS_AUTOIDLE | SYSC_HAS_SIDLEMODE |
SYSC_HAS_SOFTRESET | SYSS_HAS_RESET_STATUS),
.idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART),
.sysc_fields = &omap_hwmod_sysc_type1,
};
static struct omap_hwmod_class omap2xxx_gpmc_hwmod_class = {
.name = "gpmc",
.sysc = &omap2xxx_gpmc_sysc,
};
/*
* IP blocks
*/
/* L3 */
struct omap_hwmod omap2xxx_l3_main_hwmod = {
.name = "l3_main",
.class = &l3_hwmod_class,
.flags = HWMOD_NO_IDLEST,
};
/* L4 CORE */
struct omap_hwmod omap2xxx_l4_core_hwmod = {
.name = "l4_core",
.class = &l4_hwmod_class,
.flags = HWMOD_NO_IDLEST,
};
/* L4 WKUP */
struct omap_hwmod omap2xxx_l4_wkup_hwmod = {
.name = "l4_wkup",
.class = &l4_hwmod_class,
.flags = HWMOD_NO_IDLEST,
};
/* MPU */
static struct omap_hwmod_irq_info omap2xxx_mpu_irqs[] = {
{ .name = "pmu", .irq = 3 + OMAP_INTC_START },
{ .irq = -1 }
};
struct omap_hwmod omap2xxx_mpu_hwmod = {
.name = "mpu",
.mpu_irqs = omap2xxx_mpu_irqs,
.class = &mpu_hwmod_class,
.main_clk = "mpu_ck",
};
/* IVA2 */
struct omap_hwmod omap2xxx_iva_hwmod = {
.name = "iva",
.class = &iva_hwmod_class,
};
/* always-on timers dev attribute */
static struct omap_timer_capability_dev_attr capability_alwon_dev_attr = {
.timer_capability = OMAP_TIMER_ALWON,
};
/* pwm timers dev attribute */
static struct omap_timer_capability_dev_attr capability_pwm_dev_attr = {
.timer_capability = OMAP_TIMER_HAS_PWM,
};
/* timers with DSP interrupt dev attribute */
static struct omap_timer_capability_dev_attr capability_dsp_dev_attr = {
.timer_capability = OMAP_TIMER_HAS_DSP_IRQ,
};
/* timer1 */
struct omap_hwmod omap2xxx_timer1_hwmod = {
.name = "timer1",
.mpu_irqs = omap2_timer1_mpu_irqs,
.main_clk = "gpt1_fck",
.prcm = {
.omap2 = {
.prcm_reg_id = 1,
.module_bit = OMAP24XX_EN_GPT1_SHIFT,
.module_offs = WKUP_MOD,
.idlest_reg_id = 1,
.idlest_idle_bit = OMAP24XX_ST_GPT1_SHIFT,
},
},
.dev_attr = &capability_alwon_dev_attr,
.class = &omap2xxx_timer_hwmod_class,
.flags = HWMOD_SET_DEFAULT_CLOCKACT,
};
/* timer2 */
struct omap_hwmod omap2xxx_timer2_hwmod = {
.name = "timer2",
.mpu_irqs = omap2_timer2_mpu_irqs,
.main_clk = "gpt2_fck",
.prcm = {
.omap2 = {
.prcm_reg_id = 1,
.module_bit = OMAP24XX_EN_GPT2_SHIFT,
.module_offs = CORE_MOD,
.idlest_reg_id = 1,
.idlest_idle_bit = OMAP24XX_ST_GPT2_SHIFT,
},
},
.class = &omap2xxx_timer_hwmod_class,
.flags = HWMOD_SET_DEFAULT_CLOCKACT,
};
/* timer3 */
struct omap_hwmod omap2xxx_timer3_hwmod = {
.name = "timer3",
.mpu_irqs = omap2_timer3_mpu_irqs,
.main_clk = "gpt3_fck",
.prcm = {
.omap2 = {
.prcm_reg_id = 1,
.module_bit = OMAP24XX_EN_GPT3_SHIFT,
.module_offs = CORE_MOD,
.idlest_reg_id = 1,
.idlest_idle_bit = OMAP24XX_ST_GPT3_SHIFT,
},
},
.class = &omap2xxx_timer_hwmod_class,
.flags = HWMOD_SET_DEFAULT_CLOCKACT,
};
/* timer4 */
struct omap_hwmod omap2xxx_timer4_hwmod = {
.name = "timer4",
.mpu_irqs = omap2_timer4_mpu_irqs,
.main_clk = "gpt4_fck",
.prcm = {
.omap2 = {
.prcm_reg_id = 1,
.module_bit = OMAP24XX_EN_GPT4_SHIFT,
.module_offs = CORE_MOD,
.idlest_reg_id = 1,
.idlest_idle_bit = OMAP24XX_ST_GPT4_SHIFT,
},
},
.class = &omap2xxx_timer_hwmod_class,
.flags = HWMOD_SET_DEFAULT_CLOCKACT,
};
/* timer5 */
struct omap_hwmod omap2xxx_timer5_hwmod = {
.name = "timer5",
.mpu_irqs = omap2_timer5_mpu_irqs,
.main_clk = "gpt5_fck",
.prcm = {
.omap2 = {
.prcm_reg_id = 1,
.module_bit = OMAP24XX_EN_GPT5_SHIFT,
.module_offs = CORE_MOD,
.idlest_reg_id = 1,
.idlest_idle_bit = OMAP24XX_ST_GPT5_SHIFT,
},
},
.dev_attr = &capability_dsp_dev_attr,
.class = &omap2xxx_timer_hwmod_class,
.flags = HWMOD_SET_DEFAULT_CLOCKACT,
};
/* timer6 */
struct omap_hwmod omap2xxx_timer6_hwmod = {
.name = "timer6",
.mpu_irqs = omap2_timer6_mpu_irqs,
.main_clk = "gpt6_fck",
.prcm = {
.omap2 = {
.prcm_reg_id = 1,
.module_bit = OMAP24XX_EN_GPT6_SHIFT,
.module_offs = CORE_MOD,
.idlest_reg_id = 1,
.idlest_idle_bit = OMAP24XX_ST_GPT6_SHIFT,
},
},
.dev_attr = &capability_dsp_dev_attr,
.class = &omap2xxx_timer_hwmod_class,
.flags = HWMOD_SET_DEFAULT_CLOCKACT,
};
/* timer7 */
struct omap_hwmod omap2xxx_timer7_hwmod = {
.name = "timer7",
.mpu_irqs = omap2_timer7_mpu_irqs,
.main_clk = "gpt7_fck",
.prcm = {
.omap2 = {
.prcm_reg_id = 1,
.module_bit = OMAP24XX_EN_GPT7_SHIFT,
.module_offs = CORE_MOD,
.idlest_reg_id = 1,
.idlest_idle_bit = OMAP24XX_ST_GPT7_SHIFT,
},
},
.dev_attr = &capability_dsp_dev_attr,
.class = &omap2xxx_timer_hwmod_class,
.flags = HWMOD_SET_DEFAULT_CLOCKACT,
};
/* timer8 */
struct omap_hwmod omap2xxx_timer8_hwmod = {
.name = "timer8",
.mpu_irqs = omap2_timer8_mpu_irqs,
.main_clk = "gpt8_fck",
.prcm = {
.omap2 = {
.prcm_reg_id = 1,
.module_bit = OMAP24XX_EN_GPT8_SHIFT,
.module_offs = CORE_MOD,
.idlest_reg_id = 1,
.idlest_idle_bit = OMAP24XX_ST_GPT8_SHIFT,
},
},
.dev_attr = &capability_dsp_dev_attr,
.class = &omap2xxx_timer_hwmod_class,
.flags = HWMOD_SET_DEFAULT_CLOCKACT,
};
/* timer9 */
struct omap_hwmod omap2xxx_timer9_hwmod = {
.name = "timer9",
.mpu_irqs = omap2_timer9_mpu_irqs,
.main_clk = "gpt9_fck",
.prcm = {
.omap2 = {
.prcm_reg_id = 1,
.module_bit = OMAP24XX_EN_GPT9_SHIFT,
.module_offs = CORE_MOD,
.idlest_reg_id = 1,
.idlest_idle_bit = OMAP24XX_ST_GPT9_SHIFT,
},
},
.dev_attr = &capability_pwm_dev_attr,
.class = &omap2xxx_timer_hwmod_class,
.flags = HWMOD_SET_DEFAULT_CLOCKACT,
};
/* timer10 */
struct omap_hwmod omap2xxx_timer10_hwmod = {
.name = "timer10",
.mpu_irqs = omap2_timer10_mpu_irqs,
.main_clk = "gpt10_fck",
.prcm = {
.omap2 = {
.prcm_reg_id = 1,
.module_bit = OMAP24XX_EN_GPT10_SHIFT,
.module_offs = CORE_MOD,
.idlest_reg_id = 1,
.idlest_idle_bit = OMAP24XX_ST_GPT10_SHIFT,
},
},
.dev_attr = &capability_pwm_dev_attr,
.class = &omap2xxx_timer_hwmod_class,
.flags = HWMOD_SET_DEFAULT_CLOCKACT,
};
/* timer11 */
struct omap_hwmod omap2xxx_timer11_hwmod = {
.name = "timer11",
.mpu_irqs = omap2_timer11_mpu_irqs,
.main_clk = "gpt11_fck",
.prcm = {
.omap2 = {
.prcm_reg_id = 1,
.module_bit = OMAP24XX_EN_GPT11_SHIFT,
.module_offs = CORE_MOD,
.idlest_reg_id = 1,
.idlest_idle_bit = OMAP24XX_ST_GPT11_SHIFT,
},
},
.dev_attr = &capability_pwm_dev_attr,
.class = &omap2xxx_timer_hwmod_class,
.flags = HWMOD_SET_DEFAULT_CLOCKACT,
};
/* timer12 */
struct omap_hwmod omap2xxx_timer12_hwmod = {
.name = "timer12",
.mpu_irqs = omap2xxx_timer12_mpu_irqs,
.main_clk = "gpt12_fck",
.prcm = {
.omap2 = {
.prcm_reg_id = 1,
.module_bit = OMAP24XX_EN_GPT12_SHIFT,
.module_offs = CORE_MOD,
.idlest_reg_id = 1,
.idlest_idle_bit = OMAP24XX_ST_GPT12_SHIFT,
},
},
.dev_attr = &capability_pwm_dev_attr,
.class = &omap2xxx_timer_hwmod_class,
.flags = HWMOD_SET_DEFAULT_CLOCKACT,
};
/* wd_timer2 */
struct omap_hwmod omap2xxx_wd_timer2_hwmod = {
.name = "wd_timer2",
.class = &omap2xxx_wd_timer_hwmod_class,
.main_clk = "mpu_wdt_fck",
.prcm = {
.omap2 = {
.prcm_reg_id = 1,
.module_bit = OMAP24XX_EN_MPU_WDT_SHIFT,
.module_offs = WKUP_MOD,
.idlest_reg_id = 1,
.idlest_idle_bit = OMAP24XX_ST_MPU_WDT_SHIFT,
},
},
};
/* UART1 */
struct omap_hwmod omap2xxx_uart1_hwmod = {
.name = "uart1",
.mpu_irqs = omap2_uart1_mpu_irqs,
.sdma_reqs = omap2_uart1_sdma_reqs,
.main_clk = "uart1_fck",
.flags = HWMOD_SWSUP_SIDLE_ACT,
.prcm = {
.omap2 = {
.module_offs = CORE_MOD,
.prcm_reg_id = 1,
.module_bit = OMAP24XX_EN_UART1_SHIFT,
.idlest_reg_id = 1,
.idlest_idle_bit = OMAP24XX_EN_UART1_SHIFT,
},
},
.class = &omap2_uart_class,
};
/* UART2 */
struct omap_hwmod omap2xxx_uart2_hwmod = {
.name = "uart2",
.mpu_irqs = omap2_uart2_mpu_irqs,
.sdma_reqs = omap2_uart2_sdma_reqs,
.main_clk = "uart2_fck",
.flags = HWMOD_SWSUP_SIDLE_ACT,
.prcm = {
.omap2 = {
.module_offs = CORE_MOD,
.prcm_reg_id = 1,
.module_bit = OMAP24XX_EN_UART2_SHIFT,
.idlest_reg_id = 1,
.idlest_idle_bit = OMAP24XX_EN_UART2_SHIFT,
},
},
.class = &omap2_uart_class,
};
/* UART3 */
struct omap_hwmod omap2xxx_uart3_hwmod = {
.name = "uart3",
.mpu_irqs = omap2_uart3_mpu_irqs,
.sdma_reqs = omap2_uart3_sdma_reqs,
.main_clk = "uart3_fck",
.flags = HWMOD_SWSUP_SIDLE_ACT,
.prcm = {
.omap2 = {
.module_offs = CORE_MOD,
.prcm_reg_id = 2,
.module_bit = OMAP24XX_EN_UART3_SHIFT,
.idlest_reg_id = 2,
.idlest_idle_bit = OMAP24XX_EN_UART3_SHIFT,
},
},
.class = &omap2_uart_class,
};
/* dss */
static struct omap_hwmod_opt_clk dss_opt_clks[] = {
/*
* The DSS HW needs all DSS clocks enabled during reset. The dss_core
* driver does not use these clocks.
*/
{ .role = "tv_clk", .clk = "dss_54m_fck" },
{ .role = "sys_clk", .clk = "dss2_fck" },
};
struct omap_hwmod omap2xxx_dss_core_hwmod = {
.name = "dss_core",
.class = &omap2_dss_hwmod_class,
.main_clk = "dss1_fck", /* instead of dss_fck */
.sdma_reqs = omap2xxx_dss_sdma_chs,
.prcm = {
.omap2 = {
.prcm_reg_id = 1,
.module_bit = OMAP24XX_EN_DSS1_SHIFT,
.module_offs = CORE_MOD,
.idlest_reg_id = 1,
.idlest_stdby_bit = OMAP24XX_ST_DSS_SHIFT,
},
},
.opt_clks = dss_opt_clks,
.opt_clks_cnt = ARRAY_SIZE(dss_opt_clks),
.flags = HWMOD_NO_IDLEST | HWMOD_CONTROL_OPT_CLKS_IN_RESET,
};
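/*
 * Rough, hypothetical illustration (not the real omap_hwmod core code, and
 * it assumes <linux/clk.h>) of what HWMOD_CONTROL_OPT_CLKS_IN_RESET above
 * asks for: every optional clock in dss_opt_clks is enabled around the IP
 * block soft-reset so the DSS can actually complete it.
 */
static int example_enable_opt_clks_for_reset(void)
{
	struct clk *clks[ARRAY_SIZE(dss_opt_clks)];
	int i, ret = 0;

	for (i = 0; i < ARRAY_SIZE(dss_opt_clks); i++) {
		clks[i] = clk_get(NULL, dss_opt_clks[i].clk);
		if (IS_ERR(clks[i])) {
			ret = PTR_ERR(clks[i]);
			goto undo;
		}
		clk_enable(clks[i]);
	}

	/* ... the DSS soft-reset would be issued and polled here ... */

undo:
	while (--i >= 0) {
		clk_disable(clks[i]);
		clk_put(clks[i]);
	}
	return ret;
}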
struct omap_hwmod omap2xxx_dss_dispc_hwmod = {
.name = "dss_dispc",
.class = &omap2_dispc_hwmod_class,
.mpu_irqs = omap2_dispc_irqs,
.main_clk = "dss1_fck",
.prcm = {
.omap2 = {
.prcm_reg_id = 1,
.module_bit = OMAP24XX_EN_DSS1_SHIFT,
.module_offs = CORE_MOD,
.idlest_reg_id = 1,
.idlest_stdby_bit = OMAP24XX_ST_DSS_SHIFT,
},
},
.flags = HWMOD_NO_IDLEST,
.dev_attr = &omap2_3_dss_dispc_dev_attr
};
static struct omap_hwmod_opt_clk dss_rfbi_opt_clks[] = {
{ .role = "ick", .clk = "dss_ick" },
};
struct omap_hwmod omap2xxx_dss_rfbi_hwmod = {
.name = "dss_rfbi",
.class = &omap2_rfbi_hwmod_class,
.main_clk = "dss1_fck",
.prcm = {
.omap2 = {
.prcm_reg_id = 1,
.module_bit = OMAP24XX_EN_DSS1_SHIFT,
.module_offs = CORE_MOD,
},
},
.opt_clks = dss_rfbi_opt_clks,
.opt_clks_cnt = ARRAY_SIZE(dss_rfbi_opt_clks),
.flags = HWMOD_NO_IDLEST,
};
struct omap_hwmod omap2xxx_dss_venc_hwmod = {
.name = "dss_venc",
.class = &omap2_venc_hwmod_class,
.main_clk = "dss_54m_fck",
.prcm = {
.omap2 = {
.prcm_reg_id = 1,
.module_bit = OMAP24XX_EN_DSS1_SHIFT,
.module_offs = CORE_MOD,
},
},
.flags = HWMOD_NO_IDLEST,
};
/* gpio dev_attr */
struct omap_gpio_dev_attr omap2xxx_gpio_dev_attr = {
.bank_width = 32,
.dbck_flag = false,
};
/* gpio1 */
struct omap_hwmod omap2xxx_gpio1_hwmod = {
.name = "gpio1",
.flags = HWMOD_CONTROL_OPT_CLKS_IN_RESET,
.mpu_irqs = omap2_gpio1_irqs,
.main_clk = "gpios_fck",
.prcm = {
.omap2 = {
.prcm_reg_id = 1,
.module_bit = OMAP24XX_EN_GPIOS_SHIFT,
.module_offs = WKUP_MOD,
.idlest_reg_id = 1,
.idlest_idle_bit = OMAP24XX_ST_GPIOS_SHIFT,
},
},
.class = &omap2xxx_gpio_hwmod_class,
.dev_attr = &omap2xxx_gpio_dev_attr,
};
/* gpio2 */
struct omap_hwmod omap2xxx_gpio2_hwmod = {
.name = "gpio2",
.flags = HWMOD_CONTROL_OPT_CLKS_IN_RESET,
.mpu_irqs = omap2_gpio2_irqs,
.main_clk = "gpios_fck",
.prcm = {
.omap2 = {
.prcm_reg_id = 1,
.module_bit = OMAP24XX_EN_GPIOS_SHIFT,
.module_offs = WKUP_MOD,
.idlest_reg_id = 1,
.idlest_idle_bit = OMAP24XX_ST_GPIOS_SHIFT,
},
},
.class = &omap2xxx_gpio_hwmod_class,
.dev_attr = &omap2xxx_gpio_dev_attr,
};
/* gpio3 */
struct omap_hwmod omap2xxx_gpio3_hwmod = {
.name = "gpio3",
.flags = HWMOD_CONTROL_OPT_CLKS_IN_RESET,
.mpu_irqs = omap2_gpio3_irqs,
.main_clk = "gpios_fck",
.prcm = {
.omap2 = {
.prcm_reg_id = 1,
.module_bit = OMAP24XX_EN_GPIOS_SHIFT,
.module_offs = WKUP_MOD,
.idlest_reg_id = 1,
.idlest_idle_bit = OMAP24XX_ST_GPIOS_SHIFT,
},
},
.class = &omap2xxx_gpio_hwmod_class,
.dev_attr = &omap2xxx_gpio_dev_attr,
};
/* gpio4 */
struct omap_hwmod omap2xxx_gpio4_hwmod = {
.name = "gpio4",
.flags = HWMOD_CONTROL_OPT_CLKS_IN_RESET,
.mpu_irqs = omap2_gpio4_irqs,
.main_clk = "gpios_fck",
.prcm = {
.omap2 = {
.prcm_reg_id = 1,
.module_bit = OMAP24XX_EN_GPIOS_SHIFT,
.module_offs = WKUP_MOD,
.idlest_reg_id = 1,
.idlest_idle_bit = OMAP24XX_ST_GPIOS_SHIFT,
},
},
.class = &omap2xxx_gpio_hwmod_class,
.dev_attr = &omap2xxx_gpio_dev_attr,
};
/* mcspi1 */
static struct omap2_mcspi_dev_attr omap_mcspi1_dev_attr = {
.num_chipselect = 4,
};
struct omap_hwmod omap2xxx_mcspi1_hwmod = {
.name = "mcspi1",
.mpu_irqs = omap2_mcspi1_mpu_irqs,
.sdma_reqs = omap2_mcspi1_sdma_reqs,
.main_clk = "mcspi1_fck",
.prcm = {
.omap2 = {
.module_offs = CORE_MOD,
.prcm_reg_id = 1,
.module_bit = OMAP24XX_EN_MCSPI1_SHIFT,
.idlest_reg_id = 1,
.idlest_idle_bit = OMAP24XX_ST_MCSPI1_SHIFT,
},
},
.class = &omap2xxx_mcspi_class,
.dev_attr = &omap_mcspi1_dev_attr,
};
/* mcspi2 */
static struct omap2_mcspi_dev_attr omap_mcspi2_dev_attr = {
.num_chipselect = 2,
};
struct omap_hwmod omap2xxx_mcspi2_hwmod = {
.name = "mcspi2",
.mpu_irqs = omap2_mcspi2_mpu_irqs,
.sdma_reqs = omap2_mcspi2_sdma_reqs,
.main_clk = "mcspi2_fck",
.prcm = {
.omap2 = {
.module_offs = CORE_MOD,
.prcm_reg_id = 1,
.module_bit = OMAP24XX_EN_MCSPI2_SHIFT,
.idlest_reg_id = 1,
.idlest_idle_bit = OMAP24XX_ST_MCSPI2_SHIFT,
},
},
.class = &omap2xxx_mcspi_class,
.dev_attr = &omap_mcspi2_dev_attr,
};
static struct omap_hwmod_class omap2xxx_counter_hwmod_class = {
.name = "counter",
};
struct omap_hwmod omap2xxx_counter_32k_hwmod = {
.name = "counter_32k",
.main_clk = "func_32k_ck",
.prcm = {
.omap2 = {
.module_offs = WKUP_MOD,
.prcm_reg_id = 1,
.module_bit = OMAP24XX_ST_32KSYNC_SHIFT,
.idlest_reg_id = 1,
.idlest_idle_bit = OMAP24XX_ST_32KSYNC_SHIFT,
},
},
.class = &omap2xxx_counter_hwmod_class,
};
/* gpmc */
static struct omap_hwmod_irq_info omap2xxx_gpmc_irqs[] = {
{ .irq = 20 + OMAP_INTC_START, },
{ .irq = -1 }
};
struct omap_hwmod omap2xxx_gpmc_hwmod = {
.name = "gpmc",
.class = &omap2xxx_gpmc_hwmod_class,
.mpu_irqs = omap2xxx_gpmc_irqs,
.main_clk = "gpmc_fck",
/*
* XXX HWMOD_INIT_NO_RESET should not be needed for this IP
* block. It is not being added due to any known bugs with
* resetting the GPMC IP block, but rather because any timings
* set by the bootloader are not being correctly programmed by
* the kernel from the board file or DT data.
* HWMOD_INIT_NO_RESET should be removed ASAP.
*/
.flags = (HWMOD_INIT_NO_IDLE | HWMOD_INIT_NO_RESET |
HWMOD_NO_IDLEST),
.prcm = {
.omap2 = {
.prcm_reg_id = 3,
.module_bit = OMAP24XX_EN_GPMC_MASK,
.module_offs = CORE_MOD,
},
},
};
/* RNG */
static struct omap_hwmod_class_sysconfig omap2_rng_sysc = {
.rev_offs = 0x3c,
.sysc_offs = 0x40,
.syss_offs = 0x44,
.sysc_flags = (SYSC_HAS_SOFTRESET | SYSC_HAS_AUTOIDLE |
SYSS_HAS_RESET_STATUS),
.sysc_fields = &omap_hwmod_sysc_type1,
};
static struct omap_hwmod_class omap2_rng_hwmod_class = {
.name = "rng",
.sysc = &omap2_rng_sysc,
};
static struct omap_hwmod_irq_info omap2_rng_mpu_irqs[] = {
{ .irq = 52 + OMAP_INTC_START, },
{ .irq = -1 }
};
struct omap_hwmod omap2xxx_rng_hwmod = {
.name = "rng",
.mpu_irqs = omap2_rng_mpu_irqs,
.main_clk = "l4_ck",
.prcm = {
.omap2 = {
.module_offs = CORE_MOD,
.prcm_reg_id = 4,
.module_bit = OMAP24XX_EN_RNG_SHIFT,
.idlest_reg_id = 4,
.idlest_idle_bit = OMAP24XX_ST_RNG_SHIFT,
},
},
/*
* XXX The first read from the SYSSTATUS register of the RNG
* after the SYSCONFIG SOFTRESET bit is set triggers an
* imprecise external abort. It's unclear why this happens.
* Until this is analyzed, skip the IP block reset.
*/
.flags = HWMOD_INIT_NO_RESET,
.class = &omap2_rng_hwmod_class,
};
/* SHAM */
static struct omap_hwmod_class_sysconfig omap2_sham_sysc = {
.rev_offs = 0x5c,
.sysc_offs = 0x60,
.syss_offs = 0x64,
.sysc_flags = (SYSC_HAS_SOFTRESET | SYSC_HAS_AUTOIDLE |
SYSS_HAS_RESET_STATUS),
.sysc_fields = &omap_hwmod_sysc_type1,
};
static struct omap_hwmod_class omap2xxx_sham_class = {
.name = "sham",
.sysc = &omap2_sham_sysc,
};
static struct omap_hwmod_irq_info omap2_sham_mpu_irqs[] = {
{ .irq = 51 + OMAP_INTC_START, },
{ .irq = -1 }
};
static struct omap_hwmod_dma_info omap2_sham_sdma_chs[] = {
{ .name = "rx", .dma_req = 13 },
{ .dma_req = -1 }
};
struct omap_hwmod omap2xxx_sham_hwmod = {
.name = "sham",
.mpu_irqs = omap2_sham_mpu_irqs,
.sdma_reqs = omap2_sham_sdma_chs,
.main_clk = "l4_ck",
.prcm = {
.omap2 = {
.module_offs = CORE_MOD,
.prcm_reg_id = 4,
.module_bit = OMAP24XX_EN_SHA_SHIFT,
.idlest_reg_id = 4,
.idlest_idle_bit = OMAP24XX_ST_SHA_SHIFT,
},
},
.class = &omap2xxx_sham_class,
};
/* AES */
static struct omap_hwmod_class_sysconfig omap2_aes_sysc = {
.rev_offs = 0x44,
.sysc_offs = 0x48,
.syss_offs = 0x4c,
.sysc_flags = (SYSC_HAS_SOFTRESET | SYSC_HAS_AUTOIDLE |
SYSS_HAS_RESET_STATUS),
.sysc_fields = &omap_hwmod_sysc_type1,
};
static struct omap_hwmod_class omap2xxx_aes_class = {
.name = "aes",
.sysc = &omap2_aes_sysc,
};
static struct omap_hwmod_dma_info omap2_aes_sdma_chs[] = {
{ .name = "tx", .dma_req = 9 },
{ .name = "rx", .dma_req = 10 },
{ .dma_req = -1 }
};
struct omap_hwmod omap2xxx_aes_hwmod = {
.name = "aes",
.sdma_reqs = omap2_aes_sdma_chs,
.main_clk = "l4_ck",
.prcm = {
.omap2 = {
.module_offs = CORE_MOD,
.prcm_reg_id = 4,
.module_bit = OMAP24XX_EN_AES_SHIFT,
.idlest_reg_id = 4,
.idlest_idle_bit = OMAP24XX_ST_AES_SHIFT,
},
},
.class = &omap2xxx_aes_class,
};
| gpl-2.0 |
holyangel/HTC_M8_GPE-4.4.4 | arch/arm/mach-msm/rpc_server_time_remote.c | 3505 | 4346 | /* arch/arm/mach-msm/rpc_server_time_remote.c
*
* Copyright (C) 2007 Google, Inc.
* Copyright (c) 2009-2011 The Linux Foundation. All rights reserved.
* Author: Iliyan Malchev <ibm@android.com>
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
* may be copied, distributed, and modified under those terms.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/err.h>
#include <mach/msm_rpcrouter.h>
#include "rpc_server_time_remote.h"
#include <linux/rtc.h>
#include <linux/android_alarm.h>
#include <linux/rtc-msm.h>
/* time_remote_mtoa server definitions. */
#define TIME_REMOTE_MTOA_PROG 0x3000005d
#define TIME_REMOTE_MTOA_VERS_OLD 0
#define TIME_REMOTE_MTOA_VERS 0x9202a8e4
#define TIME_REMOTE_MTOA_VERS_COMP 0x00010002
#define RPC_TIME_REMOTE_MTOA_NULL 0
#define RPC_TIME_TOD_SET_APPS_BASES 2
#define RPC_TIME_GET_APPS_USER_TIME 3
struct rpc_time_tod_set_apps_bases_args {
uint32_t tick;
uint64_t stamp;
};
static int read_rtc0_time(struct msm_rpc_server *server,
struct rpc_request_hdr *req,
unsigned len)
{
int err;
unsigned long tm_sec;
uint32_t size = 0;
void *reply;
uint32_t output_valid;
uint32_t rpc_status = RPC_ACCEPTSTAT_SYSTEM_ERR;
struct rtc_time tm;
struct rtc_device *rtc = rtc_class_open(CONFIG_RTC_HCTOSYS_DEVICE);
if (rtc == NULL) {
pr_err("%s: unable to open rtc device (%s)\n",
__FILE__, CONFIG_RTC_HCTOSYS_DEVICE);
goto send_reply;
}
err = rtc_read_time(rtc, &tm);
if (err) {
pr_err("%s: Error reading rtc device (%s) : %d\n",
__FILE__, CONFIG_RTC_HCTOSYS_DEVICE, err);
goto close_dev;
}
err = rtc_valid_tm(&tm);
if (err) {
pr_err("%s: Invalid RTC time (%s)\n",
__FILE__, CONFIG_RTC_HCTOSYS_DEVICE);
goto close_dev;
}
rtc_tm_to_time(&tm, &tm_sec);
rpc_status = RPC_ACCEPTSTAT_SUCCESS;
close_dev:
rtc_class_close(rtc);
send_reply:
reply = msm_rpc_server_start_accepted_reply(server, req->xid,
rpc_status);
if (rpc_status == RPC_ACCEPTSTAT_SUCCESS) {
output_valid = *((uint32_t *)(req + 1));
*(uint32_t *)reply = output_valid;
size = sizeof(uint32_t);
if (be32_to_cpu(output_valid)) {
reply += sizeof(uint32_t);
*(uint32_t *)reply = cpu_to_be32(tm_sec);
size += sizeof(uint32_t);
}
}
err = msm_rpc_server_send_accepted_reply(server, size);
if (err)
pr_err("%s: send accepted reply failed: %d\n", __func__, err);
return 1;
}
static int handle_rpc_call(struct msm_rpc_server *server,
struct rpc_request_hdr *req, unsigned len)
{
struct timespec ts, tv;
switch (req->procedure) {
case RPC_TIME_REMOTE_MTOA_NULL:
return 0;
case RPC_TIME_TOD_SET_APPS_BASES: {
struct rpc_time_tod_set_apps_bases_args *args;
args = (struct rpc_time_tod_set_apps_bases_args *)(req + 1);
args->tick = be32_to_cpu(args->tick);
args->stamp = be64_to_cpu(args->stamp);
printk(KERN_INFO "RPC_TIME_TOD_SET_APPS_BASES:\n"
"\ttick = %d\n"
"\tstamp = %lld\n",
args->tick, args->stamp);
getnstimeofday(&ts);
msmrtc_updateatsuspend(&ts);
rtc_hctosys();
getnstimeofday(&tv);
/* Update the alarm information with the new time info. */
alarm_update_timedelta(ts, tv);
return 0;
}
case RPC_TIME_GET_APPS_USER_TIME:
return read_rtc0_time(server, req, len);
default:
return -ENODEV;
}
}
static struct msm_rpc_server rpc_server[] = {
{
.prog = TIME_REMOTE_MTOA_PROG,
.vers = TIME_REMOTE_MTOA_VERS_OLD,
.rpc_call = handle_rpc_call,
},
{
.prog = TIME_REMOTE_MTOA_PROG,
.vers = TIME_REMOTE_MTOA_VERS,
.rpc_call = handle_rpc_call,
},
{
.prog = TIME_REMOTE_MTOA_PROG,
.vers = TIME_REMOTE_MTOA_VERS_COMP,
.rpc_call = handle_rpc_call,
},
};
static int __init rpc_server_init(void)
{
/* Dual server registration to support backwards compatibility vers */
int ret;
ret = msm_rpc_create_server(&rpc_server[2]);
if (ret < 0)
return ret;
ret = msm_rpc_create_server(&rpc_server[1]);
if (ret < 0)
return ret;
return msm_rpc_create_server(&rpc_server[0]);
}
module_init(rpc_server_init);
| gpl-2.0 |
kangsterizer/linux-3.1.y-rsbac | sound/ppc/keywest.c | 4017 | 3663 | /*
* common keywest i2c layer
*
* Copyright (c) by Takashi Iwai <tiwai@suse.de>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/init.h>
#include <linux/i2c.h>
#include <linux/delay.h>
#include <sound/core.h>
#include "pmac.h"
/*
* we have to keep a static variable here since i2c attach_adapter
* callback cannot pass a private data.
*/
static struct pmac_keywest *keywest_ctx;
static int keywest_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
i2c_set_clientdata(client, keywest_ctx);
return 0;
}
/*
* This is kind of a hack, best would be to turn powermac to fixed i2c
* bus numbers and declare the sound device as part of platform
* initialization
*/
static int keywest_attach_adapter(struct i2c_adapter *adapter)
{
struct i2c_board_info info;
if (! keywest_ctx)
return -EINVAL;
if (strncmp(adapter->name, "mac-io", 6))
return 0; /* ignored */
memset(&info, 0, sizeof(struct i2c_board_info));
strlcpy(info.type, "keywest", I2C_NAME_SIZE);
info.addr = keywest_ctx->addr;
keywest_ctx->client = i2c_new_device(adapter, &info);
if (!keywest_ctx->client)
return -ENODEV;
/*
* We know the driver is already loaded, so the device should be
* already bound. If not it means binding failed, and then there
* is no point in keeping the device instantiated.
*/
if (!keywest_ctx->client->driver) {
i2c_unregister_device(keywest_ctx->client);
keywest_ctx->client = NULL;
return -ENODEV;
}
/*
* Let i2c-core delete that device on driver removal.
* This is safe because i2c-core holds the core_lock mutex for us.
*/
list_add_tail(&keywest_ctx->client->detected,
&keywest_ctx->client->driver->clients);
return 0;
}
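/*
 * Hedged sketch of the alternative the comment above points at: with fixed
 * i2c bus numbers, platform code could declare the codec once at init time
 * via i2c_register_board_info() and drop the attach_adapter scan entirely.
 * The bus number and device address below are made up for illustration.
 */
static struct i2c_board_info example_keywest_board_info __initdata = {
	I2C_BOARD_INFO("keywest", 0x35),	/* hypothetical codec address */
};

static int __init example_declare_keywest(void)
{
	/* bus 0 is assumed; real code would use the known mac-io bus number */
	return i2c_register_board_info(0, &example_keywest_board_info, 1);
}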
static int keywest_remove(struct i2c_client *client)
{
i2c_set_clientdata(client, NULL);
if (! keywest_ctx)
return 0;
if (client == keywest_ctx->client)
keywest_ctx->client = NULL;
return 0;
}
static const struct i2c_device_id keywest_i2c_id[] = {
{ "keywest", 0 },
{ }
};
static struct i2c_driver keywest_driver = {
.driver = {
.name = "PMac Keywest Audio",
},
.attach_adapter = keywest_attach_adapter,
.probe = keywest_probe,
.remove = keywest_remove,
.id_table = keywest_i2c_id,
};
/* exported */
void snd_pmac_keywest_cleanup(struct pmac_keywest *i2c)
{
if (keywest_ctx && keywest_ctx == i2c) {
i2c_del_driver(&keywest_driver);
keywest_ctx = NULL;
}
}
int __devinit snd_pmac_tumbler_post_init(void)
{
int err;
if (!keywest_ctx || !keywest_ctx->client)
return -ENXIO;
if ((err = keywest_ctx->init_client(keywest_ctx)) < 0) {
snd_printk(KERN_ERR "tumbler: %i :cannot initialize the MCS\n", err);
return err;
}
return 0;
}
/* exported */
int __devinit snd_pmac_keywest_init(struct pmac_keywest *i2c)
{
int err;
if (keywest_ctx)
return -EBUSY;
keywest_ctx = i2c;
if ((err = i2c_add_driver(&keywest_driver))) {
snd_printk(KERN_ERR "cannot register keywest i2c driver\n");
return err;
}
return 0;
}
| gpl-2.0 |
psomas/lguest64 | drivers/isdn/gigaset/proc.c | 4273 | 2079 | /*
* Stuff used by all variants of the driver
*
* Copyright (c) 2001 by Stefan Eilers,
* Hansjoerg Lipp <hjlipp@web.de>,
* Tilman Schmidt <tilman@imap.cc>.
*
* =====================================================================
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation; either version 2 of
* the License, or (at your option) any later version.
* =====================================================================
*/
#include "gigaset.h"
static ssize_t show_cidmode(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct cardstate *cs = dev_get_drvdata(dev);
return sprintf(buf, "%u\n", cs->cidmode);
}
static ssize_t set_cidmode(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
struct cardstate *cs = dev_get_drvdata(dev);
long int value;
char *end;
value = simple_strtol(buf, &end, 0);
while (*end)
if (!isspace(*end++))
return -EINVAL;
if (value < 0 || value > 1)
return -EINVAL;
if (mutex_lock_interruptible(&cs->mutex))
return -ERESTARTSYS;
cs->waiting = 1;
if (!gigaset_add_event(cs, &cs->at_state, EV_PROC_CIDMODE,
NULL, value, NULL)) {
cs->waiting = 0;
mutex_unlock(&cs->mutex);
return -ENOMEM;
}
gigaset_schedule_event(cs);
wait_event(cs->waitqueue, !cs->waiting);
mutex_unlock(&cs->mutex);
return count;
}
static DEVICE_ATTR(cidmode, S_IRUGO|S_IWUSR, show_cidmode, set_cidmode);
/* free sysfs for device */
void gigaset_free_dev_sysfs(struct cardstate *cs)
{
if (!cs->tty_dev)
return;
gig_dbg(DEBUG_INIT, "removing sysfs entries");
device_remove_file(cs->tty_dev, &dev_attr_cidmode);
}
/* initialize sysfs for device */
void gigaset_init_dev_sysfs(struct cardstate *cs)
{
if (!cs->tty_dev)
return;
gig_dbg(DEBUG_INIT, "setting up sysfs");
if (device_create_file(cs->tty_dev, &dev_attr_cidmode))
pr_err("could not create sysfs attribute\n");
}
| gpl-2.0 |
Trinityhaxxor/platform_kernel_msm8x60_stock | arch/powerpc/kernel/sys_ppc32.c | 4529 | 20487 | /*
* sys_ppc32.c: Conversion between 32bit and 64bit native syscalls.
*
* Copyright (C) 2001 IBM
* Copyright (C) 1997,1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
* Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
*
* These routines maintain argument size conversion between 32bit and 64bit
* environment.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/file.h>
#include <linux/signal.h>
#include <linux/resource.h>
#include <linux/times.h>
#include <linux/smp.h>
#include <linux/sem.h>
#include <linux/msg.h>
#include <linux/shm.h>
#include <linux/poll.h>
#include <linux/personality.h>
#include <linux/stat.h>
#include <linux/mman.h>
#include <linux/in.h>
#include <linux/syscalls.h>
#include <linux/unistd.h>
#include <linux/sysctl.h>
#include <linux/binfmts.h>
#include <linux/security.h>
#include <linux/compat.h>
#include <linux/ptrace.h>
#include <linux/elf.h>
#include <linux/ipc.h>
#include <linux/slab.h>
#include <asm/ptrace.h>
#include <asm/types.h>
#include <asm/uaccess.h>
#include <asm/unistd.h>
#include <asm/time.h>
#include <asm/mmu_context.h>
#include <asm/ppc-pci.h>
#include <asm/syscalls.h>
#include <asm/switch_to.h>
asmlinkage long ppc32_select(u32 n, compat_ulong_t __user *inp,
compat_ulong_t __user *outp, compat_ulong_t __user *exp,
compat_uptr_t tvp_x)
{
/* sign extend n */
return compat_sys_select((int)n, inp, outp, exp, compat_ptr(tvp_x));
}
/* Note: it is necessary to treat option as an unsigned int,
* with the corresponding cast to a signed int to insure that the
* proper conversion (sign extension) between the register representation of a signed int (msr in 32-bit mode)
* and the register representation of a signed int (msr in 64-bit mode) is performed.
*/
asmlinkage long compat_sys_sysfs(u32 option, u32 arg1, u32 arg2)
{
return sys_sysfs((int)option, arg1, arg2);
}
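/*
 * Worked illustration (sketch only) of why the (int) casts in the compat
 * wrappers matter: a negative 32-bit argument arrives zero-extended in a
 * 64-bit register, and only the cast through a signed int restores its
 * value when it is widened again for the 64-bit syscall.
 */
static inline long example_sign_extend(u32 reg)
{
	/* e.g. reg == 0xfffffffe because userspace passed -2 */
	long wrong = (long)reg;		/* 0x00000000fffffffe, i.e. 4294967294 */
	long right = (long)(int)reg;	/* 0xfffffffffffffffe, i.e. -2 */

	return right - wrong;		/* non-zero: the two interpretations differ */
}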
#ifdef CONFIG_SYSVIPC
long compat_sys_ipc(u32 call, u32 first, u32 second, u32 third, compat_uptr_t ptr,
u32 fifth)
{
int version;
version = call >> 16; /* hack for backward compatibility */
call &= 0xffff;
switch (call) {
case SEMTIMEDOP:
if (fifth)
/* sign extend semid */
return compat_sys_semtimedop((int)first,
compat_ptr(ptr), second,
compat_ptr(fifth));
/* else fall through for normal semop() */
case SEMOP:
/* struct sembuf is the same on 32 and 64bit :)) */
/* sign extend semid */
return sys_semtimedop((int)first, compat_ptr(ptr), second,
NULL);
case SEMGET:
/* sign extend key, nsems */
return sys_semget((int)first, (int)second, third);
case SEMCTL:
/* sign extend semid, semnum */
return compat_sys_semctl((int)first, (int)second, third,
compat_ptr(ptr));
case MSGSND:
/* sign extend msqid */
return compat_sys_msgsnd((int)first, (int)second, third,
compat_ptr(ptr));
case MSGRCV:
/* sign extend msqid, msgtyp */
return compat_sys_msgrcv((int)first, second, (int)fifth,
third, version, compat_ptr(ptr));
case MSGGET:
/* sign extend key */
return sys_msgget((int)first, second);
case MSGCTL:
/* sign extend msqid */
return compat_sys_msgctl((int)first, second, compat_ptr(ptr));
case SHMAT:
/* sign extend shmid */
return compat_sys_shmat((int)first, second, third, version,
compat_ptr(ptr));
case SHMDT:
return sys_shmdt(compat_ptr(ptr));
case SHMGET:
/* sign extend key_t */
return sys_shmget((int)first, second, third);
case SHMCTL:
/* sign extend shmid */
return compat_sys_shmctl((int)first, second, compat_ptr(ptr));
default:
return -ENOSYS;
}
return -ENOSYS;
}
#endif
/* Note: it is necessary to treat out_fd and in_fd as unsigned ints,
* with the corresponding cast to a signed int to insure that the
* proper conversion (sign extension) between the register representation of a signed int (msr in 32-bit mode)
* and the register representation of a signed int (msr in 64-bit mode) is performed.
*/
asmlinkage long compat_sys_sendfile(u32 out_fd, u32 in_fd, compat_off_t __user * offset, u32 count)
{
mm_segment_t old_fs = get_fs();
int ret;
off_t of;
off_t __user *up;
if (offset && get_user(of, offset))
return -EFAULT;
/* The __user pointer cast is valid because of the set_fs() */
set_fs(KERNEL_DS);
up = offset ? (off_t __user *) &of : NULL;
ret = sys_sendfile((int)out_fd, (int)in_fd, up, count);
set_fs(old_fs);
if (offset && put_user(of, offset))
return -EFAULT;
return ret;
}
asmlinkage int compat_sys_sendfile64(int out_fd, int in_fd, compat_loff_t __user *offset, s32 count)
{
mm_segment_t old_fs = get_fs();
int ret;
loff_t lof;
loff_t __user *up;
if (offset && get_user(lof, offset))
return -EFAULT;
/* The __user pointer cast is valid because of the set_fs() */
set_fs(KERNEL_DS);
up = offset ? (loff_t __user *) &lof : NULL;
ret = sys_sendfile64(out_fd, in_fd, up, count);
set_fs(old_fs);
if (offset && put_user(lof, offset))
return -EFAULT;
return ret;
}
long compat_sys_execve(unsigned long a0, unsigned long a1, unsigned long a2,
unsigned long a3, unsigned long a4, unsigned long a5,
struct pt_regs *regs)
{
int error;
char * filename;
filename = getname((char __user *) a0);
error = PTR_ERR(filename);
if (IS_ERR(filename))
goto out;
flush_fp_to_thread(current);
flush_altivec_to_thread(current);
error = compat_do_execve(filename, compat_ptr(a1), compat_ptr(a2), regs);
putname(filename);
out:
return error;
}
/* Note: it is necessary to treat option as an unsigned int,
* with the corresponding cast to a signed int to insure that the
* proper conversion (sign extension) between the register representation of a signed int (msr in 32-bit mode)
* and the register representation of a signed int (msr in 64-bit mode) is performed.
*/
asmlinkage long compat_sys_prctl(u32 option, u32 arg2, u32 arg3, u32 arg4, u32 arg5)
{
return sys_prctl((int)option,
(unsigned long) arg2,
(unsigned long) arg3,
(unsigned long) arg4,
(unsigned long) arg5);
}
/* Note: it is necessary to treat pid as an unsigned int,
* with the corresponding cast to a signed int to insure that the
* proper conversion (sign extension) between the register representation of a signed int (msr in 32-bit mode)
* and the register representation of a signed int (msr in 64-bit mode) is performed.
*/
asmlinkage long compat_sys_sched_rr_get_interval(u32 pid, struct compat_timespec __user *interval)
{
struct timespec t;
int ret;
mm_segment_t old_fs = get_fs ();
/* The __user pointer cast is valid because of the set_fs() */
set_fs (KERNEL_DS);
ret = sys_sched_rr_get_interval((int)pid, (struct timespec __user *) &t);
set_fs (old_fs);
if (put_compat_timespec(&t, interval))
return -EFAULT;
return ret;
}
/* Note: it is necessary to treat mode as an unsigned int,
* with the corresponding cast to a signed int to insure that the
* proper conversion (sign extension) between the register representation of a signed int (msr in 32-bit mode)
* and the register representation of a signed int (msr in 64-bit mode) is performed.
*/
asmlinkage long compat_sys_access(const char __user * filename, u32 mode)
{
return sys_access(filename, (int)mode);
}
/* Note: it is necessary to treat mode as an unsigned int,
* with the corresponding cast to a signed int to insure that the
* proper conversion (sign extension) between the register representation of a signed int (msr in 32-bit mode)
* and the register representation of a signed int (msr in 64-bit mode) is performed.
*/
asmlinkage long compat_sys_creat(const char __user * pathname, u32 mode)
{
return sys_creat(pathname, (int)mode);
}
/* Note: it is necessary to treat pid and options as unsigned ints,
* with the corresponding cast to a signed int to insure that the
* proper conversion (sign extension) between the register representation of a signed int (msr in 32-bit mode)
* and the register representation of a signed int (msr in 64-bit mode) is performed.
*/
asmlinkage long compat_sys_waitpid(u32 pid, unsigned int __user * stat_addr, u32 options)
{
return sys_waitpid((int)pid, stat_addr, (int)options);
}
/* Note: it is necessary to treat gidsetsize as an unsigned int,
* with the corresponding cast to a signed int to insure that the
* proper conversion (sign extension) between the register representation of a signed int (msr in 32-bit mode)
* and the register representation of a signed int (msr in 64-bit mode) is performed.
*/
asmlinkage long compat_sys_getgroups(u32 gidsetsize, gid_t __user *grouplist)
{
return sys_getgroups((int)gidsetsize, grouplist);
}
/* Note: it is necessary to treat pid as an unsigned int,
* with the corresponding cast to a signed int to insure that the
* proper conversion (sign extension) between the register representation of a signed int (msr in 32-bit mode)
* and the register representation of a signed int (msr in 64-bit mode) is performed.
*/
asmlinkage long compat_sys_getpgid(u32 pid)
{
return sys_getpgid((int)pid);
}
/* Note: it is necessary to treat pid as an unsigned int,
* with the corresponding cast to a signed int to insure that the
* proper conversion (sign extension) between the register representation of a signed int (msr in 32-bit mode)
* and the register representation of a signed int (msr in 64-bit mode) is performed.
*/
asmlinkage long compat_sys_getsid(u32 pid)
{
return sys_getsid((int)pid);
}
/* Note: it is necessary to treat pid and sig as unsigned ints,
* with the corresponding cast to a signed int to insure that the
* proper conversion (sign extension) between the register representation of a signed int (msr in 32-bit mode)
* and the register representation of a signed int (msr in 64-bit mode) is performed.
*/
asmlinkage long compat_sys_kill(u32 pid, u32 sig)
{
return sys_kill((int)pid, (int)sig);
}
/* Note: it is necessary to treat mode as an unsigned int,
* with the corresponding cast to a signed int to insure that the
* proper conversion (sign extension) between the register representation of a signed int (msr in 32-bit mode)
* and the register representation of a signed int (msr in 64-bit mode) is performed.
*/
asmlinkage long compat_sys_mkdir(const char __user * pathname, u32 mode)
{
return sys_mkdir(pathname, (int)mode);
}
long compat_sys_nice(u32 increment)
{
/* sign extend increment */
return sys_nice((int)increment);
}
off_t ppc32_lseek(unsigned int fd, u32 offset, unsigned int origin)
{
/* sign extend n */
return sys_lseek(fd, (int)offset, origin);
}
long compat_sys_truncate(const char __user * path, u32 length)
{
/* sign extend length */
return sys_truncate(path, (int)length);
}
long compat_sys_ftruncate(int fd, u32 length)
{
/* sign extend length */
return sys_ftruncate(fd, (int)length);
}
/* Note: it is necessary to treat bufsiz as an unsigned int,
* with the corresponding cast to a signed int to insure that the
* proper conversion (sign extension) between the register representation of a signed int (msr in 32-bit mode)
* and the register representation of a signed int (msr in 64-bit mode) is performed.
*/
asmlinkage long compat_sys_readlink(const char __user * path, char __user * buf, u32 bufsiz)
{
return sys_readlink(path, buf, (int)bufsiz);
}
/* Note: it is necessary to treat option as an unsigned int,
* with the corresponding cast to a signed int to insure that the
* proper conversion (sign extension) between the register representation of a signed int (msr in 32-bit mode)
* and the register representation of a signed int (msr in 64-bit mode) is performed.
*/
asmlinkage long compat_sys_sched_get_priority_max(u32 policy)
{
return sys_sched_get_priority_max((int)policy);
}
/* Note: it is necessary to treat policy as an unsigned int,
* with the corresponding cast to a signed int to insure that the
* proper conversion (sign extension) between the register representation of a signed int (msr in 32-bit mode)
* and the register representation of a signed int (msr in 64-bit mode) is performed.
*/
asmlinkage long compat_sys_sched_get_priority_min(u32 policy)
{
return sys_sched_get_priority_min((int)policy);
}
/* Note: it is necessary to treat pid as an unsigned int,
* with the corresponding cast to a signed int to insure that the
* proper conversion (sign extension) between the register representation of a signed int (msr in 32-bit mode)
* and the register representation of a signed int (msr in 64-bit mode) is performed.
*/
asmlinkage long compat_sys_sched_getparam(u32 pid, struct sched_param __user *param)
{
return sys_sched_getparam((int)pid, param);
}
/* Note: it is necessary to treat pid as an unsigned int,
* with the corresponding cast to a signed int to insure that the
* proper conversion (sign extension) between the register representation of a signed int (msr in 32-bit mode)
* and the register representation of a signed int (msr in 64-bit mode) is performed.
*/
asmlinkage long compat_sys_sched_getscheduler(u32 pid)
{
return sys_sched_getscheduler((int)pid);
}
/* Note: it is necessary to treat pid as an unsigned int,
* with the corresponding cast to a signed int to insure that the
* proper conversion (sign extension) between the register representation of a signed int (msr in 32-bit mode)
* and the register representation of a signed int (msr in 64-bit mode) is performed.
*/
asmlinkage long compat_sys_sched_setparam(u32 pid, struct sched_param __user *param)
{
return sys_sched_setparam((int)pid, param);
}
/* Note: it is necessary to treat pid and policy as unsigned ints,
* with the corresponding cast to a signed int to insure that the
* proper conversion (sign extension) between the register representation of a signed int (msr in 32-bit mode)
* and the register representation of a signed int (msr in 64-bit mode) is performed.
*/
asmlinkage long compat_sys_sched_setscheduler(u32 pid, u32 policy, struct sched_param __user *param)
{
return sys_sched_setscheduler((int)pid, (int)policy, param);
}
/* Note: it is necessary to treat len as an unsigned int,
* with the corresponding cast to a signed int to insure that the
* proper conversion (sign extension) between the register representation of a signed int (msr in 32-bit mode)
* and the register representation of a signed int (msr in 64-bit mode) is performed.
*/
asmlinkage long compat_sys_setdomainname(char __user *name, u32 len)
{
return sys_setdomainname(name, (int)len);
}
/* Note: it is necessary to treat gidsetsize as an unsigned int,
* with the corresponding cast to a signed int to insure that the
* proper conversion (sign extension) between the register representation of a signed int (msr in 32-bit mode)
* and the register representation of a signed int (msr in 64-bit mode) is performed.
*/
asmlinkage long compat_sys_setgroups(u32 gidsetsize, gid_t __user *grouplist)
{
return sys_setgroups((int)gidsetsize, grouplist);
}
asmlinkage long compat_sys_sethostname(char __user *name, u32 len)
{
/* sign extend len */
return sys_sethostname(name, (int)len);
}
/* Note: it is necessary to treat pid and pgid as unsigned ints,
* with the corresponding cast to a signed int to insure that the
* proper conversion (sign extension) between the register representation of a signed int (msr in 32-bit mode)
* and the register representation of a signed int (msr in 64-bit mode) is performed.
*/
asmlinkage long compat_sys_setpgid(u32 pid, u32 pgid)
{
return sys_setpgid((int)pid, (int)pgid);
}
long compat_sys_getpriority(u32 which, u32 who)
{
/* sign extend which and who */
return sys_getpriority((int)which, (int)who);
}
long compat_sys_setpriority(u32 which, u32 who, u32 niceval)
{
/* sign extend which, who and niceval */
return sys_setpriority((int)which, (int)who, (int)niceval);
}
long compat_sys_ioprio_get(u32 which, u32 who)
{
/* sign extend which and who */
return sys_ioprio_get((int)which, (int)who);
}
long compat_sys_ioprio_set(u32 which, u32 who, u32 ioprio)
{
/* sign extend which, who and ioprio */
return sys_ioprio_set((int)which, (int)who, (int)ioprio);
}
/* Note: it is necessary to treat newmask as an unsigned int,
* with the corresponding cast to a signed int to insure that the
* proper conversion (sign extension) between the register representation of a signed int (msr in 32-bit mode)
* and the register representation of a signed int (msr in 64-bit mode) is performed.
*/
asmlinkage long compat_sys_ssetmask(u32 newmask)
{
return sys_ssetmask((int) newmask);
}
asmlinkage long compat_sys_syslog(u32 type, char __user * buf, u32 len)
{
/* sign extend len */
return sys_syslog(type, buf, (int)len);
}
/* Note: it is necessary to treat mask as an unsigned int,
* with the corresponding cast to a signed int to insure that the
* proper conversion (sign extension) between the register representation of a signed int (msr in 32-bit mode)
* and the register representation of a signed int (msr in 64-bit mode) is performed.
*/
asmlinkage long compat_sys_umask(u32 mask)
{
return sys_umask((int)mask);
}
unsigned long compat_sys_mmap2(unsigned long addr, size_t len,
unsigned long prot, unsigned long flags,
unsigned long fd, unsigned long pgoff)
{
/* This should remain 12 even if PAGE_SIZE changes */
return sys_mmap(addr, len, prot, flags, fd, pgoff << 12);
}
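/*
 * Small illustration (sketch) of the "remain 12" comment above: mmap2's
 * page offset argument is defined in fixed 4096-byte units, independent of
 * the kernel's PAGE_SIZE, so the byte offset is always pgoff << 12.
 */
static inline unsigned long example_mmap2_byte_offset(unsigned long pgoff)
{
	return pgoff << 12;		/* e.g. pgoff 3 -> byte offset 12288 */
}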
long compat_sys_tgkill(u32 tgid, u32 pid, int sig)
{
/* sign extend tgid, pid */
return sys_tgkill((int)tgid, (int)pid, sig);
}
/*
* long long munging:
* The 32 bit ABI passes long longs in an odd even register pair.
*/
compat_ssize_t compat_sys_pread64(unsigned int fd, char __user *ubuf, compat_size_t count,
u32 reg6, u32 poshi, u32 poslo)
{
return sys_pread64(fd, ubuf, count, ((loff_t)poshi << 32) | poslo);
}
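/*
 * Sketch of the long long munging described above: the 32-bit ABI hands a
 * 64-bit file offset over as two 32-bit halves (high word first), and the
 * wrappers glue them back together before calling the native syscall.
 */
static inline loff_t example_join_loff(u32 poshi, u32 poslo)
{
	/* poshi = 0x00000001, poslo = 0x80000000 -> 0x180000000 (6 GiB) */
	return ((loff_t)poshi << 32) | poslo;
}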
compat_ssize_t compat_sys_pwrite64(unsigned int fd, const char __user *ubuf, compat_size_t count,
u32 reg6, u32 poshi, u32 poslo)
{
return sys_pwrite64(fd, ubuf, count, ((loff_t)poshi << 32) | poslo);
}
compat_ssize_t compat_sys_readahead(int fd, u32 r4, u32 offhi, u32 offlo, u32 count)
{
return sys_readahead(fd, ((loff_t)offhi << 32) | offlo, count);
}
asmlinkage int compat_sys_truncate64(const char __user * path, u32 reg4,
unsigned long high, unsigned long low)
{
return sys_truncate(path, (high << 32) | low);
}
asmlinkage long compat_sys_fallocate(int fd, int mode, u32 offhi, u32 offlo,
u32 lenhi, u32 lenlo)
{
return sys_fallocate(fd, mode, ((loff_t)offhi << 32) | offlo,
((loff_t)lenhi << 32) | lenlo);
}
asmlinkage int compat_sys_ftruncate64(unsigned int fd, u32 reg4, unsigned long high,
unsigned long low)
{
return sys_ftruncate(fd, (high << 32) | low);
}
long ppc32_lookup_dcookie(u32 cookie_high, u32 cookie_low, char __user *buf,
size_t len)
{
return sys_lookup_dcookie((u64)cookie_high << 32 | cookie_low,
buf, len);
}
long ppc32_fadvise64(int fd, u32 unused, u32 offset_high, u32 offset_low,
size_t len, int advice)
{
return sys_fadvise64(fd, (u64)offset_high << 32 | offset_low, len,
advice);
}
asmlinkage long compat_sys_add_key(const char __user *_type,
const char __user *_description,
const void __user *_payload,
u32 plen,
u32 ringid)
{
return sys_add_key(_type, _description, _payload, plen, ringid);
}
asmlinkage long compat_sys_request_key(const char __user *_type,
const char __user *_description,
const char __user *_callout_info,
u32 destringid)
{
return sys_request_key(_type, _description, _callout_info, destringid);
}
asmlinkage long compat_sys_sync_file_range2(int fd, unsigned int flags,
unsigned offset_hi, unsigned offset_lo,
unsigned nbytes_hi, unsigned nbytes_lo)
{
loff_t offset = ((loff_t)offset_hi << 32) | offset_lo;
loff_t nbytes = ((loff_t)nbytes_hi << 32) | nbytes_lo;
return sys_sync_file_range(fd, offset, nbytes, flags);
}
asmlinkage long compat_sys_fanotify_mark(int fanotify_fd, unsigned int flags,
unsigned mask_hi, unsigned mask_lo,
int dfd, const char __user *pathname)
{
u64 mask = ((u64)mask_hi << 32) | mask_lo;
return sys_fanotify_mark(fanotify_fd, flags, mask, dfd, pathname);
}
| gpl-2.0 |
championswimmer/kernel_sony_msm8960t | drivers/net/wireless/ath/ath9k/hif_usb.c | 4785 | 31162 | /*
* Copyright (c) 2010-2011 Atheros Communications Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#include <asm/unaligned.h>
#include "htc.h"
/* identify firmware images */
#define FIRMWARE_AR7010_1_1 "htc_7010.fw"
#define FIRMWARE_AR9271 "htc_9271.fw"
MODULE_FIRMWARE(FIRMWARE_AR7010_1_1);
MODULE_FIRMWARE(FIRMWARE_AR9271);
static struct usb_device_id ath9k_hif_usb_ids[] = {
{ USB_DEVICE(0x0cf3, 0x9271) }, /* Atheros */
{ USB_DEVICE(0x0cf3, 0x1006) }, /* Atheros */
{ USB_DEVICE(0x0846, 0x9030) }, /* Netgear N150 */
{ USB_DEVICE(0x07D1, 0x3A10) }, /* Dlink Wireless 150 */
{ USB_DEVICE(0x13D3, 0x3327) }, /* Azurewave */
{ USB_DEVICE(0x13D3, 0x3328) }, /* Azurewave */
{ USB_DEVICE(0x13D3, 0x3346) }, /* IMC Networks */
{ USB_DEVICE(0x13D3, 0x3348) }, /* Azurewave */
{ USB_DEVICE(0x13D3, 0x3349) }, /* Azurewave */
{ USB_DEVICE(0x13D3, 0x3350) }, /* Azurewave */
{ USB_DEVICE(0x04CA, 0x4605) }, /* Liteon */
{ USB_DEVICE(0x040D, 0x3801) }, /* VIA */
{ USB_DEVICE(0x0cf3, 0xb003) }, /* Ubiquiti WifiStation Ext */
{ USB_DEVICE(0x057c, 0x8403) }, /* AVM FRITZ!WLAN 11N v2 USB */
{ USB_DEVICE(0x0cf3, 0x7015),
.driver_info = AR9287_USB }, /* Atheros */
{ USB_DEVICE(0x1668, 0x1200),
.driver_info = AR9287_USB }, /* Verizon */
{ USB_DEVICE(0x0cf3, 0x7010),
.driver_info = AR9280_USB }, /* Atheros */
{ USB_DEVICE(0x0846, 0x9018),
.driver_info = AR9280_USB }, /* Netgear WNDA3200 */
{ USB_DEVICE(0x083A, 0xA704),
.driver_info = AR9280_USB }, /* SMC Networks */
{ USB_DEVICE(0x0411, 0x017f),
.driver_info = AR9280_USB }, /* Sony UWA-BR100 */
{ USB_DEVICE(0x0cf3, 0x20ff),
.driver_info = STORAGE_DEVICE },
{ },
};
MODULE_DEVICE_TABLE(usb, ath9k_hif_usb_ids);
static int __hif_usb_tx(struct hif_device_usb *hif_dev);
static void hif_usb_regout_cb(struct urb *urb)
{
struct cmd_buf *cmd = (struct cmd_buf *)urb->context;
switch (urb->status) {
case 0:
break;
case -ENOENT:
case -ECONNRESET:
case -ENODEV:
case -ESHUTDOWN:
goto free;
default:
break;
}
if (cmd) {
ath9k_htc_txcompletion_cb(cmd->hif_dev->htc_handle,
cmd->skb, true);
kfree(cmd);
}
return;
free:
kfree_skb(cmd->skb);
kfree(cmd);
}
static int hif_usb_send_regout(struct hif_device_usb *hif_dev,
struct sk_buff *skb)
{
struct urb *urb;
struct cmd_buf *cmd;
int ret = 0;
urb = usb_alloc_urb(0, GFP_KERNEL);
if (urb == NULL)
return -ENOMEM;
cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
if (cmd == NULL) {
usb_free_urb(urb);
return -ENOMEM;
}
cmd->skb = skb;
cmd->hif_dev = hif_dev;
usb_fill_bulk_urb(urb, hif_dev->udev,
usb_sndbulkpipe(hif_dev->udev, USB_REG_OUT_PIPE),
skb->data, skb->len,
hif_usb_regout_cb, cmd);
usb_anchor_urb(urb, &hif_dev->regout_submitted);
ret = usb_submit_urb(urb, GFP_KERNEL);
if (ret) {
usb_unanchor_urb(urb);
kfree(cmd);
}
usb_free_urb(urb);
return ret;
}
static void hif_usb_mgmt_cb(struct urb *urb)
{
struct cmd_buf *cmd = (struct cmd_buf *)urb->context;
struct hif_device_usb *hif_dev;
bool txok = true;
if (!cmd || !cmd->skb || !cmd->hif_dev)
return;
hif_dev = cmd->hif_dev;
switch (urb->status) {
case 0:
break;
case -ENOENT:
case -ECONNRESET:
case -ENODEV:
case -ESHUTDOWN:
txok = false;
/*
* If the URBs are being flushed, no need to complete
* this packet.
*/
spin_lock(&hif_dev->tx.tx_lock);
if (hif_dev->tx.flags & HIF_USB_TX_FLUSH) {
spin_unlock(&hif_dev->tx.tx_lock);
dev_kfree_skb_any(cmd->skb);
kfree(cmd);
return;
}
spin_unlock(&hif_dev->tx.tx_lock);
break;
default:
txok = false;
break;
}
skb_pull(cmd->skb, 4);
ath9k_htc_txcompletion_cb(cmd->hif_dev->htc_handle,
cmd->skb, txok);
kfree(cmd);
}
static int hif_usb_send_mgmt(struct hif_device_usb *hif_dev,
struct sk_buff *skb)
{
struct urb *urb;
struct cmd_buf *cmd;
int ret = 0;
__le16 *hdr;
urb = usb_alloc_urb(0, GFP_ATOMIC);
if (urb == NULL)
return -ENOMEM;
cmd = kzalloc(sizeof(*cmd), GFP_ATOMIC);
if (cmd == NULL) {
usb_free_urb(urb);
return -ENOMEM;
}
cmd->skb = skb;
cmd->hif_dev = hif_dev;
hdr = (__le16 *) skb_push(skb, 4);
*hdr++ = cpu_to_le16(skb->len - 4);
*hdr++ = cpu_to_le16(ATH_USB_TX_STREAM_MODE_TAG);
usb_fill_bulk_urb(urb, hif_dev->udev,
usb_sndbulkpipe(hif_dev->udev, USB_WLAN_TX_PIPE),
skb->data, skb->len,
hif_usb_mgmt_cb, cmd);
usb_anchor_urb(urb, &hif_dev->mgmt_submitted);
ret = usb_submit_urb(urb, GFP_ATOMIC);
if (ret) {
usb_unanchor_urb(urb);
kfree(cmd);
}
usb_free_urb(urb);
return ret;
}
static inline void ath9k_skb_queue_purge(struct hif_device_usb *hif_dev,
struct sk_buff_head *list)
{
struct sk_buff *skb;
while ((skb = __skb_dequeue(list)) != NULL)
dev_kfree_skb_any(skb);
}
static inline void ath9k_skb_queue_complete(struct hif_device_usb *hif_dev,
struct sk_buff_head *queue,
bool txok)
{
struct sk_buff *skb;
while ((skb = __skb_dequeue(queue)) != NULL) {
ath9k_htc_txcompletion_cb(hif_dev->htc_handle,
skb, txok);
if (txok)
TX_STAT_INC(skb_success);
else
TX_STAT_INC(skb_failed);
}
}
static void hif_usb_tx_cb(struct urb *urb)
{
struct tx_buf *tx_buf = (struct tx_buf *) urb->context;
struct hif_device_usb *hif_dev;
bool txok = true;
if (!tx_buf || !tx_buf->hif_dev)
return;
hif_dev = tx_buf->hif_dev;
switch (urb->status) {
case 0:
break;
case -ENOENT:
case -ECONNRESET:
case -ENODEV:
case -ESHUTDOWN:
txok = false;
/*
* If the URBs are being flushed, no need to add this
* URB to the free list.
*/
spin_lock(&hif_dev->tx.tx_lock);
if (hif_dev->tx.flags & HIF_USB_TX_FLUSH) {
spin_unlock(&hif_dev->tx.tx_lock);
ath9k_skb_queue_purge(hif_dev, &tx_buf->skb_queue);
return;
}
spin_unlock(&hif_dev->tx.tx_lock);
break;
default:
txok = false;
break;
}
ath9k_skb_queue_complete(hif_dev, &tx_buf->skb_queue, txok);
/* Re-initialize the SKB queue */
tx_buf->len = tx_buf->offset = 0;
__skb_queue_head_init(&tx_buf->skb_queue);
/* Add this TX buffer to the free list */
spin_lock(&hif_dev->tx.tx_lock);
list_move_tail(&tx_buf->list, &hif_dev->tx.tx_buf);
hif_dev->tx.tx_buf_cnt++;
if (!(hif_dev->tx.flags & HIF_USB_TX_STOP))
__hif_usb_tx(hif_dev); /* Check for pending SKBs */
TX_STAT_INC(buf_completed);
spin_unlock(&hif_dev->tx.tx_lock);
}
/* TX lock has to be taken */
static int __hif_usb_tx(struct hif_device_usb *hif_dev)
{
struct tx_buf *tx_buf = NULL;
struct sk_buff *nskb = NULL;
int ret = 0, i;
u16 tx_skb_cnt = 0;
u8 *buf;
__le16 *hdr;
if (hif_dev->tx.tx_skb_cnt == 0)
return 0;
/* Check if a free TX buffer is available */
if (list_empty(&hif_dev->tx.tx_buf))
return 0;
tx_buf = list_first_entry(&hif_dev->tx.tx_buf, struct tx_buf, list);
list_move_tail(&tx_buf->list, &hif_dev->tx.tx_pending);
hif_dev->tx.tx_buf_cnt--;
tx_skb_cnt = min_t(u16, hif_dev->tx.tx_skb_cnt, MAX_TX_AGGR_NUM);
for (i = 0; i < tx_skb_cnt; i++) {
nskb = __skb_dequeue(&hif_dev->tx.tx_skb_queue);
/* Should never be NULL */
BUG_ON(!nskb);
hif_dev->tx.tx_skb_cnt--;
buf = tx_buf->buf;
buf += tx_buf->offset;
hdr = (__le16 *)buf;
*hdr++ = cpu_to_le16(nskb->len);
*hdr++ = cpu_to_le16(ATH_USB_TX_STREAM_MODE_TAG);
buf += 4;
memcpy(buf, nskb->data, nskb->len);
tx_buf->len = nskb->len + 4;
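/* All but the last frame advance the write offset to the next
* 4-byte boundary; the last frame folds the accumulated offset
* into the total URB length. */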
if (i < (tx_skb_cnt - 1))
tx_buf->offset += (((tx_buf->len - 1) / 4) + 1) * 4;
if (i == (tx_skb_cnt - 1))
tx_buf->len += tx_buf->offset;
__skb_queue_tail(&tx_buf->skb_queue, nskb);
TX_STAT_INC(skb_queued);
}
usb_fill_bulk_urb(tx_buf->urb, hif_dev->udev,
usb_sndbulkpipe(hif_dev->udev, USB_WLAN_TX_PIPE),
tx_buf->buf, tx_buf->len,
hif_usb_tx_cb, tx_buf);
ret = usb_submit_urb(tx_buf->urb, GFP_ATOMIC);
if (ret) {
tx_buf->len = tx_buf->offset = 0;
ath9k_skb_queue_complete(hif_dev, &tx_buf->skb_queue, false);
__skb_queue_head_init(&tx_buf->skb_queue);
list_move_tail(&tx_buf->list, &hif_dev->tx.tx_buf);
hif_dev->tx.tx_buf_cnt++;
}
if (!ret)
TX_STAT_INC(buf_queued);
return ret;
}
static int hif_usb_send_tx(struct hif_device_usb *hif_dev, struct sk_buff *skb)
{
struct ath9k_htc_tx_ctl *tx_ctl;
unsigned long flags;
int ret = 0;
spin_lock_irqsave(&hif_dev->tx.tx_lock, flags);
if (hif_dev->tx.flags & HIF_USB_TX_STOP) {
spin_unlock_irqrestore(&hif_dev->tx.tx_lock, flags);
return -ENODEV;
}
/* Check if the max queue count has been reached */
if (hif_dev->tx.tx_skb_cnt > MAX_TX_BUF_NUM) {
spin_unlock_irqrestore(&hif_dev->tx.tx_lock, flags);
return -ENOMEM;
}
spin_unlock_irqrestore(&hif_dev->tx.tx_lock, flags);
tx_ctl = HTC_SKB_CB(skb);
/* Mgmt/Beacon frames don't use the TX buffer pool */
if ((tx_ctl->type == ATH9K_HTC_MGMT) ||
(tx_ctl->type == ATH9K_HTC_BEACON)) {
ret = hif_usb_send_mgmt(hif_dev, skb);
}
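/* Data and AMPDU frames are queued here and aggregated into a
* TX buffer by __hif_usb_tx(). */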
spin_lock_irqsave(&hif_dev->tx.tx_lock, flags);
if ((tx_ctl->type == ATH9K_HTC_NORMAL) ||
(tx_ctl->type == ATH9K_HTC_AMPDU)) {
__skb_queue_tail(&hif_dev->tx.tx_skb_queue, skb);
hif_dev->tx.tx_skb_cnt++;
}
/* Check if AMPDUs have to be sent immediately */
if ((hif_dev->tx.tx_buf_cnt == MAX_TX_URB_NUM) &&
(hif_dev->tx.tx_skb_cnt < 2)) {
__hif_usb_tx(hif_dev);
}
spin_unlock_irqrestore(&hif_dev->tx.tx_lock, flags);
return ret;
}
static void hif_usb_start(void *hif_handle)
{
struct hif_device_usb *hif_dev = (struct hif_device_usb *)hif_handle;
unsigned long flags;
hif_dev->flags |= HIF_USB_START;
spin_lock_irqsave(&hif_dev->tx.tx_lock, flags);
hif_dev->tx.flags &= ~HIF_USB_TX_STOP;
spin_unlock_irqrestore(&hif_dev->tx.tx_lock, flags);
}
static void hif_usb_stop(void *hif_handle)
{
struct hif_device_usb *hif_dev = (struct hif_device_usb *)hif_handle;
struct tx_buf *tx_buf = NULL, *tx_buf_tmp = NULL;
unsigned long flags;
spin_lock_irqsave(&hif_dev->tx.tx_lock, flags);
ath9k_skb_queue_complete(hif_dev, &hif_dev->tx.tx_skb_queue, false);
hif_dev->tx.tx_skb_cnt = 0;
hif_dev->tx.flags |= HIF_USB_TX_STOP;
spin_unlock_irqrestore(&hif_dev->tx.tx_lock, flags);
/* The pending URBs have to be canceled. */
list_for_each_entry_safe(tx_buf, tx_buf_tmp,
&hif_dev->tx.tx_pending, list) {
usb_kill_urb(tx_buf->urb);
}
usb_kill_anchored_urbs(&hif_dev->mgmt_submitted);
}
static int hif_usb_send(void *hif_handle, u8 pipe_id, struct sk_buff *skb)
{
struct hif_device_usb *hif_dev = (struct hif_device_usb *)hif_handle;
int ret = 0;
switch (pipe_id) {
case USB_WLAN_TX_PIPE:
ret = hif_usb_send_tx(hif_dev, skb);
break;
case USB_REG_OUT_PIPE:
ret = hif_usb_send_regout(hif_dev, skb);
break;
default:
dev_err(&hif_dev->udev->dev,
"ath9k_htc: Invalid TX pipe: %d\n", pipe_id);
ret = -EINVAL;
break;
}
return ret;
}
static inline bool check_index(struct sk_buff *skb, u8 idx)
{
struct ath9k_htc_tx_ctl *tx_ctl;
tx_ctl = HTC_SKB_CB(skb);
if ((tx_ctl->type == ATH9K_HTC_AMPDU) &&
(tx_ctl->sta_idx == idx))
return true;
return false;
}
static void hif_usb_sta_drain(void *hif_handle, u8 idx)
{
struct hif_device_usb *hif_dev = (struct hif_device_usb *)hif_handle;
struct sk_buff *skb, *tmp;
unsigned long flags;
spin_lock_irqsave(&hif_dev->tx.tx_lock, flags);
skb_queue_walk_safe(&hif_dev->tx.tx_skb_queue, skb, tmp) {
if (check_index(skb, idx)) {
__skb_unlink(skb, &hif_dev->tx.tx_skb_queue);
ath9k_htc_txcompletion_cb(hif_dev->htc_handle,
skb, false);
hif_dev->tx.tx_skb_cnt--;
TX_STAT_INC(skb_failed);
}
}
spin_unlock_irqrestore(&hif_dev->tx.tx_lock, flags);
}
static struct ath9k_htc_hif hif_usb = {
.transport = ATH9K_HIF_USB,
.name = "ath9k_hif_usb",
.control_ul_pipe = USB_REG_OUT_PIPE,
.control_dl_pipe = USB_REG_IN_PIPE,
.start = hif_usb_start,
.stop = hif_usb_stop,
.sta_drain = hif_usb_sta_drain,
.send = hif_usb_send,
};
static void ath9k_hif_usb_rx_stream(struct hif_device_usb *hif_dev,
struct sk_buff *skb)
{
struct sk_buff *nskb, *skb_pool[MAX_PKT_NUM_IN_TRANSFER];
int index = 0, i = 0, len = skb->len;
int rx_remain_len, rx_pkt_len;
u16 pool_index = 0;
u8 *ptr;
spin_lock(&hif_dev->rx_lock);
rx_remain_len = hif_dev->rx_remain_len;
rx_pkt_len = hif_dev->rx_transfer_len;
if (rx_remain_len != 0) {
struct sk_buff *remain_skb = hif_dev->remain_skb;
if (remain_skb) {
ptr = (u8 *) remain_skb->data;
index = rx_remain_len;
rx_remain_len -= hif_dev->rx_pad_len;
ptr += rx_pkt_len;
memcpy(ptr, skb->data, rx_remain_len);
rx_pkt_len += rx_remain_len;
hif_dev->rx_remain_len = 0;
skb_put(remain_skb, rx_pkt_len);
skb_pool[pool_index++] = remain_skb;
} else {
index = rx_remain_len;
}
}
spin_unlock(&hif_dev->rx_lock);
while (index < len) {
u16 pkt_len;
u16 pkt_tag;
u16 pad_len;
int chk_idx;
ptr = (u8 *) skb->data;
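/* Each frame in the RX stream carries a 4-byte header: LE16
* payload length followed by the LE16 stream tag. */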
pkt_len = get_unaligned_le16(ptr + index);
pkt_tag = get_unaligned_le16(ptr + index + 2);
if (pkt_tag != ATH_USB_RX_STREAM_MODE_TAG) {
RX_STAT_INC(skb_dropped);
return;
}
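/* Frames are padded to a 4-byte boundary within the stream. */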
pad_len = 4 - (pkt_len & 0x3);
if (pad_len == 4)
pad_len = 0;
chk_idx = index;
index = index + 4 + pkt_len + pad_len;
if (index > MAX_RX_BUF_SIZE) {
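/*
* The frame spills past this transfer: copy what is
* available now and stash the partially filled skb until
* the next RX transfer completes.
*/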
spin_lock(&hif_dev->rx_lock);
hif_dev->rx_remain_len = index - MAX_RX_BUF_SIZE;
hif_dev->rx_transfer_len =
MAX_RX_BUF_SIZE - chk_idx - 4;
hif_dev->rx_pad_len = pad_len;
nskb = __dev_alloc_skb(pkt_len + 32, GFP_ATOMIC);
if (!nskb) {
dev_err(&hif_dev->udev->dev,
"ath9k_htc: RX memory allocation error\n");
spin_unlock(&hif_dev->rx_lock);
goto err;
}
skb_reserve(nskb, 32);
RX_STAT_INC(skb_allocated);
memcpy(nskb->data, &(skb->data[chk_idx+4]),
hif_dev->rx_transfer_len);
/* Record the buffer pointer */
hif_dev->remain_skb = nskb;
spin_unlock(&hif_dev->rx_lock);
} else {
nskb = __dev_alloc_skb(pkt_len + 32, GFP_ATOMIC);
if (!nskb) {
dev_err(&hif_dev->udev->dev,
"ath9k_htc: RX memory allocation error\n");
goto err;
}
skb_reserve(nskb, 32);
RX_STAT_INC(skb_allocated);
memcpy(nskb->data, &(skb->data[chk_idx+4]), pkt_len);
skb_put(nskb, pkt_len);
skb_pool[pool_index++] = nskb;
}
}
err:
for (i = 0; i < pool_index; i++) {
ath9k_htc_rx_msg(hif_dev->htc_handle, skb_pool[i],
skb_pool[i]->len, USB_WLAN_RX_PIPE);
RX_STAT_INC(skb_completed);
}
}
static void ath9k_hif_usb_rx_cb(struct urb *urb)
{
struct sk_buff *skb = (struct sk_buff *) urb->context;
struct hif_device_usb *hif_dev =
usb_get_intfdata(usb_ifnum_to_if(urb->dev, 0));
int ret;
if (!skb)
return;
if (!hif_dev)
goto free;
switch (urb->status) {
case 0:
break;
case -ENOENT:
case -ECONNRESET:
case -ENODEV:
case -ESHUTDOWN:
goto free;
default:
goto resubmit;
}
if (likely(urb->actual_length != 0)) {
skb_put(skb, urb->actual_length);
ath9k_hif_usb_rx_stream(hif_dev, skb);
}
resubmit:
skb_reset_tail_pointer(skb);
skb_trim(skb, 0);
usb_anchor_urb(urb, &hif_dev->rx_submitted);
ret = usb_submit_urb(urb, GFP_ATOMIC);
if (ret) {
usb_unanchor_urb(urb);
goto free;
}
return;
free:
kfree_skb(skb);
}
static void ath9k_hif_usb_reg_in_cb(struct urb *urb)
{
struct sk_buff *skb = (struct sk_buff *) urb->context;
struct sk_buff *nskb;
struct hif_device_usb *hif_dev =
usb_get_intfdata(usb_ifnum_to_if(urb->dev, 0));
int ret;
if (!skb)
return;
if (!hif_dev)
goto free;
switch (urb->status) {
case 0:
break;
case -ENOENT:
case -ECONNRESET:
case -ENODEV:
case -ESHUTDOWN:
goto free;
default:
skb_reset_tail_pointer(skb);
skb_trim(skb, 0);
goto resubmit;
}
if (likely(urb->actual_length != 0)) {
skb_put(skb, urb->actual_length);
/* Process the command first */
ath9k_htc_rx_msg(hif_dev->htc_handle, skb,
skb->len, USB_REG_IN_PIPE);
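/* The skb was handed off to the HTC layer above, so allocate
* a fresh buffer before reusing the URB. */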
nskb = alloc_skb(MAX_REG_IN_BUF_SIZE, GFP_ATOMIC);
if (!nskb) {
dev_err(&hif_dev->udev->dev,
"ath9k_htc: REG_IN memory allocation failure\n");
urb->context = NULL;
return;
}
usb_fill_bulk_urb(urb, hif_dev->udev,
usb_rcvbulkpipe(hif_dev->udev,
USB_REG_IN_PIPE),
nskb->data, MAX_REG_IN_BUF_SIZE,
ath9k_hif_usb_reg_in_cb, nskb);
}
resubmit:
usb_anchor_urb(urb, &hif_dev->reg_in_submitted);
ret = usb_submit_urb(urb, GFP_ATOMIC);
if (ret) {
usb_unanchor_urb(urb);
goto free;
}
return;
free:
kfree_skb(skb);
urb->context = NULL;
}
static void ath9k_hif_usb_dealloc_tx_urbs(struct hif_device_usb *hif_dev)
{
struct tx_buf *tx_buf = NULL, *tx_buf_tmp = NULL;
unsigned long flags;
list_for_each_entry_safe(tx_buf, tx_buf_tmp,
&hif_dev->tx.tx_buf, list) {
usb_kill_urb(tx_buf->urb);
list_del(&tx_buf->list);
usb_free_urb(tx_buf->urb);
kfree(tx_buf->buf);
kfree(tx_buf);
}
spin_lock_irqsave(&hif_dev->tx.tx_lock, flags);
hif_dev->tx.flags |= HIF_USB_TX_FLUSH;
spin_unlock_irqrestore(&hif_dev->tx.tx_lock, flags);
list_for_each_entry_safe(tx_buf, tx_buf_tmp,
&hif_dev->tx.tx_pending, list) {
usb_kill_urb(tx_buf->urb);
list_del(&tx_buf->list);
usb_free_urb(tx_buf->urb);
kfree(tx_buf->buf);
kfree(tx_buf);
}
usb_kill_anchored_urbs(&hif_dev->mgmt_submitted);
}
static int ath9k_hif_usb_alloc_tx_urbs(struct hif_device_usb *hif_dev)
{
struct tx_buf *tx_buf;
int i;
INIT_LIST_HEAD(&hif_dev->tx.tx_buf);
INIT_LIST_HEAD(&hif_dev->tx.tx_pending);
spin_lock_init(&hif_dev->tx.tx_lock);
__skb_queue_head_init(&hif_dev->tx.tx_skb_queue);
init_usb_anchor(&hif_dev->mgmt_submitted);
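/* Build a fixed pool of MAX_TX_URB_NUM TX buffers, each with its
* own URB and aggregation buffer. */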
for (i = 0; i < MAX_TX_URB_NUM; i++) {
tx_buf = kzalloc(sizeof(struct tx_buf), GFP_KERNEL);
if (!tx_buf)
goto err;
tx_buf->buf = kzalloc(MAX_TX_BUF_SIZE, GFP_KERNEL);
if (!tx_buf->buf)
goto err;
tx_buf->urb = usb_alloc_urb(0, GFP_KERNEL);
if (!tx_buf->urb)
goto err;
tx_buf->hif_dev = hif_dev;
__skb_queue_head_init(&tx_buf->skb_queue);
list_add_tail(&tx_buf->list, &hif_dev->tx.tx_buf);
}
hif_dev->tx.tx_buf_cnt = MAX_TX_URB_NUM;
return 0;
err:
if (tx_buf) {
kfree(tx_buf->buf);
kfree(tx_buf);
}
ath9k_hif_usb_dealloc_tx_urbs(hif_dev);
return -ENOMEM;
}
static void ath9k_hif_usb_dealloc_rx_urbs(struct hif_device_usb *hif_dev)
{
usb_kill_anchored_urbs(&hif_dev->rx_submitted);
}
static int ath9k_hif_usb_alloc_rx_urbs(struct hif_device_usb *hif_dev)
{
struct urb *urb = NULL;
struct sk_buff *skb = NULL;
int i, ret;
init_usb_anchor(&hif_dev->rx_submitted);
spin_lock_init(&hif_dev->rx_lock);
for (i = 0; i < MAX_RX_URB_NUM; i++) {
/* Allocate URB */
urb = usb_alloc_urb(0, GFP_KERNEL);
if (urb == NULL) {
ret = -ENOMEM;
goto err_urb;
}
/* Allocate buffer */
skb = alloc_skb(MAX_RX_BUF_SIZE, GFP_KERNEL);
if (!skb) {
ret = -ENOMEM;
goto err_skb;
}
usb_fill_bulk_urb(urb, hif_dev->udev,
usb_rcvbulkpipe(hif_dev->udev,
USB_WLAN_RX_PIPE),
skb->data, MAX_RX_BUF_SIZE,
ath9k_hif_usb_rx_cb, skb);
/* Anchor URB */
usb_anchor_urb(urb, &hif_dev->rx_submitted);
/* Submit URB */
ret = usb_submit_urb(urb, GFP_KERNEL);
if (ret) {
usb_unanchor_urb(urb);
goto err_submit;
}
/*
* Drop our reference count. The anchor now holds the
* last reference, so the URB is freed automatically when
* the anchored URBs are killed.
*/
usb_free_urb(urb);
}
return 0;
err_submit:
kfree_skb(skb);
err_skb:
usb_free_urb(urb);
err_urb:
ath9k_hif_usb_dealloc_rx_urbs(hif_dev);
return ret;
}
static void ath9k_hif_usb_dealloc_reg_in_urbs(struct hif_device_usb *hif_dev)
{
usb_kill_anchored_urbs(&hif_dev->reg_in_submitted);
}
static int ath9k_hif_usb_alloc_reg_in_urbs(struct hif_device_usb *hif_dev)
{
struct urb *urb = NULL;
struct sk_buff *skb = NULL;
int i, ret;
init_usb_anchor(&hif_dev->reg_in_submitted);
for (i = 0; i < MAX_REG_IN_URB_NUM; i++) {
/* Allocate URB */
urb = usb_alloc_urb(0, GFP_KERNEL);
if (urb == NULL) {
ret = -ENOMEM;
goto err_urb;
}
/* Allocate buffer */
skb = alloc_skb(MAX_REG_IN_BUF_SIZE, GFP_KERNEL);
if (!skb) {
ret = -ENOMEM;
goto err_skb;
}
usb_fill_bulk_urb(urb, hif_dev->udev,
usb_rcvbulkpipe(hif_dev->udev,
USB_REG_IN_PIPE),
skb->data, MAX_REG_IN_BUF_SIZE,
ath9k_hif_usb_reg_in_cb, skb);
/* Anchor URB */
usb_anchor_urb(urb, &hif_dev->reg_in_submitted);
/* Submit URB */
ret = usb_submit_urb(urb, GFP_KERNEL);
if (ret) {
usb_unanchor_urb(urb);
goto err_submit;
}
/*
* Drop our reference count. The anchor now holds the
* last reference, so the URB is freed automatically when
* the anchored URBs are killed.
*/
usb_free_urb(urb);
}
return 0;
err_submit:
kfree_skb(skb);
err_skb:
usb_free_urb(urb);
err_urb:
ath9k_hif_usb_dealloc_reg_in_urbs(hif_dev);
return ret;
}
static int ath9k_hif_usb_alloc_urbs(struct hif_device_usb *hif_dev)
{
/* Register Write */
init_usb_anchor(&hif_dev->regout_submitted);
/* TX */
if (ath9k_hif_usb_alloc_tx_urbs(hif_dev) < 0)
goto err;
/* RX */
if (ath9k_hif_usb_alloc_rx_urbs(hif_dev) < 0)
goto err_rx;
/* Register Read */
if (ath9k_hif_usb_alloc_reg_in_urbs(hif_dev) < 0)
goto err_reg;
return 0;
err_reg:
ath9k_hif_usb_dealloc_rx_urbs(hif_dev);
err_rx:
ath9k_hif_usb_dealloc_tx_urbs(hif_dev);
err:
return -ENOMEM;
}
static void ath9k_hif_usb_dealloc_urbs(struct hif_device_usb *hif_dev)
{
usb_kill_anchored_urbs(&hif_dev->regout_submitted);
ath9k_hif_usb_dealloc_reg_in_urbs(hif_dev);
ath9k_hif_usb_dealloc_tx_urbs(hif_dev);
ath9k_hif_usb_dealloc_rx_urbs(hif_dev);
}
static int ath9k_hif_usb_download_fw(struct hif_device_usb *hif_dev)
{
int transfer, err;
const void *data = hif_dev->firmware->data;
size_t len = hif_dev->firmware->size;
u32 addr = AR9271_FIRMWARE;
u8 *buf = kzalloc(4096, GFP_KERNEL);
u32 firm_offset;
if (!buf)
return -ENOMEM;
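/* Push the image in 4 KB chunks; the target load address goes
* in the control request's wValue field, shifted right by 8 bits. */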
while (len) {
transfer = min_t(size_t, len, 4096);
memcpy(buf, data, transfer);
err = usb_control_msg(hif_dev->udev,
usb_sndctrlpipe(hif_dev->udev, 0),
FIRMWARE_DOWNLOAD, 0x40 | USB_DIR_OUT,
addr >> 8, 0, buf, transfer, HZ);
if (err < 0) {
kfree(buf);
return err;
}
len -= transfer;
data += transfer;
addr += transfer;
}
kfree(buf);
if (IS_AR7010_DEVICE(hif_dev->usb_device_id->driver_info))
firm_offset = AR7010_FIRMWARE_TEXT;
else
firm_offset = AR9271_FIRMWARE_TEXT;
/*
* Issue FW download complete command to firmware.
*/
err = usb_control_msg(hif_dev->udev, usb_sndctrlpipe(hif_dev->udev, 0),
FIRMWARE_DOWNLOAD_COMP,
0x40 | USB_DIR_OUT,
firm_offset >> 8, 0, NULL, 0, HZ);
if (err)
return -EIO;
dev_info(&hif_dev->udev->dev, "ath9k_htc: Transferred FW: %s, size: %ld\n",
hif_dev->fw_name, (unsigned long) hif_dev->firmware->size);
return 0;
}
static int ath9k_hif_usb_dev_init(struct hif_device_usb *hif_dev)
{
struct usb_host_interface *alt = &hif_dev->interface->altsetting[0];
struct usb_endpoint_descriptor *endp;
int ret, idx;
ret = ath9k_hif_usb_download_fw(hif_dev);
if (ret) {
dev_err(&hif_dev->udev->dev,
"ath9k_htc: Firmware - %s download failed\n",
hif_dev->fw_name);
return ret;
}
/* On downloading the firmware to the target, the USB descriptor of EP4
* is 'patched' to change the type of the endpoint to Bulk. This will
* bring down CPU usage during the scan period.
*/
for (idx = 0; idx < alt->desc.bNumEndpoints; idx++) {
endp = &alt->endpoint[idx].desc;
if ((endp->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK)
== USB_ENDPOINT_XFER_INT) {
endp->bmAttributes &= ~USB_ENDPOINT_XFERTYPE_MASK;
endp->bmAttributes |= USB_ENDPOINT_XFER_BULK;
endp->bInterval = 0;
}
}
/* Alloc URBs */
ret = ath9k_hif_usb_alloc_urbs(hif_dev);
if (ret) {
dev_err(&hif_dev->udev->dev,
"ath9k_htc: Unable to allocate URBs\n");
return ret;
}
return 0;
}
static void ath9k_hif_usb_dev_deinit(struct hif_device_usb *hif_dev)
{
ath9k_hif_usb_dealloc_urbs(hif_dev);
}
/*
* If initialization fails or the FW cannot be retrieved,
* detach the device.
*/
static void ath9k_hif_usb_firmware_fail(struct hif_device_usb *hif_dev)
{
struct device *parent = hif_dev->udev->dev.parent;
complete(&hif_dev->fw_done);
if (parent)
device_lock(parent);
device_release_driver(&hif_dev->udev->dev);
if (parent)
device_unlock(parent);
}
static void ath9k_hif_usb_firmware_cb(const struct firmware *fw, void *context)
{
struct hif_device_usb *hif_dev = context;
int ret;
if (!fw) {
dev_err(&hif_dev->udev->dev,
"ath9k_htc: Failed to get firmware %s\n",
hif_dev->fw_name);
goto err_fw;
}
hif_dev->htc_handle = ath9k_htc_hw_alloc(hif_dev, &hif_usb,
&hif_dev->udev->dev);
if (hif_dev->htc_handle == NULL) {
goto err_fw;
}
hif_dev->firmware = fw;
/* Proceed with initialization */
ret = ath9k_hif_usb_dev_init(hif_dev);
if (ret)
goto err_dev_init;
ret = ath9k_htc_hw_init(hif_dev->htc_handle,
&hif_dev->interface->dev,
hif_dev->usb_device_id->idProduct,
hif_dev->udev->product,
hif_dev->usb_device_id->driver_info);
if (ret) {
ret = -EINVAL;
goto err_htc_hw_init;
}
complete(&hif_dev->fw_done);
return;
err_htc_hw_init:
ath9k_hif_usb_dev_deinit(hif_dev);
err_dev_init:
ath9k_htc_hw_free(hif_dev->htc_handle);
release_firmware(fw);
hif_dev->firmware = NULL;
err_fw:
ath9k_hif_usb_firmware_fail(hif_dev);
}
/*
* An exact copy of the function from zd1211rw.
*/
static int send_eject_command(struct usb_interface *interface)
{
struct usb_device *udev = interface_to_usbdev(interface);
struct usb_host_interface *iface_desc = &interface->altsetting[0];
struct usb_endpoint_descriptor *endpoint;
unsigned char *cmd;
u8 bulk_out_ep;
int r;
/* Find bulk out endpoint */
for (r = 1; r >= 0; r--) {
endpoint = &iface_desc->endpoint[r].desc;
if (usb_endpoint_dir_out(endpoint) &&
usb_endpoint_xfer_bulk(endpoint)) {
bulk_out_ep = endpoint->bEndpointAddress;
break;
}
}
if (r == -1) {
dev_err(&udev->dev,
"ath9k_htc: Could not find bulk out endpoint\n");
return -ENODEV;
}
cmd = kzalloc(31, GFP_KERNEL);
if (cmd == NULL)
return -ENODEV;
/* USB mass-storage command block wrapper (CBW), 31 bytes */
cmd[0] = 0x55; /* bulk command signature */
cmd[1] = 0x53; /* bulk command signature */
cmd[2] = 0x42; /* bulk command signature */
cmd[3] = 0x43; /* bulk command signature */
cmd[14] = 6; /* command length */
cmd[15] = 0x1b; /* SCSI command: START STOP UNIT */
cmd[19] = 0x2; /* eject disc */
dev_info(&udev->dev, "Ejecting storage device...\n");
r = usb_bulk_msg(udev, usb_sndbulkpipe(udev, bulk_out_ep),
cmd, 31, NULL, 2000);
kfree(cmd);
if (r)
return r;
/* At this point, the device disconnects and reconnects with the real
* ID numbers. */
usb_set_intfdata(interface, NULL);
return 0;
}
static int ath9k_hif_usb_probe(struct usb_interface *interface,
const struct usb_device_id *id)
{
struct usb_device *udev = interface_to_usbdev(interface);
struct hif_device_usb *hif_dev;
int ret = 0;
if (id->driver_info == STORAGE_DEVICE)
return send_eject_command(interface);
hif_dev = kzalloc(sizeof(struct hif_device_usb), GFP_KERNEL);
if (!hif_dev) {
ret = -ENOMEM;
goto err_alloc;
}
usb_get_dev(udev);
hif_dev->udev = udev;
hif_dev->interface = interface;
hif_dev->usb_device_id = id;
#ifdef CONFIG_PM
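/* Request a reset-resume from the USB core; ath9k_hif_usb_resume()
* re-downloads the firmware afterwards. */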
udev->reset_resume = 1;
#endif
usb_set_intfdata(interface, hif_dev);
init_completion(&hif_dev->fw_done);
/* Find out which firmware to load */
if (IS_AR7010_DEVICE(id->driver_info))
hif_dev->fw_name = FIRMWARE_AR7010_1_1;
else
hif_dev->fw_name = FIRMWARE_AR9271;
ret = request_firmware_nowait(THIS_MODULE, true, hif_dev->fw_name,
&hif_dev->udev->dev, GFP_KERNEL,
hif_dev, ath9k_hif_usb_firmware_cb);
if (ret) {
dev_err(&hif_dev->udev->dev,
"ath9k_htc: Async request for firmware %s failed\n",
hif_dev->fw_name);
goto err_fw_req;
}
dev_info(&hif_dev->udev->dev, "ath9k_htc: Firmware %s requested\n",
hif_dev->fw_name);
return 0;
err_fw_req:
usb_set_intfdata(interface, NULL);
kfree(hif_dev);
usb_put_dev(udev);
err_alloc:
return ret;
}
static void ath9k_hif_usb_reboot(struct usb_device *udev)
{
u32 reboot_cmd = 0xffffffff;
void *buf;
int ret;
buf = kmemdup(&reboot_cmd, 4, GFP_KERNEL);
if (!buf)
return;
ret = usb_bulk_msg(udev, usb_sndbulkpipe(udev, USB_REG_OUT_PIPE),
buf, 4, NULL, HZ);
if (ret)
dev_err(&udev->dev, "ath9k_htc: USB reboot failed\n");
kfree(buf);
}
static void ath9k_hif_usb_disconnect(struct usb_interface *interface)
{
struct usb_device *udev = interface_to_usbdev(interface);
struct hif_device_usb *hif_dev = usb_get_intfdata(interface);
bool unplugged = (udev->state == USB_STATE_NOTATTACHED);
if (!hif_dev)
return;
wait_for_completion(&hif_dev->fw_done);
if (hif_dev->firmware) {
ath9k_htc_hw_deinit(hif_dev->htc_handle, unplugged);
ath9k_htc_hw_free(hif_dev->htc_handle);
ath9k_hif_usb_dev_deinit(hif_dev);
release_firmware(hif_dev->firmware);
}
usb_set_intfdata(interface, NULL);
if (!unplugged && (hif_dev->flags & HIF_USB_START))
ath9k_hif_usb_reboot(udev);
kfree(hif_dev);
dev_info(&udev->dev, "ath9k_htc: USB layer deinitialized\n");
usb_put_dev(udev);
}
#ifdef CONFIG_PM
static int ath9k_hif_usb_suspend(struct usb_interface *interface,
pm_message_t message)
{
struct hif_device_usb *hif_dev = usb_get_intfdata(interface);
/*
* The device has to be set to FULLSLEEP mode if no
* interface is up.
*/
if (!(hif_dev->flags & HIF_USB_START))
ath9k_htc_suspend(hif_dev->htc_handle);
ath9k_hif_usb_dealloc_urbs(hif_dev);
return 0;
}
static int ath9k_hif_usb_resume(struct usb_interface *interface)
{
struct hif_device_usb *hif_dev = usb_get_intfdata(interface);
struct htc_target *htc_handle = hif_dev->htc_handle;
int ret;
ret = ath9k_hif_usb_alloc_urbs(hif_dev);
if (ret)
return ret;
if (hif_dev->firmware) {
ret = ath9k_hif_usb_download_fw(hif_dev);
if (ret)
goto fail_resume;
} else {
ath9k_hif_usb_dealloc_urbs(hif_dev);
return -EIO;
}
mdelay(100);
ret = ath9k_htc_resume(htc_handle);
if (ret)
goto fail_resume;
return 0;
fail_resume:
ath9k_hif_usb_dealloc_urbs(hif_dev);
return ret;
}
#endif
static struct usb_driver ath9k_hif_usb_driver = {
.name = KBUILD_MODNAME,
.probe = ath9k_hif_usb_probe,
.disconnect = ath9k_hif_usb_disconnect,
#ifdef CONFIG_PM
.suspend = ath9k_hif_usb_suspend,
.resume = ath9k_hif_usb_resume,
.reset_resume = ath9k_hif_usb_resume,
#endif
.id_table = ath9k_hif_usb_ids,
.soft_unbind = 1,
};
int ath9k_hif_usb_init(void)
{
return usb_register(&ath9k_hif_usb_driver);
}
void ath9k_hif_usb_exit(void)
{
usb_deregister(&ath9k_hif_usb_driver);
}
| gpl-2.0 |
ShinySide/The-Imperius-Curse | kernel/trace/trace_mmiotrace.c | 5809 | 9198 | /*
* Memory mapped I/O tracing
*
* Copyright (C) 2008 Pekka Paalanen <pq@iki.fi>
*/
#define DEBUG 1
#include <linux/kernel.h>
#include <linux/mmiotrace.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/time.h>
#include <linux/atomic.h>
#include "trace.h"
#include "trace_output.h"
struct header_iter {
struct pci_dev *dev;
};
static struct trace_array *mmio_trace_array;
static bool overrun_detected;
static unsigned long prev_overruns;
static atomic_t dropped_count;
static void mmio_reset_data(struct trace_array *tr)
{
overrun_detected = false;
prev_overruns = 0;
tracing_reset_online_cpus(tr);
}
static int mmio_trace_init(struct trace_array *tr)
{
pr_debug("in %s\n", __func__);
mmio_trace_array = tr;
mmio_reset_data(tr);
enable_mmiotrace();
return 0;
}
static void mmio_trace_reset(struct trace_array *tr)
{
pr_debug("in %s\n", __func__);
disable_mmiotrace();
mmio_reset_data(tr);
mmio_trace_array = NULL;
}
static void mmio_trace_start(struct trace_array *tr)
{
pr_debug("in %s\n", __func__);
mmio_reset_data(tr);
}
static int mmio_print_pcidev(struct trace_seq *s, const struct pci_dev *dev)
{
int ret = 0;
int i;
resource_size_t start, end;
const struct pci_driver *drv = pci_dev_driver(dev);
/* XXX: incomplete checks for trace_seq_printf() return value */
ret += trace_seq_printf(s, "PCIDEV %02x%02x %04x%04x %x",
dev->bus->number, dev->devfn,
dev->vendor, dev->device, dev->irq);
/*
* XXX: is pci_resource_to_user() appropriate, since we are
* supposed to interpret the __ioremap() phys_addr argument based on
* these printed values?
*/
for (i = 0; i < 7; i++) {
pci_resource_to_user(dev, i, &dev->resource[i], &start, &end);
ret += trace_seq_printf(s, " %llx",
(unsigned long long)(start |
(dev->resource[i].flags & PCI_REGION_FLAG_MASK)));
}
for (i = 0; i < 7; i++) {
pci_resource_to_user(dev, i, &dev->resource[i], &start, &end);
ret += trace_seq_printf(s, " %llx",
dev->resource[i].start < dev->resource[i].end ?
(unsigned long long)(end - start) + 1 : 0);
}
if (drv)
ret += trace_seq_printf(s, " %s\n", drv->name);
else
ret += trace_seq_printf(s, " \n");
return ret;
}
static void destroy_header_iter(struct header_iter *hiter)
{
if (!hiter)
return;
pci_dev_put(hiter->dev);
kfree(hiter);
}
static void mmio_pipe_open(struct trace_iterator *iter)
{
struct header_iter *hiter;
struct trace_seq *s = &iter->seq;
trace_seq_printf(s, "VERSION 20070824\n");
hiter = kzalloc(sizeof(*hiter), GFP_KERNEL);
if (!hiter)
return;
hiter->dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, NULL);
iter->private = hiter;
}
/* XXX: This is not called when the pipe is closed! */
static void mmio_close(struct trace_iterator *iter)
{
struct header_iter *hiter = iter->private;
destroy_header_iter(hiter);
iter->private = NULL;
}
static unsigned long count_overruns(struct trace_iterator *iter)
{
unsigned long cnt = atomic_xchg(&dropped_count, 0);
unsigned long over = ring_buffer_overruns(iter->tr->buffer);
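/* Report only overruns accumulated since the last read, plus any
* events dropped while reserving ring buffer space. */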
if (over > prev_overruns)
cnt += over - prev_overruns;
prev_overruns = over;
return cnt;
}
static ssize_t mmio_read(struct trace_iterator *iter, struct file *filp,
char __user *ubuf, size_t cnt, loff_t *ppos)
{
ssize_t ret;
struct header_iter *hiter = iter->private;
struct trace_seq *s = &iter->seq;
unsigned long n;
n = count_overruns(iter);
if (n) {
/* XXX: This is later than where events were lost. */
trace_seq_printf(s, "MARK 0.000000 Lost %lu events.\n", n);
if (!overrun_detected)
pr_warning("mmiotrace has lost events.\n");
overrun_detected = true;
goto print_out;
}
if (!hiter)
return 0;
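/* Emit one PCIDEV header line per read and step to the next PCI device. */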
mmio_print_pcidev(s, hiter->dev);
hiter->dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, hiter->dev);
if (!hiter->dev) {
destroy_header_iter(hiter);
iter->private = NULL;
}
print_out:
ret = trace_seq_to_user(s, ubuf, cnt);
return (ret == -EBUSY) ? 0 : ret;
}
static enum print_line_t mmio_print_rw(struct trace_iterator *iter)
{
struct trace_entry *entry = iter->ent;
struct trace_mmiotrace_rw *field;
struct mmiotrace_rw *rw;
struct trace_seq *s = &iter->seq;
unsigned long long t = ns2usecs(iter->ts);
unsigned long usec_rem = do_div(t, USEC_PER_SEC);
unsigned secs = (unsigned long)t;
int ret = 1;
trace_assign_type(field, entry);
rw = &field->rw;
switch (rw->opcode) {
case MMIO_READ:
ret = trace_seq_printf(s,
"R %d %u.%06lu %d 0x%llx 0x%lx 0x%lx %d\n",
rw->width, secs, usec_rem, rw->map_id,
(unsigned long long)rw->phys,
rw->value, rw->pc, 0);
break;
case MMIO_WRITE:
ret = trace_seq_printf(s,
"W %d %u.%06lu %d 0x%llx 0x%lx 0x%lx %d\n",
rw->width, secs, usec_rem, rw->map_id,
(unsigned long long)rw->phys,
rw->value, rw->pc, 0);
break;
case MMIO_UNKNOWN_OP:
ret = trace_seq_printf(s,
"UNKNOWN %u.%06lu %d 0x%llx %02lx,%02lx,"
"%02lx 0x%lx %d\n",
secs, usec_rem, rw->map_id,
(unsigned long long)rw->phys,
(rw->value >> 16) & 0xff, (rw->value >> 8) & 0xff,
(rw->value >> 0) & 0xff, rw->pc, 0);
break;
default:
ret = trace_seq_printf(s, "rw what?\n");
break;
}
if (ret)
return TRACE_TYPE_HANDLED;
return TRACE_TYPE_PARTIAL_LINE;
}
static enum print_line_t mmio_print_map(struct trace_iterator *iter)
{
struct trace_entry *entry = iter->ent;
struct trace_mmiotrace_map *field;
struct mmiotrace_map *m;
struct trace_seq *s = &iter->seq;
unsigned long long t = ns2usecs(iter->ts);
unsigned long usec_rem = do_div(t, USEC_PER_SEC);
unsigned secs = (unsigned long)t;
int ret;
trace_assign_type(field, entry);
m = &field->map;
switch (m->opcode) {
case MMIO_PROBE:
ret = trace_seq_printf(s,
"MAP %u.%06lu %d 0x%llx 0x%lx 0x%lx 0x%lx %d\n",
secs, usec_rem, m->map_id,
(unsigned long long)m->phys, m->virt, m->len,
0UL, 0);
break;
case MMIO_UNPROBE:
ret = trace_seq_printf(s,
"UNMAP %u.%06lu %d 0x%lx %d\n",
secs, usec_rem, m->map_id, 0UL, 0);
break;
default:
ret = trace_seq_printf(s, "map what?\n");
break;
}
if (ret)
return TRACE_TYPE_HANDLED;
return TRACE_TYPE_PARTIAL_LINE;
}
static enum print_line_t mmio_print_mark(struct trace_iterator *iter)
{
struct trace_entry *entry = iter->ent;
struct print_entry *print = (struct print_entry *)entry;
const char *msg = print->buf;
struct trace_seq *s = &iter->seq;
unsigned long long t = ns2usecs(iter->ts);
unsigned long usec_rem = do_div(t, USEC_PER_SEC);
unsigned secs = (unsigned long)t;
int ret;
/* The trailing newline must be in the message. */
ret = trace_seq_printf(s, "MARK %u.%06lu %s", secs, usec_rem, msg);
if (!ret)
return TRACE_TYPE_PARTIAL_LINE;
return TRACE_TYPE_HANDLED;
}
static enum print_line_t mmio_print_line(struct trace_iterator *iter)
{
switch (iter->ent->type) {
case TRACE_MMIO_RW:
return mmio_print_rw(iter);
case TRACE_MMIO_MAP:
return mmio_print_map(iter);
case TRACE_PRINT:
return mmio_print_mark(iter);
default:
return TRACE_TYPE_HANDLED; /* ignore unknown entries */
}
}
static struct tracer mmio_tracer __read_mostly =
{
.name = "mmiotrace",
.init = mmio_trace_init,
.reset = mmio_trace_reset,
.start = mmio_trace_start,
.pipe_open = mmio_pipe_open,
.close = mmio_close,
.read = mmio_read,
.print_line = mmio_print_line,
};
__init static int init_mmio_trace(void)
{
return register_tracer(&mmio_tracer);
}
device_initcall(init_mmio_trace);
static void __trace_mmiotrace_rw(struct trace_array *tr,
struct trace_array_cpu *data,
struct mmiotrace_rw *rw)
{
struct ftrace_event_call *call = &event_mmiotrace_rw;
struct ring_buffer *buffer = tr->buffer;
struct ring_buffer_event *event;
struct trace_mmiotrace_rw *entry;
int pc = preempt_count();
event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_RW,
sizeof(*entry), 0, pc);
if (!event) {
atomic_inc(&dropped_count);
return;
}
entry = ring_buffer_event_data(event);
entry->rw = *rw;
if (!filter_check_discard(call, entry, buffer, event))
trace_buffer_unlock_commit(buffer, event, 0, pc);
}
void mmio_trace_rw(struct mmiotrace_rw *rw)
{
struct trace_array *tr = mmio_trace_array;
struct trace_array_cpu *data = tr->data[smp_processor_id()];
__trace_mmiotrace_rw(tr, data, rw);
}
static void __trace_mmiotrace_map(struct trace_array *tr,
struct trace_array_cpu *data,
struct mmiotrace_map *map)
{
struct ftrace_event_call *call = &event_mmiotrace_map;
struct ring_buffer *buffer = tr->buffer;
struct ring_buffer_event *event;
struct trace_mmiotrace_map *entry;
int pc = preempt_count();
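/* Reserve a TRACE_MMIO_RW entry; count the event as dropped if the
* ring buffer has no room. */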
event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_MAP,
sizeof(*entry), 0, pc);
if (!event) {
atomic_inc(&dropped_count);
return;
}
entry = ring_buffer_event_data(event);
entry->map = *map;
if (!filter_check_discard(call, entry, buffer, event))
trace_buffer_unlock_commit(buffer, event, 0, pc);
}
void mmio_trace_mapping(struct mmiotrace_map *map)
{
struct trace_array *tr = mmio_trace_array;
struct trace_array_cpu *data;
preempt_disable();
data = tr->data[smp_processor_id()];
__trace_mmiotrace_map(tr, data, map);
preempt_enable();
}
int mmio_trace_printk(const char *fmt, va_list args)
{
return trace_vprintk(0, fmt, args);
}
| gpl-2.0 |
DespairFactor/N6 | arch/powerpc/platforms/ps3/interrupt.c | 7601 | 20118 | /*
* PS3 interrupt routines.
*
* Copyright (C) 2006 Sony Computer Entertainment Inc.
* Copyright 2006 Sony Corp.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 of the License.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/irq.h>
#include <asm/machdep.h>
#include <asm/udbg.h>
#include <asm/lv1call.h>
#include <asm/smp.h>
#include "platform.h"
#if defined(DEBUG)
#define DBG udbg_printf
#define FAIL udbg_printf
#else
#define DBG pr_devel
#define FAIL pr_debug
#endif
/**
* struct ps3_bmp - a per cpu irq status and mask bitmap structure
* @status: 256 bit status bitmap indexed by plug
* @unused_1: Alignment
* @mask: 256 bit mask bitmap indexed by plug
* @unused_2: Alignment
*
* The HV maintains per SMT thread mappings of HV outlet to HV plug on
* behalf of the guest. These mappings are implemented as 256 bit guest
* supplied bitmaps indexed by plug number. The addresses of the bitmaps
* are registered with the HV through lv1_configure_irq_state_bitmap().
* The HV requires that the 512 bits of status + mask not cross a page
* boundary. PS3_BMP_MINALIGN is used to define this minimal 64 byte
* alignment.
*
* The HV supports 256 plugs per thread, assigned as {0..255}, for a total
* of 512 plugs supported on a processor. To simplify the logic this
* implementation equates HV plug value to Linux virq value, constrains each
* interrupt to have a system wide unique plug number, and limits the range
* of the plug values to map into the first dword of the bitmaps. This
* gives a usable range of plug values of {NUM_ISA_INTERRUPTS..63}. Note
* that there is no constraint on how many in this set an individual thread
* can acquire.
*
* The mask is declared as unsigned long so we can use set/clear_bit on it.
*/
#define PS3_BMP_MINALIGN 64
struct ps3_bmp {
struct {
u64 status;
u64 unused_1[3];
unsigned long mask;
u64 unused_2[3];
};
};
/**
* struct ps3_private - a per cpu data structure
* @bmp: ps3_bmp structure
* @bmp_lock: Synchronize access to bmp.
* @ipi_debug_brk_mask: Mask for debug break IPIs
* @ppe_id: HV logical_ppe_id
* @thread_id: HV thread_id
* @ipi_mask: Mask of IPI virqs
*/
struct ps3_private {
struct ps3_bmp bmp __attribute__ ((aligned (PS3_BMP_MINALIGN)));
spinlock_t bmp_lock;
u64 ppe_id;
u64 thread_id;
unsigned long ipi_debug_brk_mask;
unsigned long ipi_mask;
};
static DEFINE_PER_CPU(struct ps3_private, ps3_private);
/**
* ps3_chip_mask - Set an interrupt mask bit in ps3_bmp.
* @virq: The assigned Linux virq.
*
* Sets ps3_bmp.mask and calls lv1_did_update_interrupt_mask().
*/
static void ps3_chip_mask(struct irq_data *d)
{
struct ps3_private *pd = irq_data_get_irq_chip_data(d);
unsigned long flags;
DBG("%s:%d: thread_id %llu, virq %d\n", __func__, __LINE__,
pd->thread_id, d->irq);
local_irq_save(flags);
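/* Plug (virq) n lives in bit 63 - n of the first word of the bitmap. */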
clear_bit(63 - d->irq, &pd->bmp.mask);
lv1_did_update_interrupt_mask(pd->ppe_id, pd->thread_id);
local_irq_restore(flags);
}
/**
* ps3_chip_unmask - Clear an interrupt mask bit in ps3_bmp.
* @virq: The assigned Linux virq.
*
* Clears ps3_bmp.mask and calls lv1_did_update_interrupt_mask().
*/
static void ps3_chip_unmask(struct irq_data *d)
{
struct ps3_private *pd = irq_data_get_irq_chip_data(d);
unsigned long flags;
DBG("%s:%d: thread_id %llu, virq %d\n", __func__, __LINE__,
pd->thread_id, d->irq);
local_irq_save(flags);
set_bit(63 - d->irq, &pd->bmp.mask);
lv1_did_update_interrupt_mask(pd->ppe_id, pd->thread_id);
local_irq_restore(flags);
}
/**
* ps3_chip_eoi - HV end-of-interrupt.
* @virq: The assigned Linux virq.
*
* Calls lv1_end_of_interrupt_ext().
*/
static void ps3_chip_eoi(struct irq_data *d)
{
const struct ps3_private *pd = irq_data_get_irq_chip_data(d);
/* non-IPIs are EOIed here. */
if (!test_bit(63 - d->irq, &pd->ipi_mask))
lv1_end_of_interrupt_ext(pd->ppe_id, pd->thread_id, d->irq);
}
/**
* ps3_irq_chip - Represents the ps3_bmp as a Linux struct irq_chip.
*/
static struct irq_chip ps3_irq_chip = {
.name = "ps3",
.irq_mask = ps3_chip_mask,
.irq_unmask = ps3_chip_unmask,
.irq_eoi = ps3_chip_eoi,
};
/**
* ps3_virq_setup - virq related setup.
* @cpu: enum ps3_cpu_binding indicating the cpu the interrupt should be
* serviced on.
* @outlet: The HV outlet from the various create outlet routines.
* @virq: The assigned Linux virq.
*
* Calls irq_create_mapping() to get a virq and sets the chip data to
* ps3_private data.
*/
static int ps3_virq_setup(enum ps3_cpu_binding cpu, unsigned long outlet,
unsigned int *virq)
{
int result;
struct ps3_private *pd;
/* This defines the default interrupt distribution policy. */
if (cpu == PS3_BINDING_CPU_ANY)
cpu = 0;
pd = &per_cpu(ps3_private, cpu);
*virq = irq_create_mapping(NULL, outlet);
if (*virq == NO_IRQ) {
FAIL("%s:%d: irq_create_mapping failed: outlet %lu\n",
__func__, __LINE__, outlet);
result = -ENOMEM;
goto fail_create;
}
DBG("%s:%d: outlet %lu => cpu %u, virq %u\n", __func__, __LINE__,
outlet, cpu, *virq);
result = irq_set_chip_data(*virq, pd);
if (result) {
FAIL("%s:%d: irq_set_chip_data failed\n",
__func__, __LINE__);
goto fail_set;
}
ps3_chip_mask(irq_get_irq_data(*virq));
return result;
fail_set:
irq_dispose_mapping(*virq);
fail_create:
return result;
}
/**
* ps3_virq_destroy - virq related teardown.
* @virq: The assigned Linux virq.
*
* Clears chip data and calls irq_dispose_mapping() for the virq.
*/
static int ps3_virq_destroy(unsigned int virq)
{
const struct ps3_private *pd = irq_get_chip_data(virq);
DBG("%s:%d: ppe_id %llu, thread_id %llu, virq %u\n", __func__,
__LINE__, pd->ppe_id, pd->thread_id, virq);
irq_set_chip_data(virq, NULL);
irq_dispose_mapping(virq);
DBG("%s:%d <-\n", __func__, __LINE__);
return 0;
}
/**
* ps3_irq_plug_setup - Generic outlet and virq related setup.
* @cpu: enum ps3_cpu_binding indicating the cpu the interrupt should be
* serviced on.
* @outlet: The HV outlet from the various create outlet routines.
* @virq: The assigned Linux virq.
*
* Sets up virq and connects the irq plug.
*/
int ps3_irq_plug_setup(enum ps3_cpu_binding cpu, unsigned long outlet,
unsigned int *virq)
{
int result;
struct ps3_private *pd;
result = ps3_virq_setup(cpu, outlet, virq);
if (result) {
FAIL("%s:%d: ps3_virq_setup failed\n", __func__, __LINE__);
goto fail_setup;
}
pd = irq_get_chip_data(*virq);
/* Binds outlet to cpu + virq. */
result = lv1_connect_irq_plug_ext(pd->ppe_id, pd->thread_id, *virq,
outlet, 0);
if (result) {
FAIL("%s:%d: lv1_connect_irq_plug_ext failed: %s\n",
__func__, __LINE__, ps3_result(result));
result = -EPERM;
goto fail_connect;
}
return result;
fail_connect:
ps3_virq_destroy(*virq);
fail_setup:
return result;
}
EXPORT_SYMBOL_GPL(ps3_irq_plug_setup);
/**
* ps3_irq_plug_destroy - Generic outlet and virq related teardown.
* @virq: The assigned Linux virq.
*
* Disconnects the irq plug and tears down virq.
* Do not call for system bus event interrupts setup with
* ps3_sb_event_receive_port_setup().
*/
int ps3_irq_plug_destroy(unsigned int virq)
{
int result;
const struct ps3_private *pd = irq_get_chip_data(virq);
DBG("%s:%d: ppe_id %llu, thread_id %llu, virq %u\n", __func__,
__LINE__, pd->ppe_id, pd->thread_id, virq);
ps3_chip_mask(irq_get_irq_data(virq));
result = lv1_disconnect_irq_plug_ext(pd->ppe_id, pd->thread_id, virq);
if (result)
FAIL("%s:%d: lv1_disconnect_irq_plug_ext failed: %s\n",
__func__, __LINE__, ps3_result(result));
ps3_virq_destroy(virq);
return result;
}
EXPORT_SYMBOL_GPL(ps3_irq_plug_destroy);
/**
* ps3_event_receive_port_setup - Setup an event receive port.
* @cpu: enum ps3_cpu_binding indicating the cpu the interrupt should be
* serviced on.
* @virq: The assigned Linux virq.
*
* The virq can be used with lv1_connect_interrupt_event_receive_port() to
* arrange to receive interrupts from system-bus devices, or with
* ps3_send_event_locally() to signal events.
*/
int ps3_event_receive_port_setup(enum ps3_cpu_binding cpu, unsigned int *virq)
{
int result;
u64 outlet;
result = lv1_construct_event_receive_port(&outlet);
if (result) {
FAIL("%s:%d: lv1_construct_event_receive_port failed: %s\n",
__func__, __LINE__, ps3_result(result));
*virq = NO_IRQ;
return result;
}
result = ps3_irq_plug_setup(cpu, outlet, virq);
BUG_ON(result);
return result;
}
EXPORT_SYMBOL_GPL(ps3_event_receive_port_setup);
/**
* ps3_event_receive_port_destroy - Destroy an event receive port.
* @virq: The assigned Linux virq.
*
* Since ps3_event_receive_port_destroy destroys the receive port outlet,
* SB devices need to call disconnect_interrupt_event_receive_port() before
* this.
*/
int ps3_event_receive_port_destroy(unsigned int virq)
{
int result;
DBG(" -> %s:%d virq %u\n", __func__, __LINE__, virq);
ps3_chip_mask(irq_get_irq_data(virq));
result = lv1_destruct_event_receive_port(virq_to_hw(virq));
if (result)
FAIL("%s:%d: lv1_destruct_event_receive_port failed: %s\n",
__func__, __LINE__, ps3_result(result));
/*
* Don't call ps3_virq_destroy() here since ps3_smp_cleanup_cpu()
* calls this routine from interrupt context (smp_call_function)
* when kexecing.
*/
DBG(" <- %s:%d\n", __func__, __LINE__);
return result;
}
int ps3_send_event_locally(unsigned int virq)
{
return lv1_send_event_locally(virq_to_hw(virq));
}
/**
* ps3_sb_event_receive_port_setup - Setup a system bus event receive port.
* @cpu: enum ps3_cpu_binding indicating the cpu the interrupt should be
* serviced on.
* @dev: The system bus device instance.
* @virq: The assigned Linux virq.
*
* An event irq represents a virtual device interrupt. The interrupt_id
* corresponds to the software interrupt number.
*/
int ps3_sb_event_receive_port_setup(struct ps3_system_bus_device *dev,
enum ps3_cpu_binding cpu, unsigned int *virq)
{
/* this should go in system-bus.c */
int result;
result = ps3_event_receive_port_setup(cpu, virq);
if (result)
return result;
result = lv1_connect_interrupt_event_receive_port(dev->bus_id,
dev->dev_id, virq_to_hw(*virq), dev->interrupt_id);
if (result) {
FAIL("%s:%d: lv1_connect_interrupt_event_receive_port"
" failed: %s\n", __func__, __LINE__,
ps3_result(result));
ps3_event_receive_port_destroy(*virq);
*virq = NO_IRQ;
return result;
}
DBG("%s:%d: interrupt_id %u, virq %u\n", __func__, __LINE__,
dev->interrupt_id, *virq);
return 0;
}
EXPORT_SYMBOL(ps3_sb_event_receive_port_setup);
int ps3_sb_event_receive_port_destroy(struct ps3_system_bus_device *dev,
unsigned int virq)
{
/* this should go in system-bus.c */
int result;
DBG(" -> %s:%d: interrupt_id %u, virq %u\n", __func__, __LINE__,
dev->interrupt_id, virq);
result = lv1_disconnect_interrupt_event_receive_port(dev->bus_id,
dev->dev_id, virq_to_hw(virq), dev->interrupt_id);
if (result)
FAIL("%s:%d: lv1_disconnect_interrupt_event_receive_port"
" failed: %s\n", __func__, __LINE__,
ps3_result(result));
result = ps3_event_receive_port_destroy(virq);
BUG_ON(result);
/*
* ps3_event_receive_port_destroy() destroys the IRQ plug,
* so don't call ps3_irq_plug_destroy() here.
*/
result = ps3_virq_destroy(virq);
BUG_ON(result);
DBG(" <- %s:%d\n", __func__, __LINE__);
return result;
}
EXPORT_SYMBOL(ps3_sb_event_receive_port_destroy);
/**
* ps3_io_irq_setup - Setup a system bus io irq.
* @cpu: enum ps3_cpu_binding indicating the cpu the interrupt should be
* serviced on.
* @interrupt_id: The device interrupt id read from the system repository.
* @virq: The assigned Linux virq.
*
* An io irq represents a non-virtualized device interrupt. interrupt_id
* corresponds to the interrupt number of the interrupt controller.
*/
int ps3_io_irq_setup(enum ps3_cpu_binding cpu, unsigned int interrupt_id,
unsigned int *virq)
{
int result;
u64 outlet;
result = lv1_construct_io_irq_outlet(interrupt_id, &outlet);
if (result) {
FAIL("%s:%d: lv1_construct_io_irq_outlet failed: %s\n",
__func__, __LINE__, ps3_result(result));
return result;
}
result = ps3_irq_plug_setup(cpu, outlet, virq);
BUG_ON(result);
return result;
}
EXPORT_SYMBOL_GPL(ps3_io_irq_setup);
int ps3_io_irq_destroy(unsigned int virq)
{
int result;
unsigned long outlet = virq_to_hw(virq);
ps3_chip_mask(irq_get_irq_data(virq));
/*
* lv1_destruct_io_irq_outlet() will destroy the IRQ plug,
* so call ps3_irq_plug_destroy() first.
*/
result = ps3_irq_plug_destroy(virq);
BUG_ON(result);
result = lv1_destruct_io_irq_outlet(outlet);
if (result)
FAIL("%s:%d: lv1_destruct_io_irq_outlet failed: %s\n",
__func__, __LINE__, ps3_result(result));
return result;
}
EXPORT_SYMBOL_GPL(ps3_io_irq_destroy);
/**
* ps3_vuart_irq_setup - Setup the system virtual uart virq.
* @cpu: enum ps3_cpu_binding indicating the cpu the interrupt should be
* serviced on.
* @virt_addr_bmp: The caller supplied virtual uart interrupt bitmap.
* @virq: The assigned Linux virq.
*
* The system supports only a single virtual uart, so multiple calls without
* freeing the interrupt will return a wrong state error.
*/
int ps3_vuart_irq_setup(enum ps3_cpu_binding cpu, void* virt_addr_bmp,
unsigned int *virq)
{
int result;
u64 outlet;
u64 lpar_addr;
BUG_ON(!is_kernel_addr((u64)virt_addr_bmp));
lpar_addr = ps3_mm_phys_to_lpar(__pa(virt_addr_bmp));
result = lv1_configure_virtual_uart_irq(lpar_addr, &outlet);
if (result) {
FAIL("%s:%d: lv1_configure_virtual_uart_irq failed: %s\n",
__func__, __LINE__, ps3_result(result));
return result;
}
result = ps3_irq_plug_setup(cpu, outlet, virq);
BUG_ON(result);
return result;
}
EXPORT_SYMBOL_GPL(ps3_vuart_irq_setup);
int ps3_vuart_irq_destroy(unsigned int virq)
{
int result;
ps3_chip_mask(irq_get_irq_data(virq));
result = lv1_deconfigure_virtual_uart_irq();
if (result) {
FAIL("%s:%d: lv1_configure_virtual_uart_irq failed: %s\n",
__func__, __LINE__, ps3_result(result));
return result;
}
result = ps3_irq_plug_destroy(virq);
BUG_ON(result);
return result;
}
EXPORT_SYMBOL_GPL(ps3_vuart_irq_destroy);
/**
* ps3_spe_irq_setup - Setup an spe virq.
* @cpu: enum ps3_cpu_binding indicating the cpu the interrupt should be
* serviced on.
* @spe_id: The spe_id returned from lv1_construct_logical_spe().
* @class: The spe interrupt class {0,1,2}.
* @virq: The assigned Linux virq.
*
*/
int ps3_spe_irq_setup(enum ps3_cpu_binding cpu, unsigned long spe_id,
unsigned int class, unsigned int *virq)
{
int result;
u64 outlet;
BUG_ON(class > 2);
result = lv1_get_spe_irq_outlet(spe_id, class, &outlet);
if (result) {
FAIL("%s:%d: lv1_get_spe_irq_outlet failed: %s\n",
__func__, __LINE__, ps3_result(result));
return result;
}
result = ps3_irq_plug_setup(cpu, outlet, virq);
BUG_ON(result);
return result;
}
int ps3_spe_irq_destroy(unsigned int virq)
{
int result;
ps3_chip_mask(irq_get_irq_data(virq));
result = ps3_irq_plug_destroy(virq);
BUG_ON(result);
return result;
}
#define PS3_INVALID_OUTLET ((irq_hw_number_t)-1)
#define PS3_PLUG_MAX 63
#if defined(DEBUG)
static void _dump_64_bmp(const char *header, const u64 *p, unsigned cpu,
const char* func, int line)
{
pr_debug("%s:%d: %s %u {%04llx_%04llx_%04llx_%04llx}\n",
func, line, header, cpu,
*p >> 48, (*p >> 32) & 0xffff, (*p >> 16) & 0xffff,
*p & 0xffff);
}
static void __maybe_unused _dump_256_bmp(const char *header,
const u64 *p, unsigned cpu, const char* func, int line)
{
pr_debug("%s:%d: %s %u {%016llx:%016llx:%016llx:%016llx}\n",
func, line, header, cpu, p[0], p[1], p[2], p[3]);
}
#define dump_bmp(_x) _dump_bmp(_x, __func__, __LINE__)
static void _dump_bmp(struct ps3_private* pd, const char* func, int line)
{
unsigned long flags;
spin_lock_irqsave(&pd->bmp_lock, flags);
_dump_64_bmp("stat", &pd->bmp.status, pd->thread_id, func, line);
_dump_64_bmp("mask", (u64*)&pd->bmp.mask, pd->thread_id, func, line);
spin_unlock_irqrestore(&pd->bmp_lock, flags);
}
#define dump_mask(_x) _dump_mask(_x, __func__, __LINE__)
static void __maybe_unused _dump_mask(struct ps3_private *pd,
const char* func, int line)
{
unsigned long flags;
spin_lock_irqsave(&pd->bmp_lock, flags);
_dump_64_bmp("mask", (u64*)&pd->bmp.mask, pd->thread_id, func, line);
spin_unlock_irqrestore(&pd->bmp_lock, flags);
}
#else
static void dump_bmp(struct ps3_private *pd) {}
#endif /* defined(DEBUG) */
static int ps3_host_map(struct irq_domain *h, unsigned int virq,
irq_hw_number_t hwirq)
{
DBG("%s:%d: hwirq %lu, virq %u\n", __func__, __LINE__, hwirq,
virq);
irq_set_chip_and_handler(virq, &ps3_irq_chip, handle_fasteoi_irq);
return 0;
}
static int ps3_host_match(struct irq_domain *h, struct device_node *np)
{
/* Match all */
return 1;
}
static const struct irq_domain_ops ps3_host_ops = {
.map = ps3_host_map,
.match = ps3_host_match,
};
void __init ps3_register_ipi_debug_brk(unsigned int cpu, unsigned int virq)
{
struct ps3_private *pd = &per_cpu(ps3_private, cpu);
set_bit(63 - virq, &pd->ipi_debug_brk_mask);
DBG("%s:%d: cpu %u, virq %u, mask %lxh\n", __func__, __LINE__,
cpu, virq, pd->ipi_debug_brk_mask);
}
void __init ps3_register_ipi_irq(unsigned int cpu, unsigned int virq)
{
struct ps3_private *pd = &per_cpu(ps3_private, cpu);
set_bit(63 - virq, &pd->ipi_mask);
DBG("%s:%d: cpu %u, virq %u, ipi_mask %lxh\n", __func__, __LINE__,
cpu, virq, pd->ipi_mask);
}
static unsigned int ps3_get_irq(void)
{
struct ps3_private *pd = &__get_cpu_var(ps3_private);
u64 x = (pd->bmp.status & pd->bmp.mask);
unsigned int plug;
/* check for ipi break first to stop this cpu ASAP */
if (x & pd->ipi_debug_brk_mask)
x &= pd->ipi_debug_brk_mask;
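/* Plug n is tracked in bit 63 - n, so counting leading zeros of the
* pending-and-unmasked word yields the lowest pending plug number. */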
asm volatile("cntlzd %0,%1" : "=r" (plug) : "r" (x));
plug &= 0x3f;
if (unlikely(plug == NO_IRQ)) {
DBG("%s:%d: no plug found: thread_id %llu\n", __func__,
__LINE__, pd->thread_id);
dump_bmp(&per_cpu(ps3_private, 0));
dump_bmp(&per_cpu(ps3_private, 1));
return NO_IRQ;
}
#if defined(DEBUG)
if (unlikely(plug < NUM_ISA_INTERRUPTS || plug > PS3_PLUG_MAX)) {
dump_bmp(&per_cpu(ps3_private, 0));
dump_bmp(&per_cpu(ps3_private, 1));
BUG();
}
#endif
/* IPIs are EOIed here. */
if (test_bit(63 - plug, &pd->ipi_mask))
lv1_end_of_interrupt_ext(pd->ppe_id, pd->thread_id, plug);
return plug;
}
void __init ps3_init_IRQ(void)
{
int result;
unsigned cpu;
struct irq_domain *host;
host = irq_domain_add_nomap(NULL, PS3_PLUG_MAX + 1, &ps3_host_ops, NULL);
irq_set_default_host(host);
for_each_possible_cpu(cpu) {
struct ps3_private *pd = &per_cpu(ps3_private, cpu);
lv1_get_logical_ppe_id(&pd->ppe_id);
pd->thread_id = get_hard_smp_processor_id(cpu);
spin_lock_init(&pd->bmp_lock);
DBG("%s:%d: ppe_id %llu, thread_id %llu, bmp %lxh\n",
__func__, __LINE__, pd->ppe_id, pd->thread_id,
ps3_mm_phys_to_lpar(__pa(&pd->bmp)));
result = lv1_configure_irq_state_bitmap(pd->ppe_id,
pd->thread_id, ps3_mm_phys_to_lpar(__pa(&pd->bmp)));
if (result)
FAIL("%s:%d: lv1_configure_irq_state_bitmap failed:"
" %s\n", __func__, __LINE__,
ps3_result(result));
}
ppc_md.get_irq = ps3_get_irq;
}
void ps3_shutdown_IRQ(int cpu)
{
int result;
u64 ppe_id;
u64 thread_id = get_hard_smp_processor_id(cpu);
lv1_get_logical_ppe_id(&ppe_id);
result = lv1_configure_irq_state_bitmap(ppe_id, thread_id, 0);
DBG("%s:%d: lv1_configure_irq_state_bitmap (%llu:%llu/%d) %s\n", __func__,
__LINE__, ppe_id, thread_id, cpu, ps3_result(result));
}
| gpl-2.0 |